From: <tob...@us...> - 2014-04-11 18:53:21

Revision: 8109
          http://sourceforge.net/p/bigdata/code/8109
Author:   tobycraig
Date:     2014-04-11 18:53:18 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Removed RDF types from index.html, dynamically generating list instead, and fixed export filename extensions

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/index.html
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/index.html
===================================================================
--- branches/RDR/bigdata-war/src/html/index.html	2014-04-11 18:18:38 UTC (rev 8108)
+++ branches/RDR/bigdata-war/src/html/index.html	2014-04-11 18:53:18 UTC (rev 8109)
@@ -118,13 +118,6 @@
 
   <div id="query-export" class="modal">
     <select>
-      <option value="application/rdf+xml">RDF/XML</option>
-      <option value="application/x-turtle">N-Triples</option>
-      <option value="application/x-turtle">Turtle</option>
-      <option value="text/rdf+n3">N3</option>
-      <option value="application/trix">TriX</option>
-      <option value="application/x-trig">TRIG</option>
-      <option value="text/x-nquads">NQUADS</option>
     </select>
     <button id="query-download-rdf">Export</button>
     <button class="modal-cancel">Cancel</button>

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-11 18:18:38 UTC (rev 8108)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-11 18:53:18 UTC (rev 8109)
@@ -483,6 +483,20 @@
 $('#query-export-json').click(exportJSON);
 $('#query-export-xml').click(exportXML);
 
+var rdf_extensions = {
+   "application/rdf+xml": ['RDF/XML', 'rdf'],
+   "application/x-turtle": ['N-Triples', 'nt'],
+   "application/x-turtle": ['Turtle', 'ttl'],
+   "text/rdf+n3": ['N3', 'n3'],
+   "application/trix": ['TriX', 'trix'],
+   "application/x-trig": ['TRIG', 'trig'],
+   "text/x-nquads": ['NQUADS', 'nq']
+};
+
+for(var contentType in rdf_extensions) {
+   $('#query-export select').append('<option value="' + contentType + '">' + rdf_extensions[contentType][0] + '</option>');
+}
+
 $('#query-download-rdf').click(function() {
    var dataType = $(this).siblings('select').val();
    var settings = {
@@ -490,16 +504,16 @@
       data: JSON.stringify(QUERY_RESULTS),
       contentType: 'application/sparql-results+json',
       headers: { 'Accept': dataType },
-      success: downloadRDFSuccess,
+      success: function(data) { downloadRDFSuccess(data, dataType); },
      error: downloadRDFError
    };
    $.ajax('/bigdata/sparql?workbench&convert', settings);
    $(this).siblings('.modal-cancel').click();
 });
 
-function downloadRDFSuccess(data) {
-   console.log(data);
-   downloadFile(data, 'text/plain', 'export');
+function downloadRDFSuccess(data, dataType) {
+   var filename = 'export.' + rdf_extensions[dataType][1];
+   downloadFile(data, dataType, filename);
 }
 
 function downloadRDFError(jqXHR, textStatus, errorThrown) {
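A note on the rdf_extensions table in r8109 above: the key "application/x-turtle" appears twice (once for N-Triples, once for Turtle). A JavaScript object literal keeps only the last entry for a duplicate key, so the N-Triples mapping is silently dropped and the generated <select> offers one fewer format than the seven listed. A minimal sketch of one way around this, keying the table on the unique format label instead of the MIME type, follows; the rdfFormats name and the text/plain MIME type for N-Triples (taken from the workbench's own load table) are illustrative assumptions, not part of the commit.

    // Sketch only: avoids the duplicate-key collision by making each
    // format one array entry rather than one object key.
    var rdfFormats = [
       // [label, MIME type for the Accept header, file extension]
       ['RDF/XML',   'application/rdf+xml',  'rdf'],
       ['N-Triples', 'text/plain',           'nt'],   // assumed MIME type
       ['Turtle',    'application/x-turtle', 'ttl'],
       ['N3',        'text/rdf+n3',          'n3'],
       ['TriX',      'application/trix',     'trix'],
       ['TRIG',      'application/x-trig',   'trig'],
       ['NQUADS',    'text/x-nquads',        'nq']
    ];

    // One <option> per entry, so no format can be shadowed or lost.
    for(var i = 0; i < rdfFormats.length; i++) {
       $('#query-export select').append(
          '<option value="' + rdfFormats[i][1] + '">' + rdfFormats[i][0] + '</option>');
    }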
From: <tob...@us...> - 2014-04-11 18:18:42

Revision: 8108
          http://sourceforge.net/p/bigdata/code/8108
Author:   tobycraig
Date:     2014-04-11 18:18:38 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Fix panel not displaying initially in Firefox (and now also Chrome)

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-11 14:23:47 UTC (rev 8107)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-11 18:18:38 UTC (rev 8108)
@@ -868,7 +868,10 @@
 }
 
 // handle history buttons and initial display of first tab
-window.addEventListener("popstate", function(e) {
+window.addEventListener("popstate", handlePopState);
+$(handlePopState);
+
+function handlePopState() {
    var hash = parseHash(this.location.hash);
    if(!hash) {
       $('#tab-selector a:first').click();
@@ -879,7 +882,7 @@
       $('a[data-target=' + hash[1] + ']').click();
    }
 }
-});
+}
 
 function updateExploreError(jqXHR, textStatus, errorThrown) {
    $('#explore-tab .bottom').show();
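The pattern in r8108 is worth spelling out: the same function is registered both as the popstate listener and as a jQuery ready callback, so the tab routing runs once on initial load as well as on every back/forward navigation. It works because `this.location` resolves to window.location inside the event handler and to document.location inside the ready callback, and both name the same Location object. A standalone sketch of the pattern, with a hypothetical selectTab() helper and explicit window.location to sidestep the `this` subtlety:

    // Minimal sketch of the r8108 routing pattern. selectTab() and the
    // tab markup are illustrative assumptions, not the workbench's code.
    function selectTab(name) {
       $('.tab').hide();
       $('#' + name + '-tab').show();
    }

    function route() {
       var hash = window.location.hash;
       if (!hash) {
          selectTab('default');        // no state in URL: show first tab
       } else {
          selectTab(hash.substring(1));
       }
    }

    window.addEventListener('popstate', route); // history navigation
    $(route);                                   // initial display on DOM ready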
From: <tho...@us...> - 2014-04-11 12:24:57

Revision: 8106
          http://sourceforge.net/p/bigdata/code/8106
Author:   thompsonbry
Date:     2014-04-11 12:24:53 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Removing test cases that did not actually compare against ground truth. Both of these tickets have test cases against ground truth in TestUnions of the data driven AST evaluation test suite.

See #831
See #874

Removed Paths:
-------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java

Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java	2014-04-11 12:13:24 UTC (rev 8105)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket831.java	2014-04-11 12:24:53 UTC (rev 8106)
@@ -1,177 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2011.  All rights reserved.
-
-Contact:
-    SYSTAP, LLC
-    4501 Tower Road
-    Greensboro, NC 27410
-    lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-package com.bigdata.rdf.sail;
-
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.Properties;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Logger;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.QueryLanguage;
-import org.openrdf.query.TupleQueryResult;
-import org.openrdf.query.impl.BindingImpl;
-import org.openrdf.repository.RepositoryConnection;
-import org.openrdf.repository.sail.SailTupleQuery;
-import org.openrdf.rio.RDFFormat;
-
-import com.bigdata.rdf.axioms.NoAxioms;
-import com.bigdata.rdf.vocab.NoVocabulary;
-
-/**
- * Unit test template for use in submission of bugs.
- * <p>
- * This test case will delegate to an underlying backing store. You can
- * specify this store via a JVM property as follows:
- * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code>
- * <p>
- * There are three possible configurations for the testClass:
- * <ul>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li>
- * </ul>
- * <p>
- * The default for triples and SIDs mode is for inference with truth maintenance
- * to be on. If you would like to turn off inference, make sure to do so in
- * {@link #getProperties()}.
- *
- * @author <a href="mailto:mrp...@us...">Mike Personick</a>
- * @version $Id$
- */
-public class TestTicket831 extends ProxyBigdataSailTestCase {
-
-    protected static final Logger log = Logger.getLogger(TestTicket831.class);
-
-    /**
-     * Please set your database properties here, except for your journal file,
-     * please DO NOT SPECIFY A JOURNAL FILE.
-     */
-    @Override
-    public Properties getProperties() {
-
-        Properties props = super.getProperties();
-
-        /*
-         * For example, here is a set of five properties that turns off
-         * inference, truth maintenance, and the free text index.
-         */
-        props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
-        props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
-        props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
-        props.setProperty(BigdataSail.Options.JUSTIFY, "false");
-        props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
-
-        return props;
-
-    }
-
-    public TestTicket831() {
-    }
-
-    public TestTicket831(String arg0) {
-        super(arg0);
-    }
-
-    public void testBug1() throws Exception {
-
-        /*
-         * The bigdata store, backed by a temporary journal file.
-         */
-        final BigdataSail bigdataSail = getSail();
-
-        /*
-         * Data file containing the data demonstrating your bug.
-         */
-        final String data = "831.ttl";
-        final String baseURI = "";
-        final RDFFormat format = RDFFormat.TURTLE;
-
-        try {
-
-            bigdataSail.initialize();
-
-            final BigdataSailRepository bigdataRepo = new BigdataSailRepository(bigdataSail);
-
-            { // load the data into the bigdata store
-
-                final RepositoryConnection cxn = bigdataRepo.getConnection();
-                try {
-                    cxn.setAutoCommit(false);
-                    cxn.add(getClass().getResourceAsStream(data), baseURI, format);
-//                    cxn.add(data);
-                    cxn.commit();
-                } finally {
-                    cxn.close();
-                }
-
-            }
-
-            {
-//                final Collection<BindingSet> answer = new LinkedList<BindingSet>();
-//                answer.add(createBindingSet(
-//                        new BindingImpl("sub", new URIImpl("http://example.org/B"))
-//                        ));
-
-                final String query = IOUtils.toString(getClass().getResourceAsStream("831.rq"));
-
-                if (log.isInfoEnabled()) {
-                    log.info("running query:\n" + query);
-                }
-
-                /*
-                 * Run the problem query using the bigdata store and then compare
-                 * the answer.
-                 */
-                final RepositoryConnection cxn = bigdataRepo.getReadOnlyConnection();
-                try {
-
-                    final SailTupleQuery tupleQuery = (SailTupleQuery)
-                        cxn.prepareTupleQuery(QueryLanguage.SPARQL, query);
-                    tupleQuery.setIncludeInferred(false /* includeInferred */);
-
-                    final TupleQueryResult result = tupleQuery.evaluate();
-//                    compare(result, answer);
-
-                    while (result.hasNext()) {
-                        log.info(result.next());
-                    }
-
-                } finally {
-                    cxn.close();
-                }
-
-            }
-
-        } finally {
-
-            bigdataSail.__tearDownUnitTest();
-
-        }
-
-    }
-
-}

Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java	2014-04-11 12:13:24 UTC (rev 8105)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket874.java	2014-04-11 12:24:53 UTC (rev 8106)
@@ -1,177 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2011.  All rights reserved.
-
-Contact:
-    SYSTAP, LLC
-    4501 Tower Road
-    Greensboro, NC 27410
-    lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-package com.bigdata.rdf.sail;
-
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.Properties;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Logger;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.query.BindingSet;
-import org.openrdf.query.QueryLanguage;
-import org.openrdf.query.TupleQueryResult;
-import org.openrdf.query.impl.BindingImpl;
-import org.openrdf.repository.RepositoryConnection;
-import org.openrdf.repository.sail.SailTupleQuery;
-import org.openrdf.rio.RDFFormat;
-
-import com.bigdata.rdf.axioms.NoAxioms;
-import com.bigdata.rdf.vocab.NoVocabulary;
-
-/**
- * Unit test template for use in submission of bugs.
- * <p>
- * This test case will delegate to an underlying backing store. You can
- * specify this store via a JVM property as follows:
- * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code>
- * <p>
- * There are three possible configurations for the testClass:
- * <ul>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li>
- * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li>
- * </ul>
- * <p>
- * The default for triples and SIDs mode is for inference with truth maintenance
- * to be on. If you would like to turn off inference, make sure to do so in
- * {@link #getProperties()}.
- *
- * @author <a href="mailto:mrp...@us...">Mike Personick</a>
- * @version $Id$
- */
-public class TestTicket874 extends ProxyBigdataSailTestCase {
-
-    protected static final Logger log = Logger.getLogger(TestTicket874.class);
-
-    /**
-     * Please set your database properties here, except for your journal file,
-     * please DO NOT SPECIFY A JOURNAL FILE.
-     */
-    @Override
-    public Properties getProperties() {
-
-        Properties props = super.getProperties();
-
-        /*
-         * For example, here is a set of five properties that turns off
-         * inference, truth maintenance, and the free text index.
-         */
-        props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
-        props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
-        props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
-        props.setProperty(BigdataSail.Options.JUSTIFY, "false");
-        props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
-
-        return props;
-
-    }
-
-    public TestTicket874() {
-    }
-
-    public TestTicket874(String arg0) {
-        super(arg0);
-    }
-
-    public void testBug1() throws Exception {
-
-        /*
-         * The bigdata store, backed by a temporary journal file.
-         */
-        final BigdataSail bigdataSail = getSail();
-
-        /*
-         * Data file containing the data demonstrating your bug.
-         */
-        final String data = "874.ttl";
-        final String baseURI = "";
-        final RDFFormat format = RDFFormat.TURTLE;
-
-        try {
-
-            bigdataSail.initialize();
-
-            final BigdataSailRepository bigdataRepo = new BigdataSailRepository(bigdataSail);
-
-            { // load the data into the bigdata store
-
-                final RepositoryConnection cxn = bigdataRepo.getConnection();
-                try {
-                    cxn.setAutoCommit(false);
-                    cxn.add(getClass().getResourceAsStream(data), baseURI, format);
-//                    cxn.add(data);
-                    cxn.commit();
-                } finally {
-                    cxn.close();
-                }
-
-            }
-
-            {
-//                final Collection<BindingSet> answer = new LinkedList<BindingSet>();
-//                answer.add(createBindingSet(
-//                        new BindingImpl("sub", new URIImpl("http://example.org/B"))
-//                        ));
-
-                final String query = IOUtils.toString(getClass().getResourceAsStream("874.rq"));
-
-                if (log.isInfoEnabled()) {
-                    log.info("running query:\n" + query);
-                }
-
-                /*
-                 * Run the problem query using the bigdata store and then compare
-                 * the answer.
-                 */
-                final RepositoryConnection cxn = bigdataRepo.getReadOnlyConnection();
-                try {
-
-                    final SailTupleQuery tupleQuery = (SailTupleQuery)
-                        cxn.prepareTupleQuery(QueryLanguage.SPARQL, query);
-                    tupleQuery.setIncludeInferred(false /* includeInferred */);
-
-                    final TupleQueryResult result = tupleQuery.evaluate();
-//                    compare(result, answer);
-
-                    while (result.hasNext()) {
-                        log.info(result.next());
-                    }
-
-                } finally {
-                    cxn.close();
-                }
-
-            }
-
-        } finally {
-
-            bigdataSail.__tearDownUnitTest();
-
-        }
-
-    }
-
-}
From: <tho...@us...> - 2014-04-11 12:13:28

Revision: 8105
          http://sourceforge.net/p/bigdata/code/8105
Author:   thompsonbry
Date:     2014-04-11 12:13:24 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Reconciled Martyn's edits and my own on the HA1/HA5 branch prior to merge in of delta from the main branch.

Modified Paths:
--------------
    branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
    branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
    branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
    branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java	2014-04-11 11:43:06 UTC (rev 8104)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java	2014-04-11 12:13:24 UTC (rev 8105)
@@ -43,6 +43,7 @@
 import com.bigdata.ha.msg.HARootBlockRequest;
 import com.bigdata.journal.CommitCounterUtility;
 import com.bigdata.journal.IHABufferStrategy;
+import com.bigdata.journal.IRootBlockView;
 import com.bigdata.journal.Journal;
 import com.bigdata.rdf.sail.webapp.client.ConnectOptions;
 import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
@@ -146,10 +147,12 @@
          * The current commit counter on the server. This is the commit point
          * that should be restored.
          */
-        final long commitCounterM = serverA
-                .getRootBlock(new HARootBlockRequest(null/* storeUUID */))
-                .getRootBlock().getCommitCounter();
+
+        final IRootBlockView serverARootBlock = serverA.getRootBlock(
+                new HARootBlockRequest(null/* storeUUID */)).getRootBlock();
 
+        final long commitCounterM = serverARootBlock.getCommitCounter();
+
         final File snapshotFile = SnapshotManager.getSnapshotFile(
                 getSnapshotDirA(), commitCounterN);
 
@@ -170,40 +173,26 @@
         {
             final Properties p = new Properties();
 
-            final File aout = out.getAbsoluteFile();
-            // log.warn(aout.toString() + " modified: " + aout.lastModified());
-
-            p.setProperty(Journal.Options.FILE, aout.toString());
-
-            Journal jnl = new Journal(p);
+            p.setProperty(Journal.Options.FILE, out.getAbsoluteFile()
+                    .toString());
+
+            Journal jnl = new Journal(p);
+
             try {
 
                 // Verify snapshot at the expected commit point.
                 assertEquals(commitCounterN, jnl.getRootBlockView()
                         .getCommitCounter());
 
-//                {
-//                    final MessageDigest digest = MessageDigest
-//                            .getInstance("MD5");
-//
-//                    // digest of restored journal.
-//                    ((IHABufferStrategy) (jnl.getBufferStrategy()))
-//                            .computeDigest(null/* snapshot */, digest);
-//
-//                    final byte[] digest2 = digest.digest();
-//
-//                    System.err.println("Pre-restore: " + BytesUtil.toHexString(digest2));
-//                }
 
                 // Verify journal can be dumped without error.
                 dumpJournal(jnl);
-
+
                 /*
                  * Now roll that journal forward using the HALog directory.
                  */
                 final HARestore rest = new HARestore(jnl, getHALogDirA());
 
-                // System.err.println("Prior: " + jnl.getRootBlockView().toString());
                 /*
                  * Note: We can not test where we stop at the specified
                  * commit point in this method because the Journal state on
@@ -212,7 +201,6 @@
                  */
                 rest.restore(false/* listCommitPoints */,
                         Long.MAX_VALUE/* haltingCommitCounter */);
 
-                // System.err.println("Post: " + jnl.getRootBlockView().toString());
                 /*
                  * FIXME For some reason, we need to close and reopen the
                  * journal before it can be used. See HARestore.
@@ -224,12 +212,18 @@
                     jnl = new Journal(p);
                 }
 
-                // System.err.println("Post reopen: " + jnl.getRootBlockView().toString());
+                // Verify can dump journal after restore.
+                dumpJournal(jnl);
 
-                // Verify journal now at the expected commit point.
+                // Verify journal now at the expected commit point.
                 assertEquals(commitCounterM, jnl.getRootBlockView()
                         .getCommitCounter());
 
+                if (!serverARootBlock.equals(jnl.getRootBlockView())) {
+                    fail("Root blocks differ: serverA=" + serverARootBlock
+                            + ", restored=" + jnl.getRootBlockView());
+                }
+
                 /*
                  * Compute digest of the restored journal. The digest should
                  * agree with the digest of the Journal on A since we rolled
@@ -242,14 +236,17 @@
                         new HADigestRequest(null/* storeUUID */))
                         .getDigest();
 
-                final MessageDigest digest = MessageDigest
-                        .getInstance("MD5");
+                final byte[] digest2;
+                {
+                    final MessageDigest digest = MessageDigest
+                            .getInstance("MD5");
 
-                // digest of restored journal.
-                ((IHABufferStrategy) (jnl.getBufferStrategy()))
-                        .computeDigest(null/* snapshot */, digest);
+                    // digest of restored journal.
+                    ((IHABufferStrategy) (jnl.getBufferStrategy()))
+                            .computeDigest(null/* snapshot */, digest);
 
-                final byte[] digest2 = digest.digest();
+                    digest2 = digest.digest();
+                }
 
                 if (!BytesUtil.bytesEqual(digestA, digest2)) {
 
@@ -259,19 +256,13 @@
                     final String digest2Str = new BigInteger(1, digest2)
                             .toString(16);
 
-                    System.err.println("Original: " + serverA.getRootBlock(new HARootBlockRequest(null)).getRootBlock().toString());
-                    System.err.println("Restored: " + jnl.getRootBlockView().toString());
-
                     fail("Digests differ after restore and replay: expected="
                             + digestAStr + ", actual=" + digest2Str);
-
+
                 }
 
             }
 
-            // Verify can dump journal after restore.
-            dumpJournal(jnl);
-
         } finally {
 
             if (jnl != null) {

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java	2014-04-11 11:43:06 UTC (rev 8104)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java	2014-04-11 12:13:24 UTC (rev 8105)
@@ -227,7 +227,7 @@
     /**
      * {@link UUID}s for the {@link HAJournalServer}s.
      */
-    protected UUID serverAId = UUID.randomUUID();
+    private UUID serverAId = UUID.randomUUID();
 
     private UUID serverBId = UUID.randomUUID();

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java	2014-04-11 11:43:06 UTC (rev 8104)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java	2014-04-11 12:13:24 UTC (rev 8105)
@@ -970,7 +970,7 @@
      * Verify the the digest of the journal is equal to the digest of the
      * indicated snapshot on the specified service.
      * <p>
-     * Note: This can only succeed if the journal is at the specififed commit
+     * Note: This can only succeed if the journal is at the specified commit
      * point. If there are concurrent writes on the journal, then it's digest
      * will no longer be consistent with the snapshot.
      *

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java	2014-04-11 11:43:06 UTC (rev 8104)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java	2014-04-11 12:13:24 UTC (rev 8105)
@@ -17,6 +17,10 @@
 import com.bigdata.journal.Journal;
 import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
 
+/**
+ * Test suite for the restore of the HA1 Journal from a snapshot and transaction
+ * logs.
+ */
 public class TestHA1SnapshotPolicy extends AbstractHA3BackupTestCase {
 
     public TestHA1SnapshotPolicy() {
@@ -437,8 +441,8 @@
      */
     public void testA_snapshot_multipleTx_restore_validate() throws Exception {
 
-        final int N1 = 7; //7; // #of transactions to run before the snapshot.
-        final int N2 = 8; //8; // #of transactions to run after the snapshot.
+        final int N1 = 7; // #of transactions to run before the snapshot.
+        final int N2 = 8; // #of transactions to run after the snapshot.
 
         // Start service.
         final HAGlue serverA = startA();
@@ -458,13 +462,13 @@
 
         // Now run N transactions.
         for (int i = 0; i < N1; i++) {
+
+            simpleTransaction();
 
-            simpleTransaction();
-
         }
 
-        final long commitCounterN1 = N1 + 1;
+        final long commitCounterN1 = N1 + 1;
 
         awaitCommitCounter(commitCounterN1, serverA);
 
         /*
@@ -477,7 +481,7 @@
 
         // Snapshot directory is empty.
         assertEquals(1, recursiveCount(getSnapshotDirA(), SnapshotManager.SNAPSHOT_FILTER));
-
+
         // request snapshot on A.
         final Future<IHASnapshotResponse> ft = serverA
                .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */));
@@ -503,6 +507,19 @@
 
         }
 
+        {
+            // Snapshot directory contains just the expected snapshot
+            assertExpectedSnapshots(getSnapshotDirA(),
+                    new long[] { commitCounterN1 });
+
+            /*
+             * Now, get the snapshot that we took above, decompress it, and then
+             * roll it forward and verify it against the current committed
+             * journal.
+             */
+            doRestoreA(serverA, commitCounterN1);
+        }
+
         // Now run M transactions.
         for (int i = 0; i < N2; i++) {
 
@@ -514,7 +531,6 @@
 
         awaitCommitCounter(commitCounterN2, serverA);
 
-        // Snapshot directory contains just the expected snapshot
         assertExpectedSnapshots(getSnapshotDirA(), new long[]{commitCounterN1});
From: <tho...@us...> - 2014-04-11 11:43:09

Revision: 8104
          http://sourceforge.net/p/bigdata/code/8104
Author:   thompsonbry
Date:     2014-04-11 11:43:06 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Modified to trim whitespace iff the hostname override is non-null.

Modified Paths:
--------------
    branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java

Modified: branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-11 11:42:00 UTC (rev 8103)
+++ branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-11 11:43:06 UTC (rev 8104)
@@ -92,7 +92,11 @@
     static {
 
         String s = System.getProperty(BigdataStatics.HOSTNAME);
-        if (s != null && s.trim().length() != 0) {
+        if (s != null) {
+            // Trim whitespace.
+            s = s.trim();
+        }
+        if (s != null && s.length() != 0) {
             log.warn("Hostname override: hostname=" + s);
         } else {
             try {
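This commit and the two below it (r8103, r8102) converge on one pattern for the hostname override: normalize the optional value first, then validate it, and only fall back to platform lookup when the normalized value is empty or absent. A JavaScript transliteration of that Java logic, as a minimal sketch; the function and parameter names here are hypothetical stand-ins, not bigdata APIs.

    // Sketch of the normalize-then-validate pattern from r8102..r8104.
    function resolveHostname(override, lookupHostname) {
       var s = override;
       if (s !== null && s !== undefined) {
          s = s.trim(); // r8104: trim only when an override was supplied
       }
       if (s !== null && s !== undefined && s.length !== 0) {
          // Non-empty after trimming: honor the override.
          console.warn('Hostname override: hostname=' + s);
          return s;
       }
       // r8102 fallback: ask the platform for the canonical host name.
       return lookupHostname();
    }

    // Example: resolveHostname('  myhost  ', function() { return 'localhost'; })
    // returns 'myhost'; an all-whitespace override falls through to the lookup.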
From: <tho...@us...> - 2014-04-11 11:42:03

Revision: 8103
          http://sourceforge.net/p/bigdata/code/8103
Author:   thompsonbry
Date:     2014-04-11 11:42:00 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Added condition that the specified hostname must be non-empty after trimming whitespace.

Modified Paths:
--------------
    branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java

Modified: branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-11 11:40:12 UTC (rev 8102)
+++ branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-11 11:42:00 UTC (rev 8103)
@@ -92,7 +92,7 @@
     static {
 
         String s = System.getProperty(BigdataStatics.HOSTNAME);
-        if (s != null) {
+        if (s != null && s.trim().length() != 0) {
             log.warn("Hostname override: hostname=" + s);
         } else {
             try {
From: <tho...@us...> - 2014-04-11 11:40:16

Revision: 8102
          http://sourceforge.net/p/bigdata/code/8102
Author:   thompsonbry
Date:     2014-04-11 11:40:12 +0000 (Fri, 11 Apr 2014)

Log Message:
-----------
Bug fix for deployments with bad reverse DNS. #886 (Provide workaround for bad reverse DNS setups)

Modified Paths:
--------------
    branches/RDR/bigdata/src/java/com/bigdata/BigdataStatics.java
    branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java

Modified: branches/RDR/bigdata/src/java/com/bigdata/BigdataStatics.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/BigdataStatics.java	2014-04-10 22:36:29 UTC (rev 8101)
+++ branches/RDR/bigdata/src/java/com/bigdata/BigdataStatics.java	2014-04-11 11:40:12 UTC (rev 8102)
@@ -27,6 +27,7 @@
 
 package com.bigdata;
 
+import com.bigdata.counters.AbstractStatisticsCollector;
 import com.bigdata.jini.start.process.ProcessHelper;
 
 /**
@@ -46,6 +47,20 @@
     public static final boolean debug = Boolean.getBoolean("com.bigdata.debug");
 
     /**
+     * The name of an environment variable whose value will be used as the
+     * canoncial host name for the host running this JVM. This information is
+     * used by the {@link AbstractStatisticsCollector}, which is responsible for
+     * obtaining and reporting the canonical hostname for the {@link Banner} and
+     * other purposes.
+     *
+     * @see AbstractStatisticsCollector
+     * @see Banner
+     * @see <a href="http://trac.bigdata.com/ticket/886" >Provide workaround for
+     *      bad reverse DNS setups</a>
+     */
+    public static final String HOSTNAME = "com.bigdata.hostname";
+
+    /**
      * The #of lines of output from a child process which will be echoed onto
     * {@link System#out} when that child process is executed. This makes it
     * easy to track down why a child process dies during service start. If you

Modified: branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-10 22:36:29 UTC (rev 8101)
+++ branches/RDR/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2014-04-11 11:40:12 UTC (rev 8102)
@@ -44,6 +44,7 @@
 import org.apache.system.SystemUtil;
 
 import com.bigdata.Banner;
+import com.bigdata.BigdataStatics;
 import com.bigdata.LRUNexus;
 import com.bigdata.counters.httpd.CounterSetHTTPD;
 import com.bigdata.counters.linux.StatisticsCollectorForLinux;
@@ -82,9 +83,18 @@
     /** The path prefix under which all counters for this host are found. */
     static final public String hostPathPrefix;
 
+    /**
+     * This static code block is responsible obtaining the canonical hostname.
+     *
+     * @see <a href="http://trac.bigdata.com/ticket/886" >Provide workaround for
+     *      bad reverse DNS setups</a>
+     */
     static {
 
-        String s;
+        String s = System.getProperty(BigdataStatics.HOSTNAME);
 
+        if (s != null) {
+            log.warn("Hostname override: hostname=" + s);
+        } else {
         try {
             /*
              * Note: This should be the host *name* NOT an IP address of a
@@ -98,6 +108,7 @@
             s = NicUtil.getIpAddressByLocalHost();
             log.warn("Falling back to " + s);
         }
+        }
 
         fullyQualifiedHostName = s;
From: <tob...@us...> - 2014-04-10 22:36:32

Revision: 8101
          http://sourceforge.net/p/bigdata/code/8101
Author:   tobycraig
Date:     2014-04-10 22:36:29 +0000 (Thu, 10 Apr 2014)

Log Message:
-----------
Export RDF now works for CONSTRUCT queries, as long as they have no values for c. If they do, the export RDF button will not be shown

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-10 21:37:38 UTC (rev 8100)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-10 22:36:29 UTC (rev 8101)
@@ -607,6 +607,7 @@
    }
    var thead = $('<thead>').appendTo(table);
    var vars = [];
+   var varsUsed = {}
    var tr = $('<tr>');
    for(var i=0; i<data.head.vars.length; i++) {
       tr.append('<td>' + data.head.vars[i] + '</td>');
@@ -618,6 +619,7 @@
       var tr = $('<tr>');
       for(var j=0; j<vars.length; j++) {
          if(vars[j] in data.results.bindings[i]) {
+            varsUsed[vars[j]] = true;
            var binding = data.results.bindings[i][vars[j]];
            if(binding.type == 'sid') {
               var text = getSID(binding);
@@ -649,10 +651,13 @@
    }
 
    // see if we have RDF data
-   if(vars.length == 3) {
-      if(vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') {
-         $('#query-export-rdf').show();
+   if((vars.length == 3 && vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') ||
+      (vars.length == 4 && vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') && vars[3] == 'c' && !('c' in varsUsed)) {
+      if(vars.length == 4) {
+         // remove (unused) c variable from JSON
+         QUERY_RESULTS.head.vars.pop()
       }
+      $('#query-export-rdf').show();
    }
 
    $('#query-response a').click(function(e) {
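The condition added in r8101 reads oddly because one parenthesis closes before the `vars[3] == 'c'` test, but since `&&` binds tighter than `||` in JavaScript it still evaluates as intended: plain s/p/o results, or s/p/o/c results where c is never bound in any row. A sketch of the same test factored into a named helper (the helper name is hypothetical):

    // Sketch: the r8101 RDF-detection test as a pure function.
    // `vars` is the list of result variables; `varsUsed` records which
    // variables were bound in at least one result row.
    function looksLikeRDF(vars, varsUsed) {
       var spo = vars[0] === 's' && vars[1] === 'p' && vars[2] === 'o';
       if (vars.length === 3 && spo) {
          return true; // triples
       }
       // quads whose context column is present but never bound
       return vars.length === 4 && spo && vars[3] === 'c' && !('c' in varsUsed);
    }

    // Examples: looksLikeRDF(['s','p','o','c'], {})        -> true
    //           looksLikeRDF(['s','p','o','c'], {c: true}) -> false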
From: <tob...@us...> - 2014-04-10 21:37:43

Revision: 8100
          http://sourceforge.net/p/bigdata/code/8100
Author:   tobycraig
Date:     2014-04-10 21:37:38 +0000 (Thu, 10 Apr 2014)

Log Message:
-----------
Initial go at RDF export

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/css/style.css
    branches/RDR/bigdata-war/src/html/index.html
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/css/style.css
===================================================================
--- branches/RDR/bigdata-war/src/html/css/style.css	2014-04-10 00:59:10 UTC (rev 8099)
+++ branches/RDR/bigdata-war/src/html/css/style.css	2014-04-10 21:37:38 UTC (rev 8100)
@@ -121,6 +121,38 @@
    border-bottom: 1px solid;
 }
 
+.modal {
+   display: none;
+   z-index: 1;
+   position: fixed;
+   top: 0;
+   left: 0;
+   width: 50%;
+   margin-top: 100px;
+   margin-left: 25%;
+   background-color: white;
+   padding: 20px;
+}
+
+#overlay {
+   display: none;
+   position: fixed;
+   top: 0;
+   left: 0;
+   width: 100%;
+   height: 100%;
+   background-color: grey;
+   opacity: 0.5;
+}
+
+.modal-open #overlay {
+   display: initial;
+}
+
+.modal-open {
+   overflow: hidden;
+}
+
 .namespace-shortcuts {
    text-align: right;
 }

Modified: branches/RDR/bigdata-war/src/html/index.html
===================================================================
--- branches/RDR/bigdata-war/src/html/index.html	2014-04-10 00:59:10 UTC (rev 8099)
+++ branches/RDR/bigdata-war/src/html/index.html	2014-04-10 21:37:38 UTC (rev 8100)
@@ -107,6 +107,7 @@
       </div>
 
       <div class="bottom">
+         <button id="query-export-rdf">Export RDF</button>
         <button id="query-export-csv">Export CSV</button>
         <button id="query-export-json">Export JSON</button>
         <button id="query-export-xml">Export XML</button>
@@ -115,6 +116,20 @@
 
    </div>
 
+   <div id="query-export" class="modal">
+      <select>
+         <option value="application/rdf+xml">RDF/XML</option>
+         <option value="application/x-turtle">N-Triples</option>
+         <option value="application/x-turtle">Turtle</option>
+         <option value="text/rdf+n3">N3</option>
+         <option value="application/trix">TriX</option>
+         <option value="application/x-trig">TRIG</option>
+         <option value="text/x-nquads">NQUADS</option>
+      </select>
+      <button id="query-download-rdf">Export</button>
+      <button class="modal-cancel">Cancel</button>
+   </div>
+
    <div class="tab" id="explore-tab">
 
       <div class="box">
@@ -172,6 +187,8 @@
 
    </div>
 
+   <div id="overlay"></div>
+
    <!--[if IE]><script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]-->
    <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
    <script>window.jQuery || document.write('<script src="/bigdata/html/js/vendor/jquery.min.js"><\/script>')</script>

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-10 00:59:10 UTC (rev 8099)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-10 21:37:38 UTC (rev 8100)
@@ -3,6 +3,18 @@
 // global variables
 var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS;
 
+/* Modal functions */
+
+function showModal(id) {
+   $('#' + id).show();
+   $('body').addClass('modal-open');
+}
+
+$('.modal-cancel').click(function() {
+   $('body').removeClass('modal-open');
+   $(this).parents('.modal').hide();
+});
+
 /* Search */
 
 $('#search-form').submit(function(e) {
@@ -466,10 +478,34 @@
    $('#query-response, #query-explanation, #query-tab .bottom *').hide();
 });
 
+$('#query-export-rdf').click(function() { showModal('query-export'); });
 $('#query-export-csv').click(exportCSV);
 $('#query-export-json').click(exportJSON);
 $('#query-export-xml').click(exportXML);
 
+$('#query-download-rdf').click(function() {
+   var dataType = $(this).siblings('select').val();
+   var settings = {
+      type: 'POST',
+      data: JSON.stringify(QUERY_RESULTS),
+      contentType: 'application/sparql-results+json',
+      headers: { 'Accept': dataType },
+      success: downloadRDFSuccess,
+      error: downloadRDFError
+   };
+   $.ajax('/bigdata/sparql?workbench&convert', settings);
+   $(this).siblings('.modal-cancel').click();
+});
+
+function downloadRDFSuccess(data) {
+   console.log(data);
+   downloadFile(data, 'text/plain', 'export');
+}
+
+function downloadRDFError(jqXHR, textStatus, errorThrown) {
+   alert(errorThrown);
+}
+
 function exportXML() {
    var xml = '<?xml version="1.0"?>\n<sparql xmlns="http://www.w3.org/2005/sparql-results#">\n\t<head>\n';
    var bindings = [];
@@ -533,6 +569,7 @@
 
 function showQueryResults(data) {
    $('#query-response').empty();
+   $('#query-export-rdf').hide();
    $('#query-response, #query-tab .bottom *').show();
    var table = $('<table>').appendTo($('#query-response'));
    if(this.dataTypes[1] == 'xml') {
@@ -611,6 +648,13 @@
       table.append(tr);
    }
 
+   // see if we have RDF data
+   if(vars.length == 3) {
+      if(vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') {
+         $('#query-export-rdf').show();
+      }
+   }
+
    $('#query-response a').click(function(e) {
      e.preventDefault();
      explore(this.textContent);
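The modal introduced in r8100 is CSS-driven: the dialog and a full-screen overlay are both hidden by default, and toggling a single modal-open class on <body> reveals the overlay and suppresses page scrolling, while the dialog itself is shown directly. A stripped-down sketch of the same mechanism, assuming the commit's .modal / #overlay markup (the hideModal name is an illustrative addition):

    // Sketch of the r8100 modal mechanism.
    function showModal(id) {
       $('#' + id).show();               // reveal the dialog
       $('body').addClass('modal-open'); // CSS shows #overlay, locks scrolling
    }

    function hideModal(modal) {
       $('body').removeClass('modal-open');
       $(modal).hide();
    }

    // Wire up every cancel button once at startup.
    $('.modal-cancel').click(function() {
       hideModal($(this).parents('.modal'));
    });

Keeping the visibility rules in CSS means the script only flips classes; stacking, dimming, and scroll locking stay declarative and apply uniformly to any future .modal element.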
From: <tob...@us...> - 2014-04-09 21:29:49

Revision: 8098
          http://sourceforge.net/p/bigdata/code/8098
Author:   tobycraig
Date:     2014-04-09 21:29:45 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Fixed other MIME types for loading data

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 20:55:46 UTC (rev 8097)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 21:29:45 UTC (rev 8098)
@@ -339,14 +339,14 @@
 //                 'xml': 'trix',
                    'ttl': 'turtle'};
 
-var rdf_content_types = {'n-quads': 'application/n-quads',
+var rdf_content_types = {'n-quads': 'text/x-nquads',
                          'n-triples': 'text/plain',
-                         'n3': 'text/n3',
+                         'n3': 'text/rdf+n3',
                          'rdf/xml': 'application/rdf+xml',
                          'json': 'application/sparql-results+json',
                          'trig': 'application/x-trig',
                          'trix': 'application/trix',
-                         'turtle': 'text/turtle'};
+                         'turtle': 'application/x-turtle'};
 
 var sparql_update_commands = ['INSERT', 'DELETE'];
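r8098 aligns the upload content types with what the server-side parsers accept (for example, N-Quads as text/x-nquads rather than the registered application/n-quads). The workbench chains two tables, file extension to format name and format name to MIME type; a sketch of that lookup follows. Only the tail of the extension table is visible in the diff above, so the rdf_types entries here are a reconstruction and should be treated as assumptions, as is the helper name.

    // Sketch: resolving an upload's Content-Type from its file extension.
    // rdf_types is partially reconstructed; only 'xml' (commented out) and
    // 'ttl' are visible in the diff context above.
    var rdf_types = {'nq': 'n-quads',
                     'nt': 'n-triples',
                     'n3': 'n3',
                     'rdf': 'rdf/xml',
                     'trig': 'trig',
                     'trix': 'trix',
                     'ttl': 'turtle'};

    var rdf_content_types = {'n-quads': 'text/x-nquads',
                             'n-triples': 'text/plain',
                             'n3': 'text/rdf+n3',
                             'rdf/xml': 'application/rdf+xml',
                             'trig': 'application/x-trig',
                             'trix': 'application/trix',
                             'turtle': 'application/x-turtle'};

    function contentTypeForFile(filename) {
       var ext = filename.split('.').pop().toLowerCase();
       var type = rdf_types[ext];
       return type ? rdf_content_types[type] : null; // null: unknown format
    }

    // contentTypeForFile('data.ttl') -> 'application/x-turtle'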
From: <tob...@us...> - 2014-04-09 20:55:49

Revision: 8097
          http://sourceforge.net/p/bigdata/code/8097
Author:   tobycraig
Date:     2014-04-09 20:55:46 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Fixed error preventing trig data from being loaded

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 18:20:22 UTC (rev 8096)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 20:55:46 UTC (rev 8097)
@@ -344,7 +344,7 @@
                          'n3': 'text/n3',
                          'rdf/xml': 'application/rdf+xml',
                          'json': 'application/sparql-results+json',
-                         'trig': 'application/trig',
+                         'trig': 'application/x-trig',
                          'trix': 'application/trix',
                          'turtle': 'text/turtle'};
From: <tob...@us...> - 2014-04-09 18:20:25

Revision: 8096
          http://sourceforge.net/p/bigdata/code/8096
Author:   tobycraig
Date:     2014-04-09 18:20:22 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Provide feedback to user that data is loading after submitting data, removed race condition from query running message

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 18:15:21 UTC (rev 8095)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 18:20:22 UTC (rev 8096)
@@ -392,7 +392,10 @@
       break;
    }
 
-   $.ajax(NAMESPACE_URL, settings);
+   $('#load-response').show();
+   $('#load-response pre').html('Data loading...');
+
+   $.ajax(NAMESPACE_URL, settings);
 }
 
 $('#load-clear').click(function() {
@@ -439,10 +442,10 @@
       error: queryResultsError
    }
 
+   $('#query-response').show().html('Query running...');
+
    $.ajax(NAMESPACE_URL, settings);
 
-   $('#query-response').show().html('Query running...');
-
    $('#query-explanation').empty();
    if($('#query-explain').is(':checked')) {
       settings = {
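The ordering issue r8096 removes (introduced one commit earlier in r8094) is that the "Query running..." placeholder was written into #query-response after $.ajax() was issued, so a sufficiently fast callback path could render results that the placeholder then overwrote. Painting the placeholder before starting the request guarantees the callback always runs last. A sketch of the ordering, with hypothetical names:

    // Sketch of the r8096 rule: update the UI before issuing the request,
    // never after, so a completed response can never be clobbered.
    function runQuery(url, settings) {
       $('#query-response').show().html('Query running...'); // paint first
       $.ajax(url, settings);                                // then request
       // Nothing after this point should touch #query-response; only the
       // success/error handlers in `settings` may replace the placeholder.
    }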
From: <tho...@us...> - 2014-04-09 18:15:25

Revision: 8095
          http://sourceforge.net/p/bigdata/code/8095
Author:   thompsonbry
Date:     2014-04-09 18:15:21 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Bug fix for stochastic CI error:

java.lang.IllegalArgumentException: n must be positive
    at java.util.Random.nextInt(Random.java:250)
    at com.bigdata.btree.TestGetBitsFromByteArray.test_stress_InputBitStream_compatible(TestGetBitsFromByteArray.java:401)

Modified Paths:
--------------
    branches/RDR/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java

Modified: branches/RDR/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java
===================================================================
--- branches/RDR/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java	2014-04-09 18:06:11 UTC (rev 8094)
+++ branches/RDR/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java	2014-04-09 18:15:21 UTC (rev 8095)
@@ -386,7 +386,8 @@
         final long limit = 1000000;
 
         // Note: length is guaranteed to be LT int32 bits so [int] index is Ok.
-        final int len = r.nextInt(Bytes.kilobyte32 * 8) + 1;
+        // Note: + 4 since we will do [bitlen - 32] below. 4*8==32.
+        final int len = r.nextInt(Bytes.kilobyte32 * 8) + 4;
         final int bitlen = len << 3;
         // Fill array with random data.
         final byte[] b = new byte[len];
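Two constraints meet in this fix: java.util.Random.nextInt(n) requires a strictly positive bound, and the test later computes an offset of bitlen - 32, so the chosen length must be at least 4 bytes (4 * 8 == 32 bits) for that subtraction to stay non-negative. The stack trace's "n must be positive" actually came from a later nextInt call whose argument depended on this length. The same guard transliterated to JavaScript, as an illustrative sketch (names hypothetical):

    // Sketch of the r8095 fix: choose a random byte length that is always
    // at least 4, so a later 32-bit read (bitlen - 32) stays legal.
    var KILOBYTE = 1024;

    function randomTestLength() {
       // Math.floor(Math.random() * n) plays the role of Random.nextInt(n),
       // yielding 0..n-1; the + 4 enforces the minimum length.
       return Math.floor(Math.random() * (KILOBYTE * 8)) + 4;
    }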
From: <tob...@us...> - 2014-04-09 18:06:17

Revision: 8094
          http://sourceforge.net/p/bigdata/code/8094
Author:   tobycraig
Date:     2014-04-09 18:06:11 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Provide feedback to user that query is running after submitting a query

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 17:29:19 UTC (rev 8093)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-09 18:06:11 UTC (rev 8094)
@@ -441,6 +441,8 @@
 
    $.ajax(NAMESPACE_URL, settings);
 
+   $('#query-response').show().html('Query running...');
+
    $('#query-explanation').empty();
    if($('#query-explain').is(':checked')) {
       settings = {
From: <mrp...@us...> - 2014-04-09 17:29:23

Revision: 8093
          http://sourceforge.net/p/bigdata/code/8093
Author:   mrpersonick
Date:     2014-04-09 17:29:19 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
capture only shortest paths instead of all paths

Modified Paths:
--------------
    branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java

Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java
===================================================================
--- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java	2014-04-09 12:35:35 UTC (rev 8092)
+++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java	2014-04-09 17:29:19 UTC (rev 8093)
@@ -131,20 +131,22 @@
          * first visited the vertex (this helps to avoid multiple
          * scheduling of a vertex).
          */
-        public boolean visit(final int depth, final Value pred, final URI edge) {
+        public synchronized boolean visit(final int depth, final Value pred, final URI edge) {
+
+            boolean ret = false;
 
-            if (pred != null) {
+            if (this.depth.compareAndSet(-1/* expect */, depth/* newValue */)) {
+                // Scheduled by this thread.
+                ret = true;
+            }
+
+            if (pred != null && this.depth() > 0 && this.depth() == depth) {
 //                this.predecessors.add(pred);
                 addPredecessor(pred, edge);
             }
 
-            if (this.depth.compareAndSet(-1/* expect */, depth/* newValue */)) {
-                // Scheduled by this thread.
-                return true;
-            }
+            return ret;
 
-            return false;
-
         }
 
         @Override
@@ -281,6 +283,7 @@
 //                final VS otherState = state.getState(e.getObject()/* v */);
 
                 // visit.
+
                 if (otherState.visit(state.round() + 1, u/* predecessor */,
                        e.getPredicate())) {
 
                    /*
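The essence of r8093: a vertex's depth is fixed the first time it is reached, and a predecessor is recorded only when an arrival happens at exactly that first-discovered depth, so the stored predecessors describe shortest paths only; arrivals along longer paths are ignored. The Java version needs synchronized plus compareAndSet because vertices are visited concurrently. A single-threaded JavaScript sketch of the same rule, with hypothetical names and no concurrency control:

    // Sketch of the shortest-paths-only visit rule. `states` maps a vertex
    // id to { depth, predecessors }; returns true iff this call discovered
    // the vertex (i.e. it should be scheduled for the next round).
    function visit(states, v, depth, pred, edge) {
       var s = states[v];
       var scheduled = false;
       if (s === undefined) {
          // First discovery: fix the depth once and for all.
          s = states[v] = { depth: depth, predecessors: [] };
          scheduled = true;
       }
       // Record the predecessor only at the shortest known depth; the
       // depth > 0 test excludes the start vertex, which has no predecessor.
       if (pred !== null && s.depth > 0 && s.depth === depth) {
          s.predecessors.push({ pred: pred, edge: edge });
       }
       return scheduled;
    }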
From: <tho...@us...> - 2014-04-09 12:35:38

Revision: 8092
          http://sourceforge.net/p/bigdata/code/8092
Author:   thompsonbry
Date:     2014-04-09 12:35:35 +0000 (Wed, 09 Apr 2014)

Log Message:
-----------
Made field private, linked to HA1 ticket. @see #721 (HA1)

Modified Paths:
--------------
    branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java	2014-04-09 09:38:52 UTC (rev 8091)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java	2014-04-09 12:35:35 UTC (rev 8092)
@@ -137,8 +137,12 @@
 
     }
 
-    // Used to zero pad slots in buffered writes
-    final byte[] s_zeros = new byte[256];
+    /**
+     * Used to zero pad slots in buffered writes.
+     *
+     * @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a>
+     */
+    private final byte[] s_zeros = new byte[256];
 
     /**
      * Buffer a write.
From: <mar...@us...> - 2014-04-09 09:38:56
|
Revision: 8091 http://sourceforge.net/p/bigdata/code/8091 Author: martyncutcher Date: 2014-04-09 09:38:52 +0000 (Wed, 09 Apr 2014) Log Message: ----------- Add HA5 HALog tests for #722 Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java Added Paths: ----------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java Property Changed: ---------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/ Index: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha 2014-04-09 07:44:27 UTC (rev 8090) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha 2014-04-09 09:38:52 UTC (rev 8091) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha ___________________________________________________________________ Modified: svn:ignore ## -1,3 +1,4 ## log4j.properties logging.properties results.txt +TestRWStoreAddress.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-09 07:44:27 UTC (rev 8090) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-09 09:38:52 UTC (rev 8091) @@ -170,17 +170,30 @@ { final Properties p = new Properties(); + final File aout = out.getAbsoluteFile(); + // log.warn(aout.toString() + " modified: " + aout.lastModified()); + + p.setProperty(Journal.Options.FILE, aout.toString()); + + Journal jnl = new Journal(p); - p.setProperty(Journal.Options.FILE, out.getAbsoluteFile() - .toString()); - - Journal jnl = new Journal(p); - try { // Verify snapshot at the expected commit point. assertEquals(commitCounterN, jnl.getRootBlockView() .getCommitCounter()); +// { +// final MessageDigest digest = MessageDigest +// .getInstance("MD5"); +// +// // digest of restored journal. +// ((IHABufferStrategy) (jnl.getBufferStrategy())) +// .computeDigest(null/* snapshot */, digest); +// +// final byte[] digest2 = digest.digest(); +// +// System.err.println("Pre-restore: " + BytesUtil.toHexString(digest2)); +// } // Verify journal can be dumped without error. dumpJournal(jnl); @@ -190,6 +203,7 @@ */ final HARestore rest = new HARestore(jnl, getHALogDirA()); + // System.err.println("Prior: " + jnl.getRootBlockView().toString()); /* * Note: We can not test where we stop at the specified * commit point in this method because the Journal state on @@ -198,7 +212,21 @@ */ rest.restore(false/* listCommitPoints */, Long.MAX_VALUE/* haltingCommitCounter */); - // Verify journal now at the expected commit point. + // System.err.println("Post: " + jnl.getRootBlockView().toString()); + /* + * FIXME For some reason, we need to close and reopen the + * journal before it can be used. See HARestore. + */ + if (true) { + jnl.close(); + + // reopen. 
+ jnl = new Journal(p); + } + + // System.err.println("Post reopen: " + jnl.getRootBlockView().toString()); + + // Verify journal now at the expected commit point. assertEquals(commitCounterM, jnl.getRootBlockView() .getCommitCounter()); @@ -231,25 +259,17 @@ final String digest2Str = new BigInteger(1, digest2) .toString(16); + System.err.println("Original: " + serverA.getRootBlock(new HARootBlockRequest(null)).getRootBlock().toString()); + System.err.println("Restored: " + jnl.getRootBlockView().toString()); + fail("Digests differ after restore and replay: expected=" + digestAStr + ", actual=" + digest2Str); - + } } - /* - * FIXME For some reason, we need to close and reopen the - * journal before it can be used. See HARestore. - */ - if (true) { - jnl.close(); - - // reopen. - jnl = new Journal(p); - } - - // Verify can dump journal after restore. + // Verify can dump journal after restore. dumpJournal(jnl); } finally { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-09 07:44:27 UTC (rev 8090) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-09 09:38:52 UTC (rev 8091) @@ -227,9 +227,12 @@ /** * {@link UUID}s for the {@link HAJournalServer}s. */ - private UUID serverAId = UUID.randomUUID(), serverBId = UUID.randomUUID(), - serverCId = UUID.randomUUID(); + protected UUID serverAId = UUID.randomUUID(); + private UUID serverBId = UUID.randomUUID(); + + private UUID serverCId = UUID.randomUUID(); + /** * The HTTP ports at which the services will respond. * Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-09 07:44:27 UTC (rev 8090) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-09 09:38:52 UTC (rev 8091) @@ -740,12 +740,12 @@ serverA, serverB, serverC }); // D and E will have go through Rebuild before joining - assertEquals(token2, awaitFullyMetQuorum(10/* ticks */)); + assertEquals(token2, awaitFullyMetQuorum(20/* ticks */)); // Note: I have seen this timeout. This warrants exploring. BBT. // // Wait until C is fully ready. - assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverD, 2*awaitQuorumTimeout, TimeUnit.MILLISECONDS)); - assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverE, 2*awaitQuorumTimeout, TimeUnit.MILLISECONDS)); + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverD, 4*awaitQuorumTimeout, TimeUnit.MILLISECONDS)); + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverE, 4*awaitQuorumTimeout, TimeUnit.MILLISECONDS)); // Verify binary equality of ALL journals. 
assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE}); Added: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java (rev 0) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java 2014-04-09 09:38:52 UTC (rev 8091) @@ -0,0 +1,674 @@ +package com.bigdata.journal.jini.ha; + +import java.io.File; +import java.util.Calendar; + +import com.bigdata.ha.HAGlue; +import com.bigdata.ha.halog.HALogReader; +import com.bigdata.ha.halog.IHALogReader; +import com.bigdata.journal.CommitCounterUtility; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; + +import net.jini.config.Configuration; +import junit.framework.TestCase; + +public class TestHA5JournalServerWithHALogs extends AbstractHA5JournalServerTestCase { + + /** + * We need to set the time at which the {@link DefaultSnapshotPolicy} runs + * to some point in the future in order to avoid test failures due to + * violated assumptions when the policy runs up self-triggering (based on + * the specified run time) during a CI run. + * <p> + * We do this by adding one hour to [now] and then converting it into the + * 'hhmm' format as an integer. + * + * @return The "never run" time as hhmm. + */ + static protected String getNeverRunSnapshotTime() { + + // Right now. + final Calendar c = Calendar.getInstance(); + + // Plus an hour. + c.add(Calendar.HOUR_OF_DAY, 1); + + // Get the hour. + final int hh = c.get(Calendar.HOUR_OF_DAY); + + // And the minutes. + final int mm = c.get(Calendar.MINUTE); + + // Format as hhmm. + final String neverRun = "" + hh + (mm < 10 ? "0" : "") + mm; + + return neverRun; + + } + + /** + * {@inheritDoc} + * <p> + * Note: This overrides some {@link Configuration} values for the + * {@link HAJournalServer} in order to establish conditions suitable for + * testing the {@link ISnapshotPolicy} and {@link IRestorePolicy}. + */ + @Override + protected String[] getOverrides() { + + /* + * We need to set the time at which the DefaultSnapshotPolicy runs to + * some point in the Future in order to avoid test failures due to + * violated assumptions when the policy runs up self-triggering (based + * on the specified run time) during a CI run. + */ + final String neverRun = getNeverRunSnapshotTime(); + + return new String[]{ + "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy()", + "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=5", + }; + + } + + public TestHA5JournalServerWithHALogs() { + } + + public TestHA5JournalServerWithHALogs(String name) { + super(name); + } + + /** + * This is a unit test for the ability to silently remove a logically empty + * HALog file. Three services are started in sequence (A,B,C). A series of + * small commits are applied to the quorum. (C) is then shutdown. A + * logically empty HALog file should exist on each service for the next + * commit point. However, since this might have been removed on C when it + * was shutdown, we copy the logically empty HALog file from (A) to (C). We + * then do one more update. C is then restarted. 
We verify that C restarts
+ * and that the logically empty HALog file has been replaced by an HALog
+ * file that has the same digest as the HALog file for that commit point on
+ * (A,B).
+ * <p>
+ * Note: We can not reliably observe that the logically empty HALog file was
+ * removed during startup. However, this is not critical. What is critical
+ * is that the logically empty HALog file (a) does not prevent (C) from
+ * starting; (b) is replaced by the correct HALog data from the quorum
+ * leader; and (c) that (C) resynchronizes with the met quorum and joins
+ * causing a fully met quorum.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" >
+ * HAJournalServer can not restart due to logically empty log files
+ * </a>
+ */
+ public void test_startABCDE_logicallyEmptyLogFileDeletedOnRestartC() throws Exception {
+
+ final ABCDE abc = new ABCDE(true/* sequential */);
+
+ final HAGlue serverA = abc.serverA, serverB = abc.serverB;
+ HAGlue serverC = abc.serverC;
+ HAGlue serverD = abc.serverD;
+ HAGlue serverE = abc.serverE;
+
+ // Verify quorum is FULLY met.
+ awaitFullyMetQuorum();
+
+ // await the KB create commit point to become visible on each service.
+ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ 1/* lastCommitCounter */, new HAGlue[] { serverA, serverB,
+ serverC, serverD, serverE });
+
+ /*
+ * Do a series of small commits.
+ */
+
+ final int NSMALL = 5;
+
+ for (int i = 1/* createKB */; i <= NSMALL; i++) {
+
+ simpleTransaction();
+
+ }
+
+ final long commitCounter1 = 1 + NSMALL; // AKA (6)
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL HALog files.
+// assertHALogDigestsEquals(1L/* firstCommitCounter */, commitCounter1,
+// new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirD(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirE(), commitCounter1 + 1);
+
+ /*
+ * Shutdown C.
+ *
+ * Note: This might cause the empty HALog file on (C) to be deleted.
+ * That is OK, since we will copy the desired empty HALog from (A) to
+ * (C), thus enforcing the desired test condition.
+ */
+ shutdownC();
+
+ /*
+ * Verify that there is an empty HALog file on (A) for the next
+ * commit point.
+ */
+
+ // The next commit point.
+ final long commitCounter2 = commitCounter1 + 1; // AKA (7)
+
+ // The HALog for that next commit point.
+ final File fileA = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirA(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Verify HALog file for next commit point on A is logically empty. 
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // The name of that HALog file on (C).
+ final File fileC = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Copy that empty HALog file to (C).
+ copyFile(fileA, fileC, false/* append */);
+
+ /*
+ * Do another transaction. This will cause the HALog file for that
+ * commit point to be non-empty on A.
+ */
+ simpleTransaction();
+
+ /*
+ * Await the commit points to become visible.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitCommitCounter(commitCounter2, new HAGlue[] { serverA, serverB, serverD, serverE });
+
+ // Verify the expected #of HALogs on each service.
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirD(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirE(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter2);
+
+ // Verify HALog file for next commit point on A is NOT empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertFalse(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // Verify HALog file for next commit point on C is logically empty.
+ {
+ assertTrue(fileC.exists());
+ final IHALogReader r = new HALogReader(fileC);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileC.exists());
+ }
+
+ /*
+ * Restart (C). It should start without complaint. The logically empty
+ * HALog file should be replaced by the corresponding file from (A) by
+ * the time the quorum fully meets. At this point all services will have
+ * the same digests for all HALog files.
+ */
+
+ // Restart C.
+ serverC = startC();
+
+ // Wait until the quorum is fully met.
+ awaitFullyMetQuorum();
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter2,
+ new HAGlue[] { serverA, serverB, serverD, serverE, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverD, serverE, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ commitCounter2 /* lastCommitCounter */, new HAGlue[] { serverA,
+ serverB, serverD, serverE, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: Each service will have an empty HALog for the next commit
+ * point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter2+1);
+ awaitLogCount(getHALogDirB(), commitCounter2+1);
+ awaitLogCount(getHALogDirD(), commitCounter2+1);
+ awaitLogCount(getHALogDirE(), commitCounter2+1);
+ awaitLogCount(getHALogDirC(), commitCounter2+1);
+
+ }
+
+
+ /**
+ * This is a unit test for the ability to silently remove a physically empty
+ * HALog file. Five services are started in sequence (A,B,C,D,E). A series of
+ * small commits are applied to the quorum. (C) is then shutdown. A
+ * logically empty HALog file should exist on each service for the next
+ * commit point. We now overwrite that file with a physically empty HALog
+ * file (zero length). We then do one more update. C is then restarted. 
We
+ * verify that C restarts and that the logically empty HALog file has been
+ * replaced by an HALog file that has the same digest as the HALog file for
+ * that commit point on (A,B).
+ * <p>
+ * Note: We can not reliably observe that the physically empty HALog file was
+ * removed during startup. However, this is not critical. What is critical
+ * is that the physically empty HALog file (a) does not prevent (C) from
+ * starting; (b) is replaced by the correct HALog data from the quorum
+ * leader; and (c) that (C) resynchronizes with the met quorum and joins
+ * causing a fully met quorum.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" >
+ * HAJournalServer can not restart due to logically empty log files
+ * </a>
+ * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" >
+ * HAJournal start() </a>
+ */
+ public void test_startABCDE_physicallyEmptyLogFileDeletedOnRestartC() throws Exception {
+
+ final ABCDE abc = new ABCDE(true/* sequential */);
+
+ final HAGlue serverA = abc.serverA, serverB = abc.serverB;
+ HAGlue serverC = abc.serverC;
+ HAGlue serverD = abc.serverD;
+ HAGlue serverE = abc.serverE;
+
+ // Verify quorum is FULLY met.
+ awaitFullyMetQuorum();
+
+ // await the KB create commit point to become visible on each service.
+ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ 1/* lastCommitCounter */, new HAGlue[] { serverA, serverB,
+ serverC, serverD, serverE });
+
+ /*
+ * Do a series of small commits.
+ */
+
+ final int NSMALL = 5;
+
+ for (int i = 1/* createKB */; i <= NSMALL; i++) {
+
+ simpleTransaction();
+
+ }
+
+ final long commitCounter1 = 1 + NSMALL; // AKA (6)
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */, commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC, serverD, serverE });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirD(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirE(), commitCounter1 + 1);
+
+ /*
+ * Shutdown C.
+ *
+ * Note: This might cause the empty HALog file on (C) to be deleted.
+ * That is OK, since we will copy the desired empty HALog from (A) to
+ * (C), thus enforcing the desired test condition.
+ */
+ shutdownC();
+
+ /*
+ * Verify that there is an empty HALog file on (A) for the next
+ * commit point.
+ */
+
+ // The next commit point.
+ final long commitCounter2 = commitCounter1 + 1; // AKA (7)
+
+ // The HALog for that next commit point.
+ final File fileA = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirA(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Verify HALog file for next commit point on A is logically empty. 
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // The name of that HALog file on (C).
+ final File fileC = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+// // Copy that empty HALog file to (C).
+// copyFile(fileA, fileC, false/* append */);
+
+ // delete the logically empty file (if it exists).
+ if (fileC.exists() && !fileC.delete())
+ fail("Could not delete: fileC=" + fileC);
+
+ // create the physically empty file.
+ if (!fileC.createNewFile())
+ fail("Could not create: fileC=" + fileC);
+
+ /*
+ * Do another transaction. This will cause the HALog file for that
+ * commit point to be non-empty on A.
+ */
+ simpleTransaction();
+
+ /*
+ * Await the commit points to become visible.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitCommitCounter(commitCounter2, new HAGlue[] { serverA, serverB, serverD, serverE });
+
+ // Verify the expected #of HALogs on each service.
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirD(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirE(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter2);
+
+ // Verify HALog file for next commit point on A is NOT empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertFalse(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // Verify HALog file for next commit point on C is physically empty.
+ {
+ assertTrue(fileC.exists());
+ assertEquals(0L, fileC.length());
+ }
+
+ /*
+ * Restart (C). It should start without complaint. The logically empty
+ * HALog file should be replaced by the corresponding file from (A) by
+ * the time the quorum fully meets. At this point all services will have
+ * the same digests for all HALog files.
+ */
+
+ // Restart C.
+ serverC = startC();
+
+ // Wait until the quorum is fully met.
+ awaitFullyMetQuorum();
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter2,
+ new HAGlue[] { serverA, serverB, serverD, serverE, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverD, serverE, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ commitCounter2 /* lastCommitCounter */, new HAGlue[] { serverA,
+ serverB, serverD, serverE, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: Each service will have an empty HALog for the next commit
+ * point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter2+1);
+ awaitLogCount(getHALogDirB(), commitCounter2+1);
+ awaitLogCount(getHALogDirC(), commitCounter2+1);
+ awaitLogCount(getHALogDirD(), commitCounter2+1);
+ awaitLogCount(getHALogDirE(), commitCounter2+1);
+
+ }
+
+ /**
+ * Unit test for a situation in which A B C D and E start. A quorum meets and the
+ * final service resyncs with the met quorum. The quorum then fully meets.
+ * Once the fully met quorum is stable, E is then restarted. This test
+ * exercises a code path that handles the case where E is current, but is
+ * forced into RESYNC in case there are writes in progress on the leader. 
+ * <p>
+ * Note: In this version of the test, the HALog files are NOT purged at each
+ * commit of the fully met quorum.
+ */
+ public void testStartABCDE_restartE() throws Exception {
+
+ final ABCDE x = new ABCDE(true/*sequential*/);
+
+ final long token = awaitFullyMetQuorum();
+
+ // Now run several transactions
+ final int NTX = 5;
+ for (int i = 0; i < NTX; i++)
+ simpleTransaction();
+
+ // wait until the commit point is registered on all services.
+ awaitCommitCounter(NTX + 1L, new HAGlue[] { x.serverA, x.serverB,
+ x.serverC, x.serverD, x.serverE });
+
+ /*
+ * The same number of HALog files should exist on all services.
+ *
+ * Note: the restore policy is set up such that we are NOT purging the HALog
+ * files at each commit of a fully met quorum.
+ */
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+
+ // shutdown E - final service
+ shutdownE();
+
+ // wait for E to be gone from zookeeper.
+ awaitPipeline(new HAGlue[] { x.serverA, x.serverB, x.serverC, x.serverD });
+ awaitMembers(new HAGlue[] { x.serverA, x.serverB, x.serverC, x.serverD });
+ awaitJoined(new HAGlue[] { x.serverA, x.serverB, x.serverC, x.serverD });
+
+ // restart E.
+ /*final HAGlue serverE =*/ startE();
+
+ // wait until the quorum fully meets again (on the same token).
+ assertEquals(token, awaitFullyMetQuorum());
+
+ // Verify expected HALog files.
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+
+ }
+
+ /**
+ * Unit test for a situation in which A B C D and E start. A quorum meets and the
+ * remaining services resync with the met quorum. The quorum then fully meets.
+ * Once the fully met quorum is stable, B is then restarted. The pipeline is
+ * reorganized when B is shutdown but the quorum does not break. This test
+ * exercises a code path that handles the case where B is current, but is
+ * forced into RESYNC in case there are writes in progress on the leader.
+ * <p>
+ * Note: In this version of the test, the HALog files are NOT purged at each
+ * commit of the fully met quorum.
+ */
+ public void testStartABCDE_restartB() throws Exception {
+
+ final ABCDE x = new ABCDE(true/*sequential*/);
+
+ final long token = awaitFullyMetQuorum();
+
+ // Now run several transactions
+ final int NTX = 5;
+ for (int i = 0; i < NTX; i++)
+ simpleTransaction();
+
+ // wait until the commit point is registered on all services.
+ awaitCommitCounter(NTX + 1L, new HAGlue[] { x.serverA, x.serverB,
+ x.serverC, x.serverD, x.serverE });
+
+ /*
+ * The same number of HALog files should exist on all services.
+ *
+ * Note: the restore policy is set up such that we are NOT purging the HALog
+ * files at each commit of a fully met quorum.
+ */
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+
+ // shutdown B.
+ shutdownB();
+
+ // wait for B to be gone from zookeeper.
+ awaitPipeline(new HAGlue[] { x.serverA, x.serverC, x.serverD, x.serverE });
+ awaitMembers(new HAGlue[] { x.serverA, x.serverC, x.serverD, x.serverE });
+ awaitJoined(new HAGlue[] { x.serverA, x.serverC, x.serverD, x.serverE });
+
+ // restart B. 
+ /*final HAGlue serverB =*/ startB();
+
+ // wait until the quorum fully meets again (on the same token).
+ assertEquals(token, awaitFullyMetQuorum());
+
+ // Verify expected HALog files.
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+
+ }
+
+ /**
+ * Unit test for a situation in which A B C D and E start. A quorum meets and the
+ * remaining services resync with the met quorum. The quorum then fully meets.
+ * Once the fully met quorum is stable, A is then restarted. The pipeline is
+ * reorganized when A is shutdown and a new leader is elected. This test
+ * exercises a code path that handles the case where A is current, but is
+ * forced into RESYNC in case there are writes in progress on the leader.
+ * <p>
+ * Note: In this version of the test, the HALog files are NOT purged at each
+ * commit of the fully met quorum.
+ */
+ public void testStartABCDE_restartA() throws Exception {
+
+ final ABCDE x = new ABCDE(true/*sequential*/);
+
+ final long token = awaitFullyMetQuorum();
+
+ // Now run several transactions
+ final int NTX = 5;
+ for (int i = 0; i < NTX; i++)
+ simpleTransaction();
+
+ // wait until the commit point is registered on all services.
+ awaitCommitCounter(NTX + 1L, new HAGlue[] { x.serverA, x.serverB,
+ x.serverC, x.serverD, x.serverE });
+
+ /*
+ * The same number of HALog files should exist on all services.
+ *
+ * Note: the restore policy is set up such that we are NOT purging the HALog
+ * files at each commit of a fully met quorum.
+ */
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+
+ // shutdown A.
+ shutdownA();
+
+ // wait for A to be gone from zookeeper.
+// awaitPipeline(new HAGlue[] { x.serverA, x.serverC });
+// awaitMembers(new HAGlue[] { x.serverA, x.serverC });
+// awaitJoined(new HAGlue[] { x.serverA, x.serverC });
+
+ // since the leader failed over, the quorum meets on a new token.
+ final long token2 = awaitNextQuorumMeet(token);
+
+ // restart A.
+ /*final HAGlue serverA =*/ startA();
+
+ // wait until the quorum fully meets again (on the new token).
+ assertEquals(token2, awaitFullyMetQuorum());
+
+ // Verify expected HALog files.
+ awaitLogCount(getHALogDirA(), NTX + 2L);
+ awaitLogCount(getHALogDirB(), NTX + 2L);
+ awaitLogCount(getHALogDirC(), NTX + 2L);
+ awaitLogCount(getHALogDirD(), NTX + 2L);
+ awaitLogCount(getHALogDirE(), NTX + 2L);
+ }
+
+} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
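A note on the getNeverRunSnapshotTime() helper in the new test class above: it avoids CI races by scheduling the DefaultSnapshotPolicy one hour into the future, encoded as an 'hhmm' integer string. A minimal, self-contained sketch of the same computation using java.time (a stand-in for the Calendar-based code in the patch, not part of it):

    import java.time.LocalTime;

    public class NeverRunTimeSketch {

        // Render "now + 1 hour" in the hhmm form parsed by the snapshot
        // policy: minutes are zero-padded, the hour is not (e.g. 9:05
        // becomes "905", 14:30 becomes "1430").
        static String neverRunTime() {
            final LocalTime t = LocalTime.now().plusHours(1);
            final int hh = t.getHour();
            final int mm = t.getMinute();
            return "" + hh + (mm < 10 ? "0" : "") + mm;
        }

        public static void main(String[] args) {
            // Note: at 23:xx this wraps to 0:xx, i.e. a time earlier the
            // same day; the Calendar code in the patch behaves the same way.
            System.out.println(neverRunTime());
        }
    }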
From: <mar...@us...> - 2014-04-09 07:44:35
Revision: 8090 http://sourceforge.net/p/bigdata/code/8090 Author: martyncutcher Date: 2014-04-09 07:44:27 +0000 (Wed, 09 Apr 2014) Log Message: ----------- For ticket #721: fix to BufferedWrite to ensure buffers are zero padded to the slot size when eliding contiguous writes. This caused a potential problem with binary equivalence for snapshots. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -137,6 +137,9 @@ } + // Used to zero pad slots in buffered writes + final byte[] s_zeros = new byte[256]; + /** * Buffer a write. * @@ -188,6 +191,19 @@ } // copy the caller's record into the buffer. m_data.put(data); + + // if data_len < slot_len then clear remainder of buffer + int padding = slot_len - data_len; + while (padding > 0) { + if (padding > s_zeros.length) { + m_data.put(s_zeros); + padding -= s_zeros.length; + } else { + m_data.put(s_zeros, 0, padding); + break; + } + } + // update the file offset by the size of the allocation slot m_endAddr += slot_len; // update the buffer position by the size of the allocation slot. @@ -250,8 +266,9 @@ final ByteBuffer m_data = tmp.buffer(); // reset the buffer state. 
- m_data.position(0); - m_data.limit(m_data.capacity()); + //m_data.position(0); + //m_data.limit(m_data.capacity()); + m_data.clear(); m_startAddr = -1; m_endAddr = 0; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -839,6 +839,7 @@ m_statsBucket.allocate(size); } + return value; } else { StringBuilder sb = new StringBuilder(); @@ -1300,4 +1301,33 @@ return count; } + /** + * Determines if the provided physical address is within an allocated slot + * @param addr + * @return + */ + public boolean verifyAllocatedAddress(long addr) { + if (log.isTraceEnabled()) + log.trace("Checking Allocator " + m_index + ", size: " + m_size); + + final Iterator<AllocBlock> blocks = m_allocBlocks.iterator(); + final long range = m_size * m_bitSize * 32; + while (blocks.hasNext()) { + final int startAddr = blocks.next().m_addr; + if (startAddr != 0) { + final long start = RWStore.convertAddr(startAddr); + final long end = start + range; + + if (log.isTraceEnabled()) + log.trace("Checking " + addr + " between " + start + " - " + end); + + if (addr >= start && addr < end) + return true; + } else { + break; + } + } + return false; + } + } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -6963,7 +6963,7 @@ if (log.isDebugEnabled()) log.debug("writeRaw: " + offset); - + // Guard IO against concurrent file extension. final Lock lock = m_extensionLock.readLock(); @@ -7068,6 +7068,22 @@ } } + /** + * Can be used to determine if an address is within an allocated slot. 
+ * + * @param addr + * @return whether addr is within slot allocated area + */ + public boolean verifyAllocatedAddress(final long addr) { + for (int index = 0; index < m_allocs.size(); index++) { + final FixedAllocator xfa = m_allocs.get(index); + if (xfa.verifyAllocatedAddress(addr)) + return true; + } + + return false; + } + public StoreState getStoreState() { final RWStoreState ret = new RWStoreState(this); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-08 22:24:37 UTC (rev 8089) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-09 07:44:27 UTC (rev 8090) @@ -6,6 +6,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import net.jini.config.Configuration; + import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.msg.HARootBlockRequest; @@ -13,11 +15,8 @@ import com.bigdata.ha.msg.IHASnapshotResponse; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.Journal; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; -import net.jini.config.Configuration; - public class TestHA1SnapshotPolicy extends AbstractHA3BackupTestCase { public TestHA1SnapshotPolicy() { @@ -438,8 +437,8 @@ */ public void testA_snapshot_multipleTx_restore_validate() throws Exception { - final int N1 = 7; // #of transactions to run before the snapshot. - final int N2 = 8; // #of transactions to run after the snapshot. + final int N1 = 7; //7; // #of transactions to run before the snapshot. + final int N2 = 8; //8; // #of transactions to run after the snapshot. // Start service. final HAGlue serverA = startA(); @@ -459,13 +458,13 @@ // Now run N transactions. for (int i = 0; i < N1; i++) { + + simpleTransaction(); - simpleTransaction(); - } + + final long commitCounterN1 = N1 + 1; - final long commitCounterN1 = N1 + 1; - awaitCommitCounter(commitCounterN1, serverA); /* @@ -478,7 +477,7 @@ // Snapshot directory is empty. assertEquals(1, recursiveCount(getSnapshotDirA(),SnapshotManager.SNAPSHOT_FILTER)); - + // request snapshot on A. final Future<IHASnapshotResponse> ft = serverA .takeSnapshot(new HASnapshotRequest(0/* percentLogSize */)); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
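The BufferedWrite change above pads each record out to its allocation slot boundary, so that a run of elided contiguous writes produces byte-for-byte identical output regardless of what the buffer previously held; that determinism is what snapshot binary equivalence requires. A self-contained sketch of the padding idiom, assuming a plain (data, slotLen) pair rather than the real BufferedWrite fields:

    import java.nio.ByteBuffer;

    public class SlotPaddingSketch {

        // Reusable block of zeros; 256 mirrors the s_zeros array in the patch.
        private static final byte[] ZEROS = new byte[256];

        // Copy the record into the buffer, then fill the rest of the
        // slotLen-sized allocation slot with zeros.
        static void putPadded(final ByteBuffer buf, final byte[] data,
                final int slotLen) {
            buf.put(data);
            int padding = slotLen - data.length;
            while (padding > 0) {
                final int n = Math.min(padding, ZEROS.length);
                buf.put(ZEROS, 0, n);
                padding -= n;
            }
        }

        public static void main(String[] args) {
            final ByteBuffer buf = ByteBuffer.allocate(1024);
            putPadded(buf, new byte[] { 1, 2, 3 }, 64);
            System.out.println("position=" + buf.position()); // 64
        }
    }

The companion edit in the reset path is behavior-preserving: ByteBuffer.clear() resets the position and limit exactly as the two replaced calls did, and like them it does not zero the backing array.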
From: <tob...@us...> - 2014-04-08 22:24:42
Revision: 8089 http://sourceforge.net/p/bigdata/code/8089 Author: tobycraig Date: 2014-04-08 22:24:37 +0000 (Tue, 08 Apr 2014) Log Message: ----------- Changed type labels, RDF format selector always visible Modified Paths: -------------- branches/RDR/bigdata-war/src/html/css/style.css branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/css/style.css =================================================================== --- branches/RDR/bigdata-war/src/html/css/style.css 2014-04-08 22:22:47 UTC (rev 8088) +++ branches/RDR/bigdata-war/src/html/css/style.css 2014-04-08 22:24:37 UTC (rev 8089) @@ -145,10 +145,6 @@ box-sizing: border-box; } -#rdf-type-container { - display: none; -} - hr { background: #929292; border: none; Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-08 22:22:47 UTC (rev 8088) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-08 22:24:37 UTC (rev 8089) @@ -42,24 +42,22 @@ <input type="file" id="load-file"><br> <label for="load-type">Type:</label> <select id="load-type"> - <option value="sparql" selected="selected">SPARQL</option> - <option value="rdf">RDF</option> - <option value="path">File path</option> + <option value="sparql" selected="selected">SPARQL Update</option> + <option value="rdf">RDF Data</option> + <option value="path">File Path or URL</option> </select> - <span id="rdf-type-container"> - <label for="rdf-type">Format:</label> - <select id="rdf-type"> - <option value="">Select RDF format</option> - <option value="n-quads">N-Quads</option> - <option value="n-triples">N-Triples</option> - <option value="n3">Notation3</option> - <option value="rdf/xml">RDF/XML</option> - <option value="json">JSON</option> - <option value="trig">TriG</option> - <option value="trix">TriX</option> - <option value="turtle">Turtle</option> - </select> - </span> + <label for="rdf-type">Format:</label> + <select id="rdf-type"> + <option value="">Select RDF format</option> + <option value="n-quads">N-Quads</option> + <option value="n-triples">N-Triples</option> + <option value="n3">Notation3</option> + <option value="rdf/xml">RDF/XML</option> + <option value="json">JSON</option> + <option value="trig">TriG</option> + <option value="trix">TriX</option> + <option value="turtle">Turtle</option> + </select> </p> <hr class="shadow"> <button id="load-load">Load</button> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-08 22:22:47 UTC (rev 8088) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-08 22:24:37 UTC (rev 8089) @@ -317,17 +317,10 @@ } } -function handleTypeChange(e) { - $('#rdf-type-container').toggle($(this).val() == 'rdf'); -} - function setType(type, format) { $('#load-type').val(type); if(type == 'rdf') { - $('#rdf-type-container').show(); $('#rdf-type').val(format); - } else { - $('#rdf-type-container').hide(); } } @@ -362,7 +355,6 @@ .on('drop', handleFile) .on('paste', handlePaste) .bind('keydown', 'ctrl+return', submitLoad); -$('#load-type').change(handleTypeChange); $('#clear-file').click(clearFile); $('#load-load').click(submitLoad); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-08 22:22:52
Revision: 8088 http://sourceforge.net/p/bigdata/code/8088 Author: thompsonbry Date: 2014-04-08 22:22:47 +0000 (Tue, 08 Apr 2014) Log Message: ----------- Modified test to be more robust to startup conditions for HA3. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 21:12:50 UTC (rev 8087) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 22:22:47 UTC (rev 8088) @@ -93,14 +93,14 @@ // Verify quorum is FULLY met. awaitFullyMetQuorum(); + // Verify leader vs followers. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + awaitHAStatus(serverC, HAStatusEnum.Follower); + // await the KB create commit point to become visible on each service. awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); - // Verify leader vs followers. - assertEquals(HAStatusEnum.Leader, serverA.getHAStatus()); - assertEquals(HAStatusEnum.Follower, serverB.getHAStatus()); - assertEquals(HAStatusEnum.Follower, serverC.getHAStatus()); - /* * Do CANCEL for each service using the default namespace. */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
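The r8088 change above replaces one-shot getHAStatus() assertions with awaitHAStatus(), which retries until the expected status is observed or a timeout expires; that tolerates the brief window at startup in which a service has joined the quorum but has not yet settled into its Leader/Follower role. A generic sketch of the poll-until-timeout pattern (awaitHAStatus itself lives in the test base class; the helper below is a simplified stand-in):

    public class AwaitSketch {

        interface Condition {
            boolean holds() throws Exception;
        }

        // Poll the condition every 50ms until it holds or the timeout expires.
        static void awaitCondition(final Condition c, final long timeoutMs)
                throws Exception {
            final long deadline = System.currentTimeMillis() + timeoutMs;
            while (!c.holds()) {
                if (System.currentTimeMillis() > deadline)
                    throw new AssertionError("Condition not met within "
                            + timeoutMs + "ms");
                Thread.sleep(50);
            }
        }

        public static void main(String[] args) throws Exception {
            final long start = System.currentTimeMillis();
            awaitCondition(() -> System.currentTimeMillis() - start > 200, 1000);
            System.out.println("condition met");
        }
    }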
From: <mrp...@us...> - 2014-04-08 21:12:57
Revision: 8087 http://sourceforge.net/p/bigdata/code/8087 Author: mrpersonick Date: 2014-04-08 21:12:50 +0000 (Tue, 08 Apr 2014) Log Message: ----------- exposed the predecessor depth as output Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-08 20:09:14 UTC (rev 8086) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-08 21:12:50 UTC (rev 8087) @@ -15,6 +15,7 @@ */ package com.bigdata.rdf.graph.analytics; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; @@ -275,6 +276,8 @@ final Value v = state.getOtherVertex(u, e); final VS otherState = state.getState(v); + + final int otherDepth = otherState.depth(); // final VS otherState = state.getState(e.getObject()/* v */); // visit. @@ -417,6 +420,62 @@ }); + tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { + + @Override + public int getIndex() { + return Bindings.PRED_DEPTH; + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public List<Value> bind(final ValueFactory vf, + final IGASState<PATHS.VS, PATHS.ES, Void> state, + final Value u, final IVariable<?>[] outVars, + final IBindingSet bs) { + + /* + * We want to return a different set of edges depending on + * which predecessor the caller is asking about. We can + * find that information in the binding set. + */ + + final IVariable<?> var = outVars[Bindings.PREDECESSORS]; + + if (!bs.isBound(var)) { + + if (log.isTraceEnabled()) { + log.trace("no predecessors"); + } + + return Collections.EMPTY_LIST; + + } + + final IV predIV = (IV) bs.get(var).get(); + + final Value predVal; + + if (predIV instanceof Value) { + + predVal = (Value) predIV; + + } else if (predIV.hasValue()) { + + predVal = predIV.getValue(); + + } else { + + throw new RuntimeException("FIXME"); + + } + + return Arrays.asList(new Value[] { vf.createLiteral(state.getState(predVal).depth.get()) }); + + } + + }); + return tmp; } @@ -444,6 +503,8 @@ */ int EDGES = 3; + int PRED_DEPTH = 4; + } /* @@ -453,6 +514,9 @@ public void prunePaths(final IGASContext<VS, ES, Void> ctx, final Value[] targetVertices) { +// if(true) +// return; + if (ctx == null) throw new IllegalArgumentException(); @@ -491,24 +555,38 @@ final PATHS.VS currentState = gasState.getState(v); +// final int curDepth = currentState.depth.get(); + for (Value pred : currentState.predecessors().keySet()) { - if (pred == null) { - - continue; - - } +// if (pred == null) { +// +// continue; +// +// } - if (retainSet.contains(pred)) { +// if (retainSet.contains(pred)) { +// +// continue; +// +// } + +// final int predDepth = gasState.getState(pred).depth.get(); +// +// if (predDepth >= curDepth) { +// +// continue; +// +// } + + if (!retainSet.contains(pred)) { - continue; - + retainSet.add(pred); + + visitPredecessors(gasState, pred, retainSet); + } - retainSet.add(pred); - - visitPredecessors(gasState, pred, retainSet); - } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
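The reworked visitPredecessors() above collapses the earlier null/already-retained early-continues into a single membership check and recurses only on vertices not yet in the retain set. That seen-set guard is what keeps the backward walk over predecessor links from looping forever when the predecessor graph contains cycles. A simplified sketch with string vertices (the real code walks IGASState vertex state):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class PredecessorWalkSketch {

        // Walk predecessor links backwards from v, retaining every vertex
        // reached; recurse only on vertices not seen before so that cycles
        // terminate.
        static void visitPredecessors(final Map<String, Set<String>> preds,
                final String v, final Set<String> retainSet) {
            for (String pred : preds.getOrDefault(v, Set.of())) {
                if (!retainSet.contains(pred)) {
                    retainSet.add(pred);
                    visitPredecessors(preds, pred, retainSet);
                }
            }
        }

        public static void main(String[] args) {
            // b and c form a cycle; the walk still terminates.
            final Map<String, Set<String>> preds = Map.of(
                    "c", Set.of("b"),
                    "b", Set.of("a", "c"));
            final Set<String> retain = new HashSet<>();
            retain.add("c");
            visitPredecessors(preds, "c", retain);
            System.out.println(retain); // a, b and c in some order
        }
    }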
From: <tob...@us...> - 2014-04-08 20:09:18
Revision: 8086 http://sourceforge.net/p/bigdata/code/8086 Author: tobycraig Date: 2014-04-08 20:09:14 +0000 (Tue, 08 Apr 2014) Log Message: ----------- #875 - Fixed RDF/JSON export and added import capability Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-08 15:22:08 UTC (rev 8085) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-08 20:09:14 UTC (rev 8086) @@ -54,6 +54,7 @@ <option value="n-triples">N-Triples</option> <option value="n3">Notation3</option> <option value="rdf/xml">RDF/XML</option> + <option value="json">JSON</option> <option value="trig">TriG</option> <option value="trix">TriX</option> <option value="turtle">Turtle</option> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-08 15:22:08 UTC (rev 8085) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-08 20:09:14 UTC (rev 8086) @@ -1,5 +1,8 @@ $(function() { +// global variables +var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; + /* Search */ $('#search-form').submit(function(e) { @@ -169,7 +172,6 @@ useNamespace(DEFAULT_NAMESPACE, url); }); } -var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, fileContents, NAMESPACE_SHORTCUTS; getDefaultNamespace(); @@ -245,7 +247,7 @@ $('#filename').html(f.name); $('#large-file-message, #clear-file').show() $('#load-box').val(''); - fileContents = e2.target.result; + FILE_CONTENTS = e2.target.result; } else { // display file contents in the textarea clearFile(); @@ -265,7 +267,7 @@ } $('#load-box').prop('disabled', false) $('#large-file-message, #clear-file').hide() - fileContents = null; + FILE_CONTENTS = null; } function guessType(extension, content) { @@ -338,6 +340,7 @@ 'rdfs': 'rdf/xml', 'owl': 'rdf/xml', 'xml': 'rdf/xml', + 'json': 'json', 'trig': 'trig', 'trix': 'trix', //'xml': 'trix', @@ -347,6 +350,7 @@ 'n-triples': 'text/plain', 'n3': 'text/n3', 'rdf/xml': 'application/rdf+xml', + 'json': 'application/sparql-results+json', 'trig': 'application/trig', 'trix': 'application/trix', 'turtle': 'text/turtle'}; @@ -368,7 +372,7 @@ var settings = { type: 'POST', - data: fileContents == null ? $('#load-box').val() : fileContents, + data: FILE_CONTENTS == null ? 
$('#load-box').val() : FILE_CONTENTS, success: updateResponseXML, error: updateResponseError } @@ -505,40 +509,7 @@ } function exportJSON() { - var json = {} - if($('#query-response table').hasClass('boolean')) { - json.head = {}; - json['boolean'] = $('#query-response td').text(); - } else { - json.head = {vars: []}; - $('#query-response thead tr td').each(function(i, td) { - json.head.vars.push(td.textContent); - }); - json.bindings = []; - $('#query-response tbody tr').each(function(i, tr) { - var binding = {}; - $(tr).find('td').each(function(j, td) { - var bindingFields = {} - var bindingType = td.className; - if(bindingType == 'unbound') { - return; - } - bindingFields.type = bindingType; - var dataType = $(td).data('datatype'); - if(dataType) { - bindingFields.type = dataType; - } - var lang = $(td).data('lang'); - if(lang) { - bindingFields.lang = lang; - } - bindingFields.value = td.textContent; - binding[json.head.vars[j]] = bindingFields; - }); - json.bindings.push(binding); - }); - } - json = JSON.stringify(json); + var json = JSON.stringify(QUERY_RESULTS); downloadFile(json, 'application/sparql-results+json', 'export.json'); } @@ -593,6 +564,8 @@ } } else { // JSON + // save data for export + QUERY_RESULTS = data; if(typeof(data.boolean) != 'undefined') { // ASK query table.append('<tr><td>' + data.boolean + '</td></tr>').addClass('boolean'); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-04-08 15:22:11
Revision: 8085 http://sourceforge.net/p/bigdata/code/8085 Author: mrpersonick Date: 2014-04-08 15:22:08 +0000 (Tue, 08 Apr 2014) Log Message: ----------- fixed a CI bug Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-04-08 13:08:10 UTC (rev 8084) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-04-08 15:22:08 UTC (rev 8085) @@ -220,7 +220,8 @@ log.info("Replaced " + nmods + " instances of " + oldVal + " with " + newVal); - assert nmods > 0; // Failed to replace something. + // mods will no longer always be > 0 (subgroups) +// assert nmods > 0; // Failed to replace something. ntotal += nmods; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-08 13:08:14
Revision: 8084 http://sourceforge.net/p/bigdata/code/8084 Author: thompsonbry Date: 2014-04-08 13:08:10 +0000 (Tue, 08 Apr 2014) Log Message: ----------- Bug fix for POST of CANCEL on follower in HA mode. See #883 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 12:22:11 UTC (rev 8083) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 13:08:10 UTC (rev 8084) @@ -122,7 +122,7 @@ * Do CANCEL for each service using the SPARQL end point associated with * a non-default namespace: * - * /sparql/namespace/NAMESPACE + * /namespace/NAMESPACE/sparql */ { final String namespace = "kb"; @@ -146,7 +146,7 @@ * instance associated with the given <i>namespace</i>. The * {@link RemoteRepository} will use a URL for the SPARQL end point that is * associated with the specified namespace and formed as - * <code>/sparql/namespace/<i>namespace</i></code> rather than the default + * <code>/namespace/<i>namespace</i>/sparql</code> rather than the default * KB SPARQL end point (<code>/sparql</code>). * * @param haGlue Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-04-08 12:22:11 UTC (rev 8083) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-04-08 13:08:10 UTC (rev 8084) @@ -106,20 +106,20 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!isWritable(req, resp)) { - // Service must be writable. - return; - } + if (req.getRequestURI().endsWith("/namespace")) { - if (req.getRequestURI().endsWith("/namespace")) { - + // CREATE NAMESPACE. doCreateNamespace(req, resp); return; } - // Pass through to the SPARQL end point REST API. + /* + * Pass through to the SPARQL end point REST API. + * + * Note: This also handles CANCEL QUERY, which is a POST. + */ m_restServlet.doPost(req, resp); } @@ -220,6 +220,11 @@ private void doCreateNamespace(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + if (!isWritable(req, resp)) { + // Service must be writable. 
+ return; + } + final BigdataRDFContext context = getBigdataRDFContext(); final IIndexManager indexManager = context.getIndexManager(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-04-08 12:22:11 UTC (rev 8083) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-04-08 13:08:10 UTC (rev 8084) @@ -579,8 +579,7 @@ */ try { EntityUtils.consume(response.getEntity()); - } catch (IOException ex) { - } + } catch (IOException ex) {log.warn(ex); } } } @@ -637,7 +636,7 @@ if (resp != null) EntityUtils.consume(resp.getEntity()); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -690,7 +689,7 @@ if (resp != null) EntityUtils.consume(resp.getEntity()); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -749,7 +748,7 @@ if (response != null) EntityUtils.consume(response.getEntity()); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -829,7 +828,7 @@ if (response != null) EntityUtils.consume(response.getEntity()); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -917,7 +916,7 @@ if (response != null) EntityUtils.consume(response.getEntity()); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -1487,7 +1486,7 @@ // conn.disconnect(); } catch (Throwable t2) { - // ignored. + log.warn(t2); // ignored. } throw new RuntimeException(sparqlEndpointURL + " : " + t, t); } @@ -1665,7 +1664,7 @@ try { cancel(queryId); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -1693,13 +1692,13 @@ if (entity != null && result == null) { try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } } if (response != null && tqrImpl == null) { try { cancel(queryId); - } catch(Exception ex) { } + } catch(Exception ex) {log.warn(ex); } } } @@ -1811,7 +1810,7 @@ try { cancel(queryId); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } @@ -1843,11 +1842,11 @@ if (response != null && result == null) { try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } try { cancel(queryId); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } } @@ -1912,11 +1911,11 @@ if (result == null) { try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } try { cancel(queryId); - } catch (Exception ex) { } + } catch (Exception ex) {log.warn(ex); } } } @@ -1996,7 +1995,7 @@ // response.disconnect(); try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } } @@ -2058,7 +2057,7 @@ // response.disconnect(); try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } } @@ -2120,7 +2119,7 @@ // response.disconnect(); try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } } @@ -2178,7 +2177,7 @@ // response.disconnect(); try { EntityUtils.consume(entity); - } catch (IOException ex) { } + } catch (IOException ex) {log.warn(ex); } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source 
development site. |
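The essence of the r8084 fix above: the isWritable() guard is moved off the shared doPost() path and into the one branch that actually mutates state (namespace creation), so a read-only HA follower can still accept a POSTed CANCEL that merely passes through to the REST API. A minimal sketch of that routing shape (class and helper names are stand-ins, not the actual MultiTenancyServlet internals):

    import java.io.IOException;

    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public abstract class GuardedPostSketch extends HttpServlet {

        @Override
        protected void doPost(final HttpServletRequest req,
                final HttpServletResponse resp) throws IOException {
            if (req.getRequestURI().endsWith("/namespace")) {
                // Mutating branch: checks writability itself.
                doCreateNamespace(req, resp);
                return;
            }
            // Non-mutating POSTs (including CANCEL) pass through unguarded.
            passThrough(req, resp);
        }

        private void doCreateNamespace(final HttpServletRequest req,
                final HttpServletResponse resp) throws IOException {
            if (!isWritable(req, resp)) {
                // Read-only service; isWritable() has set the error response.
                return;
            }
            // ... create the namespace ...
        }

        protected abstract boolean isWritable(HttpServletRequest req,
                HttpServletResponse resp) throws IOException;

        protected abstract void passThrough(HttpServletRequest req,
                HttpServletResponse resp) throws IOException;
    }

The scattered EntityUtils.consume() changes in RemoteRepository follow the same hygiene principle: exceptions during cleanup are still suppressed, but are now logged via log.warn() instead of silently swallowed.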
From: <tho...@us...> - 2014-04-08 12:22:14
Revision: 8083 http://sourceforge.net/p/bigdata/code/8083 Author: thompsonbry Date: 2014-04-08 12:22:11 +0000 (Tue, 08 Apr 2014) Log Message: ----------- Modified test case to demonstrate failure for #883 (CANCEL Query fails on non-default kb namespace on HA follower). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-07 23:57:41 UTC (rev 8082) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 2014-04-08 12:22:11 UTC (rev 8083) @@ -26,39 +26,24 @@ */ package com.bigdata.journal.jini.ha; +import java.io.IOException; import java.util.UUID; import net.jini.config.Configuration; +import org.apache.http.client.HttpClient; +import org.apache.http.impl.client.DefaultHttpClient; + import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.quorum.Quorum; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; +import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager; /** - * Test suites for an {@link HAJournalServer} quorum with a replication factor - * of THREE (3) and a fully met {@link Quorum}. + * Test suite for the SPARQL query and SPARQL update request cancellation + * protocol for an {@link HAJournalServer} quorum with a replication factor of + * THREE (3). * - * TODO Do we have any guards against rolling back a service in RESYNC if the - * other services are more than 2 commit points before it? We probably should - * not automatically roll it back to the other services in this case, but that - * could also reduce the ergonomics of the HA3 configuration. - * - * TODO All of these live load remains met tests could also be done with BOUNCE - * rather than SHUTDOWN/RESTART. BOUNCE exercises different code paths and - * corresponds to a zookeeper timeout, e.g., as might occur during a full GC - * pause. - * - * TODO Update the existing tests to verify that the quorum token is properly - * set on C when C resyncs with A+B and that - * {@link AbstractJournal#getHAReady()} reports the correct token. This tests - * for a problem where we did not call setQuorumToken() again when we resync and - * transition into the met quorum. This meant that the HAReady token is not set - * for a service unless it is part of the initial quorum meet. One of the HA3 - * backup tests covers this, but we should be checking the HAReadyToken in this - * test suite as well. - * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ public class TestHA3CancelQuery extends AbstractHA3JournalServerTestCase { @@ -95,6 +80,9 @@ * order will be A, B, C. Issues cancel request to each of the services and * verifies that all services are willing to accept a POST of the CANCEL * request. + * + * @see <a href="http://trac.bigdata.com/ticket/883">CANCEL Query fails on + * non-default kb namespace on HA follower</a> */ public void test_ABC_CancelQuery() throws Exception { @@ -108,22 +96,90 @@ // await the KB create commit point to become visible on each service. awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); - // get RemoteRepository for each service. 
- final RemoteRepository[] repo = new RemoteRepository[3]; - - repo[0] = getRemoteRepository(serverA); - repo[1] = getRemoteRepository(serverB); - repo[2] = getRemoteRepository(serverC); - // Verify leader vs followers. assertEquals(HAStatusEnum.Leader, serverA.getHAStatus()); assertEquals(HAStatusEnum.Follower, serverB.getHAStatus()); assertEquals(HAStatusEnum.Follower, serverC.getHAStatus()); - repo[0].cancel(UUID.randomUUID()); - repo[1].cancel(UUID.randomUUID()); - repo[2].cancel(UUID.randomUUID()); + /* + * Do CANCEL for each service using the default namespace. + */ + { + // Get RemoteRepository for each service. + final RemoteRepository[] repo = new RemoteRepository[3]; + + repo[0] = getRemoteRepository(serverA); + repo[1] = getRemoteRepository(serverB); + repo[2] = getRemoteRepository(serverC); + + repo[0].cancel(UUID.randomUUID()); + repo[1].cancel(UUID.randomUUID()); + repo[2].cancel(UUID.randomUUID()); + + } + /* + * Do CANCEL for each service using the SPARQL end point associated with + * a non-default namespace: + * + * /sparql/namespace/NAMESPACE + */ + { + final String namespace = "kb"; + + // Get RemoteRepository for each service. + final RemoteRepository[] repo = new RemoteRepository[3]; + + repo[0] = getRemoteRepositoryForNamespace(serverA, namespace); + repo[1] = getRemoteRepositoryForNamespace(serverB, namespace); + repo[2] = getRemoteRepositoryForNamespace(serverC, namespace); + + repo[0].cancel(UUID.randomUUID()); + repo[1].cancel(UUID.randomUUID()); + repo[2].cancel(UUID.randomUUID()); + } + } + /** + * Return a {@link RemoteRepository} that will communicate with the KB + * instance associated with the given <i>namespace</i>. The + * {@link RemoteRepository} will use a URL for the SPARQL end point that is + * associated with the specified namespace and formed as + * <code>/sparql/namespace/<i>namespace</i></code> rather than the default + * KB SPARQL end point (<code>/sparql</code>). + * + * @param haGlue + * The service. + * @param namespace + * The namespace. + * @return The {@link RemoteRepository} for that namespace. + * + * @throws IOException + * + * TODO Push down into the abstract base class when reconciling + * with the RDR branch which has changes to the abstract base + * class to support the LBS. + */ + protected RemoteRepository getRemoteRepositoryForNamespace( + final HAGlue haGlue, final String namespace) throws IOException { + + final String sparqlEndpointURL = getNanoSparqlServerURL(haGlue); + + // Client for talking to the NSS. + final HttpClient httpClient = new DefaultHttpClient(ccm); + + final RemoteRepositoryManager repositoryManager = new RemoteRepositoryManager( + sparqlEndpointURL, httpClient, executorService); + + final RemoteRepository repo = repositoryManager + .getRepositoryForNamespace(namespace); + + // Note: This is not required in order to demonstrate the problem. +// repo.setMaxRequestURLLength(65536); +// repo.setQueryMethod("GET"); + + return repo; + + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
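The r8083 helper above binds a RemoteRepository to a namespace-specific SPARQL end point. As the javadoc correction in r8084 (earlier in this digest) spells out, a non-default namespace is addressed at /namespace/<namespace>/sparql rather than the default /sparql. A tiny sketch of that URL composition (the base URL is a made-up example):

    public class NamespaceEndpointSketch {

        // Compose the SPARQL end point URL for a KB namespace; null selects
        // the default end point.
        static String sparqlEndpointFor(final String serviceBaseURL,
                final String namespace) {
            return namespace == null
                    ? serviceBaseURL + "/sparql"
                    : serviceBaseURL + "/namespace/" + namespace + "/sparql";
        }

        public static void main(String[] args) {
            System.out.println(sparqlEndpointFor(
                    "http://localhost:8080/bigdata", "kb"));
            // http://localhost:8080/bigdata/namespace/kb/sparql
        }
    }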