|
From: <jer...@us...> - 2014-05-08 01:49:37
|
Revision: 8226
http://sourceforge.net/p/bigdata/code/8226
Author: jeremy_carroll
Date: 2014-05-08 01:49:33 +0000 (Thu, 08 May 2014)
Log Message:
-----------
Tests for the AnalyzerFactories. The tests cover their shared behavior.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java
Added: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 01:49:33 UTC (rev 8226)
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding//bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java=UTF-8
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-08 01:49:13 UTC (rev 8225)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -547,7 +547,7 @@
// RussianAnalyzer is missing any way to access stop words.
if (RussianAnalyzer.class.equals(cls) && useDefaultStopWords()) {
- return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET), new RussianAnalyzer(Version.LUCENE_CURRENT));
+ return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT), new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET));
}
return new VersionSetAnalyzerPair(this, cls);
}
@@ -612,7 +612,8 @@
*/
private static final int MAX_LANG_CACHE_SIZE = 500;
- private final String defaultLanguage;
+ private String defaultLanguage;
+ private final FullTextIndex<?> fullTextIndex;
public ConfigurableAnalyzerFactory(final FullTextIndex<?> fullTextIndex) {
@@ -621,9 +622,9 @@
if (fullTextIndex == null)
throw new IllegalArgumentException();
- defaultLanguage = getDefaultLanguage(fullTextIndex);
+ this.fullTextIndex = fullTextIndex;
- final Properties properties = initProperties(fullTextIndex);
+ final Properties properties = initProperties();
final Map<String, ConfigOptionsToAnalyzer> analyzers = new HashMap<String, ConfigOptionsToAnalyzer>();
@@ -686,6 +687,12 @@
}
}
+ private String getDefaultLanguage() {
+ if (defaultLanguage == null) {
+ defaultLanguage = getDefaultLanguage(fullTextIndex);
+ }
+ return defaultLanguage;
+ }
private static boolean hasConstructor(Class<? extends Analyzer> cls, Class<?> ... parameterTypes) {
return getConstructor(cls, parameterTypes) != null;
@@ -731,7 +738,7 @@
}
- protected Properties initProperties(final FullTextIndex<?> fullTextIndex) {
+ protected Properties initProperties() {
final Properties parentProperties = fullTextIndex.getProperties();
Properties myProps;
if (Boolean.getBoolean(parentProperties.getProperty(Options.INCLUDE_DEFAULTS, Options.DEFAULT_INCLUDE_DEFAULTS))) {
@@ -773,7 +780,8 @@
public Analyzer getAnalyzer(String languageCode, boolean filterStopwords) {
if (languageCode == null || languageCode.equals("")) {
- languageCode = defaultLanguage;
+
+ languageCode = getDefaultLanguage();
}
AnalyzerPair pair = langTag2AnalyzerPair.get(languageCode);
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -0,0 +1,174 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 7, 2014
+ */
+package com.bigdata.search;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+
+public abstract class AbstractAnalyzerFactoryTest extends AbstractSearchTest {
+
+ public AbstractAnalyzerFactoryTest() {
+ }
+
+ public AbstractAnalyzerFactoryTest(String arg0) {
+ super(arg0);
+ }
+
+ public void setUp() throws Exception {
+ super.setUp();
+ init(getExtraProperties());
+ }
+ abstract String[] getExtraProperties();
+
+ private Analyzer getAnalyzer(String lang, boolean filterStopWords) {
+ return getNdx().getAnalyzer(lang, filterStopWords);
+ }
+
+ private void comparisonTest(String lang,
+ boolean stopWordsSignificant,
+ String text,
+ String spaceSeparated) throws IOException {
+ compareTokenStream(getAnalyzer(lang, stopWordsSignificant), text,
+ spaceSeparated.split(" "));
+ }
+ private void compareTokenStream(Analyzer a, String text, String expected[]) throws IOException {
+ TokenStream s = a.tokenStream(null, new StringReader(text));
+ int ix = 0;
+ while (s.incrementToken()) {
+ final TermAttribute term = s.getAttribute(TermAttribute.class);
+ final String word = term.term();
+ assertTrue(ix < expected.length);
+ assertEquals(word, expected[ix++]);
+ }
+ assertEquals(ix, expected.length);
+ }
+
+
+ public void testEnglishFilterStopWords() throws IOException {
+ for (String lang: new String[]{ "eng", null, "" }) {
+ comparisonTest(lang,
+ true,
+ "The test to end all tests! Forever.",
+ "test end all tests forever"
+ );
+ }
+ }
+ public void testEnglishNoFilter() throws IOException {
+ for (String lang: new String[]{ "eng", null, "" }) {
+ comparisonTest(lang,
+ false,
+ "The test to end all tests! Forever.",
+ "the test to end all tests forever"
+ );
+ }
+ }
+
+ // Note we careful use a three letter language code for german.
+ // 'de' is more standard, but the DefaultAnalyzerFactory does not
+ // implement 'de' correctly.
+ public void testGermanFilterStopWords() throws IOException {
+ comparisonTest("ger",
+ true,
+ "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " +
+ "erneuten Auseinandersetzung gekommen:",
+ "hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm"
+ );
+
+ }
+
+ // Note we careful use a three letter language code for Russian.
+ // 'ru' is more standard, but the DefaultAnalyzerFactory does not
+ // implement 'ru' correctly.
+ public void testRussianFilterStopWords() throws IOException {
+ comparisonTest("rus",
+ true,
+ // I hope this is not offensive text.
+ "Они ответственны полностью и за ту, и за другую трагедию. " +
+ "Мы уже получили данные от сочувствующих нам офицеров СБУ.",
+ "ответствен полност ту друг трагед получ дан сочувств нам офицер сбу"
+ );
+
+ }
+ public void testGermanNoStopWords() throws IOException {
+ comparisonTest("ger",
+ false,
+ "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " +
+ "erneuten Auseinandersetzung gekommen:",
+ "hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm"
+ );
+
+ }
+ public void testRussianNoStopWords() throws IOException {
+ comparisonTest("rus",
+ false,
+ // I hope this is not offensive text.
+ "Они ответственны полностью и за ту, и за другую трагедию. " +
+ "Мы уже получили данные от сочувствующих нам офицеров СБУ.",
+ "он ответствен полност и за ту и за друг трагед мы уж получ дан от сочувств нам офицер сбу"
+ );
+
+ }
+ public void testJapanese() throws IOException {
+ for (boolean filterStopWords: new Boolean[]{true, false}) {
+ comparisonTest("jpn",
+ filterStopWords,
+ // I hope this is not offensive text.
+ "高林純示 生態学研究センター教授らの研究グループと松井健二 山口大学医学系研究科(農学系)教授らの研究グループは、",
+ "高林 林純 純示 生態 態学 学研 研究 究セ セン ンタ ター ー教 教授 授ら らの の研 研究 究グ グル ルー " +
+ "ープ プと と松 松井 井健 健二 山口 口大 大学 学医 医学 学系 系研 " +
+ "研究 究科 農学 学系 教授 授ら らの の研 研究 究グ グル ルー ープ プは");
+ }
+ }
+ public void testConfiguredLanguages() {
+ checkConfig("BrazilianAnalyzer", "por", "pt");
+ checkConfig("ChineseAnalyzer", "zho", "chi", "zh");
+ checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko");
+ checkConfig("CzechAnalyzer", "ces", "cze", "cs");
+ checkConfig("DutchAnalyzer", "dut", "nld", "nl");
+ checkConfig("GermanAnalyzer", "deu", "ger", "de");
+ checkConfig("GreekAnalyzer", "gre", "ell", "el");
+ checkConfig("RussianAnalyzer", "rus", "ru");
+ checkConfig("ThaiAnalyzer", "th", "tha");
+ checkConfig("StandardAnalyzer", "en", "eng", "", null);
+ }
+
+ private void checkConfig(String classname, String ...langs) {
+ for (String lang:langs) {
+ // The DefaultAnalyzerFactory only works for language tags of length exactly three.
+// if (lang != null && lang.length()==3)
+ {
+ assertEquals(classname, getAnalyzer(lang,true).getClass().getSimpleName());
+ assertEquals(classname, getAnalyzer(lang+"-x-foobar",true).getClass().getSimpleName());
+ }
+ }
+
+ }
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java 2014-05-08 01:49:13 UTC (rev 8225)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -1,3 +1,29 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 7, 2014
+ */
package com.bigdata.search;
import java.util.Properties;
@@ -2,3 +28,2 @@
-import com.bigdata.btree.IndexMetadata;
import com.bigdata.journal.IIndexManager;
@@ -11,7 +36,6 @@
private String namespace;
private IIndexManager indexManager;
private FullTextIndex<Long> ndx;
- private IndexMetadata indexMetadata;
private Properties properties;
public AbstractSearchTest() {
@@ -22,19 +46,29 @@
}
void init(String ...propertyValuePairs) {
- namespace = getName();
- properties = getProperties();
+ namespace = getClass().getName()+"#"+getName();
+ indexManager = getStore();
+ properties = (Properties) getProperties().clone();
+ ndx = createFullTextIndex(namespace, properties, propertyValuePairs);
+ }
+
+ private FullTextIndex<Long> createFullTextIndex(String namespace, Properties properties, String ...propertyValuePairs) {
for (int i=0; i<propertyValuePairs.length; ) {
properties.setProperty(propertyValuePairs[i++], propertyValuePairs[i++]);
}
- indexManager = getStore();
- ndx = new FullTextIndex<Long>(indexManager, namespace, ITx.UNISOLATED, properties);
+ FullTextIndex<Long> ndx = new FullTextIndex<Long>(indexManager, namespace, ITx.UNISOLATED, properties);
ndx.create();
- indexMetadata = ndx.getIndex().getIndexMetadata();
- }
+ return ndx;
+ }
+
+ FullTextIndex<Long> createFullTextIndex(String namespace, String ...propertyValuePairs) {
+ return createFullTextIndex(namespace, getProperties(), propertyValuePairs);
+ }
public void tearDown() throws Exception {
- indexManager.destroy();
+ if (indexManager != null) {
+ indexManager.destroy();
+ }
super.tearDown();
}
@@ -54,15 +88,8 @@
return ndx;
}
- IndexMetadata getIndexMetadata() {
- return indexMetadata;
- }
-
-
Properties getSearchProperties() {
return properties;
}
-
-
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java 2014-05-08 01:49:13 UTC (rev 8225)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -104,6 +104,14 @@
// test verifies search index is restart safe.
suite.addTestSuite(TestSearchRestartSafe.class);
+
+ // Check behavior of DefaultAnalyzerFactory, see also trac 915
+ suite.addTestSuite(TestDefaultAnalyzerFactory.class);
+
+ // Check default behavior of ConfigurableAnalyzerFactory
+ // which is intended to be the same as the intended
+ // behavior of DefaultAnalyzerFactory
+ suite.addTestSuite(TestConfigurableAsDefaultAnalyzerFactory.class);
return suite;
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -0,0 +1,43 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 7, 2014
+ */
+package com.bigdata.search;
+
+public class TestConfigurableAsDefaultAnalyzerFactory extends AbstractAnalyzerFactoryTest {
+
+ public TestConfigurableAsDefaultAnalyzerFactory() {
+ }
+
+ public TestConfigurableAsDefaultAnalyzerFactory(String arg0) {
+ super(arg0);
+ }
+
+ @Override
+ String[] getExtraProperties() {
+ return new String[]{FullTextIndex.Options.ANALYZER_FACTORY_CLASS, ConfigurableAnalyzerFactory.class.getName()};
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -0,0 +1,43 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 7, 2014
+ */
+package com.bigdata.search;
+
+public class TestDefaultAnalyzerFactory extends AbstractAnalyzerFactoryTest {
+
+ public TestDefaultAnalyzerFactory() {
+ }
+
+ public TestDefaultAnalyzerFactory(String arg0) {
+ super(arg0);
+ }
+
+ @Override
+ String[] getExtraProperties() {
+ return new String[0];
+ }
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java 2014-05-08 01:49:13 UTC (rev 8225)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java 2014-05-08 01:49:33 UTC (rev 8226)
@@ -93,6 +93,10 @@
return keyBuilder;
}
+
+ IndexMetadata getIndexMetadata() {
+ return getNdx().getIndex().getIndexMetadata();
+ }
private IKeyBuilder keyBuilder;
/**
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-08 01:52:14
|
Revision: 8227
http://sourceforge.net/p/bigdata/code/8227
Author: mrpersonick
Date: 2014-05-08 01:52:09 +0000 (Thu, 08 May 2014)
Log Message:
-----------
fixed the gremlin installer, added a loadGraphML method to all BigdataGraph impls
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/build.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-08 01:49:33 UTC (rev 8226)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-08 01:52:09 UTC (rev 8227)
@@ -54,6 +54,7 @@
import com.tinkerpop.blueprints.GraphQuery;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.util.DefaultGraphQuery;
+import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;
/**
* A base class for a Blueprints wrapper around a bigdata back-end.
@@ -93,6 +94,13 @@
return getClass().getSimpleName().toLowerCase();
}
+ /**
+ * Post a GraphML file to the remote server. (Bulk-upload operation.)
+ */
+ public void loadGraphML(final String file) throws Exception {
+ GraphMLReader.inputGraph(this, file);
+ }
+
protected abstract RepositoryConnection cxn() throws Exception;
// public BigdataSailRepositoryConnection getConnection() {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-08 01:49:33 UTC (rev 8226)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-08 01:52:09 UTC (rev 8227)
@@ -80,8 +80,9 @@
/**
* Post a GraphML file to the remote server. (Bulk-upload operation.)
*/
- public long postGraphML(final String file) throws Exception {
- return this.repo.getRemoteRepository().postGraphML(file);
+ @Override
+ public void loadGraphML(final String file) throws Exception {
+ this.repo.getRemoteRepository().postGraphML(file);
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 01:49:33 UTC (rev 8226)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 01:52:09 UTC (rev 8227)
@@ -2530,7 +2530,7 @@
<delete file="${build.dir}/gremlin-groovy-2.5.0.zip"/>
</target>
- <target name="install-gremlin" depends="prepare,compile,jar">
+ <target name="install-gremlin" depends="prepare,compile,jar,bundle">
<delete>
<fileset dir="${build.dir}/gremlin-groovy-2.5.0/lib">
<include name="blueprints-graph-sail-2.5.0.jar"/>
@@ -2577,12 +2577,17 @@
</fileset>
</delete>
<copy toDir="${build.dir}/gremlin-groovy-2.5.0/lib" flatten="true">
+ <!--
<fileset dir="${bigdata.dir}/bigdata-rdf/lib">
<include name="openrdf-sesame-${sesame.version}-onejar.jar" />
</fileset>
<fileset dir="${bigdata.dir}/bigdata-sails/lib/httpcomponents">
<include name="httpmime-${apache.httpmime.version}.jar" />
</fileset>
+ -->
+ <fileset dir="${build.dir}/lib">
+ <include name="*.jar" />
+ </fileset>
<fileset dir="${build.dir}">
<include name="${version}.jar" />
</fileset>
@@ -2594,9 +2599,10 @@
1. Start the gremlin console:
> ./${build.dir}/gremlin-groovy-2.5.0/bin/gremlin.sh
2. Connect to the bigdata server:
- > g = com.bigdata.blueprints.BigdataGraphFactory.connect("http://localhost:9999/bigdata")
+ gremlin> import com.bigdata.blueprints.*
+ gremlin> g = BigdataGraphFactory.connect("http://localhost:9999")
3. Don't forget to shut down the connection when you're done:
- > g.shutdown()
+ gremlin> g.shutdown()
</echo>
</target>
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <jer...@us...> - 2014-05-08 03:13:01
|
Revision: 8230
http://sourceforge.net/p/bigdata/code/8230
Author: jeremy_carroll
Date: 2014-05-08 03:12:55 +0000 (Thu, 08 May 2014)
Log Message:
-----------
externalized Japanese, Russian and German strings to address encoding issues
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
Deleted: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 02:57:15 UTC (rev 8229)
+++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 03:12:55 UTC (rev 8230)
@@ -1,2 +0,0 @@
-eclipse.preferences.version=1
-encoding//bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java=UTF-8
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 02:57:15 UTC (rev 8229)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 03:12:55 UTC (rev 8230)
@@ -57,7 +57,7 @@
String text,
String spaceSeparated) throws IOException {
compareTokenStream(getAnalyzer(lang, stopWordsSignificant), text,
- spaceSeparated.split(" "));
+ spaceSeparated.split(" ")); //$NON-NLS-1$
}
private void compareTokenStream(Analyzer a, String text, String expected[]) throws IOException {
TokenStream s = a.tokenStream(null, new StringReader(text));
@@ -73,20 +73,20 @@
public void testEnglishFilterStopWords() throws IOException {
- for (String lang: new String[]{ "eng", null, "" }) {
+ for (String lang: new String[]{ "eng", null, "" }) { //$NON-NLS-1$ //$NON-NLS-2$
comparisonTest(lang,
true,
- "The test to end all tests! Forever.",
- "test end all tests forever"
+ "The test to end all tests! Forever.", //$NON-NLS-1$
+ "test end all tests forever" //$NON-NLS-1$
);
}
}
public void testEnglishNoFilter() throws IOException {
- for (String lang: new String[]{ "eng", null, "" }) {
+ for (String lang: new String[]{ "eng", null, "" }) { //$NON-NLS-1$ //$NON-NLS-2$
comparisonTest(lang,
false,
- "The test to end all tests! Forever.",
- "the test to end all tests forever"
+ "The test to end all tests! Forever.", //$NON-NLS-1$
+ "the test to end all tests forever" //$NON-NLS-1$
);
}
}
@@ -95,11 +95,11 @@
// 'de' is more standard, but the DefaultAnalyzerFactory does not
// implement 'de' correctly.
public void testGermanFilterStopWords() throws IOException {
- comparisonTest("ger",
+ comparisonTest("ger", //$NON-NLS-1$
true,
- "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " +
- "erneuten Auseinandersetzung gekommen:",
- "hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm"
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.10") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.11"), //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.12") //$NON-NLS-1$
);
}
@@ -108,56 +108,54 @@
// 'ru' is more standard, but the DefaultAnalyzerFactory does not
// implement 'ru' correctly.
public void testRussianFilterStopWords() throws IOException {
- comparisonTest("rus",
+ comparisonTest("rus", //$NON-NLS-1$
true,
// I hope this is not offensive text.
- "Они ответственны полностью и за ту, и за другую трагедию. " +
- "Мы уже получили данные от сочувствующих нам офицеров СБУ.",
- "ответствен полност ту друг трагед получ дан сочувств нам офицер сбу"
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.14") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.15"), //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.16") //$NON-NLS-1$
);
}
public void testGermanNoStopWords() throws IOException {
- comparisonTest("ger",
+ comparisonTest("ger", //$NON-NLS-1$
false,
- "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " +
- "erneuten Auseinandersetzung gekommen:",
- "hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm"
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.18") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.19"), //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.20") //$NON-NLS-1$
);
}
public void testRussianNoStopWords() throws IOException {
- comparisonTest("rus",
+ comparisonTest("rus", //$NON-NLS-1$
false,
- // I hope this is not offensive text.
- "Они ответственны полностью и за ту, и за другую трагедию. " +
- "Мы уже получили данные от сочувствующих нам офицеров СБУ.",
- "он ответствен полност и за ту и за друг трагед мы уж получ дан от сочувств нам офицер сбу"
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.22") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.23"), //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.24") //$NON-NLS-1$
);
}
public void testJapanese() throws IOException {
for (boolean filterStopWords: new Boolean[]{true, false}) {
- comparisonTest("jpn",
+ comparisonTest("jpn", //$NON-NLS-1$
filterStopWords,
- // I hope this is not offensive text.
- "高林純示 生態学研究センター教授らの研究グループと松井健二 山口大学医学系研究科(農学系)教授らの研究グループは、",
- "高林 林純 純示 生態 態学 学研 研究 究セ セン ンタ ター ー教 教授 授ら らの の研 研究 究グ グル ルー " +
- "ープ プと と松 松井 井健 健二 山口 口大 大学 学医 医学 学系 系研 " +
- "研究 究科 農学 学系 教授 授ら らの の研 研究 究グ グル ルー ープ プは");
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.26"), //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.27") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.28") + //$NON-NLS-1$
+ NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.29")); //$NON-NLS-1$
}
}
public void testConfiguredLanguages() {
- checkConfig("BrazilianAnalyzer", "por", "pt");
- checkConfig("ChineseAnalyzer", "zho", "chi", "zh");
- checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko");
- checkConfig("CzechAnalyzer", "ces", "cze", "cs");
- checkConfig("DutchAnalyzer", "dut", "nld", "nl");
- checkConfig("GermanAnalyzer", "deu", "ger", "de");
- checkConfig("GreekAnalyzer", "gre", "ell", "el");
- checkConfig("RussianAnalyzer", "rus", "ru");
- checkConfig("ThaiAnalyzer", "th", "tha");
- checkConfig("StandardAnalyzer", "en", "eng", "", null);
+ checkConfig("BrazilianAnalyzer", "por", "pt"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ checkConfig("ChineseAnalyzer", "zho", "chi", "zh"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
+ checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$
+ checkConfig("CzechAnalyzer", "ces", "cze", "cs"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
+ checkConfig("DutchAnalyzer", "dut", "nld", "nl"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
+ checkConfig("GermanAnalyzer", "deu", "ger", "de"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
+ checkConfig("GreekAnalyzer", "gre", "ell", "el"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
+ checkConfig("RussianAnalyzer", "rus", "ru"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ checkConfig("ThaiAnalyzer", "th", "tha"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ checkConfig("StandardAnalyzer", "en", "eng", "", null); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
}
private void checkConfig(String classname, String ...langs) {
@@ -166,7 +164,7 @@
// if (lang != null && lang.length()==3)
{
assertEquals(classname, getAnalyzer(lang,true).getClass().getSimpleName());
- assertEquals(classname, getAnalyzer(lang+"-x-foobar",true).getClass().getSimpleName());
+ assertEquals(classname, getAnalyzer(lang+NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.0"),true).getClass().getSimpleName()); //$NON-NLS-1$
}
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java 2014-05-08 03:12:55 UTC (rev 8230)
@@ -0,0 +1,21 @@
+package com.bigdata.search;
+
+import java.util.MissingResourceException;
+import java.util.ResourceBundle;
+
+public class NonEnglishExamples {
+ private static final String BUNDLE_NAME = "com.bigdata.search.examples"; //$NON-NLS-1$
+
+ private static final ResourceBundle RESOURCE_BUNDLE = ResourceBundle.getBundle(BUNDLE_NAME);
+
+ private NonEnglishExamples() {
+ }
+
+ public static String getString(String key) {
+ try {
+ return RESOURCE_BUNDLE.getString(key);
+ } catch (MissingResourceException e) {
+ return '!' + key + '!';
+ }
+ }
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties 2014-05-08 03:12:55 UTC (rev 8230)
@@ -0,0 +1,17 @@
+AbstractAnalyzerFactoryTest.0=-x-foobar
+AbstractAnalyzerFactoryTest.10=Hanoi - Im Streit um die Vorherrschaft im S\u00FCdchinesischen Meer ist es zu einer
+AbstractAnalyzerFactoryTest.11=erneuten Auseinandersetzung gekommen:
+AbstractAnalyzerFactoryTest.12=hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm
+AbstractAnalyzerFactoryTest.14=\u041E\u043D\u0438 \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D\u043D\u044B \u043F\u043E\u043B\u043D\u043E\u0441\u0442\u044C\u044E \u0438 \u0437\u0430 \u0442\u0443, \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433\u0443\u044E \u0442\u0440\u0430\u0433\u0435\u0434\u0438\u044E.
+AbstractAnalyzerFactoryTest.15=\u041C\u044B \u0443\u0436\u0435 \u043F\u043E\u043B\u0443\u0447\u0438\u043B\u0438 \u0434\u0430\u043D\u043D\u044B\u0435 \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432\u0443\u044E\u0449\u0438\u0445 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440\u043E\u0432 \u0421\u0411\u0423.
+AbstractAnalyzerFactoryTest.16=\u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D \u043F\u043E\u043B\u043D\u043E\u0441\u0442 \u0442\u0443 \u0434\u0440\u0443\u0433 \u0442\u0440\u0430\u0433\u0435\u0434 \u043F\u043E\u043B\u0443\u0447 \u0434\u0430\u043D \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440 \u0441\u0431\u0443
+AbstractAnalyzerFactoryTest.18=Hanoi - Im Streit um die Vorherrschaft im S\u00FCdchinesischen Meer ist es zu einer
+AbstractAnalyzerFactoryTest.19=erneuten Auseinandersetzung gekommen:
+AbstractAnalyzerFactoryTest.20=hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm
+AbstractAnalyzerFactoryTest.22=\u041E\u043D\u0438 \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D\u043D\u044B \u043F\u043E\u043B\u043D\u043E\u0441\u0442\u044C\u044E \u0438 \u0437\u0430 \u0442\u0443, \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433\u0443\u044E \u0442\u0440\u0430\u0433\u0435\u0434\u0438\u044E.
+AbstractAnalyzerFactoryTest.23=\u041C\u044B \u0443\u0436\u0435 \u043F\u043E\u043B\u0443\u0447\u0438\u043B\u0438 \u0434\u0430\u043D\u043D\u044B\u0435 \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432\u0443\u044E\u0449\u0438\u0445 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440\u043E\u0432 \u0421\u0411\u0423.
+AbstractAnalyzerFactoryTest.24=\u043E\u043D \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D \u043F\u043E\u043B\u043D\u043E\u0441\u0442 \u0438 \u0437\u0430 \u0442\u0443 \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433 \u0442\u0440\u0430\u0433\u0435\u0434 \u043C\u044B \u0443\u0436 \u043F\u043E\u043B\u0443\u0447 \u0434\u0430\u043D \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440 \u0441\u0431\u0443
+AbstractAnalyzerFactoryTest.26=\u9AD8\u6797\u7D14\u793A \u751F\u614B\u5B66\u7814\u7A76\u30BB\u30F3\u30BF\u30FC\u6559\u6388\u3089\u306E\u7814\u7A76\u30B0\u30EB\u30FC\u30D7\u3068\u677E\u4E95\u5065\u4E8C \u5C71\u53E3\u5927\u5B66\u533B\u5B66\u7CFB\u7814\u7A76\u79D1\uFF08\u8FB2\u5B66\u7CFB\uFF09\u6559\u6388\u3089\u306E\u7814\u7A76\u30B0\u30EB\u30FC\u30D7\u306F\u3001
+AbstractAnalyzerFactoryTest.27=\u9AD8\u6797 \u6797\u7D14 \u7D14\u793A \u751F\u614B \u614B\u5B66 \u5B66\u7814 \u7814\u7A76 \u7A76\u30BB \u30BB\u30F3 \u30F3\u30BF \u30BF\u30FC \u30FC\u6559 \u6559\u6388 \u6388\u3089 \u3089\u306E \u306E\u7814 \u7814\u7A76 \u7A76\u30B0 \u30B0\u30EB \u30EB\u30FC
+AbstractAnalyzerFactoryTest.28=\u30FC\u30D7 \u30D7\u3068 \u3068\u677E \u677E\u4E95 \u4E95\u5065 \u5065\u4E8C \u5C71\u53E3 \u53E3\u5927 \u5927\u5B66 \u5B66\u533B \u533B\u5B66 \u5B66\u7CFB \u7CFB\u7814
+AbstractAnalyzerFactoryTest.29=\u7814\u7A76 \u7A76\u79D1 \u8FB2\u5B66 \u5B66\u7CFB \u6559\u6388 \u6388\u3089 \u3089\u306E \u306E\u7814 \u7814\u7A76 \u7A76\u30B0 \u30B0\u30EB \u30EB\u30FC \u30FC\u30D7 \u30D7\u306F
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-08 17:53:55
|
Revision: 8231
http://sourceforge.net/p/bigdata/code/8231
Author: mrpersonick
Date: 2014-05-08 17:53:52 +0000 (Thu, 08 May 2014)
Log Message:
-----------
rolling back changes to build.xml and RESTServlet from r8223
Revision Links:
--------------
http://sourceforge.net/p/bigdata/code/8223
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
branches/BIGDATA_RELEASE_1_3_0/build.xml
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-05-08 03:12:55 UTC (rev 8230)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-05-08 17:53:52 UTC (rev 8231)
@@ -1,157 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.rdf.sail.webapp;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.log4j.Logger;
-
-import com.bigdata.blueprints.BigdataGraphBulkLoad;
-import com.bigdata.rdf.sail.BigdataSailRepositoryConnection;
-import com.bigdata.rdf.sail.webapp.client.MiniMime;
-import com.bigdata.rdf.store.AbstractTripleStore;
-import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;
-
-/**
- * Helper servlet for the blueprints layer.
- */
-public class BlueprintsServlet extends BigdataRDFServlet {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
-
- static private final transient Logger log = Logger.getLogger(BlueprintsServlet.class);
-
- static public final List<String> mimeTypes = Arrays.asList(new String[] {
- "application/graphml+xml"
- }) ;
-
- /**
- * Flag to signify a blueprints operation.
- */
- static final transient String ATTR_BLUEPRINTS = "blueprints";
-
-// /**
-// * Flag to signify a convert operation. POST an RDF document with a
-// * content type and an accept header for what it should be converted to.
-// */
-// static final transient String ATTR_CONVERT = "convert";
-
-
- public BlueprintsServlet() {
-
- }
-
- /**
- * Post a GraphML file to the blueprints layer.
- */
- @Override
- protected void doPost(final HttpServletRequest req,
- final HttpServletResponse resp) throws IOException {
-
- final long begin = System.currentTimeMillis();
-
- final String namespace = getNamespace(req);
-
- final long timestamp = getTimestamp(req);
-
- final AbstractTripleStore tripleStore = getBigdataRDFContext()
- .getTripleStore(namespace, timestamp);
-
- if (tripleStore == null) {
- /*
- * There is no such triple/quad store instance.
- */
- buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN);
- return;
- }
-
- final String contentType = req.getContentType();
-
- if (log.isInfoEnabled())
- log.info("Request body: " + contentType);
-
- final String mimeType = new MiniMime(contentType).getMimeType().toLowerCase();
-
- if (!mimeTypes.contains(mimeType)) {
-
- buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN,
- "Content-Type not recognized as graph data: " + contentType);
-
- return;
-
- }
-
- try {
-
- BigdataSailRepositoryConnection conn = null;
- try {
-
- conn = getBigdataRDFContext()
- .getUnisolatedConnection(namespace);
-
- final BigdataGraphBulkLoad graph = new BigdataGraphBulkLoad(conn);
-
- GraphMLReader.inputGraph(graph, req.getInputStream());
-
- graph.commit();
-
- final long nmodified = graph.getMutationCountLastCommit();
-
- final long elapsed = System.currentTimeMillis() - begin;
-
- reportModifiedCount(resp, nmodified, elapsed);
-
- return;
-
- } catch(Throwable t) {
-
- if(conn != null)
- conn.rollback();
-
- throw new RuntimeException(t);
-
- } finally {
-
- if (conn != null)
- conn.close();
-
- }
-
- } catch (Exception ex) {
-
- // Will be rendered as an INTERNAL_ERROR.
- throw new RuntimeException(ex);
-
- }
-
- }
-
-}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-05-08 03:12:55 UTC (rev 8230)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-05-08 17:53:52 UTC (rev 8231)
@@ -59,7 +59,6 @@
private DeleteServlet m_deleteServlet;
private UpdateServlet m_updateServlet;
private WorkbenchServlet m_workbenchServlet;
- private BlueprintsServlet m_blueprintsServlet;
/**
* @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/584">
@@ -85,7 +84,6 @@
m_deleteServlet = new DeleteServlet();
m_describeServlet = new DescribeCacheServlet();
m_workbenchServlet = new WorkbenchServlet();
- m_blueprintsServlet = new BlueprintsServlet();
m_queryServlet.init(getServletConfig());
m_insertServlet.init(getServletConfig());
@@ -93,7 +91,6 @@
m_deleteServlet.init(getServletConfig());
m_describeServlet.init(getServletConfig());
m_workbenchServlet.init(getServletConfig());
- m_blueprintsServlet.init(getServletConfig());
}
@@ -133,11 +130,6 @@
m_workbenchServlet = null;
}
- if (m_blueprintsServlet != null) {
- m_blueprintsServlet.destroy();
- m_blueprintsServlet = null;
- }
-
super.destroy();
}
@@ -250,10 +242,6 @@
m_workbenchServlet.doPost(req, resp);
- } else if (req.getParameter(BlueprintsServlet.ATTR_BLUEPRINTS) != null) {
-
- m_blueprintsServlet.doPost(req, resp);
-
} else if (req.getParameter("uri") != null) {
// INSERT via w/ URIs
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 03:12:55 UTC (rev 8230)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 17:53:52 UTC (rev 8231)
@@ -46,131 +46,128 @@
<project name="bigdata" default="bundleJar" basedir=".">
- <property file="build.properties" />
+ <property file="build.properties" />
- <!-- build-time classpath. -->
- <path id="build.classpath">
- <fileset dir="${bigdata.dir}/bigdata/lib">
- <include name="**/*.jar" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-jini/lib">
- <include name="**/*.jar" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-rdf/lib">
- <include name="**/*.jar" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-sails/lib">
- <include name="**/*.jar" />
- </fileset>
+ <!-- build-time classpath. -->
+ <path id="build.classpath">
+ <fileset dir="${bigdata.dir}/bigdata/lib">
+ <include name="**/*.jar" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-jini/lib">
+ <include name="**/*.jar" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-rdf/lib">
+ <include name="**/*.jar" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-sails/lib">
+ <include name="**/*.jar" />
+ </fileset>
<fileset dir="${bigdata.dir}/bigdata-gom/lib">
<include name="**/*.jar" />
</fileset>
- <fileset dir="${bigdata.dir}/bigdata-blueprints/lib">
- <include name="**/*.jar" />
- </fileset>
- <!--
- <fileset dir="${bigdata.dir}/ctc-striterator/lib">
- <include name="**/*.jar" />
- </fileset> -->
- </path>
+ <!--
+ <fileset dir="${bigdata.dir}/ctc-striterator/lib">
+ <include name="**/*.jar" />
+ </fileset> -->
+ </path>
- <!-- runtime classpath w/o install. -->
- <path id="runtime.classpath">
- <pathelement location="${build.dir}/classes" />
- <path refid="build.classpath" />
- </path>
+ <!-- runtime classpath w/o install. -->
+ <path id="runtime.classpath">
+ <pathelement location="${build.dir}/classes" />
+ <path refid="build.classpath" />
+ </path>
- <!-- classpath as installed. -->
- <!-- @todo .so and .dll -->
- <path id="install.classpath">
- <fileset dir="${install.lib.dir}">
- <include name="**/*.jar" />
- </fileset>
- </path>
+ <!-- classpath as installed. -->
+ <!-- @todo .so and .dll -->
+ <path id="install.classpath">
+ <fileset dir="${install.lib.dir}">
+ <include name="**/*.jar" />
+ </fileset>
+ </path>
- <target name="clean" description="cleans everything in [build.dir].">
- <delete dir="${build.dir}" />
- <delete dir="${bigdata.dir}/bigdata-test" quiet="true" />
- <delete dir="${bigdata.dir}/dist" quiet="true" />
- </target>
+ <target name="clean" description="cleans everything in [build.dir].">
+ <delete dir="${build.dir}" />
+ <delete dir="${bigdata.dir}/bigdata-test" quiet="true" />
+ <delete dir="${bigdata.dir}/dist" quiet="true" />
+ </target>
- <target name="prepare">
- <!-- setup ${version} for regular or snapshot. -->
- <tstamp>
- <format property="today" pattern="yyyyMMdd" locale="en,US" />
- <format property="osgiDate" pattern="yyyyMMdd" locale="en,US" />
- </tstamp>
+ <target name="prepare">
+ <!-- setup ${version} for regular or snapshot. -->
+ <tstamp>
+ <format property="today" pattern="yyyyMMdd" locale="en,US" />
+ <format property="osgiDate" pattern="yyyyMMdd" locale="en,US" />
+ </tstamp>
<condition property="client-version" value="bigdata-client-${build.ver}-${today}" else="bigdata-client-${build.ver}">
<istrue value="${snapshot}" />
</condition>
- <condition property="version" value="bigdata-${build.ver}-${today}" else="bigdata-${build.ver}">
- <istrue value="${snapshot}" />
- </condition>
- <condition property="osgi.version" value="${build.ver.osgi}.${osgiDate}" else="${build.ver.osgi}.0">
- <istrue value="${snapshot}" />
- </condition>
- <!--<echo message="today=${today}"/>-->
- <echo message="version=${version}" />
- <available property="svn.checkout" file="./.svn/entries"/>
- <echo message="svn.checkout=${svn.checkout}" />
- <!-- create directories. -->
- <mkdir dir="${build.dir}" />
- <mkdir dir="${build.dir}/classes" />
- <mkdir dir="${build.dir}/docs" />
- <mkdir dir="${build.dir}/lib" />
- </target>
+ <condition property="version" value="bigdata-${build.ver}-${today}" else="bigdata-${build.ver}">
+ <istrue value="${snapshot}" />
+ </condition>
+ <condition property="osgi.version" value="${build.ver.osgi}.${osgiDate}" else="${build.ver.osgi}.0">
+ <istrue value="${snapshot}" />
+ </condition>
+ <!--<echo message="today=${today}"/>-->
+ <echo message="version=${version}" />
+ <available property="svn.checkout" file="./.svn/entries"/>
+ <echo message="svn.checkout=${svn.checkout}" />
+ <!-- create directories. -->
+ <mkdir dir="${build.dir}" />
+ <mkdir dir="${build.dir}/classes" />
+ <mkdir dir="${build.dir}/docs" />
+ <mkdir dir="${build.dir}/lib" />
+ </target>
- <target name="buildinfo" depends="prepare" if="svn.checkout"
- description="Generate a BuildInfo.java file with metadata about this build.">
- <property name="buildinfo.file"
- value="${bigdata.dir}\bigdata\src\java\com\bigdata\BuildInfo.java"/>
- <loadfile property="svn.revision" srcFile="./.svn/entries">
- <filterchain>
- <headfilter lines="1" skip="3"/>
- <striplinebreaks/>
- </filterchain>
- </loadfile>
- <loadfile property="svn.url" srcFile="./.svn/entries">
- <filterchain>
- <headfilter lines="1" skip="4"/>
- <striplinebreaks/>
- </filterchain>
- </loadfile>
- <tstamp>
- <format property="build.timestamp" pattern="yyyy/MM/dd HH:mm:ss z" locale="en,US" />
- </tstamp>
- <property environment="env" />
- <echo file="${buildinfo.file}">
+ <target name="buildinfo" depends="prepare" if="svn.checkout"
+ description="Generate a BuildInfo.java file with metadata about this build.">
+ <property name="buildinfo.file"
+ value="${bigdata.dir}\bigdata\src\java\com\bigdata\BuildInfo.java"/>
+ <loadfile property="svn.revision" srcFile="./.svn/entries">
+ <filterchain>
+ <headfilter lines="1" skip="3"/>
+ <striplinebreaks/>
+ </filterchain>
+ </loadfile>
+ <loadfile property="svn.url" srcFile="./.svn/entries">
+ <filterchain>
+ <headfilter lines="1" skip="4"/>
+ <striplinebreaks/>
+ </filterchain>
+ </loadfile>
+ <tstamp>
+ <format property="build.timestamp" pattern="yyyy/MM/dd HH:mm:ss z" locale="en,US" />
+ </tstamp>
+ <property environment="env" />
+ <echo file="${buildinfo.file}">
package com.bigdata;
public class BuildInfo {
public static final String buildVersion="${build.ver}";
public static final String buildVersionOSGI="${build.ver.osgi}";
- public static final String svnRevision="${svn.revision}";
+ public static final String svnRevision="${svn.revision}";
public static final String svnURL="${svn.url}";
- public static final String buildTimestamp="${build.timestamp}";
- public static final String buildUser="${user.name}";
- public static final String buildHost="${env.COMPUTERNAME}";
- public static final String osArch="${os.arch}";
- public static final String osName="${os.name}";
- public static final String osVersion="${os.version}";
+ public static final String buildTimestamp="${build.timestamp}";
+ public static final String buildUser="${user.name}";
+ public static final String buildHost="${env.COMPUTERNAME}";
+ public static final String osArch="${os.arch}";
+ public static final String osName="${os.name}";
+ public static final String osVersion="${os.version}";
}
</echo>
- <loadfile property="buildinfo" srcFile="${buildinfo.file}"/>
- <echo message="${buildinfo}"/>
- </target>
-
+ <loadfile property="buildinfo" srcFile="${buildinfo.file}"/>
+ <echo message="${buildinfo}"/>
+ </target>
+
<!-- Note: I had to explicitly specify the location of the jdepend jar
in Preferences => Ant => Runtime in order to get this to work under
eclipse. This is odd since eclipse bundles the jar with the ant
plugin.
- http://www.ryanlowe.ca/blog/archives/001038_junit_ant_task_doesnt_work_in_eclipse.php
-
- outputfile="${build.dir}/docs/jdepend-report.txt"
+ http://www.ryanlowe.ca/blog/archives/001038_junit_ant_task_doesnt_work_in_eclipse.php
+
+ outputfile="${build.dir}/docs/jdepend-report.txt"
-->
<target name="jdepend" depends="jar">
<jdepend format="xml"
- outputfile="${build.dir}/docs/jdepend-report.xml">
+ outputfile="${build.dir}/docs/jdepend-report.xml">
<exclude name="java.*"/>
<exclude name="javax.*"/>
<classespath>
@@ -185,89 +182,84 @@
<!-- Note: This will (re-)compile the SPARQL grammar. Compilation is -->
<!-- fast, but javacc must be installed. -->
<target name="javacc" depends="prepare"
- description="Compile the SPARQL grammar.">
+ description="Compile the SPARQL grammar.">
<jjtree
- javacchome="${javacc.home}"
- target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jjt"
- outputfile="sparql.jj"
+ javacchome="${javacc.home}"
+ target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jjt"
+ outputfile="sparql.jj"
outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/"
/>
<javacc
- javacchome="${javacc.home}"
- target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jj"
- outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/"
- />
+ javacchome="${javacc.home}"
+ target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jj"
+ outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/"
+ />
</target>
- <!-- Note: javac error results often if verbose is disabled. -->
- <!-- I was able to perform a build with 1.6.0_07. -->
- <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. -->
- <target name="compile" depends="prepare, buildinfo"
- description="Compile the code base.">
- <mkdir dir="${build.dir}" />
- <echo>javac</echo>
- <echo> destdir="${build.dir}"</echo>
- <echo> fork="yes"</echo>
- <echo> memorymaximumsize="1g"</echo>
- <echo> debug="yes"</echo>
- <echo> debuglevel="${javac.debuglevel}"</echo>
- <echo> verbose="${javac.verbose}"</echo>
- <echo> encoding="${javac.encoding}"</echo>
- <echo> source="${javac.source}"</echo>
- <echo> target="${javac.target}"</echo>
- <javac classpathref="build.classpath"
- destdir="${build.dir}/classes"
- fork="yes"
- memorymaximumsize="1g"
- debug="${javac.debug}"
- debuglevel="${javac.debuglevel}"
- verbose="${javac.verbose}"
- encoding="${javac.encoding}"
- source="${javac.source}"
- target="${javac.target}"
- includeantruntime="false"
- >
- <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling -->
- <src path="${bigdata.dir}/bigdata/src/java" />
- <src path="${bigdata.dir}/bigdata-jini/src/java" />
+ <!-- Note: javac error results often if verbose is disabled. -->
+ <!-- I was able to perform a build with 1.6.0_07. -->
+ <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. -->
+ <target name="compile" depends="prepare, buildinfo"
+ description="Compile the code base.">
+ <mkdir dir="${build.dir}" />
+ <echo>javac</echo>
+ <echo> destdir="${build.dir}"</echo>
+ <echo> fork="yes"</echo>
+ <echo> memorymaximumsize="1g"</echo>
+ <echo> debug="yes"</echo>
+ <echo> debuglevel="${javac.debuglevel}"</echo>
+ <echo> verbose="${javac.verbose}"</echo>
+ <echo> encoding="${javac.encoding}"</echo>
+ <echo> source="${javac.source}"</echo>
+ <echo> target="${javac.target}"</echo>
+ <javac classpathref="build.classpath"
+ destdir="${build.dir}/classes"
+ fork="yes"
+ memorymaximumsize="1g"
+ debug="${javac.debug}"
+ debuglevel="${javac.debuglevel}"
+ verbose="${javac.verbose}"
+ encoding="${javac.encoding}"
+ source="${javac.source}"
+ target="${javac.target}"
+ includeantruntime="false"
+ >
+ <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling -->
+ <src path="${bigdata.dir}/bigdata/src/java" />
+ <src path="${bigdata.dir}/bigdata-jini/src/java" />
<src path="${bigdata.dir}/bigdata-rdf/src/java" />
- <src path="${bigdata.dir}/bigdata-blueprints/src/java" />
- <src path="${bigdata.dir}/bigdata-sails/src/java" />
+ <src path="${bigdata.dir}/bigdata-sails/src/java" />
<src path="${bigdata.dir}/bigdata-gom/src/java" />
<src path="${bigdata.dir}/bigdata-ganglia/src/java" />
<src path="${bigdata.dir}/bigdata-gas/src/java" />
- <src path="${bigdata.dir}/ctc-striterators/src/java" />
- <!-- Do not include the unit tests @todo conditionally include?
+ <src path="${bigdata.dir}/ctc-striterators/src/java" />
+ <!-- Do not include the unit tests @todo conditionally include?
<src path="${bigdata.dir}/bigdata/src/test"/>
<src path="${bigdata.dir}/bigdata-jini/src/test"/>
<src path="${bigdata.dir}/bigdata-rdf/src/test"/>
<src path="${bigdata.dir}/bigdata-sails/src/test"/>
-->
- <compilerarg value="-version" />
- </javac>
- <!-- copy resources. -->
- <copy toDir="${build.dir}/classes">
- <fileset dir="${bigdata.dir}/bigdata/src/java">
- <exclude name="**/*.java" />
- <exclude name="**/package.html" />
- <exclude name="**/BytesUtil.c" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-jini/src/java">
- <exclude name="**/*.java" />
- <exclude name="**/package.html" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-rdf/src/java">
- <exclude name="**/*.java" />
- <exclude name="**/package.html" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-sails/src/java">
- <exclude name="**/*.java" />
- <exclude name="**/package.html" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-blueprints/src/java">
+ <compilerarg value="-version" />
+ </javac>
+ <!-- copy resources. -->
+ <copy toDir="${build.dir}/classes">
+ <fileset dir="${bigdata.dir}/bigdata/src/java">
<exclude name="**/*.java" />
<exclude name="**/package.html" />
+ <exclude name="**/BytesUtil.c" />
</fileset>
+ <fileset dir="${bigdata.dir}/bigdata-jini/src/java">
+ <exclude name="**/*.java" />
+ <exclude name="**/package.html" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-rdf/src/java">
+ <exclude name="**/*.java" />
+ <exclude name="**/package.html" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-sails/src/java">
+ <exclude name="**/*.java" />
+ <exclude name="**/package.html" />
+ </fileset>
<fileset dir="${bigdata.dir}/bigdata-gom/src/java">
<exclude name="**/*.java" />
<exclude name="**/package.html" />
@@ -276,37 +268,37 @@
<exclude name="**/*.java" />
<exclude name="**/package.html" />
</fileset>
- <!-- Note: This simple copy works so long as there is just one service
- provider file per interface. It will not combine (append) multiple
- files for the same interface. -->
- <fileset dir="${bigdata.dir}/bigdata-rdf/src/resources/service-providers">
- <include name="META-INF/**" />
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server">
- <include name="META-INF/**" />
- </fileset>
- <!-- Copy WAR resources for the embedded NanoSparqlServer. -->
- <!-- TODO: This could cause problem since the files exist in -->
- <!-- both the JAR and the staged artifact (bigdata/var/jetty). -->
- <!-- This makes it difficult to override the ones in the JAR. -->
- <!-- See also "run-junit" for an alterative to getting CI to run. -->
- <!-- newer approach. -->
- <!--fileset dir="${bigdata.dir}/bigdata-war/src">
- <include name="**"/>
- </fileset-->
- <!-- older approach. -->
- <fileset dir="." includes="bigdata-war/src/**"/>
- </copy>
- </target>
+ <!-- Note: This simple copy works so long as there is just one service
+ provider file per interface. It will not combine (append) multiple
+ files for the same interface. -->
+ <fileset dir="${bigdata.dir}/bigdata-rdf/src/resources/service-providers">
+ <include name="META-INF/**" />
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server">
+ <include name="META-INF/**" />
+ </fileset>
+ <!-- Copy WAR resources for the embedded NanoSparqlServer. -->
+ <!-- TODO: This could cause problem since the files exist in -->
+ <!-- both the JAR and the staged artifact (bigdata/var/jetty). -->
+ <!-- This makes it difficult to override the ones in the JAR. -->
+ <!-- See also "run-junit" for an alternative to getting CI to run. -->
+ <!-- newer approach. -->
+ <!--fileset dir="${bigdata.dir}/bigdata-war/src">
+ <include name="**"/>
+ </fileset-->
+ <!-- older approach. -->
+ <fileset dir="." includes="bigdata-war/src/**"/>
+ </copy>
+ </target>
- <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. -->
- <target name="bundleJar" depends="clean, bundle, jar" description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory.">
- <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib"/>
- <!--<property name="myclasspath" refid="runtime.classpath" />
- <echo message="${myclasspath}"/>-->
- </target>
+ <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. -->
+ <target name="bundleJar" depends="clean, bundle, jar" description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory.">
+ <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib"/>
+ <!--<property name="myclasspath" refid="runtime.classpath" />
+ <echo message="${myclasspath}"/>-->
+ </target>
- <target name="sourceJar" depends="prepare" description="Generates the sources jar.">
+ <target name="sourceJar" depends="prepare" description="Generates the sources jar.">
<jar destfile="${build.dir}/${version}-sources.jar">
<fileset dir="${bigdata.dir}/bigdata/src/java" />
<fileset dir="${bigdata.dir}/bigdata/src/samples" />
@@ -321,192 +313,187 @@
<fileset dir="${bigdata.dir}/bigdata-gom/src/java" />
<fileset dir="${bigdata.dir}/bigdata-gom/src/samples" />
<fileset dir="${bigdata.dir}/ctc-striterators/src/java" />
- <fileset dir="${bigdata.dir}/bigdata-blueprints/src/java" />
</jar>
</target>
-
- <!-- This generates the jar, but does not bundled the dependencies.
- See 'bundleJar'. -->
- <target name="jar" depends="compile" description="Generates the jar (see also bundleJar).">
- <jar destfile="${build.dir}/${version}.jar">
- <fileset dir="${build.dir}/classes" excludes="test/**" />
- <!-- Copy the copyright top-level NOTICE file. -->
- <fileset file="${bigdata.dir}/NOTICE"/>
- <!-- Copy the copyright top-level LICENSE file. -->
- <fileset file="${bigdata.dir}/LICENSE.txt"/>
- <!-- Copy licenses for any project from which have imported something. -->
- <fileset dir="${bigdata.dir}/bigdata">
- <include name="LEGAL/apache-license-2_0.txt"/>
- </fileset>
- <fileset dir="${bigdata.dir}/bigdata-rdf">
- <include name="LEGAL/sesame2.x-license.txt"/>
- </fileset>
- <manifest>
- <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>-->
- </manifest>
- </jar>
- </target>
-
- <!-- Deploy the JAR to the maven repository. -->
+
+ <!-- This generates the jar, but does not bundled the dependencies.
+ See 'bundleJar'. -->
+ <target name="jar" depends="compile" description="Generates the jar (see also bundleJar).">
+ <jar destfile="${build.dir}/${version}.jar">
+ <fileset dir="${build.dir}/classes" excludes="test/**" />
+ <!-- Copy the copyright top-level NOTICE file. -->
+ <fileset file="${bigdata.dir}/NOTICE"/>
+ <!-- Copy the copyright top-level LICENSE file. -->
+ <fileset file="${bigdata.dir}/LICENSE.txt"/>
+ <!-- Copy licenses for any project from which have imported something. -->
+ <fileset dir="${bigdata.dir}/bigdata">
+ <include name="LEGAL/apache-license-2_0.txt"/>
+ </fileset>
+ <fileset dir="${bigdata.dir}/bigdata-rdf">
+ <include name="LEGAL/sesame2.x-license.txt"/>
+ </fileset>
+ <manifest>
+ <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>-->
+ </manifest>
+ </jar>
+ </target>
+
+ <!-- Deploy the JAR to the maven repository. -->
<target name="maven-deploy" depends="jar...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-13 16:15:39
|
Revision: 8294
http://sourceforge.net/p/bigdata/code/8294
Author: thompsonbry
Date: 2014-05-13 16:15:33 +0000 (Tue, 13 May 2014)
Log Message:
-----------
Commit includes fixes for #920 (content-negotiation) and #624 (HA Load
Balancer). I have run through the NSS, AST evaluation, and QUADS mode
test suites and everything is green. The TestAll_LBS test suite is
also green (HA).
- CONNEG was broken in previous releases and would return available
Content-Type corresponding to the least desired MIME Type as
specified by the Accept header. See #920. Changes to ConnegScore,
ConnegUtil, TestConneg.
- RemoteRepository: A bug was identified where the openrdf binary RDF
interchange type could not be used because a non-null charset would
cause a Reader to be allocated rather than an InputStream within the
BackgroundGraphResult. Historically, due to #920, this interchange
type was not preferred and hence this code path was not tested. The
fix was to use the default charset for the format associated with
the Content-Type of the response unless overridden by an explicit
charset in the encoding.
- Added a new LBS policy (CountersLBSPolicy) based on the
/bigdata/counters servlet. This policy is more chatty than the
GangliaLBSPolicy, but it can be used in environments that do not
support multicast and can be secured using standard techniques for
httpd. The GangliaLBSPolicy was heavily refactored to create an
abstract base class that is now shared by both the CountersLBSPolicy
and the GangliaLBSPolicy. Added documentation to web.xml and the
HALoadBalancer page of the wiki. See #624.
- Release a new bigdata-ganglia.jar (v1.0.4). This release permits
the Comparator to be null, which is useful since we want to order
the hosts based on our IHostScoringRule rather than a simple ganglia
metric comparison.
- AbstractStatisticsCollection: Added @Override tags and FIXME on
getCounters().
- CounterSet: private and final attributes. ignoring some unchecked
conversions or raw types. @Override attributes.
- ICounterSetSelector: expanded the interface slightly to allow
optional filtering for HistoryInstruments (was implicit and
mandatory). This was necessary in order to support XML rendering of
/bigdata/counters.
- CounterSetFormat: Added to support CONNEG for the different kinds of
counter set interchange (text/plain, text/html, application/xml).
This was in support of the new CountersLBSPolicy.
- IOStatCollector, VMStatCollector: Fixed some bugs in the OSX
platform metrics collectors, mostly around data races.
- BigdataSailRemoteRepositoryConnection: added link to #914 (Set
timeout on remote query). I have not worked on this ticket yet, but
these comments mark the integration points. The other integration
point is BigdataRDFContext.newQuery(), which is also linked to the
ticket in this commit.
- CountersServlet: modified to support CONNEG.
- ConnegOptions: added toString(). clean up.
- jetty.xml: refactored per guidance from webtide.
- web.xml: comments on the CountersLBSPolicy.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/VMStatCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetBTreeSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/ICounterSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/URLQueryModel.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/TextRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XHTMLRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XMLRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll_LBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/NOPLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/RoundRobinLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/DefaultHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/LoadOneHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_CountersLBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostMetrics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostTable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHostMetrics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/NOPHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/CounterSetHostMetricsWrapper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/CountersLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/DefaultHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaHostMetricWrapper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/TestAbstractHostLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/TestAll.java
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/HostTable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/IHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/NOPHostScoringRule.java
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-13 12:41:52 UTC (rev 8293)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-13 16:15:33 UTC (rev 8294)
@@ -76,7 +76,7 @@
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/>
- <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar"/>
+ <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.10"/>
<classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/>
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
===================================================================
(Binary files differ)
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar 2014-05-13 12:41:52 UTC (rev 8293)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar 2014-05-13 16:15:33 UTC (rev 8294)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-05-13 12:41:52 UTC (rev 8293)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-05-13 16:15:33 UTC (rev 8294)
@@ -137,6 +137,7 @@
* The interval in seconds at which the counter values are read from the
* host platform.
*/
+ @Override
public int getInterval() {
return interval;
@@ -225,8 +226,15 @@
* <p>
* Note: Subclasses MUST extend this method to initialize their own
* counters.
+ *
+ * TODO Why does this use the older <code>synchronized</code> pattern with a
+ * shared {@link #countersRoot} object rather than returning a new object
+ * per request? Check assumptions in the scale-out and local journal code
+ * bases for this.
*/
- synchronized public CounterSet getCounters() {
+ @Override
+ synchronized
+ public CounterSet getCounters() {
if (countersRoot == null) {
@@ -319,6 +327,7 @@
serviceRoot.addCounter(IProcessCounters.Memory_runtimeFreeMemory,
new Instrument<Long>() {
+ @Override
public void sample() {
setValue(Runtime.getRuntime().freeMemory());
}
@@ -326,6 +335,7 @@
serviceRoot.addCounter(IProcessCounters.Memory_runtimeTotalMemory,
new Instrument<Long>() {
+ @Override
public void sample() {
setValue(Runtime.getRuntime().totalMemory());
}
@@ -599,6 +609,7 @@
* Start collecting host performance data -- must be extended by the
* concrete subclass.
*/
+ @Override
public void start() {
if (log.isInfoEnabled())
@@ -612,6 +623,7 @@
* Stop collecting host performance data -- must be extended by the concrete
* subclass.
*/
+ @Override
public void stop() {
if (log.isInfoEnabled())
@@ -634,6 +646,7 @@
final Thread t = new Thread() {
+ @Override
public void run() {
AbstractStatisticsCollector.this.stop();
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java 2014-05-13 12:41:52 UTC (rev 8293)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java 2014-05-13 16:15:33 UTC (rev 8294)
@@ -87,7 +87,7 @@
*/
public class CounterSet extends AbstractCounterSet implements ICounterSet {
- static protected final Logger log = Logger.getLogger(CounterSet.class);
+ static private final Logger log = Logger.getLogger(CounterSet.class);
// private String pathx;
private final Map<String,ICounterNode> children = new ConcurrentHashMap<String,ICounterNode>();
@@ -107,7 +107,7 @@
* @param name
* The name of the child.
*/
- private CounterSet(String name,CounterSet parent) {
+ private CounterSet(final String name, final CounterSet parent) {
super(name,parent);
@@ -159,6 +159,9 @@
//
// }
+ /**
+ * Return <code>true</code> iff there are no children.
+ */
public boolean isLeaf() {
return children.isEmpty();
@@ -216,7 +219,6 @@
}
- @SuppressWarnings("unchecked")
private void attach2(final ICounterNode src, final boolean replace) {
if (src == null)
@@ -286,7 +288,7 @@
} else {
- ((Counter)src).parent = this;
+ ((Counter<?>)src).parent = this;
}
@@ -311,7 +313,8 @@
* @return The node -or- <code>null</code> if there is no node with that
* path.
*/
- synchronized public ICounterNode detach(String path) {
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ synchronized public ICounterNode detach(final String path) {
final ICounterNode node = getPath(path);
@@ -347,7 +350,7 @@
* @todo optimize for patterns that are anchored by filtering the child
* {@link ICounterSet}.
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "unchecked", "rawtypes" })
public Iterator<ICounter> counterIterator(final Pattern filter) {
final IStriterator src = new Striterator(directChildIterator(
@@ -391,7 +394,7 @@
*
* @return
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "unchecked", "rawtypes" })
public Iterator<ICounterNode> getNodes(final Pattern filter) {
IStriterator src = ((IStriterator) postOrderIterator())
@@ -414,7 +417,8 @@
}
- @SuppressWarnings("unchecked")
+ @Override
+ @SuppressWarnings({ "unchecked", "rawtypes" })
public Iterator<ICounter> getCounters(final Pattern filter) {
IStriterator src = ((IStriterator) postOrderIterator())
@@ -450,8 +454,9 @@
* When <code>null</code> all directly attached children
* (counters and counter sets) are visited.
*/
- public Iterator directChildIterator(boolean sorted,
- Class<? extends ICounterNode> type) {
+ @SuppressWarnings("rawtypes")
+ public Iterator directChildIterator(final boolean sorted,
+ final Class<? extends ICounterNode> type) {
/*
* Note: In order to avoid concurrent modification problems under
@@ -514,7 +519,7 @@
* child with a post-order traversal of its children and finally visits this
* node itself.
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator postOrderIterator() {
/*
@@ -531,6 +536,7 @@
* child with a pre-order traversal of its children and finally visits this
* node itself.
*/
+ @SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator preOrderIterator() {
/*
@@ -562,7 +568,9 @@
/*
* Expand each child in turn.
*/
- protected Iterator expand(Object childObj) {
+ @Override
+ @SuppressWarnings("rawtypes")
+ protected Iterator expand(final Object childObj) {
/*
* A child of this node.
@@ -603,7 +611,9 @@
/*
* Expand each child in turn.
*/
- protected Iterator expand(Object childObj) {
+ @Override
+ @SuppressWarnings("rawtypes")
+ protected Iterator expand(final Object childObj) {
/*
* A child of this node.
@@ -624,7 +634,8 @@
}
- public ICounterNode getChild(String name) {
+ @Override
+ public ICounterNode getChild(final String name) {
if (name == null)
throw new IllegalArgumentException();
@@ -642,6 +653,7 @@
*
* @return The {@link CounterSet} described by the path.
*/
+ @Override
synchronized public CounterSet makePath(String path) {
if (path == null) {
@@ -740,6 +752,7 @@
* The object that is used to take the measurements from which
* the counter's value will be determined.
*/
+ @SuppressWarnings("rawtypes")
synchronized public ICounter addCounter(final String path,
final IInstrument instrument) {
@@ -767,7 +780,7 @@
}
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "unchecked", "rawtypes" })
private ICounter addCounter2(final String name, final IInstrument instrument) {
if (name == null)
@@ -831,12 +844,14 @@
*
* @throws IOException
*/
+ @Override
public void asXML(Writer w, Pattern filter) throws IOException {
XMLUtility.INSTANCE.writeXML(this, w, filter);
}
+ @Override
public void readXML(final InputStream is,
final IInstrumentFactory instrumentFactory, final Pattern filter)
throws IOException, ParserConfigurationException, SAXException {
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java 2014-05-13 16:15:33 UTC (rev 8294)
@@ -0,0 +1,214 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+Portions of this code are:
+
+Copyright Aduna (http://www.aduna-software.com/) © 2001-2007
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+/*
+ * Created on Jul 25, 2012
+ */
+package com.bigdata.counters.format;
+
+import info.aduna.lang.FileFormat;
+
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import com.bigdata.counters.ICounterSet;
+
+/**
+ * Formats for {@link ICounterSet}s.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class CounterSetFormat extends FileFormat implements Iterable<CounterSetFormat> {
+
+ /**
+ * All known/registered formats for this class.
+ */
+ private static final CopyOnWriteArraySet<CounterSetFormat> formats = new CopyOnWriteArraySet<CounterSetFormat>();
+
+ /**
+ * A thread-safe iterator that will visit all known formats (declared by
+ * {@link Iterable}).
+ */
+ @Override
+ public Iterator<CounterSetFormat> iterator() {
+
+ return formats.iterator();
+
+ }
+
+ /**
+ * Alternative static method signature.
+ */
+ static public Iterator<CounterSetFormat> getFormats() {
+
+ return formats.iterator();
+
+ }
+
+ /**
+ * Text properties file using <code>text/plain</code> and
+ * <code>UTF-8</code>.
+ */
+ public static final CounterSetFormat TEXT = new CounterSetFormat(//
+ "text/plain",//
+ Arrays.asList("text/plain"),//
+ Charset.forName("UTF-8"), //
+ Arrays.asList("counterSet")//
+ );
+
+ /**
+ * XML properties file using <code>application/xml</code> and
+ * <code>UTF-8</code>.
+ */
+ public static final CounterSetFormat XML = new CounterSetFormat(//
+ "application/xml",//
+ Arrays.asList("application/xml"),//
+ Charset.forName("UTF-8"),// charset
+ Arrays.asList("xml")// known-file-extensions
+ );
+
+ /**
+ * XML properties file using <code>text/html</code> and <code>UTF-8</code>.
+ */
+ public static final CounterSetFormat HTML = new CounterSetFormat(//
+ "text/html",//
+ Arrays.asList("text/html"),//
+ Charset.forName("UTF-8"),// charset
+ Arrays.asList("html")// known-file-extensions
+ );
+
+ /**
+ * Registers the specified format.
+ */
+ public static void register(final CounterSetFormat format) {
+
+ formats.add(format);
+
+ }
+
+ static {
+
+ register(HTML);
+ register(TEXT);
+ register(XML);
+
+ }
+
+ /**
+     * Creates a new CounterSetFormat object.
+ *
+ * @param name
+ * The name of the RDF file format, e.g. "RDF/XML".
+ * @param mimeTypes
+ * The MIME types of the RDF file format, e.g.
+ * <tt>application/rdf+xml</tt> for the RDF/XML file format.
+ * The first item in the list is interpreted as the default
+ * MIME type for the format.
+ * @param charset
+ * The default character encoding of the RDF file format.
+ * Specify <tt>null</tt> if not applicable.
+ * @param fileExtensions
+ * The RDF format's file extensions, e.g. <tt>rdf</tt> for
+ * RDF/XML files. The first item in the list is interpreted
+ * as the default file extension for the format.
+ */
+ public CounterSetFormat(final String name,
+ final Collection<String> mimeTypes, final Charset charset,
+ final Collection<String> fileExtensions) {
+
+ super(name, mimeTypes, charset, fileExtensions);
+
+ }
+
+ /**
+     * Tries to determine the appropriate file format based on a MIME type
+     * that describes the content type.
+ *
+ * @param mimeType
+ * A MIME type, e.g. "text/html".
+ * @return An {@link CounterSetFormat} object if the MIME type was
+ * recognized, or <tt>null</tt> otherwise.
+ * @see #forMIMEType(String,PropertiesFormat)
+ * @see #getMIMETypes()
+ */
+ public static CounterSetFormat forMIMEType(final String mimeType) {
+
+ return forMIMEType(mimeType, null);
+
+ }
+
+ /**
+     * Tries to determine the appropriate file format based on a MIME type
+     * that describes the content type. The supplied fallback format will be
+ * returned when the MIME type was not recognized.
+ *
+ * @param mimeType
+     *            A MIME type, e.g. "text/html".
+ * @return An {@link CounterSetFormat} that matches the MIME type, or the
+ * fallback format if the extension was not recognized.
+ * @see #forMIMEType(String)
+ * @see #getMIMETypes()
+ */
+ public static CounterSetFormat forMIMEType(String mimeType,
+ CounterSetFormat fallback) {
+
+ return matchMIMEType(mimeType, formats/* Iterable<FileFormat> */,
+ fallback);
+
+ }
+
+}
\ No newline at end of file
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java 2014-05-13 12:41:52 UTC (rev 8293)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java 2014-05-13 16:15:33 UTC (rev 8294)
@@ -28,11 +28,11 @@
package com.bigdata.counters.osx;
-import java.util.HashMap;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Pattern;
import com.bigdata.counters.AbstractProcessCollector;
@@ -48,14 +48,13 @@
import com.bigdata.rawstore.Bytes;
/**
- * Collects some counters using <code>iostat</code>. Unfortunately,
+ * Collects some counters using <code>iostat</code> under OSX. Unfortunately,
* <code>iostat</code> does not break down the reads and writes and does not
* report IO Wait. This information is obviously available from OSX as it is
* provided by the ActivityMonitor, but we can not get it from
* <code>iostat</code>.
*
* @author <a href="mailto:tho...@us......
[truncated message content] |
|
From: <mrp...@us...> - 2014-05-13 19:32:16
|
Revision: 8298
http://sourceforge.net/p/bigdata/code/8298
Author: mrpersonick
Date: 2014-05-13 19:32:10 +0000 (Tue, 13 May 2014)
Log Message:
-----------
full blueprints integration commit
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/AbstractTestBigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/BigdataSailNSSWrapper.java
branches/BIGDATA_RELEASE_1_3_0/build.xml
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edge.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edges.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edgesByProperty.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/vertex.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
branches/BIGDATA_RELEASE_1_3_0/build.xml
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 18:15:26 UTC (rev 8297)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 19:32:10 UTC (rev 8298)
@@ -1,107 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.blueprints;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.openrdf.model.Statement;
-import org.openrdf.model.URI;
-import org.openrdf.model.vocabulary.RDFS;
-
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Vertex;
-
-/**
- * Edge implementation that wraps an Edge statement and points to a
- * {@link BigdataGraph} instance.
- *
- * @author mikepersonick
- *
- */
-public class BigdataEdge extends BigdataElement implements Edge {
-
- private static final List<String> blacklist = Arrays.asList(new String[] {
- "id", "", "label"
- });
-
- protected final Statement stmt;
-
- public BigdataEdge(final Statement stmt, final BigdataGraph graph) {
- super(stmt.getPredicate(), graph);
-
- this.stmt = stmt;
- }
-
- @Override
- public Object getId() {
- return graph.factory.fromEdgeURI(uri);
- }
-
- @Override
- public void remove() {
- graph.removeEdge(this);
- }
-
- @Override
- public String getLabel() {
- return (String) graph.getProperty(uri, RDFS.LABEL);
- }
-
- @Override
- public Vertex getVertex(final Direction dir) throws IllegalArgumentException {
-
- if (dir == Direction.BOTH) {
- throw new IllegalArgumentException();
- }
-
- final URI uri = (URI)
- (dir == Direction.OUT ? stmt.getSubject() : stmt.getObject());
-
- final String id = graph.factory.fromVertexURI(uri);
-
- return graph.getVertex(id);
-
- }
-
- @Override
- public void setProperty(final String property, final Object val) {
-
- if (property == null || blacklist.contains(property)) {
- throw new IllegalArgumentException();
- }
-
- super.setProperty(property, val);
-
- }
-
- @Override
- public String toString() {
- final URI s = (URI) stmt.getSubject();
- final URI p = (URI) stmt.getPredicate();
- final URI o = (URI) stmt.getObject();
- return "e["+p.getLocalName()+"]["+s.getLocalName()+"->"+o.getLocalName()+"]";
- }
-
-}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 19:32:10 UTC (rev 8298)
@@ -0,0 +1,115 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.model.vocabulary.RDFS;
+
+import com.tinkerpop.blueprints.Direction;
+import com.tinkerpop.blueprints.Edge;
+import com.tinkerpop.blueprints.Vertex;
+
+/**
+ * Edge implementation that wraps an Edge statement and points to a
+ * {@link BigdataGraph} instance.
+ *
+ * @author mikepersonick
+ *
+ */
+public class BigdataEdge extends BigdataElement implements Edge {
+
+ private static final List<String> blacklist = Arrays.asList(new String[] {
+ "id", "", "label"
+ });
+
+ protected final Statement stmt;
+
+ public BigdataEdge(final Statement stmt, final BigdataGraph graph) {
+ super(stmt.getPredicate(), graph);
+
+ this.stmt = stmt;
+ }
+
+ @Override
+ public Object getId() {
+
+ return graph.factory.fromEdgeURI(uri);
+
+ }
+
+ @Override
+ public void remove() {
+
+ graph.removeEdge(this);
+
+ }
+
+ @Override
+ public String getLabel() {
+
+ return (String) graph.getProperty(uri, RDFS.LABEL);
+
+ }
+
+ @Override
+ public Vertex getVertex(final Direction dir) throws IllegalArgumentException {
+
+ if (dir == Direction.BOTH) {
+ throw new IllegalArgumentException();
+ }
+
+ final URI uri = (URI)
+ (dir == Direction.OUT ? stmt.getSubject() : stmt.getObject());
+
+ final String id = graph.factory.fromVertexURI(uri);
+
+ return graph.getVertex(id);
+
+ }
+
+ @Override
+ public void setProperty(final String prop, final Object val) {
+
+ if (prop == null || blacklist.contains(prop)) {
+ throw new IllegalArgumentException();
+ }
+
+ super.setProperty(prop, val);
+
+ }
+
+ @Override
+ public String toString() {
+
+ final URI s = (URI) stmt.getSubject();
+ final URI p = (URI) stmt.getPredicate();
+ final URI o = (URI) stmt.getObject();
+ return "e["+p.getLocalName()+"]["+s.getLocalName()+"->"+o.getLocalName()+"]";
+
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 18:15:26 UTC (rev 8297)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 19:32:10 UTC (rev 8298)
@@ -1,134 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.blueprints;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-import org.openrdf.model.Literal;
-import org.openrdf.model.URI;
-
-import com.tinkerpop.blueprints.Element;
-
-/**
- * Base class for {@link BigdataVertex} and {@link BigdataEdge}. Handles
- * property-related methods.
- *
- * @author mikepersonick
- *
- */
-public abstract class BigdataElement implements Element {
-
- private static final List<String> blacklist = Arrays.asList(new String[] {
- "id", ""
- });
-
- protected final URI uri;
- protected final BigdataGraph graph;
-
- public BigdataElement(final URI uri, final BigdataGraph graph) {
- this.uri = uri;
- this.graph = graph;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public <T> T getProperty(final String property) {
-
- final URI p = graph.factory.toPropertyURI(property);
-
- return (T) graph.getProperty(uri, p);
-
- }
-
- @Override
- public Set<String> getPropertyKeys() {
-
- return graph.getPropertyKeys(uri);
-
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public <T> T removeProperty(final String property) {
-
- final URI p = graph.factory.toPropertyURI(property);
-
- return (T) graph.removeProperty(uri, p);
-
- }
-
- @Override
- public void setProperty(final String property, final Object val) {
-
- if (property == null || blacklist.contains(property)) {
- throw new IllegalArgumentException();
- }
-
- final URI p = graph.factory.toPropertyURI(property);
-
- final Literal o = graph.factory.toLiteral(val);
-
- graph.setProperty(uri, p, o);
-
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((graph == null) ? 0 : graph.hashCode());
- result = prime * result + ((uri == null) ? 0 : uri.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- BigdataElement other = (BigdataElement) obj;
- if (graph == null) {
- if (other.graph != null)
- return false;
- } else if (!graph.equals(other.graph))
- return false;
- if (uri == null) {
- if (other.uri != null)
- return false;
- } else if (!uri.equals(other.uri))
- return false;
- return true;
- }
-
- @Override
- public String toString() {
- return uri.toString();
- }
-
-
-}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 19:32:10 UTC (rev 8298)
@@ -0,0 +1,154 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import org.openrdf.model.URI;
+
+import com.tinkerpop.blueprints.Element;
+
+/**
+ * Base class for {@link BigdataVertex} and {@link BigdataEdge}. Handles
+ * property-related methods.
+ *
+ * @author mikepersonick
+ *
+ */
+public abstract class BigdataElement implements Element {
+
+ private static final List<String> blacklist = Arrays.asList(new String[] {
+ "id", ""
+ });
+
+ protected final URI uri;
+ protected final BigdataGraph graph;
+
+ public BigdataElement(final URI uri, final BigdataGraph graph) {
+ this.uri = uri;
+ this.graph = graph;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> T getProperty(final String property) {
+
+ return (T) graph.getProperty(uri, property);
+
+ }
+
+ @Override
+ public Set<String> getPropertyKeys() {
+
+ return graph.getPropertyKeys(uri);
+
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <T> T removeProperty(final String property) {
+
+ return (T) graph.removeProperty(uri, property);
+
+ }
+
+ @Override
+ public void setProperty(final String prop, final Object val) {
+
+ if (prop == null || blacklist.contains(prop)) {
+ throw new IllegalArgumentException();
+ }
+
+ graph.setProperty(uri, prop, val);
+
+ }
+
+ /**
+ * Simple extension for multi-valued properties.
+ */
+ public void addProperty(final String prop, final Object val) {
+
+ if (prop == null || blacklist.contains(prop)) {
+ throw new IllegalArgumentException();
+ }
+
+ graph.addProperty(uri, prop, val);
+
+ }
+
+ /**
+ * Simple extension for multi-valued properties.
+ */
+ @SuppressWarnings("unchecked")
+ public <T> List<T> getProperties(final String property) {
+
+ return (List<T>) graph.getProperties(uri, property);
+
+ }
+
+ /**
+ * Generated code.
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((graph == null) ? 0 : graph.hashCode());
+ result = prime * result + ((uri == null) ? 0 : uri.hashCode());
+ return result;
+ }
+
+ /**
+ * Generated code.
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ BigdataElement other = (BigdataElement) obj;
+ if (graph == null) {
+ if (other.graph != null)
+ return false;
+ } else if (!graph.equals(other.graph))
+ return false;
+ if (uri == null) {
+ if (other.uri != null)
+ return false;
+ } else if (!uri.equals(other.uri))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return uri.toString();
+ }
+
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-13 18:15:26 UTC (rev 8297)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-13 19:32:10 UTC (rev 8298)
@@ -1,851 +0,0 @@
-/**
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-package com.bigdata.blueprints;
-
-import info.aduna.iteration.CloseableIteration;
-
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import java.util.UUID;
-
-import org.apache.commons.io.IOUtils;
-import org.openrdf.OpenRDFException;
-import org.openrdf.model.Literal;
-import org.openrdf.model.Statement;
-import org.openrdf.model.URI;
-import org.openrdf.model.Value;
-import org.openrdf.model.impl.StatementImpl;
-import org.openrdf.model.impl.URIImpl;
-import org.openrdf.model.vocabulary.RDF;
-import org.openrdf.model.vocabulary.RDFS;
-import org.openrdf.query.GraphQueryResult;
-import org.openrdf.query.QueryLanguage;
-import org.openrdf.repository.RepositoryConnection;
-import org.openrdf.repository.RepositoryResult;
-
-import com.bigdata.rdf.store.BD;
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Features;
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.GraphQuery;
-import com.tinkerpop.blueprints.Vertex;
-import com.tinkerpop.blueprints.util.DefaultGraphQuery;
-import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;
-
-/**
- * A base class for a Blueprints wrapper around a bigdata back-end.
- *
- * @author mikepersonick
- *
- */
-public abstract class BigdataGraph implements Graph {
-
- public static final URI VERTEX = new URIImpl(BD.NAMESPACE + "Vertex");
-
- public static final URI EDGE = new URIImpl(BD.NAMESPACE + "Edge");
-
-// final BigdataSailRepository repo;
-//
-// transient BigdataSailRepositoryConnection cxn;
-
- final BlueprintsRDFFactory factory;
-
-// public BigdataGraph(final BigdataSailRepository repo) {
-// this(repo, BigdataRDFFactory.INSTANCE);
-// }
-
- public BigdataGraph(//final BigdataSailRepository repo,
- final BlueprintsRDFFactory factory) {
-// try {
-// this.repo = repo;
-// this.cxn = repo.getUnisolatedConnection();
-// this.cxn.setAutoCommit(false);
- this.factory = factory;
-// } catch (RepositoryException ex) {
-// throw new RuntimeException(ex);
-// }
- }
-
- public String toString() {
- return getClass().getSimpleName().toLowerCase();
- }
-
- /**
- * Post a GraphML file to the remote server. (Bulk-upload operation.)
- */
- public void loadGraphML(final String file) throws Exception {
- GraphMLReader.inputGraph(this, file);
- }
-
- protected abstract RepositoryConnection cxn() throws Exception;
-
-// public BigdataSailRepositoryConnection getConnection() {
-// return this.cxn;
-// }
-//
-// public BlueprintsRDFFactory getFactory() {
-// return this.factory;
-// }
-
-// public Value getValue(final URI s, final URI p) {
-//
-// try {
-//
-// final RepositoryResult<Statement> result =
-// cxn.getStatements(s, p, null, false);
-//
-// if (result.hasNext()) {
-//
-// final Value o = result.next().getObject();
-//
-// if (result.hasNext()) {
-// throw new RuntimeException(s
-// + ": more than one value for p: " + p
-// + ", did you mean to call getValues()?");
-// }
-//
-// return o;
-//
-// }
-//
-// return null;
-//
-// } catch (Exception ex) {
-// throw new RuntimeException(ex);
-// }
-//
-// }
-
- public Object getProperty(final URI s, final URI p) {
-
- try {
-
- final RepositoryResult<Statement> result =
- cxn().getStatements(s, p, null, false);
-
- if (result.hasNext()) {
-
- final Value value = result.next().getObject();
-
- if (result.hasNext()) {
- throw new RuntimeException(s
- + ": more than one value for p: " + p
- + ", did you mean to call getValues()?");
- }
-
- if (!(value instanceof Literal)) {
- throw new RuntimeException("not a property: " + value);
- }
-
- final Literal lit = (Literal) value;
-
- return factory.fromLiteral(lit);
-
- }
-
- return null;
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
-// public List<Value> getValues(final URI s, final URI p) {
-//
-// try {
-//
-// final RepositoryResult<Statement> result =
-// cxn().getStatements(s, p, null, false);
-//
-// final List<Value> values = new LinkedList<Value>();
-//
-// while (result.hasNext()) {
-//
-// final Value o = result.next().getObject();
-//
-// values.add(o);
-//
-// }
-//
-// return values;
-//
-// } catch (Exception ex) {
-// throw new RuntimeException(ex);
-// }
-//
-// }
-
- public List<Object> getProperties(final URI s, final URI p) {
-
- try {
-
- final RepositoryResult<Statement> result =
- cxn().getStatements(s, p, null, false);
-
- final List<Object> props = new LinkedList<Object>();
-
- while (result.hasNext()) {
-
- final Value value = result.next().getObject();
-
- if (!(value instanceof Literal)) {
- throw new RuntimeException("not a property: " + value);
- }
-
- final Literal lit = (Literal) value;
-
- props.add(factory.fromLiteral(lit));
-
- }
-
- return props;
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
- public Set<String> getPropertyKeys(final URI s) {
-
- try {
-
- final RepositoryResult<Statement> result =
- cxn().getStatements(s, null, null, false);
-
- final Set<String> properties = new LinkedHashSet<String>();
-
- while (result.hasNext()) {
-
- final Statement stmt = result.next();
-
- if (!(stmt.getObject() instanceof Literal)) {
- continue;
- }
-
- if (stmt.getPredicate().equals(RDFS.LABEL)) {
- continue;
- }
-
- final String p =
- factory.fromPropertyURI(stmt.getPredicate());
-
- properties.add(p);
-
- }
-
- return properties;
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
- public Object removeProperty(final URI s, final URI p) {
-
- try {
-
- final Object oldVal = getProperty(s, p);
-
- cxn().remove(s, p, null);
-
- return oldVal;
-
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
-
- }
-
- public void setProperty(final URI s, final URI p, final Literal o) {
-
- try {
-
- cxn().remove(s, p, null);
-
- cxn().add(s, p, o);
-
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
-
- }
-
- @Override
- public Edge addEdge(final Object key, final Vertex from, final Vertex to,
- final String label) {
-
- if (label == null) {
- throw new IllegalArgumentException();
- }
-
- final String eid = key != null ? key.toString() : UUID.randomUUID().toString();
-
- final URI edgeURI = factory.toEdgeURI(eid);
-
- if (key != null) {
-
- final Edge edge = getEdge(key);
-
- if (edge != null) {
- if (!(edge.getVertex(Direction.OUT).equals(from) &&
- (edge.getVertex(Direction.OUT).equals(to)))) {
- throw new IllegalArgumentException("edge already exists: " + key);
- }
- }
-
- }
-
- try {
-
-// if (cxn().hasStatement(edgeURI, RDF.TYPE, EDGE, false)) {
-// throw new IllegalArgumentException("edge " + eid + " already exists");
-// }
-
- final URI fromURI = factory.toVertexURI(from.getId().toString());
- final URI toURI = factory.toVertexURI(to.getId().toString());
-
- cxn().add(fromURI, edgeURI, toURI);
- cxn().add(edgeURI, RDF.TYPE, EDGE);
- cxn().add(edgeURI, RDFS.LABEL, factory.toLiteral(label));
-
- return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this);
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
- @Override
- public Vertex addVertex(final Object key) {
-
- try {
-
- final String vid = key != null ?
- key.toString() : UUID.randomUUID().toString();
-
- final URI uri = factory.toVertexURI(vid);
-
-// if (cxn().hasStatement(vertexURI, RDF.TYPE, VERTEX, false)) {
-// throw new IllegalArgumentException("vertex " + vid + " already exists");
-// }
-
- cxn().add(uri, RDF.TYPE, VERTEX);
-
- return new BigdataVertex(uri, this);
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
- @Override
- public Edge getEdge(final Object key) {
-
- if (key == null)
- throw new IllegalArgumentException();
-
- try {
-
- final URI edge = factory.toEdgeURI(key.toString());
-
- final RepositoryResult<Statement> result =
- cxn().getStatements(null, edge, null, false);
-
- if (result.hasNext()) {
-
- final Statement stmt = result.next();
-
- if (result.hasNext()) {
- throw new RuntimeException(
- "duplicate edge: " + key);
- }
-
- return new BigdataEdge(stmt, this);
-
- }
-
- return null;
-
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- }
-
- @Override
- public Iterable<Edge> getEdges() {
-
- final URI wild = null;
- return getEdges(wild, wild);
-
- }
-
- public Iterable<Edge> getEdges(final URI s, final URI o, final String... labels) {
-
- try {
-
-// final RepositoryResult<Statement> result =
-// cxn().getStatements(s, p, o, false);
-//
-// return new EdgeIterable(result);
-
- final StringBuilder sb = new StringBuilder();
- sb.append("construct { ?from ?edge ?to . } where {\n");
- sb.append("?edge rdf:type bd:Edge . ?from ?edge ?to .\n");
- if (labels != null && labels.length > 0) {
- if (labels.length == 1) {
- sb.append("?edge rdfs:label \"").append(labels[0]).append("\" .\n");
- } else {
- sb.append("?edge rdfs:label ?label .\n");
- sb.append("filter(?label in (");
- for (String label : labels) {
- sb.append("\""+label+"\", ");
- }
-...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-14 15:32:59
|
Revision: 8314
http://sourceforge.net/p/bigdata/code/8314
Author: thompsonbry
Date: 2014-05-14 15:32:55 +0000 (Wed, 14 May 2014)
Log Message:
-----------
LBS policy fix.
Moved the JVM_OPTS into /etc/defaults/bigdataHA. This is the more standard practice.
See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java 2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java 2014-05-14 15:32:55 UTC (rev 8314)
@@ -802,7 +802,7 @@
double sum = 0d;
for (HostScore tmp : hostScores) {
hostScore = tmp;
- sum += (1d - hostScore.getScore());
+ sum += hostScore.getScore();
if (sum >= d) {
// found desired host.
break;
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-14 15:32:55 UTC (rev 8314)
@@ -21,17 +21,6 @@
pidFile=$lockDir/pid
##
-# ServiceStarter JVM options.
-#
-# The ServiceStarter is launched as a JVM with the following JVM options.
-# The other services (including the HAJournalServer) will run inside of
-# this JVM. This is where you specify the size of the Java heap and the
-# size of the direct memory heap (used for the write cache buffers and
-# some related things).
-##
-export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
-
-##
# HAJournalServer configuration parameter overrides (see HAJournal.config).
#
# The bigdata HAJournal.config file may be heavily parameterized through
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-05-14 15:32:55 UTC (rev 8314)
@@ -14,6 +14,18 @@
#pidFile=
##
+# ServiceStarter JVM options.
+#
+# The ServiceStarter is launched as a JVM with the following JVM options.
+# The other services (including the HAJournalServer) will run inside of
+# this JVM. This is where you specify the size of the Java heap and the
+# size of the direct memory heap (used for the write cache buffers and
+# some related things).
+##
+export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
+#export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1046"
+
+##
# The following variables configure the startHAServices script, which
# passes them through to HAJournal.config.
##
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-14 15:48:19
|
Revision: 8315
http://sourceforge.net/p/bigdata/code/8315
Author: thompsonbry
Date: 2014-05-14 15:48:14 +0000 (Wed, 14 May 2014)
Log Message:
-----------
Added worksheet for the HA LBS LOAD => AVAILABILITY normalization logic and link to worksheet in the HA LBS code. See #624 (HA LBS)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx 2014-05-14 15:32:55 UTC (rev 8314)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx 2014-05-14 15:48:14 UTC (rev 8315)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java 2014-05-14 15:32:55 UTC (rev 8314)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java 2014-05-14 15:48:14 UTC (rev 8315)
@@ -762,6 +762,8 @@
* be proxied -or- <code>null</code> if the request should not be
* proxied (because we lack enough information to identify a target
* host).
+ *
+ * @see bigdata/src/resources/architecture/HA_LBS.xls
*/
static HostScore getHost(//
final double d, //
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-15 18:39:41
|
Revision: 8340
http://sourceforge.net/p/bigdata/code/8340
Author: thompsonbry
Date: 2014-05-15 18:39:35 +0000 (Thu, 15 May 2014)
Log Message:
-----------
Modified startHAServices to pass along environment variables to control the jetty thread pool.
Modified jetty.xml to unpack the war per webtide guidance. This only happens if necessary.
Modified NanoSparqlServer to detect a failure to start and throw an exception.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:39:35 UTC (rev 8340)
@@ -28,6 +28,8 @@
import java.net.URL;
import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import javax.servlet.ServletContextListener;
@@ -106,6 +108,14 @@
*/
String DEFAULT_JETTY_XML = "jetty.xml";
+ /**
+ * The timeout in seconds that we will await the start of the jetty
+ * {@link Server} (default {@value #DEFAULT_JETTY_START_TIMEOUT}).
+ */
+ String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout";
+
+ String DEFAULT_JETTY_STARTUP_TIMEOUT = "10";
+
}
/**
@@ -328,26 +338,12 @@
initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS,
servletContextListenerClass);
- final Server server;
+ final long jettyStartTimeout = Long.parseLong(System.getProperty(
+ SystemProperties.JETTY_STARTUP_TIMEOUT,
+ SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT));
- boolean ok = false;
- try {
- // Create the service.
- server = NanoSparqlServer.newInstance(port, jettyXml,
- null/* indexManager */, initParams);
- // Start Server.
- server.start();
- // Await running.
- while (server.isStarting() && !server.isRunning()) {
- Thread.sleep(100/* ms */);
- }
- ok = true;
- } finally {
- if (!ok) {
- // Complain if Server did not start.
- System.err.println("Server did not start.");
- }
- }
+ final Server server = awaitServerStart(port, jettyXml, initParams,
+ jettyStartTimeout, TimeUnit.SECONDS);
/*
* Report *an* effective URL of this service.
@@ -384,6 +380,68 @@
}
/**
+ * Await a {@link Server} start up to a timeout.
+ *
+ * @param port
+ * The port (maybe ZERO for a random port).
+ * @param jettyXml
+ * The location of the <code>jetty.xml</code> file.
+ * @param initParams
+ * The init-param overrides.
+ * @param timeout
+ * The timeout.
+ * @param units
+ *
+ * @return The server iff the server started before the timeout.
+ *
+ * @throws InterruptedException
+ * @throws TimeoutException
+ * @throws Exception
+ */
+ private static Server awaitServerStart(final int port,
+ final String jettyXml, final Map<String, String> initParams,
+ final long timeout, final TimeUnit units)
+ throws InterruptedException, TimeoutException, Exception {
+
+ Server server = null;
+ boolean ok = false;
+ final long begin = System.nanoTime();
+ final long nanos = units.toNanos(timeout);
+ long remaining = nanos;
+ try {
+ // Create the service.
+ server = NanoSparqlServer.newInstance(port, jettyXml,
+ null/* indexManager */, initParams);
+ // Start Server.
+ server.start();
+ // Await running.
+ remaining = nanos - (System.nanoTime() - begin);
+ while (server.isStarting() && !server.isRunning() && remaining > 0) {
+ Thread.sleep(100/* ms */);
+ // remaining = nanos - (now - begin) [aka elapsed]
+ remaining = nanos - (System.nanoTime() - begin);
+ }
+ if (remaining < 0) {
+ throw new TimeoutException();
+ }
+ ok = true;
+ } finally {
+ if (!ok) {
+ // Complain if Server did not start.
+ final String msg = "Server did not start.";
+ System.err.println(msg);
+ log.fatal(msg);
+ if (server != null) {
+ server.stop();
+ server.destroy();
+ }
+ }
+ }
+ return server;
+
+ }
+
+ /**
* Start the embedded {@link Server}.
* <p>
* Note: The port override argument given here is applied by setting the
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-15 18:39:35 UTC (rev 8340)
@@ -149,7 +149,7 @@
<Set name="contextPath">/bigdata</Set>
<Set name="descriptor">WEB-INF/web.xml</Set>
<Set name="parentLoaderPriority">true</Set>
- <Set name="extractWAR">false</Set>
+ <Set name="extractWAR">true</Set>
</New>
</Arg>
</Call>
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-05-15 18:39:35 UTC (rev 8340)
@@ -73,6 +73,9 @@
-DHA_PORT=${HA_PORT}\
"-Dcom.bigdata.hostname=${BIGDATA_HOSTNAME}"\
"-Djetty.port=${JETTY_PORT}"\
+ "-Djetty.threads.min=${JETTY_THREADS_MIN}"\
+ "-Djetty.threads.max=${JETTY_THREADS_MAX}"\
+ "-Djetty.threads.timeout=${JETTY_THREADS_TIMEOUT}"\
"-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\
"-DJETTY_XML=${JETTY_XML}"\
-DCOLLECT_QUEUE_STATISTICS=${COLLECT_QUEUE_STATISTICS}\
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-15 19:38:11
|
Revision: 8341
http://sourceforge.net/p/bigdata/code/8341
Author: thompsonbry
Date: 2014-05-15 19:38:04 +0000 (Thu, 15 May 2014)
Log Message:
-----------
Working to chase down a problem with locating bigdata-war/src in the JAR when running the NSS from the command line.
Refactored the logic to await the NSS start up to a timeout into the three main invocations of the NSS. This also places the code to interpret jetty.dump.start into each of these code paths in order to provide additional information on the startup contexts.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 18:39:35 UTC (rev 8340)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 19:38:04 UTC (rev 8341)
@@ -29,7 +29,6 @@
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetSocketAddress;
-import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.rmi.Remote;
@@ -105,6 +104,7 @@
import com.bigdata.rdf.sail.webapp.ConfigParams;
import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet;
import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
+import com.bigdata.rdf.sail.webapp.NanoSparqlServer.SystemProperties;
import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
import com.bigdata.rwstore.RWStore;
import com.bigdata.service.AbstractHATransactionService;
@@ -114,7 +114,6 @@
import com.bigdata.util.StackInfoReport;
import com.bigdata.util.concurrent.LatchedExecutor;
import com.bigdata.util.concurrent.MonitoredFutureTask;
-import com.bigdata.util.config.NicUtil;
import com.sun.jini.start.LifeCycle;
/**
@@ -4544,55 +4543,9 @@
jettyServer = NanoSparqlServer
.newInstance(jettyXml, journal, null/* initParams */);
- log.warn("Starting NSS");
-
- // Start the server.
- jettyServer.start();
+ // Wait until the server starts (up to a timeout).
+ NanoSparqlServer.awaitServerStart(jettyServer);
- if (Boolean.getBoolean("jetty.dump.start")) {
-
- // Support the jetty dump-after-start semantics.
- log.warn(jettyServer.dump());
-
- }
-
- /*
- * Report *an* effective URL of this service.
- *
- * Note: This is an effective local URL (and only one of them, and
- * even then only one for the first connector). It does not reflect
- * any knowledge about the desired external deployment URL for the
- * service end point.
- */
- final String serviceURL;
- {
-
- final int actualPort = getNSSPort();
-// final int actualPort = jettyServer.getConnectors()[0]
-// .getLocalPort();
-
- String hostAddr = NicUtil.getIpAddress("default.nic",
- "default", true/* loopbackOk */);
-
- if (hostAddr == null) {
-
- hostAddr = "localhost";
-
- }
-
- serviceURL = new URL("http", hostAddr, actualPort, ""/* file */)
- .toExternalForm();
-
- final String msg = "logicalServiceZPath: "
- + logicalServiceZPath + "\n" + "serviceURL: "
- + serviceURL;
-
- System.out.println(msg);
- if (log.isInfoEnabled())
- log.warn(msg);
-
- }
-
} catch (Exception e1) {
// Log and ignore.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:39:35 UTC (rev 8340)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 19:38:04 UTC (rev 8341)
@@ -115,6 +115,12 @@
String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout";
String DEFAULT_JETTY_STARTUP_TIMEOUT = "10";
+
+ /**
+ * When <code>true</code>, the state of jetty will be dumped onto a
+ * logger after the server start.
+ */
+ String JETTY_DUMP_START = "jetty.dump.start";
}
@@ -338,42 +344,12 @@
initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS,
servletContextListenerClass);
- final long jettyStartTimeout = Long.parseLong(System.getProperty(
- SystemProperties.JETTY_STARTUP_TIMEOUT,
- SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT));
+ // Create the service.
+ final Server server = NanoSparqlServer.newInstance(port, jettyXml,
+ null/* indexManager */, initParams);
- final Server server = awaitServerStart(port, jettyXml, initParams,
- jettyStartTimeout, TimeUnit.SECONDS);
+ awaitServerStart(server);
- /*
- * Report *an* effective URL of this service.
- *
- * Note: This is an effective local URL (and only one of them, and
- * even then only one for the first connector). It does not reflect
- * any knowledge about the desired external deployment URL for the
- * service end point.
- */
- final String serviceURL;
- {
-
- final int actualPort = getLocalPort(server);
-
- String hostAddr = NicUtil.getIpAddress("default.nic", "default",
- true/* loopbackOk */);
-
- if (hostAddr == null) {
-
- hostAddr = "localhost";
-
- }
-
- serviceURL = new URL("http", hostAddr, actualPort, ""/* file */)
- .toExternalForm();
-
- System.out.println("serviceURL: " + serviceURL);
-
- }
-
// Wait for the service to terminate.
server.join();
@@ -382,37 +358,25 @@
/**
* Await a {@link Server} start up to a timeout.
*
- * @param port
- * The port (maybe ZERO for a random port).
- * @param jettyXml
- * The location of the <code>jetty.xml</code> file.
- * @param initParams
- * The init-param overrides.
- * @param timeout
- * The timeout.
- * @param units
- *
- * @return The server iff the server started before the timeout.
- *
+ * @param server The {@link Server} to start.
* @throws InterruptedException
* @throws TimeoutException
* @throws Exception
*/
- private static Server awaitServerStart(final int port,
- final String jettyXml, final Map<String, String> initParams,
- final long timeout, final TimeUnit units)
+ public static void awaitServerStart(final Server server)
throws InterruptedException, TimeoutException, Exception {
- Server server = null;
+ final long timeout = Long.parseLong(System.getProperty(
+ SystemProperties.JETTY_STARTUP_TIMEOUT,
+ SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT));
+
boolean ok = false;
final long begin = System.nanoTime();
- final long nanos = units.toNanos(timeout);
+ final long nanos = TimeUnit.SECONDS.toNanos(timeout);
long remaining = nanos;
try {
- // Create the service.
- server = NanoSparqlServer.newInstance(port, jettyXml,
- null/* indexManager */, initParams);
// Start Server.
+ log.warn("Starting NSS");
server.start();
// Await running.
remaining = nanos - (System.nanoTime() - begin);
@@ -432,13 +396,59 @@
System.err.println(msg);
log.fatal(msg);
if (server != null) {
+ /*
+ * Support the jetty dump-after-start semantics.
+ */
+ if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) {
+ log.warn(server.dump());
+ }
server.stop();
server.destroy();
}
}
}
- return server;
+ /*
+ * Support the jetty dump-after-start semantics.
+ */
+ if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) {
+ log.warn(server.dump());
+ }
+
+ /*
+ * Report *an* effective URL of this service.
+ *
+ * Note: This is an effective local URL (and only one of them, and even
+ * then only one for the first connector). It does not reflect any
+ * knowledge about the desired external deployment URL for the service
+ * end point.
+ */
+ final String serviceURL;
+ {
+
+ final int actualPort = getLocalPort(server);
+
+ String hostAddr = NicUtil.getIpAddress("default.nic", "default",
+ true/* loopbackOk */);
+
+ if (hostAddr == null) {
+
+ hostAddr = "localhost";
+
+ }
+
+ serviceURL = new URL("http", hostAddr, actualPort, ""/* file */)
+ .toExternalForm();
+
+ final String msg = "serviceURL: " + serviceURL;
+
+ System.out.println(msg);
+
+ if (log.isInfoEnabled())
+ log.warn(msg);
+
+ }
+
}
/**
@@ -528,9 +538,7 @@
}
/**
- * Variant used when you already have the {@link IIndexManager} on hand and
- * want to use <code>web.xml</code> to configure the {@link WebAppContext}
- * and <code>jetty.xml</code> to configure the jetty {@link Server}.
+ * Variant used when you already have the {@link IIndexManager}.
* <p>
* When the optional {@link IIndexManager} argument is specified, it will be
* set as an attribute on the {@link WebAppContext}. This will cause the
@@ -563,9 +571,11 @@
* Allow configuration of embedded NSS jetty server using jetty-web.xml
* </a>
*/
- static public Server newInstance(final String jettyXml,
- final IIndexManager indexManager,
- final Map<String, String> initParams) throws Exception {
+ static public Server newInstance(//
+ final String jettyXml,//
+ final IIndexManager indexManager,//
+ final Map<String, String> initParams//
+ ) throws Exception {
if (jettyXml == null)
throw new IllegalArgumentException();
@@ -676,10 +686,12 @@
*/
if (initParams != null) {
- wac.setAttribute(BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, initParams);
+ wac.setAttribute(
+ BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES,
+ initParams);
}
-
+
}
return server;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 18:39:35 UTC (rev 8340)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 19:38:04 UTC (rev 8341)
@@ -1,6 +1,5 @@
package com.bigdata.samples;
-import java.net.URL;
import java.util.LinkedHashMap;
import java.util.Map;
@@ -10,7 +9,6 @@
import com.bigdata.journal.IIndexManager;
import com.bigdata.rdf.sail.BigdataSail;
import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
-import com.bigdata.util.config.NicUtil;
/**
* Class demonstrates how to start the {@link NanoSparqlServer} from within
@@ -56,24 +54,8 @@
server = NanoSparqlServer.newInstance(port, indexManager,
initParams);
- server.start();
+ NanoSparqlServer.awaitServerStart(server);
- final int actualPort = NanoSparqlServer.getLocalPort(server);
-
- String hostAddr = NicUtil.getIpAddress("default.nic",
- "default", true/* loopbackOk */);
-
- if (hostAddr == null) {
-
- hostAddr = "localhost";
-
- }
-
- final String serviceURL = new URL("http", hostAddr, actualPort, ""/* file */)
- .toExternalForm();
-
- System.out.println("serviceURL: " + serviceURL);
-
// Block and wait. The NSS is running.
server.join();
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-15 21:22:00
|
Revision: 8342
http://sourceforge.net/p/bigdata/code/8342
Author: mrpersonick
Date: 2014-05-15 21:21:54 +0000 (Thu, 15 May 2014)
Log Message:
-----------
upgraded to blueprints 2.5.0. added a rexster 2.5.0 dependency.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/pom.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml
branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 21:21:54 UTC (rev 8342)
@@ -94,8 +94,10 @@
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/>
<classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/>
<classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.2.3.jar"/>
- <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.4.0.jar"/>
- <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.4.0.jar"/>
<classpathentry kind="lib" path="bigdata-blueprints/lib/jettison-1.3.3.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.5.0.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/>
+ <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/>
<classpathentry kind="output" path="bin"/>
</classpath>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 21:21:54 UTC (rev 8342)
@@ -277,6 +277,10 @@
"https://github.com/tinkerpop/blueprints",
"https://github.com/tinkerpop/blueprints/blob/master/LICENSE.txt");
+ private final static Dep rexsterCore = new Dep("rexster-core",
+ "https://github.com/tinkerpop/rexster",
+ "https://github.com/tinkerpop/rexster/blob/master/LICENSE.txt");
+
static private final Dep[] depends;
static {
depends = new Dep[] { //
@@ -306,6 +310,7 @@
servletApi,//
jacksonCore,//
blueprintsCore,//
+ rexsterCore,//
bigdataGanglia,//
// scale-out
jini,//
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt 2014-05-15 21:21:54 UTC (rev 8342)
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt 2014-05-15 21:21:54 UTC (rev 8342)
@@ -0,0 +1,24 @@
+Copyright (c) 2009-Infinity, TinkerPop [http://tinkerpop.com]
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the TinkerPop nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL TINKERPOP BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar
===================================================================
(Binary files differ)
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar
===================================================================
(Binary files differ)
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 21:21:54 UTC (rev 8342)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar
===================================================================
(Binary files differ)
Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342)
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 19:38:04 UTC (rev 8341)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 21:21:54 UTC (rev 8342)
@@ -1,5 +1,5 @@
/**
-Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved.
+Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved.
Contact:
SYSTAP, LLC
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java 2014-05-15 21:21:54 UTC (rev 8342)
@@ -0,0 +1,146 @@
+/**
+Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.blueprints;
+
+import org.apache.commons.configuration.Configuration;
+
+import com.tinkerpop.rexster.config.GraphConfiguration;
+import com.tinkerpop.rexster.config.GraphConfigurationContext;
+import com.tinkerpop.rexster.config.GraphConfigurationException;
+
+/**
+ * Create and configure a BigdataGraph for Rexster.
+ *
+ * @author mikepersonick
+ *
+ */
+public class BigdataGraphConfiguration implements GraphConfiguration {
+
+ public interface Options {
+
+ /**
+ * Specify the type of bigdata instance to use - embedded or remote.
+ */
+ String TYPE = "properties.type";
+
+ /**
+ * Specifies that an embedded bigdata instance should be used.
+ */
+ String TYPE_EMBEDDED = "embedded";
+
+ /**
+ * Specifies that a remote bigdata instance should be used.
+ */
+ String TYPE_REMOTE = "remote";
+
+ /**
+ * Journal file for an embedded bigdata instance.
+ */
+ String FILE = "properties.file";
+
+ /**
+ * Host for a remote bigdata instance.
+ */
+ String HOST = "properties.host";
+
+ /**
+ * Port for a remote bigdata instance.
+ */
+ String PORT = "properties.port";
+
+ }
+
+ /**
+ * Configure and return a BigdataGraph based on the supplied configuration
+ * parameters.
+ *
+ * @see {@link Options}
+ * @see com.tinkerpop.rexster.config.GraphConfiguration#configureGraphInstance(com.tinkerpop.rexster.config.GraphConfigurationContext)
+ */
+ @Override
+ public BigdataGraph configureGraphInstance(final GraphConfigurationContext context)
+ throws GraphConfigurationException {
+
+ try {
+
+ return configure(context);
+
+ } catch (Exception ex) {
+
+ throw new GraphConfigurationException(ex);
+
+ }
+
+ }
+
+ protected BigdataGraph configure(final GraphConfigurationContext context)
+ throws Exception {
+
+ final Configuration config = context.getProperties();
+
+ if (!config.containsKey(Options.TYPE)) {
+ throw new GraphConfigurationException("missing required parameter: " + Options.TYPE);
+ }
+
+ final String type = config.getString(Options.TYPE).toLowerCase();
+
+ if (Options.TYPE_EMBEDDED.equals(type)) {
+
+ if (config.containsKey(Options.FILE)) {
+
+ final String journal = config.getString(Options.FILE);
+
+ return BigdataGraphFactory.create(journal);
+
+ } else {
+
+ return BigdataGraphFactory.create();
+
+ }
+
+ } else if (Options.TYPE_REMOTE.equals(type)) {
+
+ if (!config.containsKey(Options.HOST)) {
+ throw new GraphConfigurationException("missing required parameter: " + Options.HOST);
+ }
+
+ if (!config.containsKey(Options.PORT)) {
+ throw new GraphConfigurationException("missing required parameter: " + Options.PORT);
+ }
+
+ final String host = config.getString(Options.HOST);
+
+ final int port = config.getInt(Options.PORT);
+
+ return BigdataGraphFactory.connect(host, port);
+
+ } else {
+
+ throw new GraphConfigurationException("unrecognized value for "
+ + Options.TYPE + ": " + type);
+
+ }
+
+ }
+
+}
Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml 2014-05-15 21:21:54 UTC (rev 8342)
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rexster>
+ <http>
+ <server-port>8182</server-port>
+ <server-host>0.0.0.0</server-host>
+ <base-uri>http://localhost</base-uri>
+ <web-root>public</web-root>
+ <character-set>UTF-8</character-set>
+ <enable-jmx>false</enable-jmx>
+ <enable-doghouse>true</enable-doghouse>
+ <max-post-size>2097152</max-post-size>
+ <max-header-size>8192</max-header-size>
+ <upload-timeout-millis>30000</upload-timeout-millis>
+ <thread-pool>
+ <worker>
+ <core-size>8</core-size>
+ <max-size>8</max-size>
+ </worker>
+ <kernal>
+ <core-size>4</core-size>
+ <max-size>4</max-size>
+ </kernal>
+ </thread-pool>
+ <io-strategy>leader-follower</io-strategy>
+ </http>
+ <rexpro>
+ <server-port>8184</server-port>
+ <server-host>0.0.0.0</server-host>
+ <session-max-idle>1790000</session-max-idle>
+ <session-check-interval>3000000</session-check-interval>
+ <connection-max-idle>180000</connection-max-idle>
+ <connection-check-interval>3000000</connection-check-interval>
+ <read-buffer>65536</read-buffer>
+ <enable-jmx>false</enable-jmx>
+ <thread-pool>
+ <worker>
+ <core-size>8</core-size>
+ <max-size>8</max-size>
+ </worker>
+ <kernal>
+ <core-size>4</core-size>
+ <max-size>4</max-size>
+ </kernal>
+ </thread-pool>
+ <io-strategy>leader-follower</io-strategy>
+ </rexpro>
+ <shutdown-port>8183</shutdown-po...
[truncated message content] |
|
From: <mrp...@us...> - 2014-05-15 21:43:24
|
Revision: 8344
http://sourceforge.net/p/bigdata/code/8344
Author: mrpersonick
Date: 2014-05-15 21:43:20 +0000 (Thu, 15 May 2014)
Log Message:
-----------
again, fixed the rexster URL for the fetch-rexster task
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.xml
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:39:21 UTC (rev 8343)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:43:20 UTC (rev 8344)
@@ -2592,10 +2592,10 @@
<target name="fetch-rexster" depends="prepare,compile,jar">
<echo>Installing Rexster...</echo>
<get
- src="http://www.tinkerpop.com/downloads/rexster/rexster-console-2.5.0.zip"
- dest="${build.dir}/rexster-console-2.5.0.zip"/>
- <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/>
- <delete file="${build.dir}/rexster-console-2.5.0.zip"/>
+ src="http://www.tinkerpop.com/downloads/rexster/rexster-server-2.5.0.zip"
+ dest="${build.dir}/rexster-server-2.5.0.zip"/>
+ <unzip src="${build.dir}/rexster-server-2.5.0.zip" dest="${build.dir}/"/>
+ <delete file="${build.dir}/rexster-server-2.5.0.zip"/>
</target>
<target name="install-rexster" depends="prepare,compile,jar,bundle">
Deleted: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:39:21 UTC (rev 8343)
+++ branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:43:20 UTC (rev 8344)
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
- http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
- <key id="weight" for="edge" attr.name="weight" attr.type="float"/>
- <key id="name" for="node" attr.name="name" attr.type="string"/>
- <key id="age" for="node" attr.name="age" attr.type="int"/>
- <key id="lang" for="node" attr.name="lang" attr.type="string"/>
- <graph id="G" edgedefault="directed">
- <node id="1">
- <data key="name">marko</data>
- <data key="age">29</data>
- </node>
- <node id="2">
- <data key="name">vadas</data>
- <data key="age">27</data>
- </node>
- <node id="3">
- <data key="name">lop</data>
- <data key="lang">java</data>
- </node>
- <node id="4">
- <data key="name">josh</data>
- <data key="age">32</data>
- </node>
- <node id="5">
- <data key="name">ripple</data>
- <data key="lang">java</data>
- </node>
- <node id="6">
- <data key="name">peter</data>
- <data key="age">35</data>
- </node>
- <edge id="7" source="1" target="2" label="knows">
- <data key="weight">0.5</data>
- </edge>
- <edge id="8" source="1" target="4" label="knows">
- <data key="weight">1.0</data>
- </edge>
- <edge id="9" source="1" target="3" label="created">
- <data key="weight">0.4</data>
- </edge>
- <edge id="10" source="4" target="5" label="created">
- <data key="weight">1.0</data>
- </edge>
- <edge id="11" source="4" target="3" label="created">
- <data key="weight">0.4</data>
- </edge>
- <edge id="12" source="6" target="3" label="created">
- <data key="weight">0.2</data>
- </edge>
- </graph>
-</graphml>
\ No newline at end of file
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-16 18:36:37
|
Revision: 8348
http://sourceforge.net/p/bigdata/code/8348
Author: mrpersonick
Date: 2014-05-16 18:36:34 +0000 (Fri, 16 May 2014)
Log Message:
-----------
fixed some Blueprints CI errors related to the 2.5.0 upgrade
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 14:59:23 UTC (rev 8347)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 18:36:34 UTC (rev 8348)
@@ -144,9 +144,11 @@
return null;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -192,9 +194,11 @@
return props;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -233,9 +237,11 @@
return properties;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -297,9 +303,11 @@
cxn().add(uri, prop, val);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -325,6 +333,8 @@
cxn().add(uri, prop, val);
+ } catch (RuntimeException e) {
+ throw e;
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -384,9 +394,11 @@
return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -412,9 +424,11 @@
return new BigdataVertex(uri, this);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -449,9 +463,11 @@
return null;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -539,8 +555,10 @@
return stmts;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
}
}
@@ -564,9 +582,11 @@
return new EdgeIterable(stmts);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -621,9 +641,11 @@
return new VertexIterable(stmts, subject);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -656,9 +678,11 @@
return getEdges(queryStr);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -681,9 +705,11 @@
return null;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -701,9 +727,11 @@
return new VertexIterable(result, true);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -723,9 +751,11 @@
return new VertexIterable(result, true);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -761,9 +791,11 @@
// remove its properties
cxn().remove(uri, wild, wild);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
}
@@ -789,7 +821,9 @@
// remove incoming edges
cxn().remove(wild, wild, uri);
- } catch (Exception e) {
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
throw new RuntimeException(e);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 14:59:23 UTC (rev 8347)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 18:36:34 UTC (rev 8348)
@@ -29,19 +29,19 @@
/**
* This is a thin-client implementation of a Blueprints wrapper around the
- * client library that interacts with the NanoSparqlServer. This is a functional
+ * client library that interacts with the NanoSparqlServer. This is a functional
* implementation suitable for writing POCs - it is not a high performance
- * implementation by any means (currently does not support caching, batched
- * update, or Blueprints query re-writes). Does have a single "bulk upload"
- * operation that wraps a method on RemoteRepository that will POST a graphml
- * file to the blueprints layer of the bigdata server.
+ * implementation by any means (currently does not support caching or batched
+ * update). Does have a single "bulk upload" operation that wraps a method on
+ * RemoteRepository that will POST a graphml file to the blueprints layer of the
+ * bigdata server.
*
* @see {@link BigdataSailRemoteRepository}
* @see {@link BigdataSailRemoteRepositoryConnection}
* @see {@link RemoteRepository}
*
* @author mikepersonick
- *
+ *
*/
public class BigdataGraphClient extends BigdataGraph {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 14:59:23 UTC (rev 8347)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 18:36:34 UTC (rev 8348)
@@ -30,6 +30,7 @@
import org.openrdf.model.URI;
import org.openrdf.model.ValueFactory;
import org.openrdf.model.impl.ValueFactoryImpl;
+import org.openrdf.model.vocabulary.RDFS;
import com.bigdata.rdf.internal.XSD;
import com.tinkerpop.blueprints.Edge;
@@ -138,7 +139,19 @@
try {
- return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8"));
+ if (property.equals("label")) {
+
+ /*
+ * Label is a reserved property for edge labels, we use
+ * rdfs:label for that.
+ */
+ return RDFS.LABEL;
+
+ } else {
+
+ return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8"));
+
+ }
} catch (UnsupportedEncodingException e) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 14:59:23 UTC (rev 8347)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 18:36:34 UTC (rev 8348)
@@ -68,7 +68,8 @@
test.doTestSuite(new TransactionalGraphTestSuite(test));
GraphTest.printTestPerformance("TransactionalGraphTestSuite",
test.stopWatch());
- }
+ }
+
// public void testGraphSuite() throws Exception {
// final GraphTest test = newBigdataGraphTest();
// test.stopWatch();
@@ -77,12 +78,12 @@
//}
-// public void testTransactionIsolationCommitCheck() throws Exception {
+// public void testGetEdgesByLabel() throws Exception {
// final BigdataGraphTest test = new BigdataGraphTest();
// test.stopWatch();
// final BigdataTestSuite testSuite = new BigdataTestSuite(test);
// try {
-// testSuite.testTransactionIsolationCommitCheck();
+// testSuite.testGetEdgesByLabel();
// } finally {
// test.shutdown();
// }
@@ -95,71 +96,25 @@
super(graphTest);
}
- public void testTransactionIsolationCommitCheck() throws Exception {
- // the purpose of this test is to simulate rexster access to a graph instance, where one thread modifies
- // the graph and a separate thread cannot affect the transaction of the first
- final TransactionalGraph graph = (TransactionalGraph) graphTest.generateGraph();
-
- final CountDownLatch latchCommittedInOtherThread = new CountDownLatch(1);
- final CountDownLatch latchCommitInOtherThread = new CountDownLatch(1);
-
- // this thread starts a transaction then waits while the second thread tries to commit it.
- final Thread threadTxStarter = new Thread() {
- public void run() {
- System.err.println(Thread.currentThread().getId() + ": 1");
- final Vertex v = graph.addVertex(null);
-
- // System.out.println("added vertex");
-
- System.err.println(Thread.currentThread().getId() + ": 2");
- latchCommitInOtherThread.countDown();
-
- System.err.println(Thread.currentThread().getId() + ": 3");
- try {
- latchCommittedInOtherThread.await();
- } catch (InterruptedException ie) {
- throw new RuntimeException(ie);
- }
-
- System.err.println(Thread.currentThread().getId() + ": 4");
- graph.rollback();
-
- System.err.println(Thread.currentThread().getId() + ": 5");
- // there should be no vertices here
- // System.out.println("reading vertex before tx");
- assertFalse(graph.getVertices().iterator().hasNext());
- // System.out.println("read vertex before tx");
- }
- };
-
- threadTxStarter.start();
-
- // this thread tries to commit the transaction started in the first thread above.
- final Thread threadTryCommitTx = new Thread() {
- public void run() {
- System.err.println(Thread.currentThread().getId() + ": 6");
- try {
- latchCommitInOtherThread.await();
- } catch (InterruptedException ie) {
- throw new RuntimeException(ie);
- }
-
- System.err.println(Thread.currentThread().getId() + ": 7");
- // try to commit the other transaction
- graph.commit();
-
- System.err.println(Thread.currentThread().getId() + ": 8");
- latchCommittedInOtherThread.countDown();
- System.err.println(Thread.currentThread().getId() + ": 9");
- }
- };
-
- threadTryCommitTx.start();
-
- threadTxStarter.join();
- threadTryCommitTx.join();
- graph.shutdown();
-
+ public void testGetEdgesByLabel() {
+ Graph graph = graphTest.generateGraph();
+ if (graph.getFeatures().supportsEdgeIteration) {
+ Vertex v1 = graph.addVertex(null);
+ Vertex v2 = graph.addVertex(null);
+ Vertex v3 = graph.addVertex(null);
+
+ Edge e1 = graph.addEdge(null, v1, v2, graphTest.convertLabel("test1"));
+ Edge e2 = graph.addEdge(null, v2, v3, graphTest.convertLabel("test2"));
+ Edge e3 = graph.addEdge(null, v3, v1, graphTest.convertLabel("test3"));
+
+ assertEquals(e1, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test1")).edges()));
+ assertEquals(e2, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test2")).edges()));
+ assertEquals(e3, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test3")).edges()));
+
+ assertEquals(e1, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test1"))));
+ assertEquals(e2, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test2"))));
+ assertEquals(e3, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test3"))));
+ }
}
@@ -173,6 +128,7 @@
private class BigdataGraphTest extends GraphTest {
private List<String> exclude = Arrays.asList(new String[] {
+ // this one creates a deadlock, no way around it
"testTransactionIsolationCommitCheck"
});
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 14:59:23 UTC (rev 8347)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 18:36:34 UTC (rev 8348)
@@ -646,7 +646,7 @@
public void add(final Statement stmt, final Resource... c)
throws RepositoryException {
- log.warn("single statement updates not recommended");
+// log.warn("single statement updates not recommended");
final Graph g = new GraphImpl();
g.add(stmt);
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-17 16:08:26
|
Revision: 8351
http://sourceforge.net/p/bigdata/code/8351
Author: thompsonbry
Date: 2014-05-17 16:08:21 +0000 (Sat, 17 May 2014)
Log Message:
-----------
Working on #939 (NSS does not start from command line: bigdata-war/src not found).
NanoSparqlServer: code has been modified to explicitly search (if jetty.resourceBase is not defined) (a) the local file system; (b) the classpath; and then (c) default to whatever is the default value in jetty.xml for the jetty.resourceBase property.
TestNSSHealthCheck: added a basic test suite for checking the health of an NSS instance once deployed. This is a starting point for CI based tests of the various deployment models.
build.xml: modified to illustrate a possible way of performing the CI deployment tests. More needs to be done here!
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/build.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:04:33 UTC (rev 8350)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:08:21 UTC (rev 8351)
@@ -25,6 +25,7 @@
import java.io.File;
import java.io.InputStream;
+import java.net.MalformedURLException;
import java.net.URL;
import java.util.LinkedHashMap;
import java.util.Map;
@@ -122,6 +123,60 @@
*/
String JETTY_DUMP_START = "jetty.dump.start";
+ /**
+ * This property specifies the resource path for the web application. In
+ * order for this mechanism to work, the <code>jetty.xml</code> file
+ * MUST contain a line which allows the resourceBase of the web
+ * application to be set from an environment variable. For example:
+ *
+ * <pre>
+ * <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
+ * </pre>
+ *
+ * The <code>jetty.resourceBase</code> variable may identify either a
+ * file or a resource on the class path. To force the use of the web
+ * application embedded within the <code>bigdata.jar</code> you need to
+ * specify a JAR URL along the following lines (using the appropriate
+ * file path and jar name and version):
+ *
+ * <pre>
+ * jar:file:../lib/bigdata-1.3.0.jar!/bigdata-war/src
+ * </pre>
+ *
+ * The use of absolute file paths is recommended for reliable
+ * resolution.
+ * <p>
+ * The order of preference is:
+ * <ol>
+ * <li><code>jetty.resourceBase</code> is specified. The value of this
+ * environment variable will be used to locate the web application.</li>
+ * <li>
+ * <code>jetty.resourceBase</code> is not specified (either
+ * <code>null</code> or whitespace). An attempt is made to locate the
+ * <code>bigdata-war/src</code> resource in the file system (relative to
+ * the current working directory). If found, the
+ * <code>jetty.resourceBase</code> environment variable is set to this
+ * resource using a <code>file:</code> style URL. This will cause jetty
+ * to use the web application directory in the file system.
+ * <p>
+ * If the resource is not found in the file system, then an attempt is
+ * made to locate that resource using the classpath. If found, the
+ * <code>jetty.resourceBase</code> is set to the URL for the located
+ * resource. This will cause jetty to use the web application resource
+ * on the classpath. If there are multiple such resources on the
+ * classpath, the first such resource will be discovered and used.</li>
+ * <li>
+ * Otherwise, the <code>jetty.resourceBase</code> environment variable
+ * is not modified and the default location specified in the
+ * <code>jetty.xml</code> file will be used. If jetty is unable to
+ * resolve that resource, then the web application will not start.</li>
+ * </ol>
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not
+ * start from command line: bigdata-war/src not found </a>
+ */
+ String JETTY_RESOURCE_BASE = "jetty.resourceBase";
+
}
/**
@@ -163,7 +218,7 @@
* <dt>-jettyXml</dt>
* <dd>The location of the jetty.xml resource that will be used
* to start the {@link Server} (default is the file in the JAR).
- * * The default will locate the <code>jetty.xml</code> resource
+ * The default will locate the <code>jetty.xml</code> resource
* that is bundled with the JAR. This preserves the historical
* behavior. If you want to use a different
* <code>jetty.xml</code> file, just override this property on
@@ -216,7 +271,11 @@
* use a different jetty.xml file, just override this property on the
* command line.
*/
- String jettyXml = "bigdata-war/src/jetty.xml";
+ String jettyXml = System.getProperty(//
+ SystemProperties.JETTY_XML,//
+ "bigdata-war/src/jetty.xml"//
+// SystemProperties.DEFAULT_JETTY_XML
+ );
/*
* Handle all arguments starting with "-". These should appear before
@@ -589,45 +648,26 @@
/*
* Configure the jetty Server using a jetty.xml file. In turn, the
* jetty.xml file configures the webapp using a web.xml file. The caller
- * can override the location of the jetty.xml file if they need to
- * change the way in which either jetty or the webapp are configured.
- * You can also override many of the properties in the jetty.xml file
- * using environment variables.
+ * can override the location of the jetty.xml file using the [jetty.xml]
+ * environment variable if they need to change the way in which either
+ * jetty or the webapp are configured. You can also override many of the
+ * properties in the [jetty.xml] file using environment variables. For
+ * example, they can also override the location of the web application
+ * (including the web.xml file) using the [jetty.resourceBase]
+ * environment variable.
*/
final Server server;
{
- // Locate jetty.xml.
- final URL jettyXmlUrl;
- if (new File(jettyXml).exists()) {
+ // Find the effective jetty.xml URL.
+ final URL jettyXmlURL = getEffectiveJettyXmlURL(classLoader,
+ jettyXml);
- // Check the file system.
-// jettyXmlUrl = new File(jettyXml).toURI();
- jettyXmlUrl = new URL("file:" + jettyXml);
-
- } else {
-
- // Check the classpath.
- jettyXmlUrl = classLoader.getResource(jettyXml);
-// jettyXmlUrl = classLoader.getResource("bigdata-war/src/jetty.xml");
-
- }
-
- if (jettyXmlUrl == null) {
-
- throw new RuntimeException("Not found: " + jettyXml);
-
- }
-
- if (log.isInfoEnabled())
- log.info("jetty configuration: jettyXml=" + jettyXml
- + ", jettyXmlUrl=" + jettyXmlUrl);
-
- // Build configuration from that resource.
+ // Build the server configuration from that jetty.xml resource.
final XmlConfiguration configuration;
{
// Open jetty.xml resource.
- final Resource jettyConfig = Resource.newResource(jettyXmlUrl);
+ final Resource jettyConfig = Resource.newResource(jettyXmlURL);
InputStream is = null;
try {
is = jettyConfig.getInputStream();
@@ -639,65 +679,208 @@
}
}
}
-
+
+ // Configure/apply jetty.resourceBase overrides.
+ configureEffectiveResourceBase(classLoader);
+
// Configure the jetty server.
server = (Server) configuration.configure();
}
/*
- * Configure the webapp (overrides, IIndexManager, etc.)
+ * Configure any overrides for the web application init-params.
*/
- {
+ configureWebAppOverrides(server, indexManager, initParams);
- final WebAppContext wac = getWebApp(server);
+ return server;
+
+ }
- if (wac == null) {
+ private static URL getEffectiveJettyXmlURL(final ClassLoader classLoader,
+ final String jettyXml) throws MalformedURLException {
- /*
- * This is a fatal error. If we can not set the IIndexManager,
- * the NSS will try to interpret the propertyFile in web.xml
- * rather than using the one that is already open and specified
- * by the caller. Among other things, that breaks the
- * HAJournalServer startup.
- */
+ // Locate jetty.xml.
+ final URL jettyXmlUrl;
+ boolean isFile = false;
+ boolean isClassPath = false;
+ if (new File(jettyXml).exists()) {
- throw new RuntimeException("Could not locate "
- + WebAppContext.class.getName());
+ // Check the file system.
+ // jettyXmlUrl = new File(jettyXml).toURI();
+ jettyXmlUrl = new URL("file:" + jettyXml);
+ isFile = true;
- }
+ } else {
+ // Check the classpath.
+ jettyXmlUrl = classLoader.getResource(jettyXml);
+ // jettyXmlUrl =
+ // classLoader.getResource("bigdata-war/src/jetty.xml");
+ isClassPath = true;
+
+ }
+
+ if (jettyXmlUrl == null) {
+
+ throw new RuntimeException("Not found: " + jettyXml);
+
+ }
+
+ if (log.isInfoEnabled())
+ log.info("jetty configuration: jettyXml=" + jettyXml + ", isFile="
+ + isFile + ", isClassPath=" + isClassPath
+ + ", jettyXmlUrl=" + jettyXmlUrl);
+
+ return jettyXmlUrl;
+
+ }
+
+ /**
+ * Search (a) the local file system; and (b) the classpath for the web
+ * application. If the resource is located, then set the
+ * [jetty.resourceBase] property. This search sequence gives preference to
+ * the local file system and then searches the classpath (which jetty does
+ * not know how to do by itself).
+ *
+ * @throws MalformedURLException
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not start
+ * from command line: bigdata-war/src not found </a>
+ */
+ private static void configureEffectiveResourceBase(
+ final ClassLoader classLoader) throws MalformedURLException {
+
+ // Check the environment variable.
+ String resourceBaseStr = System
+ .getProperty(SystemProperties.JETTY_RESOURCE_BASE);
+
+ // true iff declared as an environment variable.
+ final boolean isDeclared = resourceBaseStr != null
+ && resourceBaseStr.trim().length() > 0;
+ boolean isFile = false; // iff found in local file system.
+ boolean isClassPath = false; // iff found on classpath.
+
+ if (!isDeclared) {
+
/*
- * Force the use of the caller's IIndexManager. This is how we get the
- * NSS to use the already open Journal for the HAJournalServer.
+ * jetty.resourceBase not declared in the environment.
*/
- if (indexManager != null) {
- // Set the IIndexManager attribute on the WebAppContext.
- wac.setAttribute(IIndexManager.class.getName(), indexManager);
-
+ // default location: TODO To DEFAULT_JETTY_RESOURCE_BASE
+ resourceBaseStr = "./bigdata-war/src";
+
+ final URL resourceBaseURL;
+ if (new File(resourceBaseStr).exists()) {
+
+ // Check the file system.
+ resourceBaseURL = new URL("file:" + resourceBaseStr);
+ isFile = true;
+
+ } else {
+
+ // Check the classpath.
+ resourceBaseURL = classLoader.getResource(resourceBaseStr);
+ isClassPath = resourceBaseURL != null;
+
}
-
- /*
- * Note: You simply can not override the init parameters specified
- * in web.xml. Therefore, this sets the overrides on an attribute.
- * The attribute is then consulted when the web app starts and its
- * the override values are used if given.
- */
- if (initParams != null) {
- wac.setAttribute(
- BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES,
- initParams);
+ if (resourceBaseURL != null) {
+ /*
+ * We found the resource either in the file system or in the
+ * classpath.
+ *
+ * Explicitly set the discovered value on the jetty.resourceBase
+ * property. This will cause jetty to use the version of that
+ * resource that we discovered above.
+ *
+ * Note: If we did not find the resource, then the default value
+ * from the jetty.xml SystemProperty expression will be used by
+ * jetty. If it can not find a resource using that default
+ * value, then the startup will fail. We leave this final check
+ * to jetty itself since it will interpret the jetty.xml file
+ * itself.
+ */
+ System.setProperty(SystemProperties.JETTY_RESOURCE_BASE,
+ resourceBaseURL.toExternalForm());
+
}
}
- return server;
+ if (log.isInfoEnabled())
+ log.info("jetty configuration"//
+ + ": resourceBaseStr=" + resourceBaseStr
+ + ", isDeclared="
+ + isDeclared + ", isFile=" + isFile
+ + ", isClassPath="
+ + isClassPath
+ + ", jetty.resourceBase(effective)="
+ + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE));
}
+
+ /**
+ * Configure the webapp (overrides, IIndexManager, etc.)
+ * <p>
+ * Note: These overrides are achieved by setting the {@link WebAppContext}
+ * attribute named
+ * {@link BigdataRDFServletContextListener#INIT_PARAM_OVERRIDES}. The
+ * {@link BigdataRDFServletContextListener} then consults the attribute when
+ * reporting the effective value of the init-params. This convoluted
+ * mechanism is required because you can not otherwise override the
+ * init-params without editing <code>web.xml</code>.
+ */
+ private static void configureWebAppOverrides(//
+ final Server server,//
+ final IIndexManager indexManager,//
+ final Map<String, String> initParams//
+ ) {
+ final WebAppContext wac = getWebApp(server);
+
+ if (wac == null) {
+
+ /*
+ * This is a fatal error. If we can not set the IIndexManager, the
+ * NSS will try to interpret the propertyFile in web.xml rather than
+ * using the one that is already open and specified by the caller.
+ * Among other things, that breaks the HAJournalServer startup.
+ */
+
+ throw new RuntimeException("Could not locate "
+ + WebAppContext.class.getName());
+
+ }
+
+ /*
+ * Force the use of the caller's IIndexManager. This is how we get the
+ * NSS to use the already open Journal for the HAJournalServer.
+ */
+ if (indexManager != null) {
+
+ // Set the IIndexManager attribute on the WebAppContext.
+ wac.setAttribute(IIndexManager.class.getName(), indexManager);
+
+ }
+
+ /*
+ * Note: You simply can not override the init parameters specified in
+ * web.xml. Therefore, this sets the overrides on an attribute. The
+ * attribute is then consulted when the web app starts and the
+ * override values are used if given.
+ */
+ if (initParams != null) {
+
+ wac.setAttribute(
+ BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES,
+ initParams);
+
+ }
+
+ }
+
/**
* Return the {@link WebAppContext} for the {@link Server}.
*
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-05-17 16:08:21 UTC (rev 8351)
@@ -0,0 +1,642 @@
+/**
+Copyright (C) SYSTAP, LLC 2013. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sail.webapp.health;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import junit.framework.AssertionFailedError;
+import junit.framework.Test;
+import junit.framework.TestCase2;
+import junit.framework.TestListener;
+import junit.framework.TestResult;
+import junit.framework.TestSuite;
+import junit.textui.ResultPrinter;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.conn.ClientConnectionManager;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.impl.client.DefaultRedirectStrategy;
+import org.apache.http.util.EntityUtils;
+
+import com.bigdata.BigdataStatics;
+import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
+import com.bigdata.rdf.sail.webapp.client.ConnectOptions;
+import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory;
+import com.bigdata.rdf.sail.webapp.client.HttpException;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;
+import com.bigdata.util.concurrent.DaemonThreadFactory;
+
+/**
+ * Utility test suite provides a health check for a deployed instance.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+public class TestNSSHealthCheck extends TestCase2 {
+
+ /**
+ * A marker placed into index.html so we can recognize when that page is
+ * served.
+ */
+ private static final String JUNIT_TEST_MARKER_INDEX_HTML = "junit test marker: index.html";
+
+ /**
+ * The executor used by the http client.
+ */
+ private ExecutorService executorService;
+
+ /**
+ * The {@link ClientConnectionManager} for the {@link HttpClient} used by
+ * the {@link RemoteRepository}. This is used when we tear down the
+ * {@link RemoteRepository}.
+ */
+ private ClientConnectionManager m_cm;
+
+ /**
+ * Exposed to tests that do direct HTTP GET/POST operations.
+ */
+ protected HttpClient m_httpClient;
+
+ /**
+ * The client-API wrapper to the NSS.
+ */
+ protected RemoteRepositoryManager m_repo;
+
+ /**
+ * The effective {@link NanoSparqlServer} http end point (including the
+ * ContextPath).
+ * <pre>
+ * http://localhost:8080/bigdata -- webapp URL (includes "/bigdata" context path.
+ * </pre>
+ */
+ protected String m_serviceURL;
+
+ /**
+ * The URL of the root of the web application server. This does NOT include
+ * the ContextPath for the webapp.
+ *
+ * <pre>
+ * http://localhost:8080 -- root URL
+ * </pre>
+ */
+ protected String m_rootURL;
+
+ public TestNSSHealthCheck(final String name) {//, final String requestURI) {
+
+ super(name);
+
+// m_requestURI = requestURI;
+
+ }
+
+ /**
+ * FIXME hacked in test suite constructor.
+ */
+ private static String requestURI;
+
+ @Override
+ protected void setUp() throws Exception {
+
+ super.setUp();
+
+ m_rootURL = requestURI;
+
+ m_serviceURL = m_rootURL + BigdataStatics.getContextPath();
+
+ m_cm = DefaultClientConnectionManagerFactory.getInstance()
+ .newInstance();
+
+ final DefaultHttpClient httpClient = new DefaultHttpClient(m_cm);
+ m_httpClient = httpClient;
+
+ /*
+ * Ensure that the client follows redirects using a standard policy.
+ *
+ * Note: This is necessary for tests of the webapp structure since the
+ * container may respond with a redirect (302) to the location of the
+ * webapp when the client requests the root URL.
+ */
+ httpClient.setRedirectStrategy(new DefaultRedirectStrategy());
+
+ executorService = Executors.newCachedThreadPool(DaemonThreadFactory
+ .defaultThreadFactory());
+
+ m_repo = new RemoteRepositoryManager(m_serviceURL, m_httpClient,
+ executorService);
+
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+
+ m_rootURL = null;
+ m_serviceURL = null;
+
+ if (m_cm != null) {
+ m_cm.shutdown();
+ m_cm = null;
+ }
+
+ m_httpClient = null;
+ m_repo = null;
+
+ if (executorService != null) {
+ executorService.shutdownNow();
+ executorService = null;
+ }
+
+ super.tearDown();
+
+ }
+
+ static class HealthCheckTestSuite extends TestSuite {
+
+ /**
+ * The URL of the bigdata web application.
+ */
+ @SuppressWarnings("unused")
+ private final String requestURI;
+
+ /**
+ *
+ * @param name
+ * @param requestURI
+ * The URL of the bigdata web application.
+ */
+ private HealthCheckTestSuite(final String name, final String requestURI) {
+
+ super(name);
+
+ this.requestURI = requestURI;
+
+ // FIXME Hacked through static field.
+ TestNSSHealthCheck.requestURI = requestURI;
+
+ }
+
+ }
+
+ static HealthCheckTestSuite createTestSuite(final String name,
+ final String requestURI) {
+
+ final HealthCheckTestSuite suite = new HealthCheckTestSuite(name,
+ requestURI);
+
+ suite.addTestSuite(TestNSSHealthCheck.class);
+
+ return suite;
+
+ }
+
+ /**
+ * bare URL of the server
+ *
+ * <pre>
+ * http://localhost:8080
+ * </pre>
+ *
+ * The response should be <code>index.html</code> since we want the
+ * bigdata webapp to respond for the top-level context.
+ *
+ * <p>
+ * Note: You must ensure that the client follows redirects using a standard
+ * policy. This is necessary for tests of the webapp structure since the
+ * container may respond with a redirect (302) to the location of the webapp
+ * when the client requests the root URL.
+ */
+ public void test_webapp_structure_rootURL() throws Exception {
+
+ final String content = doGET(m_rootURL);
+
+ assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML));
+
+ }
+
+ /**
+ * URL with correct context path
+ *
+ * <pre>
+ * http://localhost:8080/bigdata
+ * </pre>
+ *
+ * The response should be <code>index.html</code>, which is specified
+ * through the welcome files list.
+ */
+ public void test_webapp_structure_contextPath() throws Exception {
+
+ final String content = doGET(m_serviceURL);
+
+ assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML));
+ }
+
+ /**
+ * URL with context path and index.html reference
+ *
+ * <pre>
+ * http://localhost:8080/bigdata/index.html
+ * </pre>
+ *
+ * This URL does NOT get mapped to anything (404).
+ */
+ public void test_webapp_structure_contextPath_indexHtml() throws Exception {
+
+ try {
+
+ doGET(m_serviceURL + "/index.html");
+
+ } catch (HttpException ex) {
+
+ assertEquals(404, ex.getStatusCode());
+
+ }
+
+ }
+
+ /**
+ * The <code>favicon.ico</code> file.
+ *
+ * @see <a href="http://www.w3.org/2005/10/howto-favicon"> How to add a
+ * favicon </a>
+ */
+ public void test_webapp_structure_favicon() throws Exception {
+
+ doGET(m_serviceURL + "/html/favicon.ico");
+
+ }
+
+ /**
+ * The <code>/status</code> servlet responds.
+ */
+ public void test_webapp_structure_status() throws Exception {
+
+ doGET(m_serviceURL + "/status");
+
+ }
+
+ /**
+ * The <code>/counters</code> servlet responds.
+ */
+ public void test_webapp_structure_counters() throws Exception {
+
+ doGET(m_serviceURL + "/counters");
+
+ }
+
+// /**
+// * The <code>/namespace/</code> servlet responds (multi-tenancy API).
+// */
+// public void test_webapp_structure_namespace() throws Exception {
+//
+// doGET(m_serviceURL + "/namespace/");
+//
+// }
+
+ /**
+ * The fully qualified URL for <code>index.html</code>
+ *
+ * <pre>
+ * http://localhost:8080/bigdata/html/index.html
+ * </pre>
+ *
+ * The response should be <code>index.html</code>, which is specified
+ * through the welcome files list.
+ */
+ public void test_webapp_structure_contextPath_html_indexHtml() throws Exception {
+
+ doGET(m_serviceURL + "/html/index.html");
+ }
+
+ private String doGET(final String url) throws Exception {
+
+ HttpResponse response = null;
+ HttpEntity entity = null;
+
+ try {
+
+ final ConnectOptions opts = new ConnectOptions(url);
+ opts.method = "GET";
+
+ response = doConnect(opts);
+
+ checkResponseCode(url, response);
+
+ entity = response.getEntity();
+
+ final String content = EntityUtils.toString(entity);
+
+ return content;
+
+ } finally {
+
+ try {
+ EntityUtils.consume(entity);
+ } catch (IOException ex) {
+ log.warn(ex, ex);
+ }
+
+ }
+
+ }
+
+ /**
+ * Connect to a SPARQL end point (GET or POST query only).
+ *
+ * @param opts
+ * The connection options.
+ *
+ * @return The connection.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619">
+ * RemoteRepository class should use application/x-www-form-urlencoded
+ * for large POST requests </a>
+ */
+ private HttpResponse doConnect(final ConnectOptions opts) throws Exception {
+
+ /*
+ * Generate the fully formed and encoded URL.
+ */
+
+ final StringBuilder urlString = new StringBuilder(opts.serviceURL);
+
+ ConnectOptions.addQueryParams(urlString, opts.requestParams);
+
+ final boolean isLongRequestURL = urlString.length() > 1024;
+
+ if (isLongRequestURL && opts.method.equals("POST")
+ && opts.entity == null) {
+
+ /*
+ * URL is too long. Reset the URL to just the service endpoint and
+ * use application/x-www-form-urlencoded entity instead. Only in
+ * cases where there is not already a request entity (SPARQL query
+ * and SPARQL update).
+ */
+
+ urlString.setLength(0);
+ urlString.append(opts.serviceURL);
+
+ opts.entity = ConnectOptions.getFormEntity(opts.requestParams);
+
+ } else if (isLongRequestURL && opts.method.equals("GET")
+ && opts.entity == null) {
+
+...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-18 13:35:44
|
Revision: 8353
http://sourceforge.net/p/bigdata/code/8353
Author: thompsonbry
Date: 2014-05-18 13:35:40 +0000 (Sun, 18 May 2014)
Log Message:
-----------
Added test for #887 - ticket is closed. Problem can not be demonstrated against the current code base. Suspect was fixed for the 1.3.0 release (heisenbug).
Javadoc update for jetty.resourceBase for the NSS.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-17 17:16:32 UTC (rev 8352)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-18 13:35:40 UTC (rev 8353)
@@ -128,6 +128,7 @@
// Test suite for SPARQL 1.1 BINDINGS clause
suite.addTestSuite(TestBindings.class);
suite.addTestSuite(TestBindHeisenbug708.class);
+ suite.addTestSuite(TestTicket887.class);
// Complex queries.
suite.addTestSuite(TestComplexQuery.class);
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java 2014-05-18 13:35:40 UTC (rev 8353)
@@ -0,0 +1,78 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2013. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.sparql.ast.eval;
+
+
+/**
+ * Test suite for a heisenbug involving BIND. Unlike the other issues this
+ * sometimes happens, and is sometimes OK, so we run the test in a loop 20
+ * times.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/708">
+ * Heisenbug </a>
+ *
+ * @version $Id$
+ */
+public class TestTicket887 extends AbstractDataDrivenSPARQLTestCase {
+
+ public TestTicket887() {
+ }
+
+ public TestTicket887(String name) {
+ super(name);
+ }
+
+ /**
+ * <pre>
+ * SELECT *
+ * WHERE {
+ *
+ * GRAPH ?g {
+ *
+ * BIND( "hello" as ?hello ) .
+ * BIND( CONCAT(?hello, " world") as ?helloWorld ) .
+ *
+ * ?member a ?class .
+ *
+ * }
+ *
+ * }
+ * LIMIT 1
+ * </pre>
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/887" > BIND is leaving a
+ * variable unbound </a>
+ */
+ public void test_ticket_887_bind() throws Exception {
+
+ new TestHelper(
+ "ticket_887_bind", // testURI,
+ "ticket_887_bind.rq",// queryFileURL
+ "ticket_887_bind.trig",// dataFileURL
+ "ticket_887_bind.srx"// resultFileURL
+ ).runTest();
+
+ }
+
+}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq 2014-05-18 13:35:40 UTC (rev 8353)
@@ -0,0 +1,14 @@
+SELECT *
+WHERE {
+
+ GRAPH ?g {
+
+ BIND( "hello" as ?hello ) .
+ BIND( CONCAT(?hello, " world") as ?helloWorld ) .
+
+ ?member a ?class .
+
+ }
+
+}
+LIMIT 1
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx 2014-05-18 13:35:40 UTC (rev 8353)
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<sparql
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:xs="http://www.w3.org/2001/XMLSchema#"
+ xmlns="http://www.w3.org/2005/sparql-results#" >
+ <head>
+ <variable name="?hello"/>
+ <variable name="?helloWorld"/>
+ <variable name="?member"/>
+ <variable name="?class"/>
+ <variable name="?g"/>
+ </head>
+ <results>
+ <result>
+ <binding name="hello">
+ <literal>hello</literal>
+ </binding>
+ <binding name="helloWorld">
+ <literal>hello world</literal>
+ </binding>
+ <binding name="member">
+ <uri>http://www.bigdata.com/member</uri>
+ </binding>
+ <binding name="class">
+ <uri>http://www.bigdata.com/cls</uri>
+ </binding>
+ <binding name="g">
+ <uri>http://www.bigdata.com/</uri>
+ </binding>
+ </result>
+ </results>
+</sparql>
\ No newline at end of file
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig 2014-05-18 13:35:40 UTC (rev 8353)
@@ -0,0 +1,6 @@
+@prefix : <http://www.bigdata.com/> .
+@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+
+: {
+ :member a :cls
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 17:16:32 UTC (rev 8352)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 13:35:40 UTC (rev 8353)
@@ -152,25 +152,39 @@
* environment variable will be used to locate the web application.</li>
* <li>
* <code>jetty.resourceBase</code> is not specified (either
- * <code>null</code> or whitespace). An attempt is made to locate the
- * <code>bigdata-war/src</code> resource in the file system (relative to
- * the current working directory). If found, the
- * <code>jetty.resourceBase</code> environment variable is set to this
- * resource using a <code>file:</code> style URL. This will cause jetty
- * to use the web application directory in the file system.
- * <p>
- * If the resource is not found in the file system, then an attempt is
- * made to locate that resource using the classpath. If found, the the
- * <code>jetty.resourceBase</code> is set to the URL for the located
- * resource. This will cause jetty to use the web application resource
- * on the classpath. If there are multiple such resources on the
- * classpath, the first such resource will be discovered and used.</li>
+ * <code>null</code> or whitespace).
+ * <ol>
+ * <li>An attempt is made to locate the <code>bigdata-war/src</code>
+ * resource in the file system (relative to the current working
+ * directory). If found, the <code>jetty.resourceBase</code> environment
+ * variable is set to this resource using a <code>file:</code> style
+ * URL. This will cause jetty to use the web application directory in
+ * the file system.</li>
* <li>
+ * An attempt is made to locate the resource
+ * <code>/WEB-INF/web.xml</code> using the classpath (this handles the
+ * case when running under the Eclipse IDE). If found, the
+ * <code>jetty.resourceBase</code> is set to the URL formed by removing
+ * the trailing <code>WEB-INF/web.xml</code> for the located resource.
+ * This will cause jetty to use the web application resource on the
+ * classpath. If there are multiple such resources on the classpath, the
+ * first such resource will be discovered and used.</li>
+ * <li>An attempt is made to locate the resource
+ * <code>bigdata-war/src/WEB-INF/web.xml</code> using the classpath
+ * (this handles the case when running from the command line using a
+ * bigdata JAR). If found, the <code>jetty.resourceBase</code> is
+ * set to the URL formed by removing the trailing <code>WEB-INF/web.xml</code>
+ * for the located resource. This will cause jetty to use the web
+ * application resource on the classpath. If there are multiple such
+ * resources on the classpath, the first such resource will be
+ * discovered and used.</li>
+ * <li>
* Otherwise, the <code>jetty.resourceBase</code> environment variable
* is not modified and the default location specified in the
* <code>jetty.xml</code> file will be used. If jetty is unable to
* resolve that resource, then the web application will not start.</li>
* </ol>
+ * </ol>
*
* @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not
* start from command line: bigdata-war/src not found </a>
@@ -825,7 +839,7 @@
}
if (tmp != null) {
if (src != null) {
- if (log.isInfoEnabled())
+ if(log.isInfoEnabled())
log.info("Found: src=" + src + ", url=" + tmp);
}
final String s = tmp.toExternalForm();
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-19 14:26:16
|
Revision: 8359
http://sourceforge.net/p/bigdata/code/8359
Author: thompsonbry
Date: 2014-05-19 14:26:12 +0000 (Mon, 19 May 2014)
Log Message:
-----------
Bug fix for #940 (HA LBS breaks tomcat deployment).
The root cause is that the ProxyServlet is not available under tomcat (or anything else besides jetty). Therefore it can not be configured from the same web.xml file that is used for other platforms.
To address this, I extracted the HA LBS configuration into a new override-web.xml file and then modified the NanoSparqlServer to locate that resource.
The HA test suite also needed to be modified to explicitly locate this resource.
See #940 (HA LBS breaks tomcat deployment).
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-19 14:26:12 UTC (rev 8359)
@@ -233,10 +233,10 @@
serviceDir = bigdata.serviceDir;
+ logicalServiceId = bigdata.logicalServiceId;
+
haLogDir = bigdata.logDir;
- logicalServiceId = bigdata.logicalServiceId;
-
writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort);
/*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-19 14:26:12 UTC (rev 8359)
@@ -61,7 +61,7 @@
private static fedname = "benchmark";
// The RMI port for the HAGlue interface (may be ZERO for a random port).
- private static rmiPort = ConfigMath.add(9080,1);
+ private static rmiPort = ConfigMath.add(9080,2);
// write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,2);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-19 14:26:12 UTC (rev 8359)
@@ -95,6 +95,7 @@
import com.bigdata.quorum.QuorumException;
import com.bigdata.quorum.zk.ZKQuorumClient;
import com.bigdata.quorum.zk.ZKQuorumImpl;
+import com.bigdata.rdf.sail.webapp.NanoSparqlServer;
import com.bigdata.rdf.sail.webapp.client.HttpException;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
import com.bigdata.service.jini.JiniClientConfig;
@@ -135,6 +136,7 @@
*/
static class ServiceListener implements IServiceListener {
+ @SuppressWarnings("unused")
private volatile HAGlue haGlue;
private volatile ProcessHelper processHelper;
private volatile boolean dead = false;
@@ -2226,7 +2228,7 @@
* Used to override the port at which jetty sets up the http
* connection.
*/
- private final String TEST_JETTY_PORT = "jetty.port";
+ private final String TEST_JETTY_PORT = NanoSparqlServer.SystemProperties.JETTY_PORT;
/**
* The path in the local file system to the root of the web
@@ -2234,13 +2236,15 @@
* code, but the webapp gets deployed to the serviceDir for this
* test suite.
*/
- private final String JETTY_RESOURCE_BASE = "jetty.resourceBase";
-
+ private final String JETTY_RESOURCE_BASE = NanoSparqlServer.SystemProperties.JETTY_RESOURCE_BASE;
+
+ private final String JETTY_OVERRIDE_WEB_XML = NanoSparqlServer.SystemProperties.JETTY_OVERRIDE_WEB_XML;
+
/**
* Used to override the <code>jetty.dump.start</code> environment
* property.
*/
- private final String TEST_JETTY_DUMP_START = "jetty.dump.start";
+ private final String TEST_JETTY_DUMP_START = NanoSparqlServer.SystemProperties.JETTY_DUMP_START;
/**
* The absolute effective path of the service directory. This is
@@ -2290,6 +2294,9 @@
// Override the location of the webapp as deployed.
cmds.add("-D" + JETTY_RESOURCE_BASE + "=.");
+ // Override the location of the override-web.xml file as deployed.
+ cmds.add("-D" + JETTY_OVERRIDE_WEB_XML + "=./WEB-INF/override-web.xml");
+
// Override the jetty.dump.start.
cmds.add("-D" + TEST_JETTY_DUMP_START + "=" + jettyDumpStart);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-19 14:26:12 UTC (rev 8359)
@@ -191,6 +191,23 @@
*/
String JETTY_RESOURCE_BASE = "jetty.resourceBase";
+ /**
+ * The location of the <code>override-web.xml</code> resource. The
+ * default is given in <code>jetty.xml</code> and serves to locate the
+ * resource when deployed under an IDE. If not explicitly given, value
+ * of the environment variable is set by the same logic that sets the
+ * {@link #JETTY_RESOURCE_BASE} environment variable. This allows the
+ * <code>override-web.xml</code> resource to be found in its default
+ * location (which is the same directory / package as the
+ * <code>web.xml</code> file) while still preserving the ability to
+ * override the location of that resource explicitly by setting the
+ * environment variable before starting the server.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/940" > ProxyServlet in
+ * web.xml breaks tomcat WAR (HA LBS) </a>
+ */
+ String JETTY_OVERRIDE_WEB_XML = "jetty.overrideWebXml";
+
}
/**
@@ -439,10 +456,17 @@
public static void awaitServerStart(final Server server)
throws InterruptedException, TimeoutException, Exception {
+// Note: Does not appear to help.
+//
+// final WebAppContext wac = getWebApp(server);
+//
+// if (wac == null)
+// throw new Exception("WebApp is not available?");
+
final long timeout = Long.parseLong(System.getProperty(
SystemProperties.JETTY_STARTUP_TIMEOUT,
SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT));
-
+
boolean ok = false;
final long begin = System.nanoTime();
final long nanos = TimeUnit.SECONDS.toNanos(timeout);
@@ -453,7 +477,8 @@
server.start();
// Await running.
remaining = nanos - (System.nanoTime() - begin);
- while (server.isStarting() && !server.isRunning() && remaining > 0) {
+ while (server.isStarting() && !server.isRunning()
+ /* && !wac.isRunning() */ && remaining > 0) {
Thread.sleep(100/* ms */);
// remaining = nanos - (now - begin) [aka elapsed]
remaining = nanos - (System.nanoTime() - begin);
@@ -461,6 +486,8 @@
if (remaining < 0) {
throw new TimeoutException();
}
+// if (!wac.isRunning())
+// throw new Exception("WebApp is not running?");
ok = true;
} finally {
if (!ok) {
@@ -870,9 +897,18 @@
* to jetty itself since it will interpret the jetty.xml file
* itself.
*/
+ final String tmp = resourceBaseURL.toExternalForm();
+
System.setProperty(SystemProperties.JETTY_RESOURCE_BASE,
- resourceBaseURL.toExternalForm());
+ tmp);
+ final URL overrideWebXmlURL = new URL(tmp
+ + (tmp.endsWith("/") ? "" : "/")
+ + "WEB-INF/override-web.xml");
+
+ System.setProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML,
+ overrideWebXmlURL.toExternalForm());
+
}
}
@@ -885,7 +921,9 @@
+ ", isClassPath="
+ isClassPath
+ ", jetty.resourceBase(effective)="
- + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE));
+ + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE)
+ + ", jetty.overrideWebXml(effective)="
+ + System.getProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML));
}
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml 2014-05-19 14:26:12 UTC (rev 8359)
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_1.xsd"
+ version="3.1">
+ <servlet>
+ <servlet-name>Load Balancer</servlet-name>
+ <description>
+ The HA Load Balancer servlet provides a transparent proxy for
+ requests arriving its configured URL pattern (the "external"
+ interface for the load balancer) to the root of the web
+ application.
+
+ The use of the load balancer is entirely optional. If the
+ security rules permit, then clients MAY make requests directly
+ against a specific service. Thus, no specific provision exists
+ to disable the load balancer servlet, but you may choose not to
+ deploy it.
+
+ When successfully deployed, requests having prefix corresponding to
+ the URL pattern for the load balancer are automatically redirected
+ to a joined service in the met quorum based on the configured load
+ balancer policy.
+
+ Requests directed to /bigdata/LBS/leader are proxied to the quorum
+ leader - this URL must be used for non-idempotent requests
+ (updates).
+
+ Requests directed to /bigdata/LBS/read are load balanced over the
+ services joined with the met quourm. This URL may only be used
+ with idempotent requests (reads).
+
+ For non-HA deployments, requests are simply forwarded to the local
+ service after stripping off the /LBS/leader or /LBS/read prefix.
+ Thus, it is always safe to use the LBS request URLs.
+
+ The load balancer policies are "HA aware." They will always
+ redirect update requests to the quorum leader. The default
+ polices will load balance read requests over the leader and
+ followers in a manner that reflects the CPU, IO Wait, and GC
+ Time associated with each service. The PlatformStatsPlugIn
+ and GangliaPlugIn MUST be enabled for the default load
+ balancer policy to operate. It depends on those plugins to
+ maintain a model of the load on the HA replication cluster.
+ The GangliaPlugIn should be run only as a listener if you are
+      running the real gmond process on the host. If you are
+ not running gmond, then the GangliaPlugIn should be configured
+ as both a listener and a sender.
+ </description>
+ <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class>
+ <load-on-startup>1</load-on-startup>
+ <async-supported>true</async-supported>
+ <init-param>
+ <param-name>policy</param-name>
+ <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value>
+ <description>
+ The load balancer policy. This must be an instance of the
+ IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is
+ used when no value is specified.
+
+ The policies differ ONLY in how they handle READ requests. All policies
+ proxy updates to the leader. If you do not want update proxying, then
+ use a URL that does not address the HALoadBalancerServlet.
+
+ The following policies are pre-defined:
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy:
+
+ Does not load balance read requests.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy:
+
+ Round robin for read requests.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy:
+
+        Load based proxying for read requests using the built-in http
+ service for reporting performance counters. This policy requires
+ the PlatformStatsPlugIn and may also require platform specific
+ metrics collection dependencies, e.g., sysstat.
+
+ com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy:
+
+ Load based proxying for read requests using ganglia. This policy
+        requires the PlatformStatsPlugIn. In addition, either
+ gmond must be installed on each node or the embedded GangliaService
+ must be enabled such that performance metrics are collected and
+ reported.
+
+ Some of these policies can be further configured using additional
+ init-param elements that they understand. See the javadoc for the
+ individual policies for more information.
+ </description>
+ </init-param>
+ </servlet>
+ <servlet-mapping>
+ <servlet-name>Load Balancer</servlet-name>
+ <url-pattern>/LBS/*</url-pattern>
+ </servlet-mapping>
+</web-app>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-19 14:26:12 UTC (rev 8359)
@@ -89,102 +89,8 @@
<description>Performance counters.</description>
<servlet-class>com.bigdata.rdf.sail.webapp.CountersServlet</servlet-class>
<async-supported>true</async-supported>
- </servlet><!-- -->
- <servlet>
- <servlet-name>Load Balancer</servlet-name>
- <description>
- The HA Load Balancer servlet provides a transparent proxy for
- requests arriving its configured URL pattern (the "external"
- interface for the load balancer) to the root of the web
- application.
-
- The use of the load balancer is entirely optional. If the
- security rules permit, then clients MAY make requests directly
- against a specific service. Thus, no specific provision exists
- to disable the load balancer servlet, but you may choose not to
- deploy it.
-
- When successfully deployed, requests having prefix corresponding to
- the URL pattern for the load balancer are automatically redirected
- to a joined service in the met quorum based on the configured load
- balancer policy.
-
- Requests directed to /bigdata/LBS/leader are proxied to the quorum
- leader - this URL must be used for non-idempotent requests
- (updates).
-
- Requests directed to /bigdata/LBS/read are load balanced over the
- services joined with the met quourm. This URL may only be used
- with idempotent requests (reads).
-
- For non-HA deployments, requests are simply forwarded to the local
- service after stripping off the /LBS/leader or /LBS/read prefix.
- Thus, it is always safe to use the LBS request URLs.
-
- The load balancer policies are "HA aware." They will always
- redirect update requests to the quorum leader. The default
- polices will load balance read requests over the leader and
- followers in a manner that reflects the CPU, IO Wait, and GC
- Time associated with each service. The PlatformStatsPlugIn
- and GangliaPlugIn MUST be enabled for the default load
- balancer policy to operate. It depends on those plugins to
- maintain a model of the load on the HA replication cluster.
- The GangliaPlugIn should be run only as a listener if you are
- are running the real gmond process on the host. If you are
- not running gmond, then the GangliaPlugIn should be configured
- as both a listener and a sender.
- </description>
- <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class>
- <load-on-startup>1</load-on-startup>
- <async-supported>true</async-supported>
- <init-param>
- <param-name>policy</param-name>
- <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value>
- <description>
- The load balancer policy. This must be an instance of the
- IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is
- used when no value is specified.
-
- The policies differ ONLY in how they handle READ requests. All policies
- proxy updates to the leader. If you do not want update proxying, then
- use a URL that does not address the HALoadBalancerServlet.
-
- The following policies are pre-defined:
-
- com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy:
-
- Does not load balance read requests.
-
- com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy:
-
- Round robin for read requests.
-
- com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy:
-
- Load based proxying for read requests using the build in http
- service for reporting performance counters. This policy requires
- the PlatformStatsPlugIn and may also require platform specific
- metrics collection dependencies, e.g., sysstat.
-
- com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy:
-
- Load based proxying for read requests using ganglia. This policy
- requires the requires the PlatformStatsPlugIn. In addition, either
- gmond must be installed on each node or the embedded GangliaService
- must be enabled such that performance metrics are collected and
- reported.
-
- Some of these policies can be further configured using additional
- init-param elements that they understand. See the javadoc for the
- individual policies for more information.
- </description>
- </init-param>
</servlet>
- <servlet-mapping>
- <servlet-name>Load Balancer</servlet-name>
- <url-pattern>/LBS/*</url-pattern>
- </servlet-mapping>
- <!-- -->
+ <!-- Note: The HALoadBalancerServlet is deployed from override-web.xml -->
<!-- Serve anything under /html/* as a simple file. -->
<servlet-mapping>
<servlet-name>default</servlet-name>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-18 15:49:11 UTC (rev 8358)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-19 14:26:12 UTC (rev 8359)
@@ -142,14 +142,12 @@
<Arg>
<!-- This is the bigdata web application. -->
<New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext">
- <Set name="war">
- <!-- The location of the top-level of the bigdata webapp. -->
- <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" />
- </Set>
- <Set name="contextPath">/bigdata</Set>
+ <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set>
+ <Set name="contextPath">/bigdata</Set>
<Set name="descriptor">WEB-INF/web.xml</Set>
<Set name="parentLoaderPriority">true</Set>
<Set name="extractWAR">false</Set>
+ <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set>
</New>
</Arg>
</Call>
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-20 13:47:55
|
Revision: 8377
http://sourceforge.net/p/bigdata/code/8377
Author: thompsonbry
Date: 2014-05-20 13:47:52 +0000 (Tue, 20 May 2014)
Log Message:
-----------
Modified to stage the javadoc as well.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-20 13:28:29 UTC (rev 8376)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-20 13:47:52 UTC (rev 8377)
@@ -408,7 +408,7 @@
encoding="utf-8"
private="false"
>
- <arg value="-J-Xmx1000m" />
+ <arg value="-J-Xmx2000m" />
<arg value="-quiet" />
<packageset dir="${bigdata.dir}/bigdata/src/java" />
<packageset dir="${bigdata.dir}/bigdata/src/samples" />
@@ -917,7 +917,7 @@
<!-- -->
<target name="stage"
description="stages resources (jar, config, policy, logging files) needed to package or execute the bigdata distribution."
- depends="jar">
+ depends="jar, javadoc">
<!-- Create staging directories -->
<property name="dist.dir" location="${bigdata.dir}/dist/bigdata" />
@@ -933,6 +933,7 @@
<property name="dist.var.config.jini" location="${dist.var.config}/jini" />
<property name="dist.var.jetty" location="${dist.var}/jetty" />
<property name="dist.doc" location="${dist.dir}/doc" />
+ <property name="dist.doc.api" location="${dist.dir}/doc/api" />
<property name="dist.doc.legal" location="${dist.dir}/doc/LEGAL" />
<delete dir="${dist.dir}" quiet="true" />
@@ -947,6 +948,7 @@
<mkdir dir="${dist.var.config.logging}" />
<mkdir dir="${dist.var.config.jini}" />
<mkdir dir="${dist.doc}" />
+ <mkdir dir="${dist.doc.api}" />
<mkdir dir="${dist.doc.legal}" />
<mkdir dir="${dist.dir}/etc" />
<mkdir dir="${dist.dir}/etc/init.d" />
@@ -1232,12 +1234,18 @@
</fileset>
</copy>
- <!-- Stage top-level license file and copyright NOTICE file. -->
+ <!-- Stage top-level license file and copyright NOTICE file. -->
<copy toDir="${dist.doc}">
<fileset file="${bigdata.dir}/LICENSE.txt"/>
<fileset file="${bigdata.dir}/NOTICE"/>
</copy>
+ <!-- Stage javadoc (iff generated). -->
+ <copy toDir="${dist.doc.api}" failonerror="false">
+ <fileset dir="${build.dir}/docs/api">
+ </fileset>
+ </copy>
+
<!-- Stage license files for dependencies (LEGAL). -->
<copy toDir="${dist.doc.legal}" flatten="true">
<fileset dir="${bigdata.dir}">
@@ -1245,11 +1253,11 @@
</fileset>
</copy>
- <!-- Stage README. -->
+ <!-- Stage README. -->
<copy file="${src.resources}/HAJournal/README"
todir="${dist.dir}/doc" />
- <!-- Stage documentation from the wiki. -->
+ <!-- Stage documentation from the wiki. -->
<get dest="${dist.doc}/HAJournalServer.html" ignoreerrors="true"
src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer?printable=yes"
/>
@@ -1299,10 +1307,11 @@
bigdata/var/jetty - the webapp.
bigdata/var/jetty/jetty.xml - jetty server configuration.
bigdata/var/jetty/bigdata/WEB-INF/web.xml - webapp configuration.
+ bigdata/doc - documentation
bigdata/doc/LEGAL - license files for dependencies.
bigdata/doc/LICENSE.txt - bigdata license file.
bigdata/doc/NOTICE - copyright NOTICE files.
- bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page)
+ bigdata/doc/api - javadoc
bigdata/etc/init.d/bigdataHA - HA services start/stop script.
bigdata/etc/default/bigdataHA - HA services required config file.
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2014-05-20 13:28:29 UTC (rev 8376)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2014-05-20 13:47:52 UTC (rev 8377)
@@ -1,5 +1,7 @@
Bigdata Highly Available Replication Cluster
+*** See the HAJournalServer on the wiki for more information ***
+
========== INSTALL ==========
0. The nodes MUST have synchronized clocks, both for logging and to
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <mrp...@us...> - 2014-05-20 18:55:47
|
Revision: 8386
http://sourceforge.net/p/bigdata/code/8386
Author: mrpersonick
Date: 2014-05-20 18:55:44 +0000 (Tue, 20 May 2014)
Log Message:
-----------
added a means of just running the AST optimizers without actually running the query
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-05-20 17:13:40 UTC (rev 8385)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-05-20 18:55:44 UTC (rev 8386)
@@ -411,7 +411,53 @@
}
}
+
+ /**
+ * Optimize a SELECT query.
+ *
+ * @param store
+ * The {@link AbstractTripleStore} having the data.
+ * @param queryPlan
+ * The {@link ASTContainer}.
+ * @param bs
+ * The initial solution to kick things off.
+ *
+ * @return An optimized AST.
+ *
+ * @throws QueryEvaluationException
+ */
+ static public QueryRoot optimizeTupleQuery(
+ final AbstractTripleStore store, final ASTContainer astContainer,
+ final QueryBindingSet bs) throws QueryEvaluationException {
+ final AST2BOpContext context = new AST2BOpContext(astContainer, store);
+
+ // Clear the optimized AST.
+ astContainer.clearOptimizedAST();
+
+ // Batch resolve Values to IVs and convert to bigdata binding set.
+ final IBindingSet[] bindingSets = mergeBindingSets(astContainer,
+ batchResolveIVs(store, bs));
+
+ // Convert the query (generates an optimized AST as a side-effect).
+ AST2BOpUtility.convert(context, bindingSets);
+
+ // Get the projection for the query.
+ final IVariable<?>[] projected = astContainer.getOptimizedAST()
+ .getProjection().getProjectionVars();
+
+ final List<String> projectedSet = new LinkedList<String>();
+
+ for (IVariable<?> var : projected)
+ projectedSet.add(var.getName());
+
+ // The optimized AST.
+ final QueryRoot optimizedQuery = astContainer.getOptimizedAST();
+
+ return optimizedQuery;
+
+ }
+
/**
* Evaluate a CONSTRUCT/DESCRIBE query.
* <p>
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2014-05-20 17:13:40 UTC (rev 8385)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2014-05-20 18:55:44 UTC (rev 8386)
@@ -98,4 +98,32 @@
}
+ public QueryRoot optimize() throws QueryEvaluationException {
+
+ return optimize((BindingsClause) null);
+
+ }
+
+ public QueryRoot optimize(final BindingsClause bc)
+ throws QueryEvaluationException {
+
+ final QueryRoot originalQuery = astContainer.getOriginalAST();
+
+ if (bc != null)
+ originalQuery.setBindingsClause(bc);
+
+ if (getMaxQueryTime() > 0)
+ originalQuery.setTimeout(TimeUnit.SECONDS
+ .toMillis(getMaxQueryTime()));
+
+ originalQuery.setIncludeInferred(getIncludeInferred());
+
+ final QueryRoot optimized = ASTEvalHelper.optimizeTupleQuery(
+ getTripleStore(), astContainer, new QueryBindingSet(
+ getBindings()));
+
+ return optimized;
+
+ }
+
}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-05-23 15:29:44
|
Revision: 8414
http://sourceforge.net/p/bigdata/code/8414
Author: thompsonbry
Date: 2014-05-23 15:29:40 +0000 (Fri, 23 May 2014)
Log Message:
-----------
See #941 (merge deployments branch to main branch).
- HARestore.sh: You can not safely rely on the limited classpath that
is used in this script. This is very likely to break based merely
on the imports into the HARestore, Journal, AbstractJournal and
related classes. At a minimum, we would need to test this classpath
for each release or in CI. I would prefer that we had a means to
assemble a better classpath. The startHAServices script has a
similar problem. The classpath is currently hacked there using the
incantation
export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'`
- What is the purpose of the "src/resources/deployment" directory? Is
this the "single-server, non-HA" NSS deployment?
- /bigdata/deployment - we put all of this stuff under /src/resources NOT /bigdata.
- I have deleted /bigdata/deployment entirely from branches/BIGDATA_RELEASE_1_3_0.
- I have copied the files (but not the SVN folders) from the
DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment into
/src/resources/deployment.
- jetty.xml: copied from the DEPLOYMENTS branch.
- /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh
- This has been removed. The src/resources/deployment/nss directory
has similar scripts. It is Ok to add an ant task to start the nss
for developers, but deployments should be based on the "ant stage"
pattern.
- src/resources/deployment/nss/WEB-INF/RWStore.properties should be
removed. The brew script should replace the following line in the
version from bigdata-war/src/WEB-INF/RWStore.properties with an
absolute filename.
com.bigdata.journal.AbstractJournal.file=ZZZZ
- src/resources/deployment/nss/WEB-INF/log4j.properties should be
removed. The brew script should replace the following lines in the
version from dist/var/config/logging/log4j.properties in order to
  set up (a) logging to a file; and (b) to specify the absolute
location of that file.
log4j.rootCategory=XXXX
log4j.appender.file.File=YYYY
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414)
@@ -1,9 +1,11 @@
# Default log4j configuration. See the individual classes for the
# specific loggers, but generally they are named for the class in
# which they are defined.
-
-# Default log4j configuration for testing purposes.
#
+# This configuration gets used by the bigdata.war artifact when deployed
+# into a servlet container. It also might be used by the bigdata webapp
+# if -Dlog4j.configuration is not specified when starting bigdata.
+#
# You probably want to set the default log level to ERROR.
#
log4j.rootCategory=WARN, dest1
@@ -36,7 +38,7 @@
##
# Rule execution log. This is a formatted log file (comma delimited).
-log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
+#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false
log4j.appender.ruleLog=org.apache.log4j.FileAppender
log4j.appender.ruleLog.Threshold=ALL
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-23 15:29:40 UTC (rev 8414)
@@ -29,46 +29,46 @@
<Set name="detailedDump">false</Set>
</Get>
- <!-- =========================================================== -->
- <!-- Get the platform mbean server -->
- <!-- =========================================================== -->
- <Call id="MBeanServer" class="java.lang.management.ManagementFactory"
- name="getPlatformMBeanServer" />
-
- <!-- =========================================================== -->
- <!-- Initialize the Jetty MBean container -->
- <!-- =========================================================== -->
- <!-- Note: This breaks CI if it is enabled
- <Call name="addBean">
- <Arg>
- <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer">
- <Arg>
- <Ref refid="MBeanServer" />
- </Arg>
- </New>
- </Arg>
- </Call>-->
-
- <!-- Add the static log to the MBean server.
- <Call name="addBean">
- <Arg>
- <New class="org.eclipse.jetty.util.log.Log" />
- </Arg>
- </Call>-->
+ <!-- =========================================================== -->
+ <!-- Get the platform mbean server -->
+ <!-- =========================================================== -->
+ <Call id="MBeanServer" class="java.lang.management.ManagementFactory"
+ name="getPlatformMBeanServer" />
+
+ <!-- =========================================================== -->
+ <!-- Initialize the Jetty MBean container -->
+ <!-- =========================================================== -->
+ <!-- Note: This breaks CI if it is enabled
+ <Call name="addBean">
+ <Arg>
+ <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer">
+ <Arg>
+ <Ref refid="MBeanServer" />
+ </Arg>
+ </New>
+ </Arg>
+ </Call>-->
+
+ <!-- Add the static log to the MBean server.
+ <Call name="addBean">
+ <Arg>
+ <New class="org.eclipse.jetty.util.log.Log" />
+ </Arg>
+ </Call>-->
- <!-- For remote MBean access (optional)
- <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer">
- <Arg>
- <New class="javax.management.remote.JMXServiceURL">
- <Arg type="java.lang.String">rmi</Arg>
- <Arg type="java.lang.String" />
- <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg>
- <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg>
- </New>
- </Arg>
- <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg>
- <Call name="start" />
- </New>-->
+ <!-- For remote MBean access (optional)
+ <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer">
+ <Arg>
+ <New class="javax.management.remote.JMXServiceURL">
+ <Arg type="java.lang.String">rmi</Arg>
+ <Arg type="java.lang.String" />
+ <Arg type="java.lang.Integer"><Property name="jetty.jmxrmiport" default="1090"/></Arg>
+ <Arg type="java.lang.String">/jndi/rmi://<Property name="jetty.jmxrmihost" default="localhost"/>:<Property name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg>
+ </New>
+ </Arg>
+ <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg>
+ <Call name="start" />
+ </New>-->
<!-- =========================================================== -->
<!-- Http Configuration. -->
@@ -97,25 +97,25 @@
</New>
<!-- Configure the HTTP endpoint. -->
- <Call name="addConnector">
- <Arg>
- <New class="org.eclipse.jetty.server.ServerConnector">
- <Arg name="server"><Ref refid="Server" /></Arg>
- <Arg name="factories">
- <Array type="org.eclipse.jetty.server.ConnectionFactory">
- <Item>
- <New class="org.eclipse.jetty.server.HttpConnectionFactory">
- <Arg name="config"><Ref refid="httpConfig" /></Arg>
- </New>
- </Item>
- </Array>
- </Arg>
- <Set name="host"><SystemProperty name="jetty.host" /></Set>
- <Set name="port"><SystemProperty name="jetty.port" default="8080" /></Set>
- <Set name="idleTimeout"><SystemProperty name="http.timeout" default="30000"/></Set>
- </New>
- </Arg>
- </Call>
+ <Call name="addConnector">
+ <Arg>
+ <New class="org.eclipse.jetty.server.ServerConnector">
+ <Arg name="server"><Ref refid="Server" /></Arg>
+ <Arg name="factories">
+ <Array type="org.eclipse.jetty.server.ConnectionFactory">
+ <Item>
+ <New class="org.eclipse.jetty.server.HttpConnectionFactory">
+ <Arg name="config"><Ref refid="httpConfig" /></Arg>
+ </New>
+ </Item>
+ </Array>
+ </Arg>
+ <Set name="host"><Property name="jetty.host" /></Set>
+ <Set name="port"><Property name="jetty.port" default="8080" /></Set>
+ <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set>
+ </New>
+ </Arg>
+ </Call>
<!-- =========================================================== -->
<!-- Set handler Collection Structure -->
@@ -142,12 +142,12 @@
<Arg>
<!-- This is the bigdata web application. -->
<New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext">
- <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set>
+ <Set name="war"><Property name="jetty.resourceBase" default="bigdata-war/src"/></Set>
<Set name="contextPath">/bigdata</Set>
<Set name="descriptor">WEB-INF/web.xml</Set>
<Set name="parentLoaderPriority">true</Set>
<Set name="extractWAR">false</Set>
- <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set>
+ <Set name="overrideDescriptor"><Property name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set>
</New>
</Arg>
</Call>
@@ -166,4 +166,4 @@
<Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set>
<Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set>
-</Configure>
\ No newline at end of file
+</Configure>
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-23 15:29:40 UTC (rev 8414)
@@ -935,6 +935,9 @@
<property name="dist.doc" location="${dist.dir}/doc" />
<property name="dist.doc.api" location="${dist.dir}/doc/api" />
<property name="dist.doc.legal" location="${dist.dir}/doc/LEGAL" />
+ <!-- deployment directories having stuff to be staged. -->
+ <property name="deploy" location="src/resources/deployment"/>
+ <property name="deploy.nss" location="${deploy}/nss"/>
<delete dir="${dist.dir}" quiet="true" />
<mkdir dir="${dist.dir}" />
@@ -966,7 +969,7 @@
<property name="bigdata-jini.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" />
<property name="bigdata-rdf.lib" location="${bigdata.dir}/bigdata-rdf/lib" />
<property name="bigdata-sails.lib" location="${bigdata.dir}/bigdata-sails/lib" />
- <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" />
+ <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" />
<property name="bigdata-gom.lib" location="${bigdata.dir}/bigdata-gom/lib" />
<property name="bigdata-jetty.lib" location="${bigdata.dir}/bigdata/lib/jetty" />
<property name="bigdata-http.lib" location="${bigdata.dir}/bigdata-sails/lib/httpcomponents" />
@@ -1265,6 +1268,30 @@
src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes"
/>
+ <!-- Stage files specific to NSS deployments provided by Brew and Chef. -->
+ <chmod file="${dist.bin}/bigdata" perm="755" />
+ <copy file="${deploy.nss}/bin/bigdataNSS"
+ todir="${dist.bin}" />
+ <chmod file="${dist.bin}/bigdata" perm="755" />
+ <copy file="${deploy.nss}/bin/startNSS"
+ todir="${dist.bin}" />
+ <chmod file="${dist.bin}/startNSS" perm="755" />
+<!--
+TODO These lines were removed per #951 (Deployments branch merge). They
+break the other deployment models by introducing metavariables for regex
+substitutions.
+
+ bigdata-war/src/WEB-INF/RWStore.properties (staged into bigdata/var/jetty/bigdata/WEB-INF/RWStore.properties)
+
+ and
+
+ bigdata/src/resources/log4j.properties (staged into dist/var/config/logging/log4j.properties).
+ <copy file="${deploy.nss}/WEB-INF/RWStore.properties"
+ todir="${dist.var.jetty}/WEB-INF" overwrite="true" />
+ <copy file="${deploy.nss}/WEB-INF/classes/log4j.properties"
+ todir="${dist.var.jetty}/WEB-INF/classes" overwrite="true" />
+-->
+
</target>
<!-- -->
@@ -1344,8 +1371,41 @@
</target>
+ <target name="package-nss-brew" depends="clean, stage"
+ description="Create compressed tar file for Jetty based deployment via Brew and Chef installers.">
- <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' -->
+ <tar destfile="${bigdata.dir}/REL-NSS.${version}.tgz"
+ compression="gzip">
+
+ <tarfileset dir="${bigdata.dir}/dist">
+ <include name="bigdata/doc/**" />
+ <exclude name="bigdata/doc/api/**" />
+ <exclude name="bigdata/doc/HAJournalServer.html" />
+ <include name="bigdata/lib/**" />
+ <exclude name="bigdata/lib/bigdata-ganglia.jar" />
+ <exclude name="bigdata/lib/browser.jar" />
+ <exclude name="bigdata/lib/reggie.jar" />
+ <exclude name="bigdata/lib/zookeeper.jar" />
+ <exclude name="bigdata/lib/jsk-*.jar" />
+ <exclude name="bigdata/lib-dl" />
+ <exclude name="bigdata/lib-ext" />
+ <include name="bigdata/var/jetty/**" />
+ <include name="bigdata/var/config/logging/logging.properties" />
+ <exclude name="bigdata/var/jetty/html/new.html" />
+ <exclude name="bigdata/var/jetty/html/old.html" />
+ </tarfileset>
+
+ <!-- Add scripts separately, making them executable -->
+
+ <tarfileset dir="${bigdata.dir}/dist" filemode="755">
+ <include name="bigdata/bin/bigdataNSS" />
+ <include name="bigdata/bin/startNSS" />
+ </tarfileset>
+ </tar>
+
+ </target>
+
+ <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' -->
<!-- Note: can require 'rpm' and 'rpm-build. -->
<!-- TODO: We do not need both this and "deploy-artifact". -->
<target name="rpm" depends="prepare" description="Build RPM installer.">
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-23 15:29:40 UTC (rev 8414)
@@ -1,10 +1,31 @@
#!/bin/bash
+#
+# This script has been developed for the "systap-aws-bigdata-ha" cluster
+# deployment package.
+#
+# The HARestore script will recreate the Bigdata HA journal file as of
+# the most recent commit point from log and snapshot files. The
+# intended use of the script is to restore a journal file that resides
+# on an ephemeral storage media (especially, an SSD instance disk)
+# from a combination of full backups and transaction logs on durable
+# media (e.g., EBS) following a system reboot. The script should not
+# be executed while Bigdata is running (it requires exclusive access
+# to the journal and will not be able to run if bigdata is already
+# running).
+#
+# HARestore takes no arguments and assumes the Bigdata journal filename
+# convention: "bigdata-ha.jnl".
+#
+
source /etc/default/bigdataHA
SERVICE_DIR="$FED_DIR/$FEDNAME/$LOGICAL_SERVICE_ID/HAJournalServer"
LIB_DIR="$FED_DIR/lib"
-java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar -Dlog4j.configuration=file:var/config/logging/log4j.properties com.bigdata.journal.jini.ha.HARestore -o $DATA_DIR/bigdata-ha.jnl $SERVICE_DIR/snapshot $SERVICE_DIR/HALog
-
-
+java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar\
+ -Dlog4j.configuration=file:var/config/logging/log4j.properties\
+ com.bigdata.journal.jini.ha.HARestore\
+ -o $DATA_DIR/bigdata-ha.jnl\
+ $SERVICE_DIR/snapshot\
+ $SERVICE_DIR/HALog
Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-23 15:29:40 UTC (rev 8414)
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-# Start the services and put the JVM in the background. All services will
-# run in a single JVM. See Apache River com.sun.jini.start.ServiceStarter
-# for more details. The services are configured in the accompanying
-# startHAServices.config file. Specific configuration options for each
-# service are defined in the documentation for that service.
-#
-# Note: One drawback with running each service in the same JVM is that the
-# GC load of all services is combined and all services would be suspended
-# at the same time by a Full GC pass. If this is a problem, then you can
-# break out the river services (ClassServer and Reggie) into a separate
-# ServiceStarter instance from the HAJournalServer.
-
-# The top-level of the installation.
-pushd `dirname $0` > /dev/null;cd ..;INSTALL_DIR=`pwd`;popd > /dev/null
-
-##
-# HAJournalServer configuration parameter overrides (see HAJournal.config).
-#
-# The bigdata HAJournal.config file may be heavily parameterized through
-# environment variables that get passed through into the JVM started by
-# this script and are thus made available to the HAJournalServer when it
-# interprets the contents of the HAJournal.config file. See HAJournal.config
-# for the meaning of these environment variables.
-#
-# Note: Many of these properties have defaults.
-##
-
-export JETTY_XML="${INSTALL_DIR}/var/jetty/jetty.xml"
-export JETTY_RESOURCE_BASE="${INSTALL_DIR}/var/jetty"
-export LIB_DIR=${INSTALL_DIR}/lib
-export CONFIG_DIR=${INSTALL_DIR}/var/config
-export LOG4J_CONFIG=${CONFIG_DIR}/logging/log4j.properties
-
-# TODO Explicitly enumerate JARs so we can control order if necessary and
-# deploy on OS without find and tr.
-export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'`
-
-export JAVA_OPTS="\
- -server -Xmx4G\
- -Dlog4j.configuration=${LOG4J_CONFIG}\
- -Djetty.resourceBase=${JETTY_RESOURCE_BASE}\
- -DJETTY_XML=${JETTY_XML}\
-"
-
-cmd="java ${JAVA_OPTS} \
- -server -Xmx4G \
- -cp ${HAJOURNAL_CLASSPATH} \
- com.bigdata.rdf.sail.webapp.NanoSparqlServer \
- 9999 kb \
- ${INSTALL_DIR}/var/jetty/WEB-INF/GraphStore.properties \
-"
-echo "Running: $cmd"
-$cmd&
-pid=$!
-# echo "PID=$pid"
-echo "kill $pid" > stop.sh
-chmod +w stop.sh
-
-# Note: To obtain the pid, do: read pid < "$pidFile"
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt 2014-05-23 15:29:40 UTC (rev 8414)
@@ -0,0 +1,26 @@
+brew - homebrew installer. installation is the NSS using jetty. No HA features.
+
+chef - cook book has recipes for bigdata under tomcat; bigdata HA; MapGraph;
+ NSS using jetty.
+
+nss - NSS using jetty. The directory contains shell scripts to (a) control
+ the run state of bigdata in an init.d style script; and (b) start the
+ NSS using jetty.
+
+vagrant - HA cluster launcher for AWS; MapGraph launcher; NSS using jetty
+ launcher; tomcat + bigdata.war install.
+
+====== Maintenance ======
+
+TODO Rename these things to be less ambiguous once we agree on names.
+
+TODO Document how things are structured from a support and maintenance
+perspective.
+
+TODO Document on the wiki what these various deployments are, how to
+choose the right one, and where to get it. See the following tickets.
+Also capture the deployment matrix that Daniel has sent by email.
+
+#926 Add Wiki Entry for Brew Deployment
+#925 Add Wiki Entry for Vagrant Deployments
+#924 Add Wiki Entry for Chef Cookbooks
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414)
@@ -53,7 +53,7 @@
##
# Rule execution log. This is a formatted log file (comma delimited).
-log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
+#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false
log4j.appender.ruleLog=org.apache.log4j.FileAppender
log4j.appender.ruleLog.Threshold=ALL
Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-23 15:29:40 UTC (rev 8414)
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-# init.d style script for bigdata HA services. The script can be used
-# to 'start' or 'stop' services.
-#
-# Environment:
-#
-# binDir - The directory containing the installed scripts.
-# pidFile - The pid is written on this file.
-#
-# Misc.
-#
-# See http://tldp.org/LDP/abs/html/index.html
-#
-# Note: Blank lines are significant in shell scripts.
-#
-# Note: Children must do "exit 0" to indicate success.
-#
-# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix
-
-# Source function library (just used for 'action'). If you don't have this
-# it SHOULD automatically use the inline definition for "action()".
-
-#
-# the following template line will be replaced by a deployer application (e.g. brew, chef)
-#
-export INSTALL_TYPE="<%= INSTALL_TYPE %>"
-export BD_HOME="<%= BD_HOME %>"
-pidFile=${BD_HOME}/var/lock/pid
-binDir=${BD_HOME}/bin
-
-
-#
-# See how we were called.
-#
-case "$1" in
- start)
-#
-# Start the ServiceStarter and child services if not running.
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
-# The process has died so remove the old pid file.
- echo $"`date` : `hostname` : $pid died?"
- rm -f "$pidFile"
- fi
- fi
- if [ ! -f "$pidFile" ]; then
- echo -ne $"`date` : `hostname` : bringing bigdata services up ... "
- $binDir/startNSS
- echo "done!"
- else
- echo $"`date` : `hostname` : running as $pid"
- fi
- ;;
- stop)
-#
-# Stop the ServiceStarter and all child services.
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
-# The process has died so remove the old pid file.
- echo $"`date` : `hostname` : $pid died?"
- rm -f "$pidFile"
- else
- echo -ne $"`date` : `hostname` : bringing bigdata service down ... "
- kill $pid
- rm -f "$pidFile"
- echo "done!"
- fi
- fi
- ;;
- status)
-#
-# Report status for the ServicesManager (up or down).
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
- echo $"`date` : `hostname` : process died? pid=$pid."
- else
- echo $"`date` : `hostname` : running as $pid."
- fi
- else
- echo $"`date` : `hostname` : not running."
- fi
- ;;
-#
-# Simply stop then start.
-#
- restart)
- $0 stop
- $0 start
- ;;
- *)
-#
-# Usage
-#
- me=`basename $0`
- echo $"Usage: $0 {start|stop|status|restart}"
- exit 1
-esac
-
-exit 0
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS 2014-05-23 15:29:40 UTC (rev 8414)
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+# init.d style script for bigdata HA services. The script can be used
+# to 'start' or 'stop' services.
+#
+# Environment:
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Misc.
+#
+# See http://tldp.org/LDP/abs/html/index.html
+#
+# Note: Blank lines are significant in shell scripts.
+#
+# Note: Children must do "exit 0" to indicate success.
+#
+# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix
+
+# Source function library (just used for 'action'). If you don't have this
+# it SHOULD automatically use the inline definition for "action()".
+
+#
+# the following template line will be replaced by a deployer application (e.g. brew, chef)
+#
+export INSTALL_TYPE="<%= INSTALL_TYPE %>"
+export BD_HOME="<%= BD_HOME %>"
+pidFile=${BD_HOME}/var/lock/pid
+binDir=${BD_HOME}/bin
+
+
+#
+# See how we were called.
+#
+case "$1" in
+ start)
+#
+# Start the ServiceStarter and child services if not running.
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+# The process has died so remove the old pid file.
+ echo $"`date` : `hostname` : $pid died?"
+ rm -f "$pidFile"
+ fi
+ fi
+ if [ ! -f "$pidFile" ]; then
+ echo -ne $"`date` : `hostname` : bringing bigdata services up ... "
+ $binDir/startNSS
+ echo "done!"
+ else
+ echo $"`date` : `hostname` : running as $pid"
+ fi
+ ;;
+ stop)
+#
+# Stop the ServiceStarter and all child services.
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+# The process has died so remove the old pid file.
+ echo $"`date` : `hostname` : $pid died?"
+ rm -f "$pidFile"
+ else
+ echo -ne $"`date` : `hostname` : bringing bigdata service down ... "
+ kill $pid
+ rm -f "$pidFile"
+ echo "done!"
+ fi
+ fi
+ ;;
+ status)
+#
+# Report status for the ServicesManager (up or down).
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+ echo $"`date` : `hostname` : process died? pid=$pid."
+ else
+ echo $"`date` : `hostname` : running as $pid."
+ fi
+ else
+ echo $"`date` : `hostname` : not running."
+ fi
+ ;;
+#
+# Simply stop then start.
+#
+ restart)
+ $0 stop
+ $0 start
+ ;;
+ *)
+#
+# Usage
+#
+ me=`basename $0`
+ echo $"Usage: $0 {start|stop|status|restart}"
+ exit 1
+esac
+
+exit 0
Property changes on: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-22 19:23:15 UTC (rev 8413)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-23 15:29:40 UTC (rev 8414)
@@ -2,9 +2,9 @@
export INSTALL_DIR=${BD_HOME}
if [ $INSTALL_TYPE == "BREW" ]; then
- export LIB_DIR=${INSTALL_DIR}/libexec
+ export LIB_DIR=${INSTALL_DIR}/libexec
else
- export LIB_DIR=${INSTALL_DIR}/lib
+ export LIB_DIR=${INSTALL_DIR}/lib
fi
export JETTY_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '...
[truncated message content] |
|
From: <tho...@us...> - 2014-05-27 13:28:11
|
Revision: 8422
http://sourceforge.net/p/bigdata/code/8422
Author: thompsonbry
Date: 2014-05-27 13:28:02 +0000 (Tue, 27 May 2014)
Log Message:
-----------
- Declared an interface that exposes a post-constructor Callable to initialize a service. This will be used for the SnapshotManager, HALogNexus, and HAJournal.
- Modified the SnapshotManager to use a parallel scan and the new IServiceInit interface.
- Added test to verify that snapshots are located after a service restart.
- Defined, exposed, and tested a variety of constants for the CommitCounterUtility. These were added to support a parallel scan of the files in a leaf directory.
- Declared a "startupThreads" parameter that controls the number of parallel scans for the HAJournal startup processes.
Snapshot test suites are green locally.
See #775 (HAJournal.start() - optimization)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -1752,6 +1752,7 @@
}
+ @Override
final public File getFile() {
final IBufferStrategy tmp = getBufferStrategy();
@@ -1915,6 +1916,7 @@
* @exception IllegalStateException
* if the journal is open.
*/
+ @Override
public void deleteResources() {
if (isOpen())
@@ -2307,12 +2309,14 @@
}
+ @Override
final public UUID getUUID() {
return journalMetadata.get().getUUID();
}
+ @Override
final public IResourceMetadata getResourceMetadata() {
return journalMetadata.get();
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -37,6 +37,17 @@
/**
* Utility class for operations on files that are named using a commit counter.
+ * <p>
+ * The commit counter based files are arranged in a hierarchical directory
+ * structure with 3 digits per directory and 7 directory levels. These levels
+ * are labeled with depths <code>[0..6]</code>. The root directory is at depth
+ * ZERO (0). Each directory contains up to <code>1000</code> children. The
+ * children in the non-leaf directories are subdirectories labeled
+ * <code>0..999</code>. The leaf directories are at depth SIX (6). Leaf
+ * directories contain files. Each file in a leaf directory is labeled with a
+ * <code>21</code> digit base name and some purpose specific file extension.
+ * Each such file has data for the specific commit point encoded by the basename
+ * of the file.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
@@ -46,6 +57,89 @@
.getLogger(CommitCounterUtility.class);
/**
+ * The number of base-10 digits per directory level. This allows children
+ * having labels <code>000...999</code>. Thus there are <code>1000</code>
+ * children per directory.
+ */
+ private static final int DIGITS_PER_DIR = 3;
+
+ /** The number of files per directory. */
+ private static final int FILES_PER_DIR = 1000;
+
+ /** The depth of the root directory. */
+ private static final int ROOT_DIR_DEPTH = 0;
+
+ /** The depth of a leaf directory. */
+ private static final int LEAF_DIR_DEPTH = 6;
+
+ /**
+ * The #of digits (21) in the base file name for a commit counter as
+ * formatted by {@link #getCommitCounterStr(long)}.
+ * <p>
+ * Note: 21 := (leafDirDepth+1) * digitsPerDir
+ */
+ private static final int BASENAME_DIGITS = 21;
+
+ /**
+ * The {@link Formatter} string that is used to generate the base name of
+ * the files in the leaf directories. This string represents the commit
+ * counter value with leading zeros. The leading zeros are relied upon to
+ * impose an ordering over the base names of the files using a sort.
+ */
+ private static final String FORMAT_STR = "%0" + BASENAME_DIGITS + "d";
+
+ /**
+ * The #of digits (21) in the base file name for a commit counter as
+ * formatted by {@link #getCommitCounterStr(long)}.
+ * <p>
+ * Note: 21 := (leafDirDepth+1) * digitsPerDir
+ */
+ public static int getBasenameDigits() {
+
+ return BASENAME_DIGITS;
+
+ }
+
+ /**
+ * The number of base-10 digits per directory level (
+ * {@value #DIGITS_PER_DIR}). This allows children having labels
+ * <code>000...999</code>. Thus there are <code>1000</code> children per
+ * directory.
+ */
+ public static int getDigitsPerDirectory() {
+
+ return DIGITS_PER_DIR;
+
+ }
+
+ /**
+ * The number of files per directory ({@value #FILES_PER_DIR}).
+ */
+ public static int getFilesPerDirectory() {
+
+ return FILES_PER_DIR;
+
+ }
+
+ /**
+ * The depth of the root directory ({@value #ROOT_DIR_DEPTH}).
+ */
+ public static int getRootDirectoryDepth() {
+
+ return ROOT_DIR_DEPTH;
+
+ }
+
+ /**
+ * The depth of a leaf directory ({@value #LEAF_DIR_DEPTH}).
+ */
+ public static int getLeafDirectoryDepth() {
+
+ return LEAF_DIR_DEPTH;
+
+ }
+
+ /**
* Return the name of the {@link File} associated with the commitCounter.
*
* @param dir
@@ -79,15 +173,11 @@
* Now figure out the recursive directory name.
*/
File t = dir;
+
+ for (int i = 0; i < (BASENAME_DIGITS - DIGITS_PER_DIR); i += DIGITS_PER_DIR) {
- if (true) {
+ t = new File(t, basename.substring(i, i + DIGITS_PER_DIR));
- for (int i = 0; i < (21 - 3); i += 3) {
-
- t = new File(t, basename.substring(i, i + 3));
-
- }
-
}
final File file = new File(t, basename + ext);
@@ -108,11 +198,11 @@
*/
public static String getCommitCounterStr(final long commitCounter) {
- final StringBuilder sb = new StringBuilder(21);
+ final StringBuilder sb = new StringBuilder(BASENAME_DIGITS);
final Formatter f = new Formatter(sb);
- f.format("%021d", commitCounter);
+ f.format(FORMAT_STR, commitCounter);
f.flush();
f.close();
Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -0,0 +1,46 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on May 27th, 2014
+ */
+package com.bigdata.service;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Interface for post-constructor initialization.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ *
+ * @param <T>
+ * The generic type of the object to which the initialization task
+ * will be evaluated.
+ */
+public interface IServiceInit<T> {
+
+ /**
+ * Return a task that must be used to initialize the service.
+ */
+ Callable<T> init();
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -42,10 +42,35 @@
public TestCommitCounterUtility() {
}
- public TestCommitCounterUtility(String name) {
+ public TestCommitCounterUtility(final String name) {
super(name);
}
+ /**
+ * Verify the value of specific constants. These constants must not be
+ * modified since they define the hierarchical structure of the durable data
+ * and are relied upon to generate and parse the fully qualified names of the
+ * files within a managed commit counter based directory system.
+ */
+ public void test_constants() {
+
+ assertEquals("filesPerDirectory", 1000,
+ CommitCounterUtility.getFilesPerDirectory());
+
+ assertEquals("digitsPerDirectory", 3,
+ CommitCounterUtility.getDigitsPerDirectory());
+
+ assertEquals("basenameDigits", 21,
+ CommitCounterUtility.getBasenameDigits());
+
+ assertEquals("rootDirectoryDepth", 0,
+ CommitCounterUtility.getRootDirectoryDepth());
+
+ assertEquals("leafDirectoryDepth", 6,
+ CommitCounterUtility.getLeafDirectoryDepth());
+
+ }
+
public void test01() {
final File dir = new File("/tmp");
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -39,6 +39,8 @@
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.locks.Lock;
@@ -420,6 +422,16 @@
// Snapshot manager.
snapshotManager = new SnapshotManager(server, this, config);
+ try {
+ getExecutorService().submit(snapshotManager.init()).get();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e); // TODO Do not wrap.
+ } catch (CancellationException e) {
+ throw e;
+ } catch (ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -383,6 +383,19 @@
String DEFAULT_SNAPSHOT_DIR = "snapshot";
/**
+ * The number of threads that will be used for a parallel scan of the
+ * files in the {@link #HA_LOG_DIR} and {@link #SNAPSHOT_DIR} in order
+ * to accelerate the service start. The minimum is ONE (1). The default
+ * is {@value #DEFAULT_STARTUP_THREADS}.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start()
+ * (optimization) </a>
+ */
+ String STARTUP_THREADS = "startupThreads";
+
+ int DEFAULT_STARTUP_THREADS = 20;
+
+ /**
* The policy that specifies when a new snapshot will be taken. The
* decision to take a snapshot is a local decision and the snapshot is
* assumed to be written to local disk. However, offsite replication of
@@ -871,6 +884,36 @@
* {@inheritDoc}
* <p>
* Note: called from {@link AbstractServer#run()}
+ *
+ * FIXME We should be able to start the NSS while still reading the HALog
+ * files from the disk. The action to start the {@link HAQuorumService}
+ * should await a {@link Future} for the journal start. Thus, the
+ * {@link HAJournal} start needs to be turned into a {@link Callable} or
+ * {@link Runnable}.
+ * <p>
+ * In fact, the journal open is very fast. The slow part is building an
+ * index over the HALogs and (to a lesser extent) over the snapshots. Those
+ * index builds can run in parallel, but we need to have a critical section
+ * in which we check some necessary conditions, especially whether the last
+ * HALog is valid.
+ * <p>
+ * We need to push a start() computation into both the {@link HALogNexus}
+ * and the {@link SnapshotManager}. This could be done with an interface
+ * that is also shared by the {@link HAJournal}. The interface could provide
+ * some reporting on the startup process, but most critical is that it
+ * provides a {@link Future} for evaluating that process.
+ * <p>
+ * The {@link Future} can evaluate to the outcome of that startup procedure.
+ * <p>
+ * The startup procedure should use multiple threads (or async IO) to reduce
+ * the startup latency. It could use the executor on the journal for this.
+ * <p>
+ * We could parallelize the HALog and snapshot startup then enter a critical
+ * section in which we validate the consistency of those resources with
+ * respect to the HAJournal's current root block.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start()
+ * (optimization) </a>
*/
@Override
protected void startUpHook() {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -128,11 +128,11 @@
*/
volatile IHAWriteMessage lastLiveHAWriteMessage = null;
- /*
- * Set to protect log files against deletion while a digest is
- * computed. This is checked by deleteHALogs.
+ /**
+ * Set to protect log files against deletion while a digest is computed.
+ * This is checked by {@link #deleteHALogs(long, long)}.
*/
- private final AtomicInteger logAccessors = new AtomicInteger();
+ private final AtomicInteger logAccessors = new AtomicInteger();
/**
* Filter visits all HALog files <strong>except</strong> the current HALog
@@ -1042,23 +1042,26 @@
/**
* Protects logs from removal while a digest is being computed
- * @param earliestDigest
*/
void addAccessor() {
- if (logAccessors.incrementAndGet() == 1) {
- if (log.isInfoEnabled())
- log.info("Access protection added");
- }
+ if (logAccessors.incrementAndGet() == 1) {
+ if (log.isDebugEnabled())
+ log.debug("Access protection added");
+ }
}
-
+
/**
* Releases current protection against log removal
*/
void releaseAccessor() {
- if (logAccessors.decrementAndGet() == 0) {
- if (log.isInfoEnabled())
- log.info("Access protection removed");
- }
+ final long tmp;
+ if ((tmp = logAccessors.decrementAndGet()) == 0) {
+ if (log.isDebugEnabled())
+ log.debug("Access protection removed");
+ }
+ if (tmp < 0)
+ throw new RuntimeException("Decremented to a negative value: "
+ + tmp);
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -36,10 +36,13 @@
import java.nio.ByteBuffer;
import java.security.DigestException;
import java.security.MessageDigest;
+import java.util.ArrayList;
import java.util.Iterator;
+import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.zip.GZIPInputStream;
@@ -73,17 +76,19 @@
import com.bigdata.quorum.Quorum;
import com.bigdata.quorum.QuorumException;
import com.bigdata.rawstore.Bytes;
+import com.bigdata.service.IServiceInit;
import com.bigdata.striterator.Resolver;
import com.bigdata.striterator.Striterator;
import com.bigdata.util.ChecksumError;
import com.bigdata.util.ChecksumUtility;
+import com.bigdata.util.concurrent.LatchedExecutor;
/**
* Class to manage the snapshot files.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
*/
-public class SnapshotManager {
+public class SnapshotManager implements IServiceInit<Void> {
private static final Logger log = Logger.getLogger(SnapshotManager.class);
@@ -185,6 +190,11 @@
private final IRestorePolicy restorePolicy;
/**
+ * @see HAJournalServer.ConfigurationOptions#STARTUP_THREADS
+ */
+ private final int startupThreads;
+
+ /**
* An in memory index over the last commit time of each snapshot. This is
* populated when the {@link HAJournal} starts from the file system and
* maintained as snapshots are taken or destroyed.
@@ -299,62 +309,241 @@
IRestorePolicy.class, //
HAJournalServer.ConfigurationOptions.DEFAULT_RESTORE_POLICY);
+ {
+
+ startupThreads = (Integer) config
+ .getEntry(
+ HAJournalServer.ConfigurationOptions.COMPONENT,
+ HAJournalServer.ConfigurationOptions.STARTUP_THREADS,
+ Integer.TYPE,
+ HAJournalServer.ConfigurationOptions.DEFAULT_STARTUP_THREADS);
+
+ if (startupThreads <= 0) {
+ throw new ConfigurationException(
+ HAJournalServer.ConfigurationOptions.STARTUP_THREADS
+ + "=" + startupThreads + " : must be GT ZERO");
+ }
+
+ }
+
snapshotIndex = SnapshotIndex.createTransient();
- /*
- * Delete any temporary files that were left lying around in the
- * snapshot directory.
- */
- CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */,
- getSnapshotDir(), TEMP_FILE_FILTER);
+ }
- // Make sure the snapshot directory exists.
- ensureSnapshotDirExists();
+ @Override
+ public Callable<Void> init() {
- // Populate the snapshotIndex from the snapshotDir.
- populateIndexRecursive(getSnapshotDir(), SNAPSHOT_FILTER);
+ return new InitTask();
- // Initialize the snapshot policy. It can self-schedule.
- snapshotPolicy.init(journal);
-
}
- private void ensureSnapshotDirExists() throws IOException {
+ /**
+ * Task that is used to initialize the {@link SnapshotManager}.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+ private class InitTask implements Callable<Void> {
- if (!snapshotDir.exists()) {
+ @Override
+ public Void call() throws Exception {
- // Create the directory.
- if (!snapshotDir.mkdirs())
- throw new IOException("Could not create directory: "
- + snapshotDir);
+ lock.lock();
+
+ try {
+
+ doRunWithLock();
+
+ // Done.
+ return (Void) null;
+
+ } finally {
+
+ lock.unlock();
+
+ }
+
+ }
+ private void doRunWithLock() throws IOException, InterruptedException,
+ ExecutionException {
+
+ if (log.isInfoEnabled())
+ log.info("Starting cleanup.");
+
+ /*
+ * Delete any temporary files that were left lying around in the
+ * snapshot directory.
+ *
+ * TODO This may be relatively lengthy. It would be better to
+ * combine this with the scan in which we read the root blocks and
+ * index the snapshots. However, this will require another refactor
+ * of the parallel scan logic. For now, I am merely reporting out
+ * the times for these different scans so I can get a better sense
+ * of the latencies involved.
+ */
+ CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */,
+ getSnapshotDir(), TEMP_FILE_FILTER);
+
+ // Make sure the snapshot directory exists.
+ ensureSnapshotDirExists();
+
+ if (log.isInfoEnabled())
+ log.info("Starting scan.");
+
+ final LatchedExecutor executor = new LatchedExecutor(
+ journal.getExecutorService(), startupThreads);
+
+ // Populate the snapshotIndex from the snapshotDir.
+ populateIndexRecursive(//
+ executor,//
+ getSnapshotDir(), //
+ SNAPSHOT_FILTER, //
+ 0 // depth@root
+ );
+
+ if (log.isInfoEnabled())
+ log.info("Starting policy.");
+
+ // Initialize the snapshot policy. It can self-schedule.
+ snapshotPolicy.init(journal);
+
+ if (log.isInfoEnabled())
+ log.info("Done.");
+
}
- }
-
- /**
- * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex}
- * from the root blocks in snapshot files found in that directory.
- *
- * @throws IOException
- */
- private void populateIndexRecursive(final File f,
- final FileFilter fileFilter) throws IOException {
+ /**
+ * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex}
+ * from the root blocks in snapshot files found in that directory.
+ *
+ * @throws IOException
+ * @throws ExecutionException
+ * @throws InterruptedException
+ */
+ private void populateIndexRecursive(final LatchedExecutor executor,
+ final File f, final FileFilter fileFilter, final int depth)
+ throws IOException, InterruptedException, ExecutionException {
- if (f.isDirectory()) {
+ if (depth == CommitCounterUtility.getLeafDirectoryDepth()) {
- final File[] children = f.listFiles(fileFilter);
+ /*
+ * Leaf directory.
+ */
+
+ final File[] children = f.listFiles(fileFilter);
- for (int i = 0; i < children.length; i++) {
+ /*
+ * Setup tasks for parallel threads to read the commit record from
+ * each file.
+ */
+ final List<FutureTask<SnapshotRecord>> futures = new ArrayList<FutureTask<SnapshotRecord>>(
+ children.length);
- populateIndexRecursive(children[i], fileFilter);
+ for (int i = 0; i < children.length; i++) {
+ final File child = children[i];
+
+ final FutureTask<SnapshotRecord> ft = new FutureTask<SnapshotRecord>(
+
+ new Callable<SnapshotRecord>() {
+
+ @Override
+ public SnapshotRecord call() throws Exception {
+
+ return getSnapshotRecord(child);
+
+ }
+
+ });
+
+ futures.add(ft);
+
+ }
+
+ try {
+
+ /*
+ * Schedule all futures.
+ */
+ for (FutureTask<SnapshotRecord> ft : futures) {
+
+ executor.execute(ft);
+
+ }
+
+ /*
+ * Await futures, obtaining snapshot records for the current
+ * leaf directory.
+ */
+ final List<SnapshotRecord> records = new ArrayList<SnapshotRecord>(
+ children.length);
+
+ for (int i = 0; i < children.length; i++) {
+
+ final Future<SnapshotRecord> ft = futures.get(i);
+
+ final SnapshotRecord r = ft.get();
+
+ records.add(r);
+
+ }
+
+ // Add all records in the caller's thread.
+ for (SnapshotRecord r : records) {
+
+ snapshotIndex.add(r);
+
+ }
+
+ } finally {
+
+ /*
+ * Ensure tasks are terminated.
+ */
+
+ for (Future<SnapshotRecord> ft : futures) {
+
+ ft.cancel(true/* mayInterruptIfRunning */);
+
+ }
+
+ }
+
+ } else if (f.isDirectory()) {
+
+ /*
+ * Sequential recursion into a child directory.
+ */
+
+ final File[] children = f.listFiles(fileFilter);
+
+ for (int i = 0; i < children.length; i++) {
+
+ final File child = children[i];
+
+ populateIndexRecursive(executor, child, fileFilter, depth + 1);
+
+ }
+
+ } else {
+
+ log.warn("Ignoring file in non-leaf directory: " + f);
+
}
- } else {
+ }
- addSnapshot(f);
+ }
+
+ private void ensureSnapshotDirExists() throws IOException {
+ if (!snapshotDir.exists()) {
+
+ // Create the directory.
+ if (!snapshotDir.mkdirs())
+ throw new IOException("Could not create directory: "
+ + snapshotDir);
+
}
}
@@ -434,7 +623,26 @@
* if the file can not be read.
* @throws ChecksumError
* if there is a checksum problem with the root blocks.
+ */
+ private void addSnapshot(final File file) throws IOException {
+
+ snapshotIndex.add(getSnapshotRecord(file));
+
+ }
+
+ /**
+ * Create a {@link SnapshotRecord} from a file.
*
+ * @param file
+ * The snapshot file.
+ *
+ * @throws IllegalArgumentException
+ * if argument is <code>null</code>.
+ * @throws IOException
+ * if the file can not be read.
+ * @throws ChecksumError
+ * if there is a checksum problem with the root blocks.
+ *
* TODO If the root blocks are bad, then this will throw an
* IOException and that will prevent the startup of the
* HAJournalServer. However, if we start up the server with a
@@ -449,8 +657,8 @@
* with that HALog file unless it also happens to correspond to
* a snapshot.
*/
- private void addSnapshot(final File file) throws IOException {
-
+ private SnapshotRecord getSnapshotRecord(final File file) throws IOException {
+ ...
[truncated message content] |
|
From: <tho...@us...> - 2014-06-02 16:44:02
|
Revision: 8436
http://sourceforge.net/p/bigdata/code/8436
Author: thompsonbry
Date: 2014-06-02 16:43:56 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
See #966 (Failed to get namespace list under concurrent update)
Martyn and I worked through the REST API transaction semantics and have found and fixed a few issues. He is going to continue a review to normalize:
- use of launderThrowable()
- patterns for try/finally for methods that perform mutations.
The desired pattern looks like this:
{{{
} finally {
if (conn != null) {
if (!success)
conn.rollback();
conn.close();
}
}
} catch (Throwable t) {
throw BigdataRDFServlet.launderThrowable(t, resp, ""/*summary-of-REST_API_CALL*/);
}
}}}
This commit includes the following changes:
- DefaultResourceLocator: identified and marked a possible hotspot.
- GlobalRowStoreHelper: get(timestamp) now invokes getGlobalRowStore() when timestamp==ITx.UNISOLATED. getGlobalRowStore() has implicit creation semantics for the GRS. This way the two methods have the same semantics for that timestamp.
- AbstractTripleStore: @Override annotations.
- TestLocalTripleStoreDestroy: modified to check post-conditions after calling tripleStore.commit()
- BigdataSail.createLTS(): fixed issues with some abnormal code paths which could leave the global semaphore or the write lock held and thus block further updates against the DB/SAIL.
Webapp:
- MultiTenancyServlet: fixed some issues with failure to hold a transaction open across the operation that was the root cause of this ticket.
- Documentation and throwable handling fixes to several servlets. Martyn will continue to work on this aspect of the ticket.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -482,7 +482,7 @@
protected Properties locateResource(final String namespace,
final long timestamp, final AtomicReference<IIndexManager> foundOn) {
- synchronized (seeAlso) {
+ synchronized (seeAlso) { // FIXME Probably a read/write lock since [seeAlso] normally empty.
for (IIndexManager indexManager : seeAlso.keySet()) {
@@ -1126,7 +1126,7 @@
*
* @see #locateResource(String)
*/
- public void add(IIndexManager indexManager) {
+ public void add(final IIndexManager indexManager) {
if (indexManager == null)
throw new IllegalArgumentException();
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -178,6 +178,13 @@
if (log.isInfoEnabled())
log.info(TimestampUtility.toString(timestamp));
+ if (timestamp == ITx.UNISOLATED) {
+
+ /* This version does an implicit create if the GRS does not exist. */
+ return getGlobalRowStore();
+
+ }
+
final IIndex ndx;
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -1730,7 +1730,8 @@
}
- public void destroy() {
+ @Override
+ final public void destroy() {
assertWritable();
@@ -2142,6 +2143,7 @@
* @throws IllegalStateException
* if the view is read only.
*/
+ @Override
public long commit() {
if (isReadOnly())
@@ -2163,6 +2165,7 @@
}
+ @Override
final public long getTermCount() {
long rangeCount = 0L;
@@ -2175,6 +2178,7 @@
}
+ @Override
final public long getURICount() {
long rangeCount = 0L;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -41,6 +41,7 @@
import com.bigdata.relation.RelationSchema;
import com.bigdata.relation.locator.DefaultResourceLocator;
import com.bigdata.sparse.ITPS;
+import com.bigdata.sparse.SparseRowStore;
/**
* Test suite to verify the semantics of destroying a {@link LocalTripleStore},
@@ -94,12 +95,19 @@
try {
+ final long lastCommitTime = store.getIndexManager().getLastCommitTime();
+
// Note: Will be in lexical order for Unicode.
- final String[] namespaces = getNamespaces(indexManager).toArray(
- new String[] {});
+ assertEquals(
+ new String[] { namespace },
+ getNamespaces(indexManager, ITx.UNISOLATED).toArray(
+ new String[] {}));
+ // Not found before the create.
+ assertEquals(
+ new String[] {},
+ getNamespaces(indexManager, lastCommitTime - 1).toArray(
+ new String[] {}));
- assertEquals(new String[] { namespace }, namespaces);
-
assertTrue(store == indexManager.getResourceLocator().locate(
store.getNamespace(), ITx.UNISOLATED));
assertTrue(store.getLexiconRelation() == indexManager
@@ -118,9 +126,16 @@
*/
store.destroy();
+ // Did not go through a commit on the LTS.
+ assertEquals(lastCommitTime, store.getIndexManager()
+ .getLastCommitTime());
+
// global row store entry is gone.
- assertTrue(getNamespaces(indexManager).isEmpty());
+ assertTrue(getNamespaces(indexManager, ITx.UNISOLATED).isEmpty());
+ // but not in the last committed view.
+ assertFalse(getNamespaces(indexManager, lastCommitTime).isEmpty());
+
// resources can not be located.
assertTrue(null == indexManager.getResourceLocator().locate(
namespace, ITx.UNISOLATED));
@@ -134,7 +149,19 @@
ITx.UNISOLATED));
assertNull(indexManager.getIndex(primaryStatementIndexName,
ITx.UNISOLATED));
+ // but not at the last commit time.
+ assertNotNull(indexManager.getIndex(primaryStatementIndexName,
+ lastCommitTime));
+
+ /*
+ * Commit.
+ */
+ store.commit();
+ // No longer present at the last commit time.
+ assertTrue(getNamespaces(indexManager,
+ store.getIndexManager().getLastCommitTime()).isEmpty());
+
} finally {
indexManager.destroy();
@@ -175,8 +202,8 @@
store.addTerm(store.getValueFactory().createLiteral("bigdata"));
// Note: Will be in lexical order for Unicode.
- final String[] namespaces = getNamespaces(indexManager).toArray(
- new String[] {});
+ final String[] namespaces = getNamespaces(indexManager,
+ ITx.UNISOLATED).toArray(new String[] {});
assertEquals(new String[] { namespace }, namespaces);
@@ -202,7 +229,7 @@
store.destroy();
// global row store entry is gone.
- assertTrue(getNamespaces(indexManager).isEmpty());
+ assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty());
// resources can not be located.
assertTrue(null == indexManager.getResourceLocator().locate(
@@ -222,6 +249,32 @@
assertNotNull(indexManager.getResourceLocator().locate(namespace,
commitTime-1));
+ /*
+ * Commit the destroy.
+ */
+ store.commit();
+
+
+ // global row store entry is gone.
+ assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty());
+
+ // resources can not be located.
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespace, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceLexiconRelation, ITx.UNISOLATED));
+ assertTrue(null == indexManager.getResourceLocator().locate(
+ namespaceSPORelation, ITx.UNISOLATED));
+
+ // indices are gone.
+ assertNull(indexManager.getIndex(lexiconRelationIndexName,
+ ITx.UNISOLATED));
+ assertNull(indexManager.getIndex(primaryStatementIndexName,
+ ITx.UNISOLATED));
+
+ // The committed version of the triple store remains visible.
+ assertNotNull(indexManager.getResourceLocator().locate(namespace,
+ commitTime-1));
} finally {
indexManager.destroy();
@@ -234,15 +287,24 @@
* Return a list of the namespaces for the {@link AbstractTripleStore}s
* registered against the bigdata instance.
*/
- static private List<String> getNamespaces(final IIndexManager indexManager) {
+ static private List<String> getNamespaces(final IIndexManager indexManager,
+ final long timestamp) {
// the triple store namespaces.
final List<String> namespaces = new LinkedList<String>();
+ final SparseRowStore grs = indexManager.getGlobalRowStore(timestamp);
+
+ if (grs == null) {
+
+ return namespaces;
+
+ }
+
// scan the relation schema in the global row store.
@SuppressWarnings("unchecked")
- final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager
- .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE);
+ final Iterator<ITPS> itr = (Iterator<ITPS>) grs
+ .rangeIterator(RelationSchema.INSTANCE);
while (itr.hasNext()) {
@@ -348,7 +410,7 @@
*
* Note: Will be in lexical order for Unicode.
*/
- final String[] namespaces = getNamespaces(indexManager)
+ final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED)
.toArray(new String[] {});
assertEquals(new String[] { namespace, namespace1 }, namespaces);
@@ -404,7 +466,7 @@
kb.destroy();
// global row store entry is gone.
- final String[] namespaces = getNamespaces(indexManager).toArray(
+ final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray(
new String[] {});
assertEquals(new String[] { namespace1 }, namespaces);
@@ -438,7 +500,7 @@
*
* Note: Will be in lexical order for Unicode.
*/
- final String[] namespaces = getNamespaces(indexManager).toArray(
+ final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray(
new String[] {});
assertEquals(new String[] { namespace1 }, namespaces);
@@ -477,7 +539,7 @@
kb1.destroy();
// global row store entry is gone.
- assertTrue(getNamespaces(indexManager).isEmpty());
+ assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty());
// resources can not be located.
assertTrue(null == indexManager.getResourceLocator().locate(
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -698,23 +698,16 @@
* during the middle of a BigdataSailConnection level operation (or visa
* versa).
*/
+ boolean acquiredConnection = false;
try {
- // acquire the unisolated connection permit.
- journal.acquireUnisolatedConnection();
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- try {
+ try {
+ // acquire the unisolated connection permit.
+ journal.acquireUnisolatedConnection();
+ acquiredConnection = true;
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
-// final boolean create;
-// final long tx0 = txService.newTx(ITx.READ_COMMITTED);
-// try {
-// // verify kb does not exist (can not be located).
-// create = journal.getResourceLocator().locate(namespace, tx0) == null;
-// } finally {
-// txService.abort(tx0);
-// }
-
// Check for pre-existing instance.
{
@@ -730,29 +723,50 @@
}
// Create a new instance.
-// if (create)
{
- final LocalTripleStore lts = new LocalTripleStore(
- journal, namespace, ITx.UNISOLATED, properties);
-
if (Boolean.parseBoolean(properties.getProperty(
BigdataSail.Options.ISOLATABLE_INDICES,
BigdataSail.Options.DEFAULT_ISOLATABLE_INDICES))) {
+ /*
+ * Isolatable indices: requires the use of a tx to create
+ * the KB instance.
+ */
+
final long txCreate = txService.newTx(ITx.UNISOLATED);
-
- final AbstractTripleStore txCreateView = new LocalTripleStore(
- journal, namespace, Long.valueOf(txCreate), properties);
-
- // create the kb instance within the tx.
- txCreateView.create();
-
- // commit the tx.
- txService.commit(txCreate);
+
+ boolean ok = false;
+ try {
+
+ final AbstractTripleStore txCreateView = new LocalTripleStore(
+ journal, namespace, Long.valueOf(txCreate),
+ properties);
+
+ // create the kb instance within the tx.
+ txCreateView.create();
+
+ // commit the tx.
+ txService.commit(txCreate);
+
+ ok = true;
+
+ } finally {
+
+ if (!ok)
+ txService.abort(txCreate);
+
+ }
} else {
+ /*
+ * Create KB without isolatable indices.
+ */
+
+ final LocalTripleStore lts = new LocalTripleStore(
+ journal, namespace, ITx.UNISOLATED, properties);
+
lts.create();
}
@@ -790,7 +804,8 @@
} finally {
- journal.releaseUnisolatedConnection();
+ if (acquiredConnection)
+ journal.releaseUnisolatedConnection();
}
@@ -1314,22 +1329,40 @@
"UNISOLATED connection is not reentrant.");
}
- if (getDatabase().getIndexManager() instanceof Journal) {
- // acquire permit from Journal.
- ((Journal) getDatabase().getIndexManager())
- .acquireUnisolatedConnection();
- }
+ boolean acquiredConnection = false;
+ Lock writeLock = null;
+ BigdataSailConnection conn = null;
+ try {
+ if (getDatabase().getIndexManager() instanceof Journal) {
+ // acquire permit from Journal.
+ ((Journal) getDatabase().getIndexManager())
+ .acquireUnisolatedConnection();
+ acquiredConnection = true;
+ }
- // acquire the write lock.
- final Lock writeLock = lock.writeLock();
- writeLock.lock();
+ // acquire the write lock.
+ writeLock = lock.writeLock();
+ writeLock.lock();
- // new writable connection.
- final BigdataSailConnection conn = new BigdataSailConnection(database,
- writeLock, true/* unisolated */).startConn();
+ // new writable connection.
+ conn = new BigdataSailConnection(database, writeLock, true/* unisolated */)
+ .startConn();
+ } finally {
+ if (conn == null) {
+ // Did not obtain connection.
+ if (writeLock != null) {
+ // release write lock.
+ writeLock.unlock();
+ }
+ if (acquiredConnection) {
+ // release permit.
+ ((Journal) getDatabase().getIndexManager())
+ .releaseUnisolatedConnection();
+ }
+ }
+ }
+ return conn;
- return conn;
-
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -2167,7 +2167,7 @@
* @param namespace
* The namespace.
* @param timestamp
- * The timestamp.
+ * A timestamp -or- a tx identifier.
*
* @return The {@link AbstractTripleStore} -or- <code>null</code> if none is
* found for that namespace and timestamp.
@@ -2205,7 +2205,7 @@
*
* @throws RepositoryException
*/
- public BigdataSailRepositoryConnection getUnisolatedConnection(
+ public BigdataSailRepositoryConnection getUnisolatedConnection( // FIXME REVIEW CALLERS
final String namespace) throws SailException, RepositoryException {
// resolve the default namespace.
@@ -2247,7 +2247,7 @@
try {
- return getNamespaces(timestamp, tx);
+ return getNamespacesTx(tx);
} finally {
@@ -2257,25 +2257,25 @@
}
- private List<String> getNamespaces(long timestamp, final long tx) {
+ /*package*/ List<String> getNamespacesTx(final long tx) {
- if (timestamp == ITx.READ_COMMITTED) {
+// if (timestamp == ITx.READ_COMMITTED) {
+//
+// // Use the last commit point.
+// timestamp = getIndexManager().getLastCommitTime();
+//
+// }
- // Use the last commit point.
- timestamp = getIndexManager().getLastCommitTime();
-
- }
-
// the triple store namespaces.
final List<String> namespaces = new LinkedList<String>();
final SparseRowStore grs = getIndexManager().getGlobalRowStore(
- timestamp);
+ tx);
if (grs == null) {
- log.warn("No GRS @ timestamp="
- + TimestampUtility.toString(timestamp));
+ log.warn("No GRS @ tx="
+ + TimestampUtility.toString(tx));
// Empty.
return namespaces;
@@ -2346,6 +2346,7 @@
long tx = timestamp; // use dirty reads unless Journal.
if (getIndexManager() instanceof Journal) {
+
final ITransactionService txs = ((Journal) getIndexManager())
.getLocalTransactionManager().getTransactionService();
@@ -2368,12 +2369,9 @@
* The transaction identifier.
*/
public void abortTx(final long tx) {
- if (getIndexManager() instanceof Journal) {
-// if (!TimestampUtility.isReadWriteTx(tx)) {
-// // Not a transaction.
-// throw new IllegalStateException();
-// }
+ if (getIndexManager() instanceof Journal) {
+
final ITransactionService txs = ((Journal) getIndexManager())
.getLocalTransactionManager().getTransactionService();
@@ -2388,4 +2386,22 @@
}
+// public void commitTx(final long tx) {
+//
+// if (getIndexManager() instanceof Journal) {
+//
+// final ITransactionService txs = ((Journal) getIndexManager())
+// .getLocalTransactionManager().getTransactionService();
+//
+// try {
+// txs.commit(tx);
+// } catch (IOException e) {
+// // Note: Local operation. Will not throw IOException.
+// throw new RuntimeException(e);
+// }
+//
+// }
+//
+// }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:41:49 UTC (rev 8435)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:43:56 UTC (rev 8436)
@@ -146,17 +146,35 @@
* client's response. This code path should be used iff we have already
* begun writing the response. Otherwise, an HTTP error status should be
* used instead.
+ * <p>
+ * This method is invoked as follows:
*
+ * <pre>
+ * throw launderThrowable(...)
+ * </pre>
+ *
+ * This keeps the compiler happy since it will understand that the caller's
+ * method always exits with a thrown cause.
+ *
* @param t
* The thrown error.
* @param os
* The stream on which the response will be written.
* @param queryStr
- * The SPARQL Query -or- SPARQL Update command (if available).
+ * The SPARQL Query -or- SPARQL Update command (if available)
+ * -or- a summary of the REST API command -or- an empty string if
+ * nothing else is more appropriate.
*
- * @return The laundered exception.
+ * @return Nothing. The pattern of the returned throwable is used to make
+ * the compiler happy.
*
- * @throws Exception
+ * @throws IOException
+ * if the cause was an {@link IOException}
+ * @throws Error
+ * if the cause was an {@link Error}.
+ * @throws RuntimeException
+ * if the cause was a {@link RuntimeException} or anything not
+ * declared to be thrown by this method.
*/
protected static RuntimeException launderThrowable(final Throwable t,
final HttpServletResponse resp, final String queryStr)
@@ -217,7 +235,7 @@
}
}
if (t instanceof RuntimeException) {
- return (RuntimeException) t;
+ throw (RuntimeException) t;
} else if (t instanceof Error) {
throw (Error) t;
} else if (t instanceof IOException) {
@@ -239,10 +257,12 @@
* namespace (or it should be configured for each graph explicitly, or
* we should bundle the (namespace,timestamp) together as a single
* object).
+ *
+ * @see QueryServlet#ATTR_TIMESTAMP;
*/
protected long getTimestamp(final HttpServletRequest req) {
- final String timestamp = req.getParameter("timestamp");
+ final String timestamp = req.getParameter(QueryServlet.ATTR_TIMESTAMP);
if (timestamp == null) {
@@ -342,7 +362,7 @@
protected void reportModifiedCount(final HttpServletResponse resp,
final long nmodified, final long elapsed) throws IOException {
- final StringWriter w = new StringWriter();
+ final StringWriter w = new StringWriter();
final XMLBuilder t = new XMLBuilder(w);
@@ -422,40 +442,37 @@
/*
* CONNEG for the MIME type.
*/
- {
+ final String acceptStr = req.getHeader("Accept");
- final String acceptStr = req.getHeader("Accept");
+ final ConnegUtil util = new ConnegUtil(acceptStr);
- final ConnegUtil util = new ConnegUtil(acceptStr);
+ // The best RDFFormat for that Accept header.
+ RDFFormat format = util.getRDFFormat();
- // The best RDFFormat for that Accept header.
- RDFFormat format = util.getRDFFormat();
-
- if (format == null)
- format = RDFFormat.RDFXML;
+ if (format == null)
+ format = RDFFormat.RDFXML;
- resp.setStatus(HTTP_OK);
+ resp.setStatus(HTTP_OK);
- resp.setContentType(format.getDefaultMIMEType());
+ resp.setContentType(format.getDefaultMIMEType());
- final OutputStream os = resp.getOutputStream();
- try {
- final RDFWriter writer = RDFWriterRegistry.getInstance()
- .get(format).getWriter(os);
- writer.startRDF();
- final Iterator<Statement> itr = g.iterator();
- while (itr.hasNext()) {
- final Statement stmt = itr.next();
- writer.handleStatement(stmt);
- }
- writer.endRDF();
- os.flush();
- } catch (RDFHandlerException e) {
- log.error(e, e);
- throw launderThrowable(e, resp, "");
- } finally {
- os.close();
+ final OutputStream os = resp.getOutputStream();
+ try {
+ final RDFWriter writer = RDFWriterRegistry.getInstance()
+ .get(format).getWriter(os);
+ writer.startRDF();
+ final Iterator<Statement> itr = g.iterator();
+ while (itr.hasNext()) {
+ final Statement stmt = itr.next();
+ writer.handleStatement(stmt);
}
+ writer.endRDF();
+ os.flush();
+ } catch (RDFHandlerException e) {
+ // log.error(e, e);
+ throw launderThrowable(e, resp, "");
+ } finally {
+ os.close();
}
}
@@ -471,34 +488,31 @@
/*
* CONNEG for the MIME type.
*/
- {
+ final String acceptStr = req.getHeader("Accept");
- final String acceptStr = req.getHeader("Accept");
+ final ConnegUtil util = new ConnegUtil(acceptStr);
- final ConnegUtil util = new ConnegUtil(acceptStr);
+ // The best format for that Accept header.
+ PropertiesFormat format = util.getPropertiesFormat();
- // The bes...
[truncated message content] |
|
From: <tho...@us...> - 2014-06-11 13:13:24
|
Revision: 8466
http://sourceforge.net/p/bigdata/code/8466
Author: thompsonbry
Date: 2014-06-11 13:13:13 +0000 (Wed, 11 Jun 2014)
Log Message:
-----------
Fix for Name2Addr prefix scan and improved correctness for LexiconRelation.prefixScan().
Key changes are to:
- IKeyBuilderFactory - defines getPrimaryKeyBuilder()
- LexiconRelation - uses the getPrimaryKeyBuilder() method.
- Name2Addr - uses the getPrimaryKeyBuilder() method.
Javadoc updates to PrefixFilter.
Added @Override and final attributes to several classes that were touched by this fix.
I have run through the TestLocalTripleStore and TestRWJournal test suites and everything is good. I am currently running TestBigdataSailWithQuads but do not anticipate any issues.
I have verified that the existing tests for Name2Addr and the LexiconRelation prefix scans fail if the code uses the default collation strength rather than PRIMARY so we know that we have regression tests in place for those behaviors.
See #974 (Name2Addr.indexNameScan(prefix) uses scan + filter)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -102,12 +102,14 @@
private IRabaCoder leafKeysCoder;
private IRabaCoder leafValsCoder;
+ @Override
final public IRabaCoder getLeafKeysCoder() {
return leafKeysCoder;
}
+ @Override
final public IRabaCoder getLeafValuesCoder() {
return leafValsCoder;
@@ -213,6 +215,7 @@
}
+ @Override
public String toString() {
final StringBuilder sb = new StringBuilder();
@@ -237,6 +240,7 @@
* that the specific configuration values are persisted, even when the
* {@link DefaultTupleSerializer} is de-serialized on a different host.
*/
+ @Override
final public IKeyBuilder getKeyBuilder() {
if(threadLocalKeyBuilderFactory == null) {
@@ -259,6 +263,30 @@
}
+ @Override
+ final public IKeyBuilder getPrimaryKeyBuilder() {
+
+ if(threadLocalKeyBuilderFactory == null) {
+
+ /*
+ * This can happen if you use the de-serialization ctor by mistake.
+ */
+
+ throw new IllegalStateException();
+
+ }
+
+ /*
+ * TODO This should probably do a reset() before returning the object.
+ * However, we need to verify that no callers are assuming that it does
+ * NOT do a reset and implicitly relying on passing the intermediate key
+ * via the return value (which would be very bad style).
+ */
+ return threadLocalKeyBuilderFactory.getPrimaryKeyBuilder();
+
+ }
+
+ @Override
public byte[] serializeKey(final Object obj) {
if (obj == null)
@@ -277,6 +305,7 @@
* @return The serialized representation of the object as a byte[] -or-
* <code>null</code> if the reference is <code>null</code>.
*/
+ @Override
public byte[] serializeVal(final V obj) {
return SerializerUtil.serialize(obj);
@@ -287,6 +316,7 @@
* De-serializes an object from the {@link ITuple#getValue() value} stored
* in the tuple (ignores the key stored in the tuple).
*/
+ @Override
public V deserialize(ITuple tuple) {
if (tuple == null)
@@ -308,6 +338,7 @@
* @throws UnsupportedOperationException
* always.
*/
+ @Override
public K deserializeKey(ITuple tuple) {
throw new UnsupportedOperationException();
@@ -327,6 +358,7 @@
*/
private final static transient byte VERSION = VERSION0;
+ @Override
public void readExternal(final ObjectInput in) throws IOException,
ClassNotFoundException {
@@ -346,6 +378,7 @@
}
+ @Override
public void writeExternal(final ObjectOutput out) throws IOException {
out.writeByte(VERSION);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -2910,11 +2910,19 @@
* specified for <i>this</i> index.
* </p>
*/
+ @Override
public IKeyBuilder getKeyBuilder() {
return getTupleSerializer().getKeyBuilder();
}
+
+ @Override
+ public IKeyBuilder getPrimaryKeyBuilder() {
+
+ return getTupleSerializer().getPrimaryKeyBuilder();
+
+ }
/**
* @see Configuration#getProperty(IIndexManager, Properties, String, String,
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -26,7 +26,7 @@
* </p>
* <h4>WARNING</h4>
* <p>
- * <strong>The prefix keys MUST be formed with {@link StrengthEnum#Identical}.
+ * <strong>The prefix keys MUST be formed with {@link StrengthEnum#Primary}.
* This is necessary in order to match all keys in the index since it causes the
* secondary characteristics to NOT be included in the prefix key even if they
* are present in the keys in the index.</strong> Using other
@@ -55,20 +55,21 @@
* <p>
* at IDENTICAL strength. The additional bytes for the IDENTICAL strength
* reflect the Locale specific Unicode sort key encoding of secondary
- * characteristics such as case. The successor of the PRIMARY strength byte[] is
+ * characteristics such as case. The successor of the IDENTICAL strength byte[]
+ * is
* </p>
*
* <pre>
- * [43, 75, 89, 41, 68]
+ * [43, 75, 89, 41, 67, 1, 9, 1, 143, 9]
* </pre>
*
* <p>
* (one was added to the last byte) which spans all keys of interest. However
- * the successor of the IDENTICAL strength byte[] would
+ * the successor of the PRIMARY strength byte[] would
* </p>
*
* <pre>
- * [43, 75, 89, 41, 67, 1, 9, 1, 143, 9]
+ * [43, 75, 89, 41, 68]
* </pre>
*
* <p>
@@ -81,8 +82,8 @@
* <pre>
* Properties properties = new Properties();
*
- * properties.setProperty(KeyBuilder.Options.STRENGTH, StrengthEnum.Primary
- * .toString());
+ * properties.setProperty(KeyBuilder.Options.STRENGTH,
+ * StrengthEnum.Primary.toString());
*
* prefixKeyBuilder = KeyBuilder.newUnicodeInstance(properties);
* </pre>
@@ -104,7 +105,9 @@
* partition....
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/974" >
+ * Name2Addr.indexNameScan(prefix) uses scan + filter </a>
*/
public class PrefixFilter<E> extends FilterBase implements ITupleFilter<E> {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -39,7 +39,6 @@
* Factory for instances that do NOT support Unicode.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class ASCIIKeyBuilderFactory implements IKeyBuilderFactory, Externalizable {
@@ -59,6 +58,7 @@
/**
* Representation includes all aspects of the {@link Serializable} state.
*/
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder(getClass().getName());
@@ -87,19 +87,35 @@
}
+ @Override
public IKeyBuilder getKeyBuilder() {
return KeyBuilder.newInstance(initialCapacity);
}
- public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Note: The PRIMARY is identical to the as-configured {@link IKeyBuilder}
+ * for ASCII.
+ */
+ @Override
+ public IKeyBuilder getPrimaryKeyBuilder() {
+ return getKeyBuilder();
+
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+
initialCapacity = in.readInt();
}
- public void writeExternal(ObjectOutput out) throws IOException {
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
out.writeInt(initialCapacity);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -409,6 +409,7 @@
}
+ @Override
public IKeyBuilder getKeyBuilder() {
if(log.isDebugEnabled()) {
@@ -422,6 +423,20 @@
}
+ @Override
+ public IKeyBuilder getPrimaryKeyBuilder() {
+
+ if(log.isDebugEnabled()) {
+
+ log.debug(toString());
+
+ }
+
+ return KeyBuilder.newInstance(initialCapacity, collator, locale,
+ StrengthEnum.Primary, decompositionMode);
+
+ }
+
/**
* Text of the exception thrown when the ICU library is required but is not
* available.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -32,7 +32,6 @@
* A factory for pre-configured {@link IKeyBuilder} instances.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public interface IKeyBuilderFactory {
@@ -41,4 +40,15 @@
*/
public IKeyBuilder getKeyBuilder();
+ /**
+ * Return an instance of the configured {@link IKeyBuilder} that has been
+ * overridden to have {@link StrengthEnum#Primary} collation strength. This
+ * may be used to form successors for Unicode prefix scans without having
+ * the secondary sort ordering characteristics mucking things up.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/974" >
+ * Name2Addr.indexNameScan(prefix) uses scan + filter </a>
+ */
+ public IKeyBuilder getPrimaryKeyBuilder();
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -31,8 +31,9 @@
import com.bigdata.btree.IIndex;
/**
+ * A thread-local implementation.
+ *
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class ThreadLocalKeyBuilderFactory implements IKeyBuilderFactory {
@@ -58,6 +59,7 @@
*/
private ThreadLocal<IKeyBuilder> threadLocalKeyBuilder = new ThreadLocal<IKeyBuilder>() {
+ @Override
protected synchronized IKeyBuilder initialValue() {
return delegate.getKeyBuilder();
@@ -67,13 +69,41 @@
};
/**
+ * {@inheritDoc}
+ * <p>
* Return a {@link ThreadLocal} {@link IKeyBuilder} instance configured
* using the {@link IKeyBuilderFactory} specified to the ctor.
*/
+ @Override
public IKeyBuilder getKeyBuilder() {
return threadLocalKeyBuilder.get();
}
+ private ThreadLocal<IKeyBuilder> threadLocalPrimaryKeyBuilder = new ThreadLocal<IKeyBuilder>() {
+
+ @Override
+ protected synchronized IKeyBuilder initialValue() {
+
+ return delegate.getPrimaryKeyBuilder();
+
+ }
+
+ };
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Return a {@link ThreadLocal} {@link IKeyBuilder} instance configured
+ * using the {@link IKeyBuilderFactory} specified to the ctor but with the
+ * {@link StrengthEnum} overridden as {@link StrengthEnum#Primary}.
+ */
+ @Override
+ public IKeyBuilder getPrimaryKeyBuilder() {
+
+ return threadLocalPrimaryKeyBuilder.get();
+
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -62,7 +62,6 @@
import com.bigdata.btree.ITuple;
import com.bigdata.btree.ITupleIterator;
import com.bigdata.btree.IndexMetadata;
-import com.bigdata.btree.keys.CollatorEnum;
import com.bigdata.btree.keys.DefaultKeyBuilderFactory;
import com.bigdata.btree.keys.IKeyBuilder;
import com.bigdata.btree.keys.IKeyBuilderFactory;
@@ -82,9 +81,7 @@
import com.bigdata.resources.IndexManager;
import com.bigdata.resources.ResourceManager;
import com.bigdata.util.concurrent.ExecutionExceptions;
-import com.ibm.icu.text.Collator;
-import cutthecrap.utils.striterators.Filter;
import cutthecrap.utils.striterators.IStriterator;
import cutthecrap.utils.striterators.Resolver;
import cutthecrap.utils.striterators.Striterator;
@@ -185,7 +182,6 @@
* reference to the index and we need both on hand to do the commit.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
private class DirtyListener implements IDirtyListener, Comparable<DirtyListener> {
@@ -194,6 +190,7 @@
boolean needsCheckpoint;
long checkpointAddr = 0L;
+ @Override
public String toString() {
return "DirtyListener{name="
@@ -204,7 +201,8 @@
}
- private DirtyListener(String name, ICheckpointProtocol btree, boolean needsCheckpoint) {
+ private DirtyListener(final String name,
+ final ICheckpointProtocol btree, final boolean needsCheckpoint) {
assert name!=null;
@@ -253,6 +251,7 @@
*
* @param btree
*/
+ @Override
public void dirtyEvent(final ICheckpointProtocol btree) {
assert btree == this.btree;
@@ -549,6 +548,7 @@
/**
* @return <i>self</i>
*/
+ @Override
public CommitIndexTask call() throws Exception {
if (log.isInfoEnabled())
@@ -666,6 +666,7 @@
* >Flush indices in parallel during checkpoint to reduce IO
* latency</a>
*/
+ @Override
synchronized
public long handleCommit(final long commitTime) {
@@ -1394,6 +1395,7 @@
}
+ @Override
public String toString() {
return "Entry{name=" + name + ",checkpointAddr=" + checkpointAddr
@@ -1558,6 +1560,7 @@
*/
private final static transient byte VERSION = VERSION0;
+ @Override
public void readExternal(final ObjectInput in) throws IOException,
ClassNotFoundException {
@@ -1575,6 +1578,7 @@
}
+ @Override
public void writeExternal(final ObjectOutput out) throws IOException {
super.writeExternal(out);
@@ -1596,34 +1600,11 @@
*
* @return The names of the indices spanned by that prefix in that index.
*
- * FIXME There is a problem with the prefix scan. It appears that we
- * are not able to generate the key for a prefix correctly. This
- * problem is being worked around by scanning the entire
- * {@link Name2Addr} index and then filter for those entries that
- * start with the specified prefix. This is not very scalable.
- * <p>
- * If you change {@link Name2Addr} to use {@link CollatorEnum#ASCII}
- * then the prefix scan works correctly without that filter. The
- * problem is related to how the {@link Collator} is encoding the
- * keys. Neither the ICU nor the JDK collators work for this right
- * now. At least the ICU collator winds up with some additional
- * bytes after the "end" of the prefix that do not appear when you
- * encode the entire index name. For example, compare "kb" and
- * "kb.red". See TestName2Addr for more about this issue.
- * <p>
- * Fixing this problem MIGHT require a data migration. Or we might
- * be able to handle this entirely by using an appropriate
- * {@link Name2Addr#getKey(String)} and
- * {@link Name2AddrTupleSerializer#serializeKey(Object)}
- * implementation (depending on how the keys are being encoded).
- * <p>
- * Update: See <a
- * href="https://sourceforge.net/apps/trac/bigdata/ticket/743">
- * AbstractTripleStore.destroy() does not filter for correct prefix
- * </a> as well. Maybe the problem is just that we need to have the
- * "." appended to the namespace. This could be something that is
- * done automatically if the caller does not take care of it
- * themselves.
+ * @see <a href="http://trac.bigdata.com/ticket/974" >
+ * Name2Addr.indexNameScan(prefix) uses scan + filter </a>
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/743">
+ * AbstractTripleStore.destroy() does not filter for correct prefix
+ * </a>
*/
public static final Iterator<String> indexNameScan(final String prefix,
final IIndex n2a) {
@@ -1631,27 +1612,37 @@
final byte[] fromKey;
final byte[] toKey;
final boolean hasPrefix = prefix != null && prefix.length() > 0;
- final boolean restrictScan = false;
+// final boolean restrictScan = true;
- if (hasPrefix && restrictScan) {
+ if (hasPrefix ) //&& restrictScan)
+ {
/*
* When the namespace prefix was given, generate the toKey as the
* fixed length successor of the fromKey.
+ *
+ * Note: We MUST use StrengthEnum:=PRIMARY for the prefix scan in
+ * order to avoid the secondary collation ordering effects.
*/
- log.error("prefix=" + prefix);
+// final IKeyBuilder keyBuilder = n2a.getIndexMetadata()
+// .getTupleSerializer().getKeyBuilder();
+// final Properties properties = new Properties();
+//
+// properties.setProperty(KeyBuilder.Options.STRENGTH,
+// StrengthEnum.Primary.toString());
+//
+// final IKeyBuilder keyBuilder = new DefaultKeyBuilderFactory(
+// properties).getKeyBuilder();
final IKeyBuilder keyBuilder = n2a.getIndexMetadata()
- .getTupleSerializer().getKeyBuilder();
-
+ .getPrimaryKeyBuilder();
+
fromKey = keyBuilder.reset().append(prefix).getKey();
- // toKey =
- // keyBuilder.reset().append(prefix).appendNul().getKey();
toKey = SuccessorUtil.successor(fromKey.clone());
- if (true || log.isDebugEnabled()) {
+ if (log.isDebugEnabled()) {
log.error("fromKey=" + BytesUtil.toString(fromKey));
@@ -1670,6 +1661,9 @@
@SuppressWarnings("unchecked")
final ITupleIterator<Entry> itr = n2a.rangeIterator(fromKey, toKey);
+ /*
+ * Add resolver from the tuple to the name of the index.
+ */
IStriterator sitr = new Striterator(itr);
sitr = sitr.addFilter(new Resolver() {
@@ -1686,38 +1680,63 @@
});
- if (hasPrefix && !restrictScan) {
+// if (hasPrefix && !restrictScan) {
+//
+// /*
+// * Only report the names that match the prefix.
+// *
+// * Note: For the moment, the filter is hacked by examining the
+// * de-serialized Entry objects and only reporting those that start
+// * with the [prefix].
+// */
+//
+// sitr = sitr.addFilter(new Filter() {
+//
+// private static final long serialVersionUID = 1L;
+//
+// @Override
+// public boolean isValid(final Object obj) {
+//
+// final String name = (String) obj;
+//
+// if (name.startsWith(prefix)) {
+//
+// // acceptable.
+// return true;
+// }
+// return false;
+// }
+// });
+//
+// }
- /*
- * Only report the names that match the prefix.
- *
- * Note: For the moment, the filter is hacked by examining the
- * de-serialized Entry objects and only reporting those that start
- * with the [prefix].
- */
-
- sitr = sitr.addFilter(new Filter() {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public boolean isValid(final Object obj) {
-
- final String name = (String) obj;
-
- if (name.startsWith(prefix)) {
-
- // acceptable.
- return true;
- }
- return false;
- }
- });
-
- }
-
return sitr;
}
+// /**
+// * The SuccessorUtil does not work with CollatedKeys since it bumps the "meta/control" data
+// * at the end of the key, rather than the "value" data of the key.
+// *
+// * It has been observed that the key data is delimited with a 01 byte, followed by meta/control
+// * data with the key itself delimited by a 00 byte.
+// *
+// * Note that this has only been analyzed for the ICU collator, the standard Java collator does include
+// * 00 bytes in the key. However, it too appears to delimit the value key with a 01 byte so the
+// * same method should work.
+// *
+// * @param src - original key
+// * @return the next key
+// */
+// private static byte[] successor(final byte[] src) {
+// final byte[] nxt = src.clone();
+// for (int i = 1; i < nxt.length; i++) {
+// if (nxt[i] == 01) { // end of data
+// nxt[i-1]++;
+// break;
+// }
+// }
+//
+// return nxt;
+// }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -82,7 +82,6 @@
* Test suite for {@link BufferMode#DiskRW} journals.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class TestRWJournal extends AbstractJournalTestCase {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -71,11 +71,8 @@
import com.bigdata.btree.IndexTypeEnum;
import com.bigdata.btree.filter.PrefixFilter;
import com.bigdata.btree.filter.TupleFilter;
-import com.bigdata.btree.keys.DefaultKeyBuilderFactory;
import com.bigdata.btree.keys.IKeyBuilder;
import com.bigdata.btree.keys.KVO;
-import com.bigdata.btree.keys.KeyBuilder;
-import com.bigdata.btree.keys.StrengthEnum;
import com.bigdata.cache.ConcurrentWeakValueCacheWithBatchedUpdates;
import com.bigdata.journal.IIndexManager;
import com.bigdata.journal.IResourceLock;
@@ -105,7 +102,6 @@
import com.bigdata.rdf.model.BigdataValueSerializer;
import com.bigdata.rdf.rio.StatementBuffer;
import com.bigdata.rdf.spo.ISPO;
-import com.bigdata.rdf.spo.SPO;
import com.bigdata.rdf.store.AbstractTripleStore;
import com.bigdata.rdf.vocab.NoVocabulary;
import com.bigdata.rdf.vocab.Vocabulary;
@@ -1421,27 +1417,32 @@
}
- /*
+ /**
* The KeyBuilder used to form the prefix keys.
*
- * Note: The prefix keys are formed with IDENTICAL strength. This is
+ * Note: The prefix keys are formed with PRIMARY strength. This is
* necessary in order to match all keys in the index since it causes the
* secondary characteristics to NOT be included in the prefix key even
* if they are present in the keys in the index.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/974" >
+ * Name2Addr.indexNameScan(prefix) uses scan + filter </a>
*/
- final LexiconKeyBuilder keyBuilder;
- {
+ final LexiconKeyBuilder keyBuilder = ((Term2IdTupleSerializer) getTerm2IdIndex()
+ .getIndexMetadata().getTupleSerializer())
+ .getLexiconPrimaryKeyBuilder();
+// {
+//
+// final Properties properties = new Properties();
+//
+// properties.setProperty(KeyBuilder.Options.STRENGTH,
+// StrengthEnum.Primary.toString());
+//
+// keyBuilder = new Term2IdTupleSerializer(
+// new DefaultKeyBuilderFactory(properties)).getLexiconKeyBuilder();
+//
+// }
- final Properties properties = new Properties();
-
- properties.setProperty(KeyBuilder.Options.STRENGTH,
- StrengthEnum.Primary.toString());
-
- keyBuilder = new Term2IdTupleSerializer(
- new DefaultKeyBuilderFactory(properties)).getLexiconKeyBuilder();
-
- }
-
/*
* Formulate the keys[].
*
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java 2014-06-11 09:34:45 UTC (rev 8465)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java 2014-06-11 13:13:13 UTC (rev 8466)
@@ -118,12 +118,30 @@
}
/**
+ * Return a {@link LexiconKeyBuilder} that is setup with collation strength
+ * ...
[truncated message content] |
|
From: <tho...@us...> - 2014-06-11 16:35:58
|
Revision: 8472
http://sourceforge.net/p/bigdata/code/8472
Author: thompsonbry
Date: 2014-06-11 16:35:53 +0000 (Wed, 11 Jun 2014)
Log Message:
-----------
1. Additional bug fixes for the REST API connection try/finally and launder throwable patterns.
2. Merged in a refactoring to support group commit at the NSS based on hierarchical locking (using the Name2Addr prefix scan) and the ConcurrencyManager + AbstractTask mechanism. This refactoring is not complete, but much of the NSS test suite passes when group commit is enabled.
See #566 (NSS group commit)
See #966 (Failed to get namespace list under concurrent update)
Patched files:
- LocalTripleStore: - getIndexManager() returns IJournal (was Journal)
- QueryServlet - innocuous changes and FIXME comment block for SPARQL UPDATE to support group commit.
- RestApiTask - new
- RestApiTaskForIndexManager - new
- RestApiTaskForJournal - new
- UpdateServlet - adds fix to connection try/finally and launder throwable pattern.
- AbstractTestNanoSparqlServerClient : conditional tripleStore.destroy() with FIXME for group commit.
- AbstractTask - includes comments about how to create a hierarchical locking system using N2A scans.
Unpatched files:
- BigdataRDFServlet - no interesting changes.
- BigdataServlet - pulled in submitApiTask(), getKBLocks(), and OLD_EXECUTION_MODEL = true.
- DeleteServlet - reconciled. captures REST API task pattern. adds fixes to connection try/finally that were somehow overlooked.
- InsertServlet - reconciled. captures REST API task pattern and fixes to connection try/finally and launderThrowable patterns.
- MultiTenancyServlet - change is incorrect (deals with ITx.READ_COMMITTED). Need to modify this class to use the new pattern.
Other files:
- BigdataStatics - added a global boolean that will allow us to enable the NSS group commit feature from a system property (com.bigdata.nssGroupCommit).
- BigdataRDFContext - modified call() to use the try/finally pattern for SPARQL QUERY and UPDATE.
- BlueprintsServlet - added the try/finally/launder pattern.
- WorkbenchServlet - modified to no longer access the AbstractTripleStore and to use a ValueFactoryImpl instead.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractTestNanoSparqlClient.java
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTask.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForIndexManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForJournal.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -27,12 +27,14 @@
package com.bigdata;
+import com.bigdata.journal.IIndexManager;
+import com.bigdata.relation.AbstractRelation;
+
/**
* A class for those few statics that it makes sense to reference from other
* places.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class BigdataStatics {
@@ -109,4 +111,21 @@
}
+ /**
+ * FIXME GROUP COMMIT : Disable/Enable group commit on the Journal from the
+ * NSS API. Some global flag should control this and also disable the
+ * journal's semaphore and should disable the wrapping of BTree as an
+ * UnisolatedReadWriteIndex (
+ * {@link AbstractRelation#getIndex(IIndexManager, String, long)}, and
+ * should disable the calls to commit() or abort() from the LocalTripleStore
+ * to the Journal.
+ *
+ * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA
+ * doLocalAbort() should interrupt NSS requests and AbstractTasks </a>
+ * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" >
+ * Concurrent unisolated operations against multiple KBs </a>
+ */
+ public static final boolean NSS_GROUP_COMMIT = Boolean
+ .getBoolean("com.bigdata.nssGroupCommit");
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -1249,7 +1249,7 @@
* Flag is cleared if the task is aborted. This is used to refuse
* access to resources for tasks that ignore interrupts.
*/
- boolean aborted = false;
+ volatile boolean aborted = false;
/**
* The {@link AbstractTask} increments various counters of interest to the
@@ -1557,7 +1557,7 @@
/**
* Return <code>true</code> iff the task declared this as a resource.
*
- * @param name
+ * @param theRequestedResource
* The name of a resource.
*
* @return <code>true</code> iff <i>name</i> is a declared resource.
@@ -1565,17 +1565,58 @@
* @throws IllegalArgumentException
* if <i>name</i> is <code>null</code>.
*/
- public boolean isResource(String name) {
-
- if (name == null)
+ public boolean isResource(final String theRequestedResource) {
+
+ if (theRequestedResource == null)
throw new IllegalArgumentException();
-
- for(String s : resource) {
-
- if(s.equals(name)) return true;
-
+
+ for (String theDeclaredResource : resource) {
+
+ if (theDeclaredResource.equals(theRequestedResource)) {
+ /*
+ * Exact match. This resource was declared.
+ */
+ return true;
+ }
+
+ /**
+ * FIXME GROUP_COMMIT: Supporting this requires us to support
+ * efficient scans of the indices in Name2Addr having the prefix
+ * values declared by [resources] since getIndex(name) will fail if
+ * the Name2Addr entry has not been buffered within the [n2a] cache.
+ *
+ * @see <a
+ * href="http://sourceforge.net/apps/trac/bigdata/ticket/753" >
+ * HA doLocalAbort() should interrupt NSS requests and
+ * AbstractTasks </a>
+ * @see <a
+ * href="- http://sourceforge.net/apps/trac/bigdata/ticket/566"
+ * > Concurrent unisolated operations against multiple KBs </a>
+ */
+// if (theRequestedResource.startsWith(theDeclaredResource)) {
+//
+// // Possible prefix match.
+//
+// if (theRequestedResource.charAt(theDeclaredResource.length()) == '.') {
+//
+// /*
+// * Prefix match.
+// *
+// * E.g., name:="kb.spo.osp" and the task declared the
+// * resource "kb". In this case, "kb" is a PREFIX of the
+// * declared resource and the next character is the separator
+// * character for the resource names (this last point is
+// * important to avoid unintended contention between
+// * namespaces such as "kb" and "kb1").
+// */
+// return true;
+//
+// }
+//
+// }
+
}
-
+
return false;
}
@@ -2085,46 +2126,53 @@
}
+ @Override
public IResourceManager getResourceManager() {
return delegate.getResourceManager();
}
+ @Override
public IJournal getJournal() {
return delegate.getJournal();
}
+ @Override
public String[] getResource() {
return delegate.getResource();
}
+ @Override
public String getOnlyResource() {
return delegate.getOnlyResource();
}
+ @Override
public IIndex getIndex(String name) {
return delegate.getIndex(name);
}
+ @Override
public TaskCounters getTaskCounters() {
return delegate.getTaskCounters();
}
+ @Override
public String toString() {
- return getClass().getName()+"("+delegate.toString()+")";
-
+ return getClass().getName() + "(" + delegate.toString() + ")";
+
}
}
@@ -2577,8 +2625,13 @@
}
// read committed view IFF it exists otherwise [null]
- return new GlobalRowStoreHelper(this).get(ITx.READ_COMMITTED);
+ // TODO Review. Make sure we have tx protection to avoid recycling of the view.
+ final long lastCommitTime = getLastCommitTime();
+ return new GlobalRowStoreHelper(this).get(lastCommitTime);
+
+ //return new GlobalRowStoreHelper(this).get(ITx.READ_COMMITTED);
+
}
@Override
@@ -2696,12 +2749,32 @@
* Disallowed methods (commit protocol and shutdown protocol).
*/
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Marks the task as aborted. The task will not commit. However, the
+ * task will continue to execute until control returns from its
+ * {@link AbstractTask#doTask()} method.
+ */
@Override
public void abort() {
- throw new UnsupportedOperationException();
+ aborted = true;
}
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Overridden as NOP. Tasks do not directly invoke commit() on the
+ * Journal.
+ */
@Override
+ public long commit() {
+ if (aborted)
+ throw new IllegalStateException("aborted");
+ return 0;
+ }
+
+ @Override
public void close() {
throw new UnsupportedOperationException();
}
@@ -2717,11 +2790,6 @@
}
@Override
- public long commit() {
- throw new UnsupportedOperationException();
- }
-
- @Override
public void setCommitter(int index, ICommitter committer) {
throw new UnsupportedOperationException();
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -33,6 +33,7 @@
import com.bigdata.btree.BTree;
import com.bigdata.journal.IIndexManager;
+import com.bigdata.journal.IJournal;
import com.bigdata.journal.ITx;
import com.bigdata.journal.Journal;
import com.bigdata.relation.locator.DefaultResourceLocator;
@@ -55,13 +56,13 @@
final static private Logger log = Logger.getLogger(LocalTripleStore.class);
- private final Journal store;
+ private final IJournal store;
/**
* The backing embedded database.
*/
@Override
- public Journal getIndexManager() {
+ public IJournal getIndexManager() {
return store;
@@ -160,7 +161,7 @@
super(indexManager, namespace, timestamp, properties);
- store = (Journal) indexManager;
+ store = (IJournal) indexManager;
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -1135,63 +1135,46 @@
abstract protected void doQuery(BigdataSailRepositoryConnection cxn,
OutputStream os) throws Exception;
+ @Override
final public Void call() throws Exception {
BigdataSailRepositoryConnection cxn = null;
+ boolean success = false;
try {
+ // Note: Will be UPDATE connection if UPDATE request!!!
cxn = getQueryConnection(namespace, timestamp);
if(log.isTraceEnabled())
log.trace("Query running...");
beginNanos = System.nanoTime();
-// try {
- if (explain && !update) {
- /*
- * The data goes to a bit bucket and we send an
- * "explanation" of the query evaluation back to the caller.
- *
- * Note: The trick is how to get hold of the IRunningQuery
- * object. It is created deep within the Sail when we
- * finally submit a query plan to the query engine. We have
- * the queryId (on queryId2), so we can look up the
- * IRunningQuery in [m_queries] while it is running, but
- * once it is terminated the IRunningQuery will have been
- * cleared from the internal map maintained by the
- * QueryEngine, at which point we can not longer find it.
- *
- * Note: We can't do this for UPDATE since it would have
- * a side-effect anyway. The way to "EXPLAIN" an UPDATE
- * is to break it down into the component QUERY bits and
- * execute those.
- */
- doQuery(cxn, new NullOutputStream());
- } else {
- doQuery(cxn, os);
- os.flush();
- os.close();
- }
- if(log.isTraceEnabled())
- log.trace("Query done.");
-// } catch(Throwable t) {
-// /*
-// * Log the query and the exception together.
-// */
-// log.error(t.getLocalizedMessage() + ":\n" + queryStr, t);
-// }
- return null;
- } catch (Throwable t) {
- log.error("Will abort: " + t, t);
- if (cxn != null && !cxn.isReadOnly()) {
+ if (explain && !update) {
/*
- * Force rollback of the connection.
+ * The data goes to a bit bucket and we send an
+ * "explanation" of the query evaluation back to the caller.
*
- * Note: It is possible that the commit has already been
- * processed, in which case this rollback() will be a NOP.
- * This can happen when there is an IO error when
- * communicating with the client, but the database has
- * already gone through a commit.
+ * Note: The trick is how to get hold of the IRunningQuery
+ * object. It is created deep within the Sail when we
+ * finally submit a query plan to the query engine. We have
+ * the queryId (on queryId2), so we can look up the
+ * IRunningQuery in [m_queries] while it is running, but
+ * once it is terminated the IRunningQuery will have been
+ * cleared from the internal map maintained by the
+ * QueryEngine, at which point we can not longer find it.
+ *
+ * Note: We can't do this for UPDATE since it would have a
+ * side-effect anyway. The way to "EXPLAIN" an UPDATE is to
+ * break it down into the component QUERY bits and execute
+ * those.
*/
- cxn.rollback();
+ doQuery(cxn, new NullOutputStream());
+ success = true;
+ } else {
+ doQuery(cxn, os);
+ success = true;
+ os.flush();
+ os.close();
}
- throw new Exception(t);
+ if (log.isTraceEnabled())
+ log.trace("Query done.");
+ return null;
} finally {
endNanos = System.nanoTime();
m_queries.remove(queryId);
@@ -1204,11 +1187,26 @@
// }
// }
if (cxn != null) {
+ if (!success && !cxn.isReadOnly()) {
+ /*
+ * Force rollback of the connection.
+ *
+ * Note: It is possible that the commit has already been
+ * processed, in which case this rollback() will be a
+ * NOP. This can happen when there is an IO error when
+ * communicating with the client, but the database has
+ * already gone through a commit.
+ */
+ try {
+ // Force rollback of the connection.
+ cxn.rollback();
+ } catch (Throwable t) {
+ log.error(t, t);
+ }
+ }
try {
// Force close of the connection.
cxn.close();
- if(log.isTraceEnabled())
- log.trace("Connection closed.");
} catch (Throwable t) {
log.error(t, t);
}
@@ -1432,6 +1430,7 @@
* <p>
* This executes the SPARQL UPDATE and formats the HTTP response.
*/
+ @Override
protected void doQuery(final BigdataSailRepositoryConnection cxn,
final OutputStream os) throws Exception {
@@ -1439,24 +1438,31 @@
* Setup a change listener. It will notice the #of mutations.
*/
final CAT mutationCount = new CAT();
+
cxn.addChangeLog(new IChangeLog(){
+
@Override
public void changeEvent(final IChangeRecord record) {
mutationCount.increment();
}
+
@Override
public void transactionBegin() {
}
+
@Override
public void transactionPrepare() {
}
+
@Override
public void transactionCommited(long commitTime) {
}
+
@Override
public void transactionAborted() {
- }});
-
+ }
+ });
+
// Prepare the UPDATE request.
final BigdataSailUpdate update = setupUpdate(cxn);
@@ -2106,10 +2112,11 @@
}
/**
- * Return a connection transaction. When the timestamp is associated with a
- * historical commit point, this will be a read-only connection. When it is
- * associated with the {@link ITx#UNISOLATED} view or a read-write
- * transaction, this will be a mutable connection.
+ * Return a connection transaction, which may be read-only or support
+ * update. When the timestamp is associated with a historical commit point,
+ * this will be a read-only connection. When it is associated with the
+ * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a
+ * mutable connection.
*
* @param namespace
* The namespace.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -29,8 +29,11 @@
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Future;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
@@ -39,12 +42,18 @@
import org.apache.log4j.Logger;
+import com.bigdata.BigdataStatics;
import com.bigdata.ha.HAStatusEnum;
import com.bigdata.journal.AbstractJournal;
+import com.bigdata.journal.IConcurrencyManager;
import com.bigdata.journal.IIndexManager;
+import com.bigdata.journal.Journal;
+import com.bigdata.journal.TimestampUtility;
import com.bigdata.quorum.AbstractQuorum;
import com.bigdata.rdf.sail.webapp.client.IMimeTypes;
import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy;
+import com.bigdata.rdf.store.AbstractTripleStore;
+import com.bigdata.service.IBigdataFederation;
/**
* Useful glue for implementing service actions, but does not directly implement
@@ -190,6 +199,149 @@
}
+ /**
+ * Submit a task and return a {@link Future} for that task. The task will be
+ * run on the appropriate executor service depending on the nature of the
+ * backing database and the view required by the task.
+ *
+ * @param task
+ * The task.
+ *
+ * @return The {@link Future} for that task.
+ *
+ * @throws DatasetNotFoundException
+ *
+ * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA
+ * doLocalAbort() should interrupt NSS requests and AbstractTasks </a>
+ * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" >
+ * Concurrent unisolated operations against multiple KBs </a>
+ */
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ protected <T> Future<T> submitApiTask(final RestApiTask<T> task)
+ throws DatasetNotFoundException {
+
+ final String namespace = task.getNamespace();
+
+ final long timestamp = task.getTimestamp();
+
+ final IIndexManager indexManager = getIndexManager();
+
+ if (!BigdataStatics.NSS_GROUP_COMMIT || indexManager instanceof IBigdataFederation
+ || TimestampUtility.isReadOnly(timestamp)
+ ) {
+
+ /*
+ * Run on a normal executor service.
+ *
+ * Note: For scale-out, the operation will be applied using
+ * client-side global views of the indices.
+ *
+ * Note: This can be used for operations on read-only views (even on
+ * a Journal). This is helpful since we can avoid some overhead
+ * associated the AbstractTask lock declarations.
+ */
+
+ return indexManager.getExecutorService().submit(
+ new RestApiTaskForIndexManager(indexManager, task));
+
+ } else {
+
+ /**
+ * Run on the ConcurrencyManager of the Journal.
+ *
+ * Mutation operations will be scheduled based on the pre-declared
+ * locks and will have exclusive access to the resources guarded by
+ * those locks when they run.
+ *
+ * FIXME GROUP COMMIT: The {@link AbstractTask} was written to
+ * require the exact set of resource lock declarations. However, for
+ * the REST API, we want to operate on all indices associated with a
+ * KB instance. This requires either:
+ * <p>
+ * (a) pre-resolving the names of those indices and passing them all
+ * into the AbstractTask; or
+ * <P>
+ * (b) allowing the caller to only declare the namespace and then to
+ * be granted access to all indices whose names are in that
+ * namespace.
+ *
+ * (b) is now possible with the fix to the Name2Addr prefix scan.
+ */
+
+ // Obtain the necessary locks for R/w access to KB indices.
+ final String[] locks = getLocksForKB((Journal) indexManager,
+ namespace);
+
+ final IConcurrencyManager cc = ((Journal) indexManager)
+ .getConcurrencyManager();
+
+ // Submit task to ConcurrencyManager. Will acquire locks and run.
+ return cc.submit(new RestApiTaskForJournal(cc, task.getTimestamp(),
+ locks, task));
+
+ }
+
+ }
+
+ /**
+ * Acquire the locks for the named indices associated with the specified KB.
+ *
+ * @param indexManager
+ * The {@link Journal}.
+ * @param namespace
+ * The namespace of the KB instance.
+ *
+ * @return The locks for the named indices associated with that KB instance.
+ *
+ * @throws DatasetNotFoundException
+ *
+ * FIXME GROUP COMMIT : [This should be replaced by the use of
+ * the namespace and hierarchical locking support in
+ * AbstractTask.] This could fail to discover a recently create
+ * KB between the time when the KB is created and when the group
+ * commit for that create becomes visible. This data race exists
+ * because we are using [lastCommitTime] rather than the
+ * UNISOLATED view of the GRS.
+ * <p>
+ * Note: This data race MIGHT be closed by the default locator
+ * cache. If it records the new KB properties when they are
+ * created, then they should be visible. If they are not
+ * visible, then we have a data race. (But if it records them
+ * before the group commit for the KB create, then the actual KB
+ * indices will not be durable until the that group commit...).
+ * <p>
+ * Note: The problem can obviously be resolved by using the
+ * UNISOLATED index to obtain the KB properties, but that would
+ * serialize ALL updates. What we need is a suitable caching
+ * mechanism that (a) ensures that newly create KB instances are
+ * visible; and (b) has high concurrency for read-only requests
+ * for the properties for those KB instances.
+ */
+ private static String[] getLocksForKB(final Journal indexManager,
+ final String namespace) throws DatasetNotFoundException {
+
+ final long timestamp = indexManager.getLastCommitTime();
+
+ final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager
+ .getResourceLocator().locate(namespace, timestamp);
+
+ if (tripleStore == null)
+ throw new DatasetNotFoundException("Not found: namespace="
+ + namespace + ", timestamp="
+ + TimestampUtility.toString(timestamp));
+
+ final Set<String> lockSet = new HashSet<String>();
+
+ lockSet.addAll(tripleStore.getSPORelation().getIndexNames());
+
+ lockSet.addAll(tripleStore.getLexiconRelation().getIndexNames());
+
+ final String[] locks = lockSet.toArray(new String[lockSet.size()]);
+
+ return locks;
+
+ }
+
// /**
// * Return the {@link Quorum} -or- <code>null</code> if the
// * {@link IIndexManager} is not participating in an HA {@link Quorum}.
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-11 15:52:14 UTC (rev 8471)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-11 16:35:53 UTC (rev 8472)
@@ -105,6 +105,7 @@
try {
Bigdata...
[truncated message content] |
|
From: <tho...@us...> - 2014-06-16 14:18:07
|
Revision: 8483
http://sourceforge.net/p/bigdata/code/8483
Author: thompsonbry
Date: 2014-06-16 14:17:57 +0000 (Mon, 16 Jun 2014)
Log Message:
-----------
It looks like the ArbitraryLengthPathOp could be more defensive to avoid an NPE:
ArbitraryLengthPathOp.java line 778
{{{
if (parentSolutionIn.isBound(gearing.outVar)) {
// do this later now
if (!bs.get(gearing.tVarOut).equals(parentSolutionIn.get(gearing.outVar))) {
}}}
Since we already know that there is a binding for gearing.outVar, this could be written as:
{{{
if (parentSolutionIn.isBound(gearing.outVar)) {
// do this now: note already known to be bound per test above.
final IConstant<?> poutVar = parentSolutionIn.get(gearing.outVar);
if (!poutVar.equals(bs.get(gearing.tVarOut))) {
}}}
This was noticed when observing an NPE when {{{bs.get(gearing.tVarOut)}}} evaluated to null. This is not the root cause of the problem. I am still looking for that.
I have enabled the property-path test suite for the BigdataEmbeddedFederationSparqlTest. This test suite is not automatically run in CI due to resource leaks (which is documented on another ticket). However, you can now trivially recreate the problem by uncommenting the following line in BigdataSparqlTest and running the BigdataEmbeddedFederationSparqlTest.
{{{
static final Collection<String> testURIs = Arrays.asList(new String[] {
// property paths
// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/syntax-sparql1/manifest#sparql11-collection-01",
}}}
When run locally, the test fails as follows. The failure is the same as the one documented above. It is attempting to bind a null value onto a variable. The root cause is likely to be a failure to flow the solutions back to the query controller such that the results from the sub-query appear as unbound on the query controller. It could also be a failure to run the sub-query from the query controller. I have not diagnosed this further.
{{{
org.openrdf.query.QueryEvaluationException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at com.bigdata.rdf.sail.Bigdata2Sesame2BindingSetIterator.hasNext(Bigdata2Sesame2BindingSetIterator.java:188)
at org.openrdf.query.impl.TupleQueryResultImpl.hasNext(TupleQueryResultImpl.java:90)
at info.aduna.iteration.Iterations.addAll(Iterations.java:71)
at org.openrdf.query.impl.MutableTupleQueryResult.<init>(MutableTupleQueryResult.java:86)
at org.openrdf.query.impl.MutableTupleQueryResult.<init>(MutableTupleQueryResult.java:92)
at org.openrdf.query.parser.sparql.SPARQLQueryTest.compareTupleQueryResults(SPARQLQueryTest.java:244)
at org.openrdf.query.parser.sparql.SPARQLASTQueryTest.runTest(SPARQLASTQueryTest.java:196)
at junit.framework.TestCase.runBare(TestCase.java:127)
at junit.framework.TestResult$1.protect(TestResult.java:106)
at junit.framework.TestResult.runProtected(TestResult.java:124)
at junit.framework.TestResult.run(TestResult.java:109)
at junit.framework.TestCase.run(TestCase.java:118)
at junit.framework.TestSuite.runTest(TestSuite.java:208)
at junit.framework.TestSuite.run(TestSuite.java:203)
at org.eclipse.jdt.internal.junit.runner.junit3.JUnit3TestReference.run(JUnit3TestReference.java:130)
at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38)
at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467)
at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683)
at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390)
at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197)
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.checkFuture(BlockingBuffer.java:1523)
at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator._hasNext(BlockingBuffer.java:1710)
at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.hasNext(BlockingBuffer.java:1563)
at com.bigdata.striterator.AbstractChunkedResolverator._hasNext(AbstractChunkedResolverator.java:365)
at com.bigdata.striterator.AbstractChunkedResolverator.hasNext(AbstractChunkedResolverator.java:341)
at com.bigdata.rdf.sail.Bigdata2Sesame2BindingSetIterator.hasNext(Bigdata2Sesame2BindingSetIterator.java:134)
... 19 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:188)
at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.checkFuture(BlockingBuffer.java:1454)
... 24 more
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at com.bigdata.rdf.sail.RunningQueryCloseableIterator.checkFuture(RunningQueryCloseableIterator.java:59)
at com.bigdata.rdf.sail.RunningQueryCloseableIterator.close(RunningQueryCloseableIterator.java:73)
at com.bigdata.striterator.ChunkedWrappedIterator.close(ChunkedWrappedIterator.java:180)
at com.bigdata.striterator.AbstractChunkedResolverator$ChunkConsumerTask.call(AbstractChunkedResolverator.java:297)
at com.bigdata.striterator.AbstractChunkedResolverator$ChunkConsumerTask.call(AbstractChunkedResolverator.java:1)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at com.bigdata.util.concurrent.Haltable.get(Haltable.java:273)
at com.bigdata.bop.engine.AbstractRunningQuery.get(AbstractRunningQuery.java:1477)
at com.bigdata.bop.engine.AbstractRunningQuery.get(AbstractRunningQuery.java:1)
at com.bigdata.rdf.sail.RunningQueryCloseableIterator.checkFuture(RunningQueryCloseableIterator.java:46)
... 8 more
Caused by: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1335)
at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTaskWrapper.run(ChunkedRunningQuery.java:894)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at com.bigdata.concurrent.FutureTaskMon.run(FutureTaskMon.java:63)
at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkFutureTask.run(ChunkedRunningQuery.java:789)
... 3 more
Caused by: java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:188)
at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1315)
... 8 more
Caused by: java.lang.IllegalArgumentException
at com.bigdata.bop.bindingSet.ListBindingSet.set(ListBindingSet.java:430)
at com.bigdata.bop.ContextBindingSet.set(ContextBindingSet.java:74)
at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.processChunk(ArbitraryLengthPathOp.java:816)
at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.call(ArbitraryLengthPathOp.java:270)
at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.call(ArbitraryLengthPathOp.java:1)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1314)
... 8 more
}}}
See #942 (Property path failures in scale-out).
Revision Links:
--------------
http://sourceforge.net/p/bigdata/code/2
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2014-06-16 11:23:44 UTC (rev 8482)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2014-06-16 14:17:57 UTC (rev 8483)
@@ -777,13 +777,14 @@
*/
if (parentSolutionIn.isBound(gearing.outVar)) {
- // do this later now
-
- if (!bs.get(gearing.tVarOut).equals(parentSolutionIn.get(gearing.outVar))) {
-
- if (log.isDebugEnabled()) {
- log.debug("transitive output does not match incoming binding for output var, dropping");
- }
+ // do this now: note already known to be bound per test above.
+ final IConstant<?> poutVar = parentSolutionIn.get(gearing.outVar);
+
+ if (!poutVar.equals(bs.get(gearing.tVarOut))) {
+
+ if (log.isDebugEnabled()) {
+ log.debug("transitive output does not match incoming binding for output var, dropping");
+ }
continue;
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2014-06-16 11:23:44 UTC (rev 8482)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2014-06-16 14:17:57 UTC (rev 8483)
@@ -65,7 +65,6 @@
* {@link EmbeddedFederation}.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class BigdataEmbeddedFederationSparqlTest extends BigdataSparqlTest {
@@ -110,7 +109,7 @@
if(hideDatasetTests)
suite1 = filterOutTests(suite1,"dataset");
- suite1 = filterOutTests(suite1, "property-paths");
+// suite1 = filterOutTests(suite1, "property-paths");
/**
* BSBM BI use case query 5
@@ -157,6 +156,7 @@
final Factory factory = new Factory() {
+ @Override
public SPARQLQueryTest createSPARQLQueryTest(String testURI,
String name, String queryFileURL, String resultFileURL,
Dataset dataSet, boolean laxCardinality) {
@@ -166,6 +166,7 @@
}
+ @Override
public SPARQLQueryTest createSPARQLQueryTest(String testURI,
String name, String queryFileURL, String resultFileURL,
Dataset dataSet, boolean laxCardinality, boolean checkOrder) {
@@ -173,6 +174,7 @@
return new BigdataEmbeddedFederationSparqlTest(testURI, name, queryFileURL,
resultFileURL, dataSet, laxCardinality, checkOrder) {
+ @Override
protected Properties getProperties() {
final Properties p = new Properties(super
@@ -295,7 +297,8 @@
}
- protected void tearDownBackend(IIndexManager backend) {
+ @Override
+ protected void tearDownBackend(final IIndexManager backend) {
backend.destroy();
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-16 11:23:44 UTC (rev 8482)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-16 14:17:57 UTC (rev 8483)
@@ -67,7 +67,6 @@
* a {@link Journal} without full read/write transaction support.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public class BigdataSparqlTest
//extends SPARQLQueryTest // Sesame TupleExpr based evaluation
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <tho...@us...> - 2014-06-17 13:28:15
|
Revision: 8494
http://sourceforge.net/p/bigdata/code/8494
Author: thompsonbry
Date: 2014-06-17 13:28:07 +0000 (Tue, 17 Jun 2014)
Log Message:
-----------
Fix for #965 (LBS does not work with HA1)
The root cause was the init() method on HALoadBalancerServlet. init() disabled the LBS unless the NSS was in an HA GT 1 deployment model. I have modified the code to always enable the LBS servlet. This allows it to correctly rewrite itself out of the request when in the HA1 or non-HA modes.
There are some code paths that need to be updated because either (a) they do not make an explicit choice about whether or not to use the LBS; (b) they do not parameterize the ContextPath of the web application; or (c) they are in the wrong package (client code should be in a separate package from server code).
Changes are to:
- HALoadBalancerServlet: init() always succeeds. This fixes the core problem for this ticket.
- TestHA1JournalServer: the test is linked to this ticket.
- RemoteServiceCallImpl: Modified to use declared service configuration information to decide whether or not to use the LBS pattern for the remote service. The default is false, which works for all cases. The default may be overridden to be true if the end point is known to expose the bigdata LBS pattern.
- IServiceOptions: added the isBigdataLBS() method.
- ServiceOptionsBase: added default=false for isBigdataLBS() and setBigdataLBS() methods.
- BigdataSailFactory: Added "FIXME" - This does not support the HA load balancer pattern and does not parameterize the value of the ContextPath. Also, should this class be part of the "client" package?
- BigdataSailRemoteRepository: added constructor variant that accepts the boolean useLBS argument. The version of the constructor without that argument now defaults to useLBS:=true. This changes the default behavior of the client(!).
- RemoteRepository: deprecated the constructor version that does not accept the useLBS parameter. This version of the constructor still defaults to useLBS:=false. It tends to be used from some less common code paths.
- RemoteRepositoryManager: modified to specify useLBS:=true by default.
- TestNSSHealthCheck: code cleanup.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -123,17 +123,24 @@
}
+ /**
+ * A simple transaction test against an HA1 mode server using the LBS.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in
+ * LBS mode with HA1 setup </a>
+ */
public void testSimpleTransactionLBS() throws Exception {
-
- doStartA();
-
- serverA.awaitHAReady(2, TimeUnit.SECONDS);
-
- awaitCommitCounter(1, new HAGlue[] { serverA });
-
- simpleTransactionLBS();
-
- awaitCommitCounter(2, new HAGlue[] { serverA });
+
+ doStartA();
+
+ serverA.awaitHAReady(2, TimeUnit.SECONDS);
+
+ awaitCommitCounter(1, new HAGlue[] { serverA });
+
+ simpleTransactionLBS();
+
+ awaitCommitCounter(2, new HAGlue[] { serverA });
+
}
public void testMultiTransaction() throws Exception {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -32,7 +32,6 @@
* Additional options for native services.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public interface INativeServiceOptions extends IServiceOptions {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -37,7 +37,6 @@
* Options and metadata for service end points.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public interface IServiceOptions {
@@ -78,5 +77,13 @@
* query planner has locked in the join evaluation order.
*/
boolean isRunFirst();
+
+ /**
+ * Return <code>true</code> if the remote service is known to be a bigdata
+ * service that exposes the HA load balancer servlet (default
+ * <code>false</code>). The default may be overridden iff the end point is
+ * known to expose the bigdata LBS pattern.
+ */
+ boolean isBigdataLBS();
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -29,7 +29,6 @@
import java.util.UUID;
-import org.apache.http.HttpResponse;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.client.DefaultRedirectStrategy;
import org.openrdf.query.BindingSet;
@@ -49,8 +48,6 @@
* adjusting the {@link RemoteServiceOptions} for the service URI.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: RemoteServiceCallImpl.java 6060 2012-03-02 16:07:38Z
- * thompsonbry $
*/
public class RemoteServiceCallImpl implements RemoteServiceCall {
@@ -149,7 +146,9 @@
// Setup a standard strategy for following redirects.
httpClient.setRedirectStrategy(new DefaultRedirectStrategy());
- final RemoteRepository repo = new RemoteRepository(uriStr,//
+ final RemoteRepository repo = new RemoteRepository(//
+ uriStr,//
+ params.getServiceOptions().isBigdataLBS(),// useLBS
httpClient,//
params.getTripleStore().getExecutorService()
);
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -37,7 +37,6 @@
* {@link ServiceCall} instance.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
public interface ServiceCallCreateParams {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -29,19 +29,27 @@
/**
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
*/
abstract public class ServiceOptionsBase implements IServiceOptions {
private boolean isRunFirst = false;
-
+ private boolean useLBS = false;
+
@Override
public boolean isRunFirst() {
return isRunFirst;
}
-
- public void setRunFirst(final boolean newValue) {
+
+ public void setRunFirst(final boolean newValue) {
this.isRunFirst = newValue;
}
+ @Override
+ public boolean isBigdataLBS() {
+ return useLBS;
+ }
+
+ public void setBigdataLBS(final boolean newValue) {
+ this.useLBS = newValue;
+ }
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -96,6 +96,10 @@
/**
* Connect to a remote bigdata instance.
+ *
+ * FIXME This does not support the HA load balancer pattern and does not
+ * parameterize the value of the ContextPath. Also, should this class be
+ * part of the "client" package?
*/
public static BigdataSailRemoteRepository connect(final String serviceEndpoint) {
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -1,5 +1,3 @@
-package com.bigdata.rdf.sail.remote;
-
/**
Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved.
@@ -24,6 +22,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+package com.bigdata.rdf.sail.remote;
+
import java.io.File;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -66,9 +66,30 @@
/**
* Ctor that simply specifies an endpoint and lets this class manage the
* ClientConnectionManager for the HTTP client and the manage the
- * ExecutorService. More convenient.
+ * ExecutorService. More convenient, but does not account for whether or not
+ * to use the LBS.
+ *
+ * @param sparqlEndpointURL
+ * The SPARQL end point URL
*/
- public BigdataSailRemoteRepository(final String sparqlEndpointURL) {
+ public BigdataSailRemoteRepository(final String sparqlEndpointURL) {
+
+ this(sparqlEndpointURL, true/* useLBS */);
+
+ }
+
+ /**
+ * Ctor that simply specifies an endpoint and lets this class manage the
+ * ClientConnectionManager for the HTTP client and manage the
+ * ExecutorService.
+ *
+ * @param sparqlEndpointURL
+ * The SPARQL end point URL
+ * @param useLBS
+ * <code>true</code> iff the LBS pattern should be used.
+ */
+ public BigdataSailRemoteRepository(final String sparqlEndpointURL,
+ final boolean useLBS) {
this.executor = Executors.newCachedThreadPool();
@@ -84,9 +105,9 @@
*/
httpClient.setRedirectStrategy(new DefaultRedirectStrategy());
- this.nss = new RemoteRepository(
- sparqlEndpointURL, httpClient, executor);
-
+ this.nss = new RemoteRepository(sparqlEndpointURL, useLBS, httpClient,
+ executor);
+
}
/**
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -386,8 +386,15 @@
* {@inheritDoc}
* <p>
* Extended to setup the as-configured {@link IHALoadBalancerPolicy}.
+ * <p>
+ * Note: If the deployment does not support HA replication (e.g., either
+ * not HA or HA with replicationFactor:=1), then we still want to be able to
+ * forward to the local service.
*
* @throws ServletException
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in
+ * LBS mode with HA1 setup </a>
*/
@Override
public void init() throws ServletException {
@@ -405,42 +412,35 @@
final IIndexManager indexManager = BigdataServlet
.getIndexManager(servletContext);
- if (!(indexManager instanceof HAJournal)){
- // This is not an error, but the LBS is only for HA.
- log.info("LBS Disabled - not HA");
- return;
- }
- if (indexManager instanceof AbstractJournal
+ if (indexManager instanceof HAJournal
&& ((AbstractJournal) indexManager).getQuorum() != null
&& ((AbstractJournal) indexManager).getQuorum()
- .replicationFactor() == 1) {
- // This is not an error, but the LBS is only for HA.
- log.info("LBS Disabled - not HA");
- return;
- }
+ .replicationFactor() > 1) {
- {
- // Get the as-configured policy.
- final IHALoadBalancerPolicy policy = newInstance(//
- servletConfig, //
- HALoadBalancerServlet.class,// owningClass
- IHALoadBalancerPolicy.class,//
- InitParams.POLICY, InitParams.DEFAULT_POLICY);
+ {
+ // Get the as-configured policy.
+ final IHALoadBalancerPolicy policy = newInstance(//
+ servletConfig, //
+ HALoadBalancerServlet.class,// owningClass
+ IHALoadBalancerPolicy.class,//
+ InitParams.POLICY, InitParams.DEFAULT_POLICY);
- // Set the as-configured policy.
- setLBSPolicy(policy);
+ // Set the as-configured policy.
+ setLBSPolicy(policy);
- }
- {
+ }
+ {
- final IHARequestURIRewriter rewriter = newInstance(//
- servletConfig,//
- HALoadBalancerServlet.class, // owningClass
- IHARequestURIRewriter.class,//
- InitParams.REWRITER, InitParams.DEFAULT_REWRITER);
+ final IHARequestURIRewriter rewriter = newInstance(//
+ servletConfig,//
+ HALoadBalancerServlet.class, // owningClass
+ IHARequestURIRewriter.class,//
+ InitParams.REWRITER, InitParams.DEFAULT_REWRITER);
- setRewriter(rewriter);
+ setRewriter(rewriter);
+ }
+
}
servletContext.setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX,
@@ -850,6 +850,10 @@
* the request to the servlet at the resulting requestURI. This forwarding
* effectively disables the LBS but still allows requests which target the
* LBS to succeed against the webapp on the same host.
+ * <p>
+ * Note: If the deployment does not support HA replication (e.g., either
+ * not HA or HA with replicationFactor:=1), then we still want to be able to
+ * forward to the local service.
*
* @param request
* The request.
@@ -858,6 +862,9 @@
*
* @throws IOException
* @throws ServletException
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in
+ * LBS mode with HA1 setup </a>
*/
public void forwardToLocalService(//
final boolean isLeaderRequest,//
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -307,6 +307,20 @@
}
+ /**
+ *
+ * @param sparqlEndpointURL
+ * @param httpClient
+ * @param executor
+ *
+ * @deprecated This version does not force the caller to decide whether or
+ * not the LBS pattern will be used. In general, it should be
+ * used if the end point is bigdata. This class is generally,
+ * but not always, used with a bigdata end point. The main
+ * exception is SPARQL Basic Federated Query. For that use case
+ * we can not assume that the end point is bigdata and thus we
+ * can not use the LBS prefix.
+ */
public RemoteRepository(final String sparqlEndpointURL,
final HttpClient httpClient, final Executor executor) {
@@ -865,7 +879,7 @@
}
if (add.uri != null) {
- // set the resource to load.
+ // set the resource to load : FIXME REST API allows multiple URIs, but RemoteRepository does not.
opts.addRequestParam("uri", add.uri);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -89,7 +89,7 @@
public RemoteRepositoryManager(final String serviceURL,
final HttpClient httpClient, final Executor executor) {
- this(serviceURL, false/* useLBS */, httpClient, executor);
+ this(serviceURL, true/* useLBS */, httpClient, executor);
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-06-17 12:17:24 UTC (rev 8493)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-06-17 13:28:07 UTC (rev 8494)
@@ -375,9 +375,9 @@
*
* @return The connection.
*
- * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619">
- * RemoteRepository class should use application/x-www-form-urlencoded
- * for large POST requests </a>
+ * @see <a href="http://trac.bigdata.com/ticket/619"> RemoteRepository class
+ * should use application/x-www-form-urlencoded for large POST requests
+ * </a>
*/
private HttpResponse doConnect(final ConnectOptions opts) throws Exception {
@@ -452,57 +452,17 @@
}
-// // conn = doConnect(urlString.toString(), opts.method);
-// final URL url = new URL(urlString.toString());
-// conn = (HttpURLConnection) url.openConnection();
-// conn.setRequestMethod(opts.method);
-// conn.setDoOutput(true);
-// conn.setDoInput(true);
-// conn.setUseCaches(false);
-// conn.setReadTimeout(opts.timeout);
-// conn.setRequestProperty("Accept", opts.acceptHeader);
-// if (log.isDebugEnabled())
-// log.debug("Accept: " + opts.acceptHeader);
-
if (opts.entity != null) {
-// if (opts.data == null)
-// throw new AssertionError();
-
-// final String contentLength = Integer.toString(opts.data.length);
-
-// conn.setRequestProperty("Content-Type", opts.contentType);
-// conn.setRequestProperty("Content-Length", contentLength);
-
-// if (log.isDebugEnabled()) {
-// log.debug("Content-Type: " + opts.contentType);
-// log.debug("Content-Length: " + contentLength);
-// }
-
-// final ByteArrayEntity entity = new ByteArrayEntity(opts.data);
-// entity.setContentType(opts.contentType);
-
- ((HttpEntityEnclosingRequestBase) request).setEntity(opts.entity);
+ ((HttpEntityEnclosingRequestBase) request)
+ .setEntity(opts.entity);
-// final OutputStream os = conn.getOutputStream();
-// try {
-// os.write(opts.data);
-// os.flush();
-// } finally {
-// os.close();
-// }
-
}
final HttpResponse response = m_httpClient.execute(request);
return response;
-// // connect.
-// conn.connect();
-//
-// return conn;
-
} catch (Throwable t) {
/*
* If something goes wrong, then close the http connection.
@@ -513,10 +473,6 @@
if (request != null)
request.abort();
-// // clean up the connection resources
-// if (conn != null)
-// conn.disconnect();
-
} catch (Throwable t2) {
// ignored.
}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|