From: <jer...@us...> - 2014-05-08 01:49:37
Revision: 8226 http://sourceforge.net/p/bigdata/code/8226 Author: jeremy_carroll Date: 2014-05-08 01:49:33 +0000 (Thu, 08 May 2014) Log Message: ----------- Tests for the AnalyzerFactory's. The tests are for their shared behavior. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java Added: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 01:49:33 UTC (rev 8226) @@ -0,0 +1,2 @@ +eclipse.preferences.version=1 +encoding//bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java=UTF-8 Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-08 01:49:13 UTC (rev 8225) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/search/ConfigurableAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -547,7 +547,7 @@ // RussianAnalyzer is missing any way to access stop words. if (RussianAnalyzer.class.equals(cls) && useDefaultStopWords()) { - return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET), new RussianAnalyzer(Version.LUCENE_CURRENT)); + return new AnalyzerPair(languageRange, new RussianAnalyzer(Version.LUCENE_CURRENT), new RussianAnalyzer(Version.LUCENE_CURRENT, Collections.EMPTY_SET)); } return new VersionSetAnalyzerPair(this, cls); } @@ -612,7 +612,8 @@ */ private static final int MAX_LANG_CACHE_SIZE = 500; - private final String defaultLanguage; + private String defaultLanguage; + private final FullTextIndex<?> fullTextIndex; public ConfigurableAnalyzerFactory(final FullTextIndex<?> fullTextIndex) { @@ -621,9 +622,9 @@ if (fullTextIndex == null) throw new IllegalArgumentException(); - defaultLanguage = getDefaultLanguage(fullTextIndex); + this.fullTextIndex = fullTextIndex; - final Properties properties = initProperties(fullTextIndex); + final Properties properties = initProperties(); final Map<String, ConfigOptionsToAnalyzer> analyzers = new HashMap<String, ConfigOptionsToAnalyzer>(); @@ -686,6 +687,12 @@ } } + private String getDefaultLanguage() { + if (defaultLanguage == null) { + defaultLanguage = getDefaultLanguage(fullTextIndex); + } + return defaultLanguage; + } private static boolean hasConstructor(Class<? extends Analyzer> cls, Class<?> ... 
parameterTypes) { return getConstructor(cls, parameterTypes) != null; @@ -731,7 +738,7 @@ } - protected Properties initProperties(final FullTextIndex<?> fullTextIndex) { + protected Properties initProperties() { final Properties parentProperties = fullTextIndex.getProperties(); Properties myProps; if (Boolean.getBoolean(parentProperties.getProperty(Options.INCLUDE_DEFAULTS, Options.DEFAULT_INCLUDE_DEFAULTS))) { @@ -773,7 +780,8 @@ public Analyzer getAnalyzer(String languageCode, boolean filterStopwords) { if (languageCode == null || languageCode.equals("")) { - languageCode = defaultLanguage; + + languageCode = getDefaultLanguage(); } AnalyzerPair pair = langTag2AnalyzerPair.get(languageCode); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -0,0 +1,174 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 7, 2014 + */ +package com.bigdata.search; + +import java.io.IOException; +import java.io.StringReader; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermAttribute; + +public abstract class AbstractAnalyzerFactoryTest extends AbstractSearchTest { + + public AbstractAnalyzerFactoryTest() { + } + + public AbstractAnalyzerFactoryTest(String arg0) { + super(arg0); + } + + public void setUp() throws Exception { + super.setUp(); + init(getExtraProperties()); + } + abstract String[] getExtraProperties(); + + private Analyzer getAnalyzer(String lang, boolean filterStopWords) { + return getNdx().getAnalyzer(lang, filterStopWords); + } + + private void comparisonTest(String lang, + boolean stopWordsSignificant, + String text, + String spaceSeparated) throws IOException { + compareTokenStream(getAnalyzer(lang, stopWordsSignificant), text, + spaceSeparated.split(" ")); + } + private void compareTokenStream(Analyzer a, String text, String expected[]) throws IOException { + TokenStream s = a.tokenStream(null, new StringReader(text)); + int ix = 0; + while (s.incrementToken()) { + final TermAttribute term = s.getAttribute(TermAttribute.class); + final String word = term.term(); + assertTrue(ix < expected.length); + assertEquals(word, expected[ix++]); + } + assertEquals(ix, expected.length); + } + + + public void testEnglishFilterStopWords() throws IOException { + for (String lang: new String[]{ "eng", null, "" }) { + comparisonTest(lang, + true, + "The test to end all tests! 
Forever.", + "test end all tests forever" + ); + } + } + public void testEnglishNoFilter() throws IOException { + for (String lang: new String[]{ "eng", null, "" }) { + comparisonTest(lang, + false, + "The test to end all tests! Forever.", + "the test to end all tests forever" + ); + } + } + + // Note we careful use a three letter language code for german. + // 'de' is more standard, but the DefaultAnalyzerFactory does not + // implement 'de' correctly. + public void testGermanFilterStopWords() throws IOException { + comparisonTest("ger", + true, + "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " + + "erneuten Auseinandersetzung gekommen:", + "hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm" + ); + + } + + // Note we careful use a three letter language code for Russian. + // 'ru' is more standard, but the DefaultAnalyzerFactory does not + // implement 'ru' correctly. + public void testRussianFilterStopWords() throws IOException { + comparisonTest("rus", + true, + // I hope this is not offensive text. + "Они ответственны полностью и за ту, и за другую трагедию. " + + "Мы уже получили данные от сочувствующих нам офицеров СБУ.", + "ответствен полност ту друг трагед получ дан сочувств нам офицер сбу" + ); + + } + public void testGermanNoStopWords() throws IOException { + comparisonTest("ger", + false, + "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " + + "erneuten Auseinandersetzung gekommen:", + "hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm" + ); + + } + public void testRussianNoStopWords() throws IOException { + comparisonTest("rus", + false, + // I hope this is not offensive text. + "Они ответственны полностью и за ту, и за другую трагедию. " + + "Мы уже получили данные от сочувствующих нам офицеров СБУ.", + "он ответствен полност и за ту и за друг трагед мы уж получ дан от сочувств нам офицер сбу" + ); + + } + public void testJapanese() throws IOException { + for (boolean filterStopWords: new Boolean[]{true, false}) { + comparisonTest("jpn", + filterStopWords, + // I hope this is not offensive text. + "高林純示 生態学研究センター教授らの研究グループと松井健二 山口大学医学系研究科(農学系)教授らの研究グループは、", + "高林 林純 純示 生態 態学 学研 研究 究セ セン ンタ ター ー教 教授 授ら らの の研 研究 究グ グル ルー " + + "ープ プと と松 松井 井健 健二 山口 口大 大学 学医 医学 学系 系研 " + + "研究 究科 農学 学系 教授 授ら らの の研 研究 究グ グル ルー ープ プは"); + } + } + public void testConfiguredLanguages() { + checkConfig("BrazilianAnalyzer", "por", "pt"); + checkConfig("ChineseAnalyzer", "zho", "chi", "zh"); + checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko"); + checkConfig("CzechAnalyzer", "ces", "cze", "cs"); + checkConfig("DutchAnalyzer", "dut", "nld", "nl"); + checkConfig("GermanAnalyzer", "deu", "ger", "de"); + checkConfig("GreekAnalyzer", "gre", "ell", "el"); + checkConfig("RussianAnalyzer", "rus", "ru"); + checkConfig("ThaiAnalyzer", "th", "tha"); + checkConfig("StandardAnalyzer", "en", "eng", "", null); + } + + private void checkConfig(String classname, String ...langs) { + for (String lang:langs) { + // The DefaultAnalyzerFactory only works for language tags of length exactly three. 
+// if (lang != null && lang.length()==3) + { + assertEquals(classname, getAnalyzer(lang,true).getClass().getSimpleName()); + assertEquals(classname, getAnalyzer(lang+"-x-foobar",true).getClass().getSimpleName()); + } + } + + } +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java 2014-05-08 01:49:13 UTC (rev 8225) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractSearchTest.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -1,3 +1,29 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 7, 2014 + */ package com.bigdata.search; import java.util.Properties; @@ -2,3 +28,2 @@ -import com.bigdata.btree.IndexMetadata; import com.bigdata.journal.IIndexManager; @@ -11,7 +36,6 @@ private String namespace; private IIndexManager indexManager; private FullTextIndex<Long> ndx; - private IndexMetadata indexMetadata; private Properties properties; public AbstractSearchTest() { @@ -22,19 +46,29 @@ } void init(String ...propertyValuePairs) { - namespace = getName(); - properties = getProperties(); + namespace = getClass().getName()+"#"+getName(); + indexManager = getStore(); + properties = (Properties) getProperties().clone(); + ndx = createFullTextIndex(namespace, properties, propertyValuePairs); + } + + private FullTextIndex<Long> createFullTextIndex(String namespace, Properties properties, String ...propertyValuePairs) { for (int i=0; i<propertyValuePairs.length; ) { properties.setProperty(propertyValuePairs[i++], propertyValuePairs[i++]); } - indexManager = getStore(); - ndx = new FullTextIndex<Long>(indexManager, namespace, ITx.UNISOLATED, properties); + FullTextIndex<Long> ndx = new FullTextIndex<Long>(indexManager, namespace, ITx.UNISOLATED, properties); ndx.create(); - indexMetadata = ndx.getIndex().getIndexMetadata(); - } + return ndx; + } + + FullTextIndex<Long> createFullTextIndex(String namespace, String ...propertyValuePairs) { + return createFullTextIndex(namespace, getProperties(), propertyValuePairs); + } public void tearDown() throws Exception { - indexManager.destroy(); + if (indexManager != null) { + indexManager.destroy(); + } super.tearDown(); } @@ -54,15 +88,8 @@ return ndx; } - IndexMetadata getIndexMetadata() { - return indexMetadata; - } - - Properties getSearchProperties() { return properties; } - - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java 2014-05-08 01:49:13 UTC (rev 8225) +++ 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestAll.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -104,6 +104,14 @@ // test verifies search index is restart safe. suite.addTestSuite(TestSearchRestartSafe.class); + + // Check behavior of DefaultAnalyzerFactory, see also trac 915 + suite.addTestSuite(TestDefaultAnalyzerFactory.class); + + // Check default behavior of ConfigurableAnalyzerFactory + // which is intended to be the same as the intended + // behavior of DefaultAnalyzerFactory + suite.addTestSuite(TestConfigurableAsDefaultAnalyzerFactory.class); return suite; } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestConfigurableAsDefaultAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -0,0 +1,43 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 7, 2014 + */ +package com.bigdata.search; + +public class TestConfigurableAsDefaultAnalyzerFactory extends AbstractAnalyzerFactoryTest { + + public TestConfigurableAsDefaultAnalyzerFactory() { + } + + public TestConfigurableAsDefaultAnalyzerFactory(String arg0) { + super(arg0); + } + + @Override + String[] getExtraProperties() { + return new String[]{FullTextIndex.Options.ANALYZER_FACTORY_CLASS, ConfigurableAnalyzerFactory.class.getName()}; + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestDefaultAnalyzerFactory.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -0,0 +1,43 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 7, 2014 + */ +package com.bigdata.search; + +public class TestDefaultAnalyzerFactory extends AbstractAnalyzerFactoryTest { + + public TestDefaultAnalyzerFactory() { + } + + public TestDefaultAnalyzerFactory(String arg0) { + super(arg0); + } + + @Override + String[] getExtraProperties() { + return new String[0]; + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java 2014-05-08 01:49:13 UTC (rev 8225) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/TestKeyBuilder.java 2014-05-08 01:49:33 UTC (rev 8226) @@ -93,6 +93,10 @@ return keyBuilder; } + + IndexMetadata getIndexMetadata() { + return getNdx().getIndex().getIndexMetadata(); + } private IKeyBuilder keyBuilder; /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
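The compareTokenStream helper above is the heart of these tests: it walks a Lucene TokenStream and checks each emitted term against an expected word list. For reference, a minimal standalone sketch of the same pattern (class and method names are hypothetical), using the Lucene 3.x-era TermAttribute API that the tests themselves rely on:

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class TokenStreamProbe {

    /** Collect the terms an Analyzer emits for the given text. */
    static List<String> tokens(final Analyzer a, final String text)
            throws IOException {
        // The field name is ignored by the analyzers under test, hence null.
        final TokenStream s = a.tokenStream(null, new StringReader(text));
        final List<String> words = new ArrayList<String>();
        while (s.incrementToken()) {
            // TermAttribute.term() returns the text of the current token.
            words.add(s.getAttribute(TermAttribute.class).term());
        }
        return words;
    }
}

With an English analyzer and stop-word filtering enabled, tokens(a, "The test to end all tests! Forever.") should yield [test, end, all, tests, forever], matching testEnglishFilterStopWords above.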
From: <mrp...@us...> - 2014-05-08 01:52:14
Revision: 8227 http://sourceforge.net/p/bigdata/code/8227 Author: mrpersonick Date: 2014-05-08 01:52:09 +0000 (Thu, 08 May 2014) Log Message: ----------- fixed the gremlin installer, added a loadGraphML method to all BigdataGraph impls Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-08 01:49:33 UTC (rev 8226) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-08 01:52:09 UTC (rev 8227) @@ -54,6 +54,7 @@ import com.tinkerpop.blueprints.GraphQuery; import com.tinkerpop.blueprints.Vertex; import com.tinkerpop.blueprints.util.DefaultGraphQuery; +import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; /** * A base class for a Blueprints wrapper around a bigdata back-end. @@ -93,6 +94,13 @@ return getClass().getSimpleName().toLowerCase(); } + /** + * Post a GraphML file to the remote server. (Bulk-upload operation.) + */ + public void loadGraphML(final String file) throws Exception { + GraphMLReader.inputGraph(this, file); + } + protected abstract RepositoryConnection cxn() throws Exception; // public BigdataSailRepositoryConnection getConnection() { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-08 01:49:33 UTC (rev 8226) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-08 01:52:09 UTC (rev 8227) @@ -80,8 +80,9 @@ /** * Post a GraphML file to the remote server. (Bulk-upload operation.) 
*/ - public long postGraphML(final String file) throws Exception { - return this.repo.getRemoteRepository().postGraphML(file); + @Override + public void loadGraphML(final String file) throws Exception { + this.repo.getRemoteRepository().postGraphML(file); } /** Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 01:49:33 UTC (rev 8226) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 01:52:09 UTC (rev 8227) @@ -2530,7 +2530,7 @@ <delete file="${build.dir}/gremlin-groovy-2.5.0.zip"/> </target> - <target name="install-gremlin" depends="prepare,compile,jar"> + <target name="install-gremlin" depends="prepare,compile,jar,bundle"> <delete> <fileset dir="${build.dir}/gremlin-groovy-2.5.0/lib"> <include name="blueprints-graph-sail-2.5.0.jar"/> @@ -2577,12 +2577,17 @@ </fileset> </delete> <copy toDir="${build.dir}/gremlin-groovy-2.5.0/lib" flatten="true"> + <!-- <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> <include name="openrdf-sesame-${sesame.version}-onejar.jar" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-sails/lib/httpcomponents"> <include name="httpmime-${apache.httpmime.version}.jar" /> </fileset> + --> + <fileset dir="${build.dir}/lib"> + <include name="*.jar" /> + </fileset> <fileset dir="${build.dir}"> <include name="${version}.jar" /> </fileset> @@ -2594,9 +2599,10 @@ 1. Start the gremlin console: > ./${build.dir}/gremlin-groovy-2.5.0/bin/gremlin.sh 2. Connect to the bigdata server: - > g = com.bigdata.blueprints.BigdataGraphFactory.connect("http://localhost:9999/bigdata") + gremlin> import com.bigdata.blueprints.* + gremlin> g = BigdataGraphFactory.connect("http://localhost:9999") 3. Don't forget to shut down the connection when you're done: - > g.shutdown() + gremlin> g.shutdown() </echo> </target> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
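Taken together, the change gives every BigdataGraph implementation a uniform GraphML entry point: embedded graphs parse the file locally via GraphMLReader.inputGraph, while BigdataGraphClient overrides loadGraphML to post the file to the remote server via RemoteRepository.postGraphML. A usage sketch under stated assumptions (the endpoint URL and file path are placeholders, and BigdataGraphFactory.connect is assumed to return a BigdataGraph, as the console instructions above suggest):

import com.bigdata.blueprints.BigdataGraph;
import com.bigdata.blueprints.BigdataGraphFactory;

public class LoadGraphMLExample {
    public static void main(final String[] args) throws Exception {
        // Placeholder endpoint, matching the gremlin instructions above.
        final BigdataGraph g = BigdataGraphFactory.connect("http://localhost:9999");
        try {
            // Local graphs parse the file; the client posts it to the server.
            g.loadGraphML("/tmp/data.graphml"); // placeholder path
        } finally {
            g.shutdown();
        }
    }
}

Note that the override also changes the signature: postGraphML returned a mutation count, while loadGraphML is void across all implementations.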
From: <jer...@us...> - 2014-05-08 03:13:01
Revision: 8230 http://sourceforge.net/p/bigdata/code/8230 Author: jeremy_carroll Date: 2014-05-08 03:12:55 +0000 (Thu, 08 May 2014) Log Message: ----------- externalized Japanese, Russian and German strings to address encoding issues Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs Deleted: branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 02:57:15 UTC (rev 8229) +++ branches/BIGDATA_RELEASE_1_3_0/.settings/org.eclipse.core.resources.prefs 2014-05-08 03:12:55 UTC (rev 8230) @@ -1,2 +0,0 @@ -eclipse.preferences.version=1 -encoding//bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java=UTF-8 Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 02:57:15 UTC (rev 8229) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/AbstractAnalyzerFactoryTest.java 2014-05-08 03:12:55 UTC (rev 8230) @@ -57,7 +57,7 @@ String text, String spaceSeparated) throws IOException { compareTokenStream(getAnalyzer(lang, stopWordsSignificant), text, - spaceSeparated.split(" ")); + spaceSeparated.split(" ")); //$NON-NLS-1$ } private void compareTokenStream(Analyzer a, String text, String expected[]) throws IOException { TokenStream s = a.tokenStream(null, new StringReader(text)); @@ -73,20 +73,20 @@ public void testEnglishFilterStopWords() throws IOException { - for (String lang: new String[]{ "eng", null, "" }) { + for (String lang: new String[]{ "eng", null, "" }) { //$NON-NLS-1$ //$NON-NLS-2$ comparisonTest(lang, true, - "The test to end all tests! Forever.", - "test end all tests forever" + "The test to end all tests! Forever.", //$NON-NLS-1$ + "test end all tests forever" //$NON-NLS-1$ ); } } public void testEnglishNoFilter() throws IOException { - for (String lang: new String[]{ "eng", null, "" }) { + for (String lang: new String[]{ "eng", null, "" }) { //$NON-NLS-1$ //$NON-NLS-2$ comparisonTest(lang, false, - "The test to end all tests! Forever.", - "the test to end all tests forever" + "The test to end all tests! Forever.", //$NON-NLS-1$ + "the test to end all tests forever" //$NON-NLS-1$ ); } } @@ -95,11 +95,11 @@ // 'de' is more standard, but the DefaultAnalyzerFactory does not // implement 'de' correctly. 
public void testGermanFilterStopWords() throws IOException { - comparisonTest("ger", + comparisonTest("ger", //$NON-NLS-1$ true, - "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " + - "erneuten Auseinandersetzung gekommen:", - "hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm" + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.10") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.11"), //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.12") //$NON-NLS-1$ ); } @@ -108,56 +108,54 @@ // 'ru' is more standard, but the DefaultAnalyzerFactory does not // implement 'ru' correctly. public void testRussianFilterStopWords() throws IOException { - comparisonTest("rus", + comparisonTest("rus", //$NON-NLS-1$ true, // I hope this is not offensive text. - "Они ответственны полностью и за ту, и за другую трагедию. " + - "Мы уже получили данные от сочувствующих нам офицеров СБУ.", - "ответствен полност ту друг трагед получ дан сочувств нам офицер сбу" + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.14") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.15"), //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.16") //$NON-NLS-1$ ); } public void testGermanNoStopWords() throws IOException { - comparisonTest("ger", + comparisonTest("ger", //$NON-NLS-1$ false, - "Hanoi - Im Streit um die Vorherrschaft im Südchinesischen Meer ist es zu einer " + - "erneuten Auseinandersetzung gekommen:", - "hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm" + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.18") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.19"), //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.20") //$NON-NLS-1$ ); } public void testRussianNoStopWords() throws IOException { - comparisonTest("rus", + comparisonTest("rus", //$NON-NLS-1$ false, - // I hope this is not offensive text. - "Они ответственны полностью и за ту, и за другую трагедию. " + - "Мы уже получили данные от сочувствующих нам офицеров СБУ.", - "он ответствен полност и за ту и за друг трагед мы уж получ дан от сочувств нам офицер сбу" + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.22") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.23"), //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.24") //$NON-NLS-1$ ); } public void testJapanese() throws IOException { for (boolean filterStopWords: new Boolean[]{true, false}) { - comparisonTest("jpn", + comparisonTest("jpn", //$NON-NLS-1$ filterStopWords, - // I hope this is not offensive text. 
- "高林純示 生態学研究センター教授らの研究グループと松井健二 山口大学医学系研究科(農学系)教授らの研究グループは、", - "高林 林純 純示 生態 態学 学研 研究 究セ セン ンタ ター ー教 教授 授ら らの の研 研究 究グ グル ルー " + - "ープ プと と松 松井 井健 健二 山口 口大 大学 学医 医学 学系 系研 " + - "研究 究科 農学 学系 教授 授ら らの の研 研究 究グ グル ルー ープ プは"); + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.26"), //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.27") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.28") + //$NON-NLS-1$ + NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.29")); //$NON-NLS-1$ } } public void testConfiguredLanguages() { - checkConfig("BrazilianAnalyzer", "por", "pt"); - checkConfig("ChineseAnalyzer", "zho", "chi", "zh"); - checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko"); - checkConfig("CzechAnalyzer", "ces", "cze", "cs"); - checkConfig("DutchAnalyzer", "dut", "nld", "nl"); - checkConfig("GermanAnalyzer", "deu", "ger", "de"); - checkConfig("GreekAnalyzer", "gre", "ell", "el"); - checkConfig("RussianAnalyzer", "rus", "ru"); - checkConfig("ThaiAnalyzer", "th", "tha"); - checkConfig("StandardAnalyzer", "en", "eng", "", null); + checkConfig("BrazilianAnalyzer", "por", "pt"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + checkConfig("ChineseAnalyzer", "zho", "chi", "zh"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ + checkConfig("CJKAnalyzer", "jpn", "ja", "kor", "ko"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ //$NON-NLS-5$ + checkConfig("CzechAnalyzer", "ces", "cze", "cs"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ + checkConfig("DutchAnalyzer", "dut", "nld", "nl"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ + checkConfig("GermanAnalyzer", "deu", "ger", "de"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ + checkConfig("GreekAnalyzer", "gre", "ell", "el"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ + checkConfig("RussianAnalyzer", "rus", "ru"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + checkConfig("ThaiAnalyzer", "th", "tha"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ + checkConfig("StandardAnalyzer", "en", "eng", "", null); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$ } private void checkConfig(String classname, String ...langs) { @@ -166,7 +164,7 @@ // if (lang != null && lang.length()==3) { assertEquals(classname, getAnalyzer(lang,true).getClass().getSimpleName()); - assertEquals(classname, getAnalyzer(lang+"-x-foobar",true).getClass().getSimpleName()); + assertEquals(classname, getAnalyzer(lang+NonEnglishExamples.getString("AbstractAnalyzerFactoryTest.0"),true).getClass().getSimpleName()); //$NON-NLS-1$ } } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/NonEnglishExamples.java 2014-05-08 03:12:55 UTC (rev 8230) @@ -0,0 +1,21 @@ +package com.bigdata.search; + +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class NonEnglishExamples { + private static final String BUNDLE_NAME = "com.bigdata.search.examples"; //$NON-NLS-1$ + + private static final ResourceBundle RESOURCE_BUNDLE = ResourceBundle.getBundle(BUNDLE_NAME); + + private NonEnglishExamples() { + } + + public static String getString(String key) { + try { + return RESOURCE_BUNDLE.getString(key); + } catch (MissingResourceException e) { + return '!' 
+ key + '!'; + } + } +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/search/examples.properties 2014-05-08 03:12:55 UTC (rev 8230) @@ -0,0 +1,17 @@ +AbstractAnalyzerFactoryTest.0=-x-foobar +AbstractAnalyzerFactoryTest.10=Hanoi - Im Streit um die Vorherrschaft im S\xFCdchinesischen Meer ist es zu einer +AbstractAnalyzerFactoryTest.11=erneuten Auseinandersetzung gekommen: +AbstractAnalyzerFactoryTest.12=hanoi strei um vorherrschaf sudchinesisch meer zu erneu auseinandersetzung gekomm +AbstractAnalyzerFactoryTest.14=\u041E\u043D\u0438 \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D\u043D\u044B \u043F\u043E\u043B\u043D\u043E\u0441\u0442\u044C\u044E \u0438 \u0437\u0430 \u0442\u0443, \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433\u0443\u044E \u0442\u0440\u0430\u0433\u0435\u0434\u0438\u044E. +AbstractAnalyzerFactoryTest.15=\u041C\u044B \u0443\u0436\u0435 \u043F\u043E\u043B\u0443\u0447\u0438\u043B\u0438 \u0434\u0430\u043D\u043D\u044B\u0435 \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432\u0443\u044E\u0449\u0438\u0445 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440\u043E\u0432 \u0421\u0411\u0423. +AbstractAnalyzerFactoryTest.16=\u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D \u043F\u043E\u043B\u043D\u043E\u0441\u0442 \u0442\u0443 \u0434\u0440\u0443\u0433 \u0442\u0440\u0430\u0433\u0435\u0434 \u043F\u043E\u043B\u0443\u0447 \u0434\u0430\u043D \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440 \u0441\u0431\u0443 +AbstractAnalyzerFactoryTest.18=Hanoi - Im Streit um die Vorherrschaft im S\xFCdchinesischen Meer ist es zu einer +AbstractAnalyzerFactoryTest.19=erneuten Auseinandersetzung gekommen: +AbstractAnalyzerFactoryTest.20=hanoi im strei um die vorherrschaf im sudchinesisch meer ist es zu ein erneu auseinandersetzung gekomm +AbstractAnalyzerFactoryTest.22=\u041E\u043D\u0438 \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D\u043D\u044B \u043F\u043E\u043B\u043D\u043E\u0441\u0442\u044C\u044E \u0438 \u0437\u0430 \u0442\u0443, \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433\u0443\u044E \u0442\u0440\u0430\u0433\u0435\u0434\u0438\u044E. +AbstractAnalyzerFactoryTest.23=\u041C\u044B \u0443\u0436\u0435 \u043F\u043E\u043B\u0443\u0447\u0438\u043B\u0438 \u0434\u0430\u043D\u043D\u044B\u0435 \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432\u0443\u044E\u0449\u0438\u0445 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440\u043E\u0432 \u0421\u0411\u0423. 
+AbstractAnalyzerFactoryTest.24=\u043E\u043D \u043E\u0442\u0432\u0435\u0442\u0441\u0442\u0432\u0435\u043D \u043F\u043E\u043B\u043D\u043E\u0441\u0442 \u0438 \u0437\u0430 \u0442\u0443 \u0438 \u0437\u0430 \u0434\u0440\u0443\u0433 \u0442\u0440\u0430\u0433\u0435\u0434 \u043C\u044B \u0443\u0436 \u043F\u043E\u043B\u0443\u0447 \u0434\u0430\u043D \u043E\u0442 \u0441\u043E\u0447\u0443\u0432\u0441\u0442\u0432 \u043D\u0430\u043C \u043E\u0444\u0438\u0446\u0435\u0440 \u0441\u0431\u0443 +AbstractAnalyzerFactoryTest.26=\u9AD8\u6797\u7D14\u793A \u751F\u614B\u5B66\u7814\u7A76\u30BB\u30F3\u30BF\u30FC\u6559\u6388\u3089\u306E\u7814\u7A76\u30B0\u30EB\u30FC\u30D7\u3068\u677E\u4E95\u5065\u4E8C \u5C71\u53E3\u5927\u5B66\u533B\u5B66\u7CFB\u7814\u7A76\u79D1\uFF08\u8FB2\u5B66\u7CFB\uFF09\u6559\u6388\u3089\u306E\u7814\u7A76\u30B0\u30EB\u30FC\u30D7\u306F\u3001 +AbstractAnalyzerFactoryTest.27=\u9AD8\u6797 \u6797\u7D14 \u7D14\u793A \u751F\u614B \u614B\u5B66 \u5B66\u7814 \u7814\u7A76 \u7A76\u30BB \u30BB\u30F3 \u30F3\u30BF \u30BF\u30FC \u30FC\u6559 \u6559\u6388 \u6388\u3089 \u3089\u306E \u306E\u7814 \u7814\u7A76 \u7A76\u30B0 \u30B0\u30EB \u30EB\u30FC +AbstractAnalyzerFactoryTest.28=\u30FC\u30D7 \u30D7\u3068 \u3068\u677E \u677E\u4E95 \u4E95\u5065 \u5065\u4E8C \u5C71\u53E3 \u53E3\u5927 \u5927\u5B66 \u5B66\u533B \u533B\u5B66 \u5B66\u7CFB \u7CFB\u7814 +AbstractAnalyzerFactoryTest.29=\u7814\u7A76 \u7A76\u79D1 \u8FB2\u5B66 \u5B66\u7CFB \u6559\u6388 \u6388\u3089 \u3089\u306E \u306E\u7814 \u7814\u7A76 \u7A76\u30B0 \u30B0\u30EB \u30EB\u30FC \u30FC\u30D7 \u30D7\u306F This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
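The motivation for the externalization: Java .properties files are read as ISO-8859-1, so the Russian, German, and Japanese test strings are stored as \uXXXX escapes (as typically produced by the JDK's native2ascii tool or Eclipse's externalize-strings wizard, which also emits the //$NON-NLS-1$ markers seen above), making the tests independent of the source-file encoding that the removed .settings entry tried to pin down. A small sketch of the round trip (the demo class is hypothetical; the bundle name and key come from the code above):

import java.util.ResourceBundle;

public class ExamplesEncodingDemo {
    public static void main(final String[] args) {
        // ResourceBundle decodes the \uXXXX escapes in examples.properties
        // back into the original Cyrillic characters.
        final ResourceBundle bundle =
                ResourceBundle.getBundle("com.bigdata.search.examples");
        System.out.println(bundle.getString("AbstractAnalyzerFactoryTest.14"));
        // Expected: Они ответственны полностью и за ту, и за другую трагедию.
    }
}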
From: <mrp...@us...> - 2014-05-08 17:53:55
Revision: 8231 http://sourceforge.net/p/bigdata/code/8231 Author: mrpersonick Date: 2014-05-08 17:53:52 +0000 (Thu, 08 May 2014) Log Message: ----------- rolling back changes to build.xml and RESTServlet from r8223 Revision Links: -------------- http://sourceforge.net/p/bigdata/code/8223 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/BIGDATA_RELEASE_1_3_0/build.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-05-08 03:12:55 UTC (rev 8230) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-05-08 17:53:52 UTC (rev 8231) @@ -1,157 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.rdf.sail.webapp; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.log4j.Logger; - -import com.bigdata.blueprints.BigdataGraphBulkLoad; -import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; -import com.bigdata.rdf.sail.webapp.client.MiniMime; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; - -/** - * Helper servlet for the blueprints layer. - */ -public class BlueprintsServlet extends BigdataRDFServlet { - - /** - * - */ - private static final long serialVersionUID = 1L; - - static private final transient Logger log = Logger.getLogger(BlueprintsServlet.class); - - static public final List<String> mimeTypes = Arrays.asList(new String[] { - "application/graphml+xml" - }) ; - - /** - * Flag to signify a blueprints operation. - */ - static final transient String ATTR_BLUEPRINTS = "blueprints"; - -// /** -// * Flag to signify a convert operation. POST an RDF document with a -// * content type and an accept header for what it should be converted to. -// */ -// static final transient String ATTR_CONVERT = "convert"; - - - public BlueprintsServlet() { - - } - - /** - * Post a GraphML file to the blueprints layer. 
- */ - @Override - protected void doPost(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { - - final long begin = System.currentTimeMillis(); - - final String namespace = getNamespace(req); - - final long timestamp = getTimestamp(req); - - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { - /* - * There is no such triple/quad store instance. - */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } - - final String contentType = req.getContentType(); - - if (log.isInfoEnabled()) - log.info("Request body: " + contentType); - - final String mimeType = new MiniMime(contentType).getMimeType().toLowerCase(); - - if (!mimeTypes.contains(mimeType)) { - - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, - "Content-Type not recognized as graph data: " + contentType); - - return; - - } - - try { - - BigdataSailRepositoryConnection conn = null; - try { - - conn = getBigdataRDFContext() - .getUnisolatedConnection(namespace); - - final BigdataGraphBulkLoad graph = new BigdataGraphBulkLoad(conn); - - GraphMLReader.inputGraph(graph, req.getInputStream()); - - graph.commit(); - - final long nmodified = graph.getMutationCountLastCommit(); - - final long elapsed = System.currentTimeMillis() - begin; - - reportModifiedCount(resp, nmodified, elapsed); - - return; - - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - - } finally { - - if (conn != null) - conn.close(); - - } - - } catch (Exception ex) { - - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); - - } - - } - -} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-05-08 03:12:55 UTC (rev 8230) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2014-05-08 17:53:52 UTC (rev 8231) @@ -59,7 +59,6 @@ private DeleteServlet m_deleteServlet; private UpdateServlet m_updateServlet; private WorkbenchServlet m_workbenchServlet; - private BlueprintsServlet m_blueprintsServlet; /** * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/584"> @@ -85,7 +84,6 @@ m_deleteServlet = new DeleteServlet(); m_describeServlet = new DescribeCacheServlet(); m_workbenchServlet = new WorkbenchServlet(); - m_blueprintsServlet = new BlueprintsServlet(); m_queryServlet.init(getServletConfig()); m_insertServlet.init(getServletConfig()); @@ -93,7 +91,6 @@ m_deleteServlet.init(getServletConfig()); m_describeServlet.init(getServletConfig()); m_workbenchServlet.init(getServletConfig()); - m_blueprintsServlet.init(getServletConfig()); } @@ -133,11 +130,6 @@ m_workbenchServlet = null; } - if (m_blueprintsServlet != null) { - m_blueprintsServlet.destroy(); - m_blueprintsServlet = null; - } - super.destroy(); } @@ -250,10 +242,6 @@ m_workbenchServlet.doPost(req, resp); - } else if (req.getParameter(BlueprintsServlet.ATTR_BLUEPRINTS) != null) { - - m_blueprintsServlet.doPost(req, resp); - } else if (req.getParameter("uri") != null) { // INSERT via w/ URIs Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 03:12:55 UTC (rev 8230) +++ 
branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-08 17:53:52 UTC (rev 8231) @@ -46,131 +46,128 @@ <project name="bigdata" default="bundleJar" basedir="."> - <property file="build.properties" /> + <property file="build.properties" /> - <!-- build-time classpath. --> - <path id="build.classpath"> - <fileset dir="${bigdata.dir}/bigdata/lib"> - <include name="**/*.jar" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-jini/lib"> - <include name="**/*.jar" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> - <include name="**/*.jar" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-sails/lib"> - <include name="**/*.jar" /> - </fileset> + <!-- build-time classpath. --> + <path id="build.classpath"> + <fileset dir="${bigdata.dir}/bigdata/lib"> + <include name="**/*.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-jini/lib"> + <include name="**/*.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> + <include name="**/*.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/lib"> + <include name="**/*.jar" /> + </fileset> <fileset dir="${bigdata.dir}/bigdata-gom/lib"> <include name="**/*.jar" /> </fileset> - <fileset dir="${bigdata.dir}/bigdata-blueprints/lib"> - <include name="**/*.jar" /> - </fileset> - <!-- - <fileset dir="${bigdata.dir}/ctc-striterator/lib"> - <include name="**/*.jar" /> - </fileset> --> - </path> + <!-- + <fileset dir="${bigdata.dir}/ctc-striterator/lib"> + <include name="**/*.jar" /> + </fileset> --> + </path> - <!-- runtime classpath w/o install. --> - <path id="runtime.classpath"> - <pathelement location="${build.dir}/classes" /> - <path refid="build.classpath" /> - </path> + <!-- runtime classpath w/o install. --> + <path id="runtime.classpath"> + <pathelement location="${build.dir}/classes" /> + <path refid="build.classpath" /> + </path> - <!-- classpath as installed. --> - <!-- @todo .so and .dll --> - <path id="install.classpath"> - <fileset dir="${install.lib.dir}"> - <include name="**/*.jar" /> - </fileset> - </path> + <!-- classpath as installed. --> + <!-- @todo .so and .dll --> + <path id="install.classpath"> + <fileset dir="${install.lib.dir}"> + <include name="**/*.jar" /> + </fileset> + </path> - <target name="clean" description="cleans everything in [build.dir]."> - <delete dir="${build.dir}" /> - <delete dir="${bigdata.dir}/bigdata-test" quiet="true" /> - <delete dir="${bigdata.dir}/dist" quiet="true" /> - </target> + <target name="clean" description="cleans everything in [build.dir]."> + <delete dir="${build.dir}" /> + <delete dir="${bigdata.dir}/bigdata-test" quiet="true" /> + <delete dir="${bigdata.dir}/dist" quiet="true" /> + </target> - <target name="prepare"> - <!-- setup ${version} for regular or snapshot. --> - <tstamp> - <format property="today" pattern="yyyyMMdd" locale="en,US" /> - <format property="osgiDate" pattern="yyyyMMdd" locale="en,US" /> - </tstamp> + <target name="prepare"> + <!-- setup ${version} for regular or snapshot. 
--> + <tstamp> + <format property="today" pattern="yyyyMMdd" locale="en,US" /> + <format property="osgiDate" pattern="yyyyMMdd" locale="en,US" /> + </tstamp> <condition property="client-version" value="bigdata-client-${build.ver}-${today}" else="bigdata-client-${build.ver}"> <istrue value="${snapshot}" /> </condition> - <condition property="version" value="bigdata-${build.ver}-${today}" else="bigdata-${build.ver}"> - <istrue value="${snapshot}" /> - </condition> - <condition property="osgi.version" value="${build.ver.osgi}.${osgiDate}" else="${build.ver.osgi}.0"> - <istrue value="${snapshot}" /> - </condition> - <!--<echo message="today=${today}"/>--> - <echo message="version=${version}" /> - <available property="svn.checkout" file="./.svn/entries"/> - <echo message="svn.checkout=${svn.checkout}" /> - <!-- create directories. --> - <mkdir dir="${build.dir}" /> - <mkdir dir="${build.dir}/classes" /> - <mkdir dir="${build.dir}/docs" /> - <mkdir dir="${build.dir}/lib" /> - </target> + <condition property="version" value="bigdata-${build.ver}-${today}" else="bigdata-${build.ver}"> + <istrue value="${snapshot}" /> + </condition> + <condition property="osgi.version" value="${build.ver.osgi}.${osgiDate}" else="${build.ver.osgi}.0"> + <istrue value="${snapshot}" /> + </condition> + <!--<echo message="today=${today}"/>--> + <echo message="version=${version}" /> + <available property="svn.checkout" file="./.svn/entries"/> + <echo message="svn.checkout=${svn.checkout}" /> + <!-- create directories. --> + <mkdir dir="${build.dir}" /> + <mkdir dir="${build.dir}/classes" /> + <mkdir dir="${build.dir}/docs" /> + <mkdir dir="${build.dir}/lib" /> + </target> - <target name="buildinfo" depends="prepare" if="svn.checkout" - description="Generate a BuildInfo.java file with metadata about this build."> - <property name="buildinfo.file" - value="${bigdata.dir}\bigdata\src\java\com\bigdata\BuildInfo.java"/> - <loadfile property="svn.revision" srcFile="./.svn/entries"> - <filterchain> - <headfilter lines="1" skip="3"/> - <striplinebreaks/> - </filterchain> - </loadfile> - <loadfile property="svn.url" srcFile="./.svn/entries"> - <filterchain> - <headfilter lines="1" skip="4"/> - <striplinebreaks/> - </filterchain> - </loadfile> - <tstamp> - <format property="build.timestamp" pattern="yyyy/MM/dd HH:mm:ss z" locale="en,US" /> - </tstamp> - <property environment="env" /> - <echo file="${buildinfo.file}"> + <target name="buildinfo" depends="prepare" if="svn.checkout" + description="Generate a BuildInfo.java file with metadata about this build."> + <property name="buildinfo.file" + value="${bigdata.dir}\bigdata\src\java\com\bigdata\BuildInfo.java"/> + <loadfile property="svn.revision" srcFile="./.svn/entries"> + <filterchain> + <headfilter lines="1" skip="3"/> + <striplinebreaks/> + </filterchain> + </loadfile> + <loadfile property="svn.url" srcFile="./.svn/entries"> + <filterchain> + <headfilter lines="1" skip="4"/> + <striplinebreaks/> + </filterchain> + </loadfile> + <tstamp> + <format property="build.timestamp" pattern="yyyy/MM/dd HH:mm:ss z" locale="en,US" /> + </tstamp> + <property environment="env" /> + <echo file="${buildinfo.file}"> package com.bigdata; public class BuildInfo { public static final String buildVersion="${build.ver}"; public static final String buildVersionOSGI="${build.ver.osgi}"; - public static final String svnRevision="${svn.revision}"; + public static final String svnRevision="${svn.revision}"; public static final String svnURL="${svn.url}"; - public static final String 
buildTimestamp="${build.timestamp}"; - public static final String buildUser="${user.name}"; - public static final String buildHost="${env.COMPUTERNAME}"; - public static final String osArch="${os.arch}"; - public static final String osName="${os.name}"; - public static final String osVersion="${os.version}"; + public static final String buildTimestamp="${build.timestamp}"; + public static final String buildUser="${user.name}"; + public static final String buildHost="${env.COMPUTERNAME}"; + public static final String osArch="${os.arch}"; + public static final String osName="${os.name}"; + public static final String osVersion="${os.version}"; } </echo> - <loadfile property="buildinfo" srcFile="${buildinfo.file}"/> - <echo message="${buildinfo}"/> - </target> - + <loadfile property="buildinfo" srcFile="${buildinfo.file}"/> + <echo message="${buildinfo}"/> + </target> + <!-- Note: I had to explicitly specify the location of the jdepend jar in Preferences => Ant => Runtime in order to get this to work under eclipse. This is odd since eclipse bundles the jar with the ant plugin. - http://www.ryanlowe.ca/blog/archives/001038_junit_ant_task_doesnt_work_in_eclipse.php - - outputfile="${build.dir}/docs/jdepend-report.txt" + http://www.ryanlowe.ca/blog/archives/001038_junit_ant_task_doesnt_work_in_eclipse.php + + outputfile="${build.dir}/docs/jdepend-report.txt" --> <target name="jdepend" depends="jar"> <jdepend format="xml" - outputfile="${build.dir}/docs/jdepend-report.xml"> + outputfile="${build.dir}/docs/jdepend-report.xml"> <exclude name="java.*"/> <exclude name="javax.*"/> <classespath> @@ -185,89 +182,84 @@ <!-- Note: This will (re-)compile the SPARQL grammar. Compilation is --> <!-- fast, but javacc must be installed. --> <target name="javacc" depends="prepare" - description="Compile the SPARQL grammar."> + description="Compile the SPARQL grammar."> <jjtree - javacchome="${javacc.home}" - target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jjt" - outputfile="sparql.jj" + javacchome="${javacc.home}" + target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jjt" + outputfile="sparql.jj" outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/" /> <javacc - javacchome="${javacc.home}" - target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jj" - outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/" - /> + javacchome="${javacc.home}" + target="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/sparql.jj" + outputdirectory="bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/ast/" + /> </target> - <!-- Note: javac error results often if verbose is disabled. --> - <!-- I was able to perform a build with 1.6.0_07. --> - <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. 
--> - <target name="compile" depends="prepare, buildinfo" - description="Compile the code base."> - <mkdir dir="${build.dir}" /> - <echo>javac</echo> - <echo> destdir="${build.dir}"</echo> - <echo> fork="yes"</echo> - <echo> memorymaximumsize="1g"</echo> - <echo> debug="yes"</echo> - <echo> debuglevel="${javac.debuglevel}"</echo> - <echo> verbose="${javac.verbose}"</echo> - <echo> encoding="${javac.encoding}"</echo> - <echo> source="${javac.source}"</echo> - <echo> target="${javac.target}"</echo> - <javac classpathref="build.classpath" - destdir="${build.dir}/classes" - fork="yes" - memorymaximumsize="1g" - debug="${javac.debug}" - debuglevel="${javac.debuglevel}" - verbose="${javac.verbose}" - encoding="${javac.encoding}" - source="${javac.source}" - target="${javac.target}" - includeantruntime="false" - > - <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> - <src path="${bigdata.dir}/bigdata/src/java" /> - <src path="${bigdata.dir}/bigdata-jini/src/java" /> + <!-- Note: javac error results often if verbose is disabled. --> + <!-- I was able to perform a build with 1.6.0_07. --> + <!-- I set the target to 1.5 to support deployment on non-1.6 JVMs. --> + <target name="compile" depends="prepare, buildinfo" + description="Compile the code base."> + <mkdir dir="${build.dir}" /> + <echo>javac</echo> + <echo> destdir="${build.dir}"</echo> + <echo> fork="yes"</echo> + <echo> memorymaximumsize="1g"</echo> + <echo> debug="yes"</echo> + <echo> debuglevel="${javac.debuglevel}"</echo> + <echo> verbose="${javac.verbose}"</echo> + <echo> encoding="${javac.encoding}"</echo> + <echo> source="${javac.source}"</echo> + <echo> target="${javac.target}"</echo> + <javac classpathref="build.classpath" + destdir="${build.dir}/classes" + fork="yes" + memorymaximumsize="1g" + debug="${javac.debug}" + debuglevel="${javac.debuglevel}" + verbose="${javac.verbose}" + encoding="${javac.encoding}" + source="${javac.source}" + target="${javac.target}" + includeantruntime="false" + > + <!-- note: must also specify -bootclasspath and -extdirs when cross-compiling --> + <src path="${bigdata.dir}/bigdata/src/java" /> + <src path="${bigdata.dir}/bigdata-jini/src/java" /> <src path="${bigdata.dir}/bigdata-rdf/src/java" /> - <src path="${bigdata.dir}/bigdata-blueprints/src/java" /> - <src path="${bigdata.dir}/bigdata-sails/src/java" /> + <src path="${bigdata.dir}/bigdata-sails/src/java" /> <src path="${bigdata.dir}/bigdata-gom/src/java" /> <src path="${bigdata.dir}/bigdata-ganglia/src/java" /> <src path="${bigdata.dir}/bigdata-gas/src/java" /> - <src path="${bigdata.dir}/ctc-striterators/src/java" /> - <!-- Do not include the unit tests @todo conditionally include? + <src path="${bigdata.dir}/ctc-striterators/src/java" /> + <!-- Do not include the unit tests @todo conditionally include? <src path="${bigdata.dir}/bigdata/src/test"/> <src path="${bigdata.dir}/bigdata-jini/src/test"/> <src path="${bigdata.dir}/bigdata-rdf/src/test"/> <src path="${bigdata.dir}/bigdata-sails/src/test"/> --> - <compilerarg value="-version" /> - </javac> - <!-- copy resources. 
--> - <copy toDir="${build.dir}/classes"> - <fileset dir="${bigdata.dir}/bigdata/src/java"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - <exclude name="**/BytesUtil.c" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-jini/src/java"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-rdf/src/java"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-sails/src/java"> - <exclude name="**/*.java" /> - <exclude name="**/package.html" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-blueprints/src/java"> + <compilerarg value="-version" /> + </javac> + <!-- copy resources. --> + <copy toDir="${build.dir}/classes"> + <fileset dir="${bigdata.dir}/bigdata/src/java"> <exclude name="**/*.java" /> <exclude name="**/package.html" /> + <exclude name="**/BytesUtil.c" /> </fileset> + <fileset dir="${bigdata.dir}/bigdata-jini/src/java"> + <exclude name="**/*.java" /> + <exclude name="**/package.html" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-rdf/src/java"> + <exclude name="**/*.java" /> + <exclude name="**/package.html" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/src/java"> + <exclude name="**/*.java" /> + <exclude name="**/package.html" /> + </fileset> <fileset dir="${bigdata.dir}/bigdata-gom/src/java"> <exclude name="**/*.java" /> <exclude name="**/package.html" /> @@ -276,37 +268,37 @@ <exclude name="**/*.java" /> <exclude name="**/package.html" /> </fileset> - <!-- Note: This simple copy works so long as there is just one service - provider file per interface. It will not combine (append) multiple - files for the same interface. --> - <fileset dir="${bigdata.dir}/bigdata-rdf/src/resources/service-providers"> - <include name="META-INF/**" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server"> - <include name="META-INF/**" /> - </fileset> - <!-- Copy WAR resources for the embedded NanoSparqlServer. --> - <!-- TODO: This could cause problem since the files exist in --> - <!-- both the JAR and the staged artifact (bigdata/var/jetty). --> - <!-- This makes it difficult to override the ones in the JAR. --> - <!-- See also "run-junit" for an alterative to getting CI to run. --> - <!-- newer approach. --> - <!--fileset dir="${bigdata.dir}/bigdata-war/src"> - <include name="**"/> - </fileset--> - <!-- older approach. --> - <fileset dir="." includes="bigdata-war/src/**"/> - </copy> - </target> + <!-- Note: This simple copy works so long as there is just one service + provider file per interface. It will not combine (append) multiple + files for the same interface. --> + <fileset dir="${bigdata.dir}/bigdata-rdf/src/resources/service-providers"> + <include name="META-INF/**" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/src/resources/sesame-server"> + <include name="META-INF/**" /> + </fileset> + <!-- Copy WAR resources for the embedded NanoSparqlServer. --> + <!-- TODO: This could cause problem since the files exist in --> + <!-- both the JAR and the staged artifact (bigdata/var/jetty). --> + <!-- This makes it difficult to override the ones in the JAR. --> + <!-- See also "run-junit" for an alterative to getting CI to run. --> + <!-- newer approach. --> + <!--fileset dir="${bigdata.dir}/bigdata-war/src"> + <include name="**"/> + </fileset--> + <!-- older approach. --> + <fileset dir="." 
includes="bigdata-war/src/**"/> + </copy> + </target> - <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. --> - <target name="bundleJar" depends="clean, bundle, jar" description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory."> - <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib"/> - <!--<property name="myclasspath" refid="runtime.classpath" /> - <echo message="${myclasspath}"/>--> - </target> + <!-- Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory. --> + <target name="bundleJar" depends="clean, bundle, jar" description="Builds the bigdata JAR and bundles it together with all of its dependencies in the ${build.dir}/lib directory."> + <copy file="${build.dir}/${version}.jar" todir="${build.dir}/lib"/> + <!--<property name="myclasspath" refid="runtime.classpath" /> + <echo message="${myclasspath}"/>--> + </target> - <target name="sourceJar" depends="prepare" description="Generates the sources jar."> + <target name="sourceJar" depends="prepare" description="Generates the sources jar."> <jar destfile="${build.dir}/${version}-sources.jar"> <fileset dir="${bigdata.dir}/bigdata/src/java" /> <fileset dir="${bigdata.dir}/bigdata/src/samples" /> @@ -321,192 +313,187 @@ <fileset dir="${bigdata.dir}/bigdata-gom/src/java" /> <fileset dir="${bigdata.dir}/bigdata-gom/src/samples" /> <fileset dir="${bigdata.dir}/ctc-striterators/src/java" /> - <fileset dir="${bigdata.dir}/bigdata-blueprints/src/java" /> </jar> </target> - - <!-- This generates the jar, but does not bundled the dependencies. - See 'bundleJar'. --> - <target name="jar" depends="compile" description="Generates the jar (see also bundleJar)."> - <jar destfile="${build.dir}/${version}.jar"> - <fileset dir="${build.dir}/classes" excludes="test/**" /> - <!-- Copy the copyright top-level NOTICE file. --> - <fileset file="${bigdata.dir}/NOTICE"/> - <!-- Copy the copyright top-level LICENSE file. --> - <fileset file="${bigdata.dir}/LICENSE.txt"/> - <!-- Copy licenses for any project from which have imported something. --> - <fileset dir="${bigdata.dir}/bigdata"> - <include name="LEGAL/apache-license-2_0.txt"/> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-rdf"> - <include name="LEGAL/sesame2.x-license.txt"/> - </fileset> - <manifest> - <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>--> - </manifest> - </jar> - </target> - - <!-- Deploy the JAR to the maven repository. --> + + <!-- This generates the jar, but does not bundled the dependencies. + See 'bundleJar'. --> + <target name="jar" depends="compile" description="Generates the jar (see also bundleJar)."> + <jar destfile="${build.dir}/${version}.jar"> + <fileset dir="${build.dir}/classes" excludes="test/**" /> + <!-- Copy the copyright top-level NOTICE file. --> + <fileset file="${bigdata.dir}/NOTICE"/> + <!-- Copy the copyright top-level LICENSE file. --> + <fileset file="${bigdata.dir}/LICENSE.txt"/> + <!-- Copy licenses for any project from which have imported something. 
--> + <fileset dir="${bigdata.dir}/bigdata"> + <include name="LEGAL/apache-license-2_0.txt"/> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-rdf"> + <include name="LEGAL/sesame2.x-license.txt"/> + </fileset> + <manifest> + <!--<attribute name="Main-Class" value="com/bigdata/rdf/rio/TestRioIntegration"/>--> + </manifest> + </jar> + </target> + + <!-- Deploy the JAR to the maven repository. --> <target name="maven-deploy" depends="jar" - description="Deploy the jar to the maven repository."> - <exec command="${MAVEN_HOME}/bin/mvn"> - <arg value="clean"/> - <arg value="deploy"/> - </exec> - </target> + description="Deploy the jar to the maven repository."> + <exec command="${MAVEN_HOME}/bin/mvn"> + <arg value="clean"/> + <arg value="deploy"/> + </exec> + </target> - <!-- This generates an osgi bundle jar, but does not bundle the dependencies. - See 'bundleJar'. --> - <target name="osgi" depends="compile, bundle" description="Generates the osgi bundle jar (see also bundleJar)."> - <taskdef resource="aQute/bnd/ant/taskdef.properties" classpath="bigdata/lib/bnd-0.0.384.jar" /> - <mkdir dir="${build.dir}/bundles" /> - <jar destfile="${build.dir}/bundles/com.bigdata.source_${osgi.version}.jar"> - <manifest> - <attribute name="Eclipse-SourceBundle" value='com.bigdata;version="${osgi.version}";roots="."' /> - <attribute name="Bundle-Vendor" value="Systap" /> - <attribute name="Bundle-Version" value="${build.ver.osgi}" /> - <attribute name="Bundle-ManifestVersion" value="2" /> - <attribute name="Bundle-SymbolicName" value="com.bigdata.source" /> - <attribute name="Bundle-DocURL" value="http://www.bigdata.com" /> - <attribute name="Bundle-Description" value="Bigdata Source" /> - </manifest> - <fileset dir="bigdata/src/java" /> - <fileset dir="bigdata-jini/src/java" /> + <!-- This generates an osgi bundle jar, but does not bundle the dependencies. + See 'bundleJar'. 
--> + <target name="osgi" depends="compile, bundle" description="Generates the osgi bundle jar (see also bundleJar)."> + <taskdef resource="aQute/bnd/ant/taskdef.properties" classpath="bigdata/lib/bnd-0.0.384.jar" /> + <mkdir dir="${build.dir}/bundles" /> + <jar destfile="${build.dir}/bundles/com.bigdata.source_${osgi.version}.jar"> + <manifest> + <attribute name="Eclipse-SourceBundle" value='com.bigdata;version="${osgi.version}";roots="."' /> + <attribute name="Bundle-Vendor" value="Systap" /> + <attribute name="Bundle-Version" value="${build.ver.osgi}" /> + <attribute name="Bundle-ManifestVersion" value="2" /> + <attribute name="Bundle-SymbolicName" value="com.bigdata.source" /> + <attribute name="Bundle-DocURL" value="http://www.bigdata.com" /> + <attribute name="Bundle-Description" value="Bigdata Source" /> + </manifest> + <fileset dir="bigdata/src/java" /> + <fileset dir="bigdata-jini/src/java" /> <fileset dir="bigdata-rdf/src/java" /> - <fileset dir="bigdata-sails/src/java" /> + <fileset dir="bigdata-sails/src/java" /> <fileset dir="bigdata-gom/src/java" /> - </jar> - <bnd output="${build.dir}/bundles/com.bigata-${osgi.version}.jar" classpath="${build.dir}/classes" eclipse="false" failok="false" exceptions="true" files="${basedir}/osgi/bigdata.bnd" /> + </jar> + <bnd output="${build.dir}/bundles/com.bigata-${osgi.version}.jar" classpath="${build.dir}/classes" eclipse="false" failok="false" exceptions="true" files="${basedir}/osgi/bigdata.bnd" /> - <bndwrap jars="${build.dir}/lib/colt-${colt.version}.jar" output="${build.dir}/bundles/colt-${colt.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/fastutil-${fastutil.version}.jar" output="${build.dir}/bundles/fastutil-${fastutil.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/dsi-utils-${dsiutils.version}.jar" output="${build.dir}/bundles/dsi-utils-${dsiutils.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/lgpl-utils-${lgplutils.version}.jar" output="${build.dir}/bundles/lgpl-utils-${lgplutils.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/high-scale-lib-v${highscalelib.version}.jar" output="${build.dir}/bundles/high-scale-lib-v${highscalelib.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/openrdf-sesame-${sesame.version}-onejar.jar" output="${build.dir}/bundles/openrdf-sesame-${sesame.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/apache/zookeeper-${zookeeper.version}.jar" output="${build.dir}/bundles/zookeeper-${zookeeper.version}.jar" definitions="${basedir}/osgi/" /> - <bndwrap jars="${build.dir}/lib/nxparser-${nxparser.version}.jar" output="${build.dir}/bundles/nxparser-2010.6.22.jar" definitions="${basedir}/osgi/" /> - </target> + <bndwrap jars="${build.dir}/lib/colt-${colt.version}.jar" output="${build.dir}/bundles/colt-${colt.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/fastutil-${fastutil.version}.jar" output="${build.dir}/bundles/fastutil-${fastutil.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/dsi-utils-${dsiutils.version}.jar" output="${build.dir}/bundles/dsi-utils-${dsiutils.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/lgpl-utils-${lgplutils.version}.jar" output="${build.dir}/bundles/lgpl-utils-${lgplutils.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap 
jars="${build.dir}/lib/high-scale-lib-v${highscalelib.version}.jar" output="${build.dir}/bundles/high-scale-lib-v${highscalelib.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/openrdf-sesame-${sesame.version}-onejar.jar" output="${build.dir}/bundles/openrdf-sesame-${sesame.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/apache/zookeeper-${zookeeper.version}.jar" output="${build.dir}/bundles/zookeeper-${zookeeper.version}.jar" definitions="${basedir}/osgi/" /> + <bndwrap jars="${build.dir}/lib/nxparser-${nxparser.version}.jar" output="${build.dir}/bundles/nxparser-2010.6.22.jar" definitions="${basedir}/osgi/" /> + </target> - <!-- Note: the javadoc requires a LOT of RAM, but runs quickly on a - server class machine. - - @todo man page for [bigdata] script to @{build.dir}/docs - - --> - <target name="javadoc" depends="prepare" if="javadoc"> - <mkdir dir="${build.dir}/docs/api" /> - <javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes" - author="true" version="true" use="true" verbose="no" - overview="${bigdata.dir}/overview.html" - windowtitle="bigdata® v${build.ver}" - classpathref="build.classpath" - encoding="utf-8" - private="false" - > - <arg value="-J-Xmx1000m" /> + <!-- Note: the javadoc requires a LOT of RAM, but runs quickly on a + server class machine. + + @todo man page for [bigdata] script to @{build.dir}/docs + + --> + <target name="javadoc" depends="prepare" if="javadoc"> + <mkdir dir="${build.dir}/docs/api" /> + <javadoc destdir="${build.dir}/docs/api" defaultexcludes="yes" + author="true" version="true" use="true" verbose="no" + overview="${bigdata.dir}/overview.html" + windowtitle="bigdata® v${build.ver}" + classpathref="build.classpath" + encoding="utf-8" + private="false" + > + <arg value="-J-Xmx1000m" /> <arg value="-quiet" /> - <packageset dir="${bigdata.dir}/bigdata/src/java" /> - <packageset dir="${bigdata.dir}/bigdata/src/samples" /> - <packageset dir="${bigdata.dir}/bigdata-jini/src/java" /> - <packageset dir="${bigdata.dir}/bigdata-rdf/src/java" /> - <packageset dir="${bigdata.dir}/bigdata-rdf/src/samples" /> - <packageset dir="${bigdata.dir}/bigdata-sails/src/java" /> - <packageset dir="${bigdata.dir}/bigdata-sails/src/samples" /> - <packageset dir="${bigdata.dir}/bigdata-blueprints/src/java" /> - <packageset dir="${bigdata.dir}/bigdata-gom/src/java" /> + <packageset dir="${bigdata.dir}/bigdata/src/java" /> + <packageset dir="${bigdata.dir}/bigdata/src/samples" /> + <packageset dir="${bigdata.dir}/bigdata-jini/src/java" /> + <packageset dir="${bigdata.dir}/bigdata-rdf/src/java" /> + <packageset dir="${bigdata.dir}/bigdata-rdf/src/samples" /> + <packageset dir="${bigdata.dir}/bigdata-sails/src/java" /> + <packageset dir="${bigdata.dir}/bigdata-sails/src/samples" /> + <packageset dir="${bigdata.dir}/bigdata-gom/src/java" /> <packageset dir="${bigdata.dir}/bigdata-gom/src/samples" /> <packageset dir="${bigdata.dir}/bigdata-gas/src/java" /> - <packageset dir="${bigdata.dir}/ctc-striterators/src/java" /> - <doctitle> - <![CDATA[<h1>bigdata® v${build.ver}</h1>]]></doctitle> - <bottom> - <![CDATA[<i>Copyright © 2006-2014 SYSTAP, LLC. 
All Rights Reserved.</i>]]></bottom> - <tag name="todo" scope="all" description="TODO:" /> - <tag name="issue" scope="all" description="ISSUE:" /> - <!--tag name="FIXME" scope="all" description="FIXME:"/--> - <link href="http://download.oracle.com/javase/7/docs/api/" /> - <link href="http://openrdf.callimachus.net/sesame/2.7/apidocs/" /> - <link href="http://lucene.apache.org/java/3_0_0/api/"/> + <packageset dir="${bigdata.dir}/ctc-striterators/src/java" /> + <doctitle> + <![CDATA[<h1>bigdata® v${build.ver}</h1>]]></doctitle> + <bottom> + <![CDATA[<i>Copyright © 2006-2014 SYSTAP, LLC. All Rights Reserved.</i>]]></bottom> + <tag name="todo" scope="all" description="TODO:" /> + <tag name="issue" scope="all" description="ISSUE:" /> + <!--tag name="FIXME" scope="all" description="FIXME:"/--> + <link href="http://download.oracle.com/javase/7/docs/api/" /> + <link href="http://openrdf.callimachus.net/sesame/2.7/apidocs/" /> + <link href="http://lucene.apache.org/java/3_0_0/api/"/> <link href="http://lucene.apache.org/core/old_versioned_docs/versions/3_0_3/api/all/"/> - <link href="http://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/"/> - <link href="http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/"/> - <link href="http://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/"/> - <link href="http://hc.apache.org/httpcomponents-client-ga/httpmime/apidocs/"/> - <link href="http://hc.apache.org/httpcomponents-client-ga/httpclient-cache/apidocs/"/> - <link href="http://icu-project.org/apiref/icu4j/"/> - <link href="http://download.eclipse.org/jetty/stable-9/apidocs/"/> + <link href="http://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/"/> + <link href="http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/"/> + <link href="http://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/"/> + <link href="http://hc.apache.org/httpcomponents-client-ga/httpmime/apidocs/"/> + <link href="http://hc.apache.org/httpcomponents-client-ga/httpclient-cache/apidocs/"/> + <link href="http://icu-project.org/apiref/icu4j/"/> + <link href="http://download.eclipse.org/jetty/stable-9/apidocs/"/> </javadoc> </target> <target name="bundle" description="Bundles all dependencies for easier deployments and releases (does not bundle the bigdata jar)."> <copy toDir="${build.dir}/lib" flatten="true"> - <fileset dir="${bigdata.dir}/bigdata/lib"> - <include name="**/*.jar" /> - <include name="**/*.so" /> - <include name="**/*.dll" /> - <!-- The BytesUtil JNI class is not recommended at this time (no performance gain). --> - <exclude name="**/*BytesUtil*" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> - <include name="**/*.jar" /> - </fileset> - <fileset dir="${bigdata.dir}/bigdata-sails/lib"> - <include name="**/*.jar" /> - </fileset> + <fileset dir="${bigdata.dir}/bigdata/lib"> + <include name="**/*.jar" /> + <include name="**/*.so" /> + <include name="**/*.dll" /> + <!-- The BytesUtil JNI class is not recommended at this time (no performance gain). --> + <exclude name="**/*BytesUtil*" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> + <include name="**/*.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/lib"> + <include name="**/*.jar" /> + </fileset> <fileset dir="${bigdata.dir}/bigdata-gom/lib"> <include name="**/*.jar" /> </fileset> - <fileset dir="${bigdata.dir}/bigdata-blueprints/lib"> - <include name="**/*.jar" /> - </fileset> </copy> <!-- Do NOT flatten the jini jars. 
We need the to preserve the --> <!-- lib, lib-dl, and lib-ext distinctions. --> <copy toDir="${build.dir}/lib" flatten="false"> - <fileset dir="${bigdata.dir}/bigdata-jini/lib"> - <include name="**/*.jar" /> - </fileset> + <fileset dir="${bigdata.dir}/bigdata-jini/lib"> + <include name="**/*.jar" /> + </fileset> </copy> </target> - <!-- - This target produces a new jar which includes everything from the bigdata - jar, the dsi-util jar, the lgpl-utils jar, and exactly those class files - from colt and fastutil which are required by the proceeding jars. The - main advantage of the resulting jar is that the vast majority of fastutil - is not necessary, and it is a 13M jar. - - <target name="autojar" - description="Produce an expanded version of the bigdata jar which - includes the data from the dsi-util and lgpl-utils jars and only - those classes from fastutil and colt which are required to support - bigdata and dsiutil at runtime."> - <java jar="src/build/autojar/autojar.jar" fork="true" failonerror="true"> - <arg line="-o ${build.dir}/bigdataPlus.jar - -c ${bigdata.dir}/bigdata/lib/unimi/fastutil*.jar - -c ${bigdata.dir}/bigdata/lib/unimi/colt*.jar - ${build.dir}/lib/bigdata*.jar - ${bigdata.dir}/bigdata/lib/dsi-util*.jar - ${bigdata.dir}/bigdata/lib/lgpl-utils*.jar - " /> - </java> - </target> --> - <!-- java autojar.jar -vo fastutil-stripped.jar -c fastutil.jar -Y bigdata.jar --> - <target name="autojar-strip-fastutil" depends="prepare" - description="Strip unused classes from fastutil."> - <java jar="src/build/autojar/autojar.jar" fork="true" failonerror="true"> - <arg line="-o ${build.dir}/fastutil-stripped.jar - -c ${bigdata.dir}/bigdata/lib/unimi/fastutil*.jar - -- - -Y ${build.dir}/lib/${version}.jar - -Y ${bigdata.dir}/bigdata/lib/dsi-util*.jar - -Y ${bigdata.dir}/bigdata/lib/lgpl-utils*.jar - " /> - </java> - </target> + <!-- + This target produces a new jar which includes everything from the bigdata + jar, the dsi-util jar, the lgpl-utils jar, and exactly those class files + from colt and fastutil which are required by the proceeding jars. The + main advantage of the resulting jar is that the vast majority of fastutil + is not necessary, and it is a 13M jar. 
+ + <target name="autojar" + description="Produce an expanded version of the bigdata jar which + includes the data from the dsi-util and lgpl-utils jars and only + those classes from fastutil and colt which are required to support + bigdata and dsiutil at runtime."> + <java jar="src/build/autojar/autojar.jar" fork="true" failonerror="true"> + <arg line="-o ${build.dir}/bigdataPlus.jar + -c ${bigdata.dir}/bigdata/lib/unimi/fastutil*.jar + -c ${bigdata.dir}/bigdata/lib/unimi/colt*.jar + ${build.dir}/lib/bigdata*.jar + ${bigdata.dir}/bigdata/lib/dsi-util*.jar + ${bigdata.dir}/bigdata/lib/lgpl-utils*.jar + " /> + </java> + </target> --> + <!-- java autojar.jar -vo fastutil-stripped.jar -c fastutil.jar -Y bigdata.jar --> + <target name="autojar-strip-fastutil" depends="prepare" + description="Strip unused classes from fastutil."> + <java jar="src/build/autojar/autojar.jar" fork="true" failonerror="true"> + <arg line="-o ${build.dir}/fastutil-stripped.jar + -c ${bigdata.dir}/bigdata/lib/unimi/fastutil*.jar + -- + -Y ${build.dir}/lib/${version}.jar + -Y ${bigdata.dir}/bigdata/lib/dsi-util*.jar + -Y ${bigdata.dir}/bigdata/lib/lgpl-utils*.jar + " /> + </java> + </target> <!-- @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/628" > Create a bigdata-client jar for the NSS REST API </a> @@ -516,7 +503,7 @@ <java jar="src/build/autojar/autojar.jar" fork="true" failonerror="true"> <arg line="-o ${build.dir}/${client-version}.jar -c ${build.dir}/${version}.jar - -v + -v -- com.bigdata.rdf.sail.webapp.client.*.class com.bigdata.rdf.properties.*.class @@ -526,88 +513,87 @@ </java> </target> - <!--depends="bundleJar"--> - <target name="war" depends="bundleJar, autojar-strip-fastutil" - description="Generates a WAR artifact."> + <!--depends="bundleJar"--> + <target name="war" depends="bundleJar, autojar-strip-fastutil" + description="Generates a WAR artifact."> <property name="war.dir" value="${build.dir}/staged-war" - description="The staging directory for the webapp."/> + description="The staging directory for the webapp."/> <property name="war.file" value="${build.dir}/bigdata.war" - description="The generated WAR file." /> + description="The generated WAR file." /> <delete file="${war.file}" - description="Remove the old WAR file."/> + description="Remove the old WAR file."/> <delete dir="${war.dir}" - description="Remove old WAR staging directory."/> - <echo message="Staging webapp to ${war.dir}"/> - <copy todir="${war.dir}" - includeemptydirs="yes" - preservelastmodified="true" - description="Stage the webapp to a temporary directory." - > - <fileset dir="bigdata-war/src"> - <include name="**/*"/> - <!-- The jetty.xml file is not used when deploying to a servlet container. --> - <exclude name="**/jetty.xml"/> + description="Remove old WAR staging directory."/> + <echo message="Staging webapp to ${war.dir}"/> + <copy todir="${war.dir}" + includeemptydirs="yes" + preservelastmodified="true" + description="Stage the webapp to a temporary directory." + > + <fileset dir="bigdata-war/src"> + <include name="**/*"/> + <!-- The jetty.xml file is not used when deploying to a servlet container. 
--> + <exclude name="**/jetty.xml"/> </fileset> - </copy> + </copy> <replace file="${war.dir}/WEB-INF/web.xml" - summary="true" - description="Override the default location of the RWStore.properties file."> + summary="true" + description="Override the default location of the RWStore.properties file."> <replacefilter token="WEB-INF/RWStore.properties" - value="../webapps/bigdata/WEB-INF/RWStore.properties" + value="../webapps/bigdata/WEB-INF/RWStore.properties" /> </replace> <echo message="Building webapp in ${war.file} from ${war.dir}"/> - <war destfile="${war.file}" - webxml="${war.dir}/WEB-INF/web.xml" - > - <!-- The staged WAR files. --> - <fileset dir="${war.dir}"/> + <war destfile="${war.file}" + webxml="${war.dir}/WEB-INF/web.xml" + > + <!-- The staged WAR files. --> + <fileset dir="${war.dir}"/> <!-- Copy the bigdata license. --> - <file file="${bigdata.dir}/LICENSE.txt"/> - <!-- Copy the top-level NOTICE file. --> - <file file="${bigdata.dir}/NOTICE"/> + <file file="${bigdata.dir}/LICENSE.txt"/> + <!-- Copy the top-level NOTICE file. --> + <file file="${bigdata.dir}/NOTICE"/> <!-- Copy all of the LEGAL directories. --> - <fileset dir="${bigdata.dir}/bigdata" includes="LEGAL/*"/> - <fileset dir="${bigdata.dir}/bigdata-rdf" includes="LEGAL/*"/> - <fileset dir="${bigdata.dir}/bigdata-sails" includes="LEGAL/*"/> - <fileset dir="${bigdata.dir}/bigdata-blueprints" includes="LEGAL/*"/> + <fileset dir="${bigdata.dir}/bigdata" includes="LEGAL/*"/> + <fileset dir="${bigdata.dir}/bigdata-rdf" includes="LEGAL/*"/> + <fileset dir="${bigdata.dir}/bigdata-sails" includes="LEGAL/*"/> <fileset dir="${bigdata.dir}/bigdata-gom" includes="LEGAL/*"/> - <fileset dir="${bigdata.dir}/bigdata-jini" includes="LEGAL/*"/> - <!-- bigdata jar plus some dependencies as filtered by autojar. - <lib file="${build.dir}/bigdataPlus.jar"/> --> - <!-- The stripped version of fastutil. --> - <lib file="${build.dir}/fastutil-stripped.jar"/> - <lib dir="${build.dir}/lib"> - <exclude name="fastutil*.jar"/> - <!-- jars bundled into "bigdata-plus" by autojar. - <exclude name="colt*.jar"/> - <exclude name="dsi-util*.jar"/> - <exclude name="lgpl-utils*.jar"/> - <exclude name="bigdata*.jar"/>--> - <!-- jars which are not currently used. --> - <exclude name="2p-*.jar"/> - <!-- test suite stuff is not needed. --> - <exclude name="junit*.jar"/> - <exclude name="sesame*testsuite*.jar"/> - <!-- osgi stuff is not needed. --> - <exclude name="bnd*.jar"/> - <!-- jetty / servlet / jsp jars not required for the WAR. --> - <exclude name="jetty*.jar"/> + <fileset dir="${bigdata.dir}/bigdata-jini" includes="LEGAL/*"/> + <!-- bigdata jar plus some dependencies as filtered by autojar. + <lib file="${build.dir}/bigdataPlus.jar"/> --> + <!-- The stripped version of fastutil. --> + <lib file="${build.dir}/fastutil-stripped.jar"/> + <lib dir="${build.dir}/lib"> + <exclude name="fastutil*.jar"/> + <!-- jars bundled into "bigdata-plus" by autojar. + <exclude name="colt*.jar"/> + <exclude name="dsi-util*.jar"/> + <exclude name="lgpl-utils*.jar"/> + <exclude name="bigdata*.jar"/>--> + <!-- jars which are not currently used. --> + <exclude name="2p-*.jar"/> + <!-- test suite stuff is not needed. --> + <exclude name="junit*.jar"/> + <exclude name="sesame*testsuite*.jar"/> + <!-- osgi stuff is not needed. --> + <exclude name="bnd*.jar"/> + <!-- jetty / servlet / jsp jars not required for the WAR. --> + <exclude name="jetty*.jar"/> <exclude name="servlet-api*.jar"/> - <!-- zookeeper only used in scale-out. 
--> - <exclude name="apache/zookeeper*.jar"/> - <!-- jini only used in scale-out. --> - <exclude name="jini/**/*.jar"/> - </lib> - <classes file="${war.dir}/WEB-INF/classes/log4j.properties"/> - </war> - </target> - + <!-- zookeeper only used in scale-out. --> + <exclude name="apache/zookeeper*.jar"/> + <!-- jini only used in scale-out. --> + <exclude name="jini/**/*.jar"/> + </lib> + <classes file="${war.dir}/WEB-INF/classes/log4j.properties"/> + </war> + </target> + <target name="banner" depends="jar" description="Displays the banner (verifies runtime classpath)."> <java classname="com.bigdata.Banner" failonerror="true" fork="false" logerror="true"> - <classpath refid="runtime.classpath" /> + <classpath refid="runtime.classpath" /> </java> </target> @@ -650,10 +636,10 @@ <mkdir dir="${LAS}" /> <!-- NAS/LAS directories must be read/write for the group. --> <chmod perm="ug+rw,o-rw"> - <fileset dir="${NAS}" /> + <fileset dir="${NAS}" /> </chmod> <chmod perm="ug+rw,o-rw"> - <fileset dir="${LAS}" /> + <fileset dir="${LAS}" /> </chmod> <!-- create subdirectories of NAS - should inherit permissions. --> <mkdir dir="${install.config.dir}" /> @@ -664,118 +650,118 @@ <mkdir dir="${install.dist.dir}" /> <!-- install configuration files. --> <copy toDir="${install.config.dir}"> - <fileset dir="${bigdata.dir}/src/resources/config"> - </fileset> + <fileset dir="${bigdata.dir}/src/resources/config"> + </fileset> </copy> <!-- install documentation. --> <copy toDir="${install.doc.dir}"> - <!-- javadoc. --> - <fileset dir="${build.dir}/docs" /> - <!-- common files from the root of the archive. --> - <!-- @todo cleanup LEGAL into one directory off the root in the src tree? --> - <fileset dir="${bigdata.dir}"> - <include name="LICENSE.txt" /> - <include name="overview.html" /> - <include name="README-JINI" /> - <include name="bigdata/LEGAL/*" /> - <include name="bigdata-jini/LEGAL/*" /> + <!-- javadoc. --> + <fileset dir="${build.dir}/docs" /> + <!-- common files from the root of the archive. --> + <!-- @todo cleanup LEGAL into one directory off the root in the src tree? --> + <fileset dir="${bigdata.dir}"> + <include name="LICENSE.txt" /> + <include name="overview.html" /> + <include name="README-JINI" /> + <include name="bigdata/LEGAL/*" /> + <include name="bigdata-jini/LEGAL/*" /> <include name="bigdata-rdf/LEGAL/*" /> - <include name="bigdata-sails/LEGAL/*" /> + <include name="bigdata-sails/LEGAL/*" /> <include name="bigdata-gom/LEGAL/*" /> - </fileset> + </fileset> </copy> <!-- install JARs. --> <copy toDir="${install.lib.dir}"> - <fileset dir="${build.dir}/lib" /> - <fileset file="${build.dir}/${version}.jar" /> + <fileset dir="${build.dir}/lib" /> + <fileset file="${build.dir}/${version}.jar" /> </copy> <!-- install scripts. --> <copy toDir="${install.bin.dir}"> - <fileset dir="src/resources/scripts"> - </fileset> + <fileset dir="src/resources/scripts"> + </fileset> </copy> <!-- parameter substitution. 
--> <property name="myclasspath" refid="install.classpath" /> <replace dir="${install.bin.dir}" summary="true"> - <replacefilter token="@FED@" value="${FED}" /> - <replacefilter token="@NAS@" value="${NAS}" /> - <replacefilter token="@LAS@" value="${LAS}" /> - <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> - <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> - <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> - <replacefilter token="@REPORT_ALL@" value="${REPORT_ALL}" /> - <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> - <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> - <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> - <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> - <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> - <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> - <replacefilter token="@INSTALL_USER@" value="${install.user}" /> - <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> - <replacefilter token="@LOCK_CMD@" value="${LOCK_CMD}" /> - <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> - <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> - <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> - <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> - <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}" /> - <replacefilter token="@LOG4J_DATE_PATTERN@" value="${LOG4J_DATE_PATTERN}" /> - <replacefilter token="@LOG4J_CONFIG@" value="${log4j.config}" /> - <replacefilter token="@LOGGING_CONFIG@" value="${logging.config}" /> - <replacefilter token="@ERROR_LOG@" value="${errorLog}" /> - <replacefilter token="@DETAIL_LOG@" value="${detailLog}" /> - <replacefilter token="@EVENT_LOG@" value="${eventLog}" /> - <replacefilter token="@RULE_LOG@" value="${ruleLog}" /> - <replacefilter token="@STATE_LOG@" value="${stateLog}" /> - <replacefilter token="@STATE_FILE@" value="${stateFile}" /> - <replacefilter token="@FORCE_KILL_ALL@" value="${forceKillAll}" /> - <replacefilter token="@NTP_MASTER@" value="${NTP_MASTER}" /> - <replacefilter token="@NTP_NETWORK@" value="${NTP_NETWORK}" /> - <replacefilter token="@NTP_NETMASK@" value="${NTP_NETMASK}" /> - <replacefilter token="@CLASSPATH@" value="${myclasspath}" /> + <replacefilter token="@FED@" value="${FED}" /> + <replacefilter token="@NAS@" value="${NAS}" /> + <replacefilter token="@LAS@" value="${LAS}" /> + <replacefilter token="@JAVA_HOME@" value="${JAVA_HOME}" /> + <replacefilter token="@JINI_CLASS_SERVER_PORT@" value="${JINI_CLASS_SERVER_PORT}" /> + <replacefilter token="@LOAD_BALANCER_PORT@" value="${LOAD_BALANCER_PORT}" /> + <replacefilter token="@REPORT_ALL@" value="${REPORT_ALL}" /> + <replacefilter token="@SYSSTAT_HOME@" value="${SYSSTAT_HOME}" /> + <replacefilter token="@USE_NIO@" value="${USE_NIO}" /> + <replacefilter token="@BIN_DIR@" value="${install.bin.dir}" /> + <replacefilter token="@LIB_DIR@" value="${install.lib.dir}" /> + <replacefilter token="@LOG_DIR@" value="${install.log.dir}" /> + <replacefilter token="@CONFIG_DIR@" value="${install.config.dir}" /> + <replacefilter token="@INSTALL_USER@" value="${install.user}" /> + <replacefilter token="@INSTALL_GROUP@" value="${install.group}" /> + <replacefilter 
token="@LOCK_CMD@" value="${LOCK_CMD}" /> + <replacefilter token="@LOCK_FILE@" value="${LOCK_FILE}" /> + <replacefilter token="@BIGDATA_CONFIG@" value="${bigdata.config}" /> + <replacefilter token="@JINI_CONFIG@" value="${jini.config}" /> + <replacefilter token="@POLICY_FILE@" value="${policyFile}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_HOST@" value="${LOG4J_SOCKET_LOGGER_HOST}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_PORT@" value="${LOG4J_SOCKET_LOGGER_PORT}" /> + <replacefilter token="@LOG4J_SOCKET_LOGGER_CONFIG@" value="${log4jServer.config}... [truncated message content] |
From: <tho...@us...> - 2014-05-13 16:15:39
Revision: 8294 http://sourceforge.net/p/bigdata/code/8294
Author: thompsonbry
Date: 2014-05-13 16:15:33 +0000 (Tue, 13 May 2014)

Log Message:
-----------
Commit includes fixes for #920 (content-negotiation) and #624 (HA Load Balancer). I have run through the NSS, AST evaluation, and QUADS mode test suites and everything is green. The TestAll_LBS test suite is also green (HA).

- CONNEG was broken in previous releases and would return the available Content-Type corresponding to the least desired MIME type in the Accept header. See #920. Changes to ConnegScore, ConnegUtil, TestConneg. (A sketch of the corrected selection flow follows below.)

- RemoteRepository: A bug was identified where the openrdf binary RDF interchange type could not be used because a non-null charset would cause a Reader to be allocated rather than an InputStream within the BackgroundGraphResult. Historically, due to #920, this interchange type was not preferred and hence this code path was not tested. The fix was to use the default charset for the format associated with the Content-Type of the response unless overridden by an explicit charset in the encoding. (See the charset-resolution sketch below.)

- Added a new LBS policy (CountersLBSPolicy) based on the /bigdata/counters servlet. This policy is more chatty than the GangliaLBSPolicy, but it can be used in environments that do not support multicast and can be secured using standard techniques for httpd. The GangliaLBSPolicy was heavily refactored to create an abstract base class that is now shared by both the CountersLBSPolicy and the GangliaLBSPolicy. Added documentation to web.xml and the HALoadBalancer page of the wiki. See #624.

- Released a new bigdata-ganglia.jar (v1.0.4). This release permits the Comparator to be null, which is useful since we want to order the hosts based on our IHostScoringRule rather than a simple ganglia metric comparison.

- AbstractStatisticsCollector: Added @Override tags and a FIXME on getCounters().

- CounterSet: private and final attributes. Suppressed warnings for some unchecked conversions and raw types. @Override annotations.

- ICounterSelector: expanded the interface slightly to allow optional filtering for HistoryInstruments (previously implicit and mandatory). This was necessary in order to support XML rendering of /bigdata/counters.

- CounterSetFormat: Added to support CONNEG for the different kinds of counter set interchange (text/plain, text/html, application/xml). This was in support of the new CountersLBSPolicy.

- IOStatCollector, VMStatCollector: Fixed some bugs in the OSX platform metrics collectors, mostly around data races.

- BigdataSailRemoteRepositoryConnection: added link to #914 (Set timeout on remote query). I have not worked on this ticket yet, but these comments mark the integration points. The other integration point is BigdataRDFContext.newQuery(), which is also linked to the ticket in this commit.

- CountersServlet: modified to support CONNEG.

- ConnectOptions: added toString(); cleanup.

- jetty.xml: refactored per guidance from webtide.

- web.xml: comments on the CountersLBSPolicy.
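Below is a minimal sketch of the corrected content-negotiation flow against the new CounterSetFormat class. Only CounterSetFormat.forMIMEType(...) and the TEXT/XML/HTML constants come from this commit; the ConnegSketch class, the chooseFormat helper, and the naive left-to-right scan of the Accept header are illustrative assumptions (the real ConnegScore/ConnegUtil logic also ranks alternatives by their q-values, which is exactly what #920 fixed).

    import com.bigdata.counters.format.CounterSetFormat;

    public class ConnegSketch {

        /**
         * Resolve a preferred counter-set interchange format for an Accept
         * header, falling back to text/plain when nothing is recognized.
         */
        static CounterSetFormat chooseFormat(final String acceptHeader) {

            if (acceptHeader != null) {

                for (String spec : acceptHeader.split(",")) {

                    // Strip parameters, e.g. "text/html;q=0.8" => "text/html".
                    final String mimeType = spec.split(";")[0].trim();

                    final CounterSetFormat format = CounterSetFormat
                            .forMIMEType(mimeType);

                    if (format != null)
                        return format;

                }

            }

            // Default interchange format when nothing matches.
            return CounterSetFormat.TEXT;

        }

    }

For example, chooseFormat("application/xml;q=0.9,text/html") resolves to CounterSetFormat.XML under this naive scan, whereas the q-value-aware CONNEG in this commit would prefer text/html (implicit q=1.0 beats q=0.9).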
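The RemoteRepository fix reduces to a small charset-resolution rule, sketched here against Sesame's RDFFormat (the CharsetRuleSketch class and resolveCharset helper are hypothetical, not code from this commit): prefer an explicit charset from the response's Content-Type, otherwise fall back to the format's declared default, and treat a null charset as the signal to consume the body as an InputStream rather than wrapping it in a Reader.

    import java.nio.charset.Charset;

    import org.openrdf.rio.RDFFormat;

    public class CharsetRuleSketch {

        /**
         * Prefer an explicit charset from the Content-Type header; otherwise
         * use the format's default. A null result means the format is binary
         * (e.g., the openrdf binary interchange format) and the response must
         * be read as an InputStream, never wrapped in a Reader.
         */
        static Charset resolveCharset(final RDFFormat format,
                final String explicitCharsetOrNull) {

            if (explicitCharsetOrNull != null)
                return Charset.forName(explicitCharsetOrNull);

            return format.getCharset(); // null for binary formats.

        }

    }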
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/.classpath
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/VMStatCollector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetBTreeSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/ICounterSelector.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/URLQueryModel.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/TextRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XHTMLRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XMLRenderer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/build.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-ganglia/src/java/com/bigdata/ganglia/GangliaService.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll_LBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/properties/PropertiesFormat.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHALoadBalancerPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/ServiceScore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/NOPLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/RoundRobinLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/DefaultHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/LoadOneHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestConneg.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestProtocolAll.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml
branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
branches/BIGDATA_RELEASE_1_3_0/build.properties
branches/BIGDATA_RELEASE_1_3_0/pom.xml

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3LoadBalancer_CountersLBS.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostMetrics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/HostTable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHostMetrics.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/IHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/NOPHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/CounterSetHostMetricsWrapper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/CountersLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/counters/DefaultHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/GangliaHostMetricWrapper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/TestAbstractHostLBSPolicy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/lbs/TestAll.java

Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/HostTable.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/IHostScoringRule.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/policy/ganglia/NOPHostScoringRule.java

Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath
===================================================================
---
branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-13 16:15:33 UTC (rev 8294) @@ -76,7 +76,7 @@ <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.10"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.3.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar 2014-05-13 16:15:33 UTC (rev 8294) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/bigdata-ganglia-1.0.4.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -137,6 +137,7 @@ * The interval in seconds at which the counter values are read from the * host platform. */ + @Override public int getInterval() { return interval; @@ -225,8 +226,15 @@ * <p> * Note: Subclasses MUST extend this method to initialize their own * counters. + * + * TODO Why does this use the older <code>synchronized</code> pattern with a + * shared {@link #countersRoot} object rather than returning a new object + * per request? Check assumptions in the scale-out and local journal code + * bases for this. 
*/ - synchronized public CounterSet getCounters() { + @Override + synchronized + public CounterSet getCounters() { if (countersRoot == null) { @@ -319,6 +327,7 @@ serviceRoot.addCounter(IProcessCounters.Memory_runtimeFreeMemory, new Instrument<Long>() { + @Override public void sample() { setValue(Runtime.getRuntime().freeMemory()); } @@ -326,6 +335,7 @@ serviceRoot.addCounter(IProcessCounters.Memory_runtimeTotalMemory, new Instrument<Long>() { + @Override public void sample() { setValue(Runtime.getRuntime().totalMemory()); } @@ -599,6 +609,7 @@ * Start collecting host performance data -- must be extended by the * concrete subclass. */ + @Override public void start() { if (log.isInfoEnabled()) @@ -612,6 +623,7 @@ * Stop collecting host performance data -- must be extended by the concrete * subclass. */ + @Override public void stop() { if (log.isInfoEnabled()) @@ -634,6 +646,7 @@ final Thread t = new Thread() { + @Override public void run() { AbstractStatisticsCollector.this.stop(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/CounterSet.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -87,7 +87,7 @@ */ public class CounterSet extends AbstractCounterSet implements ICounterSet { - static protected final Logger log = Logger.getLogger(CounterSet.class); + static private final Logger log = Logger.getLogger(CounterSet.class); // private String pathx; private final Map<String,ICounterNode> children = new ConcurrentHashMap<String,ICounterNode>(); @@ -107,7 +107,7 @@ * @param name * The name of the child. */ - private CounterSet(String name,CounterSet parent) { + private CounterSet(final String name, final CounterSet parent) { super(name,parent); @@ -159,6 +159,9 @@ // // } + /** + * Return <code>true</code> iff there are no children. + */ public boolean isLeaf() { return children.isEmpty(); @@ -216,7 +219,6 @@ } - @SuppressWarnings("unchecked") private void attach2(final ICounterNode src, final boolean replace) { if (src == null) @@ -286,7 +288,7 @@ } else { - ((Counter)src).parent = this; + ((Counter<?>)src).parent = this; } @@ -311,7 +313,8 @@ * @return The node -or- <code>null</code> if there is no node with that * path. */ - synchronized public ICounterNode detach(String path) { + @SuppressWarnings({ "rawtypes", "unchecked" }) + synchronized public ICounterNode detach(final String path) { final ICounterNode node = getPath(path); @@ -347,7 +350,7 @@ * @todo optimize for patterns that are anchored by filtering the child * {@link ICounterSet}. 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public Iterator<ICounter> counterIterator(final Pattern filter) { final IStriterator src = new Striterator(directChildIterator( @@ -391,7 +394,7 @@ * * @return */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public Iterator<ICounterNode> getNodes(final Pattern filter) { IStriterator src = ((IStriterator) postOrderIterator()) @@ -414,7 +417,8 @@ } - @SuppressWarnings("unchecked") + @Override + @SuppressWarnings({ "unchecked", "rawtypes" }) public Iterator<ICounter> getCounters(final Pattern filter) { IStriterator src = ((IStriterator) postOrderIterator()) @@ -450,8 +454,9 @@ * When <code>null</code> all directly attached children * (counters and counter sets) are visited. */ - public Iterator directChildIterator(boolean sorted, - Class<? extends ICounterNode> type) { + @SuppressWarnings("rawtypes") + public Iterator directChildIterator(final boolean sorted, + final Class<? extends ICounterNode> type) { /* * Note: In order to avoid concurrent modification problems under @@ -514,7 +519,7 @@ * child with a post-order traversal of its children and finally visits this * node itself. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "rawtypes", "unchecked" }) public Iterator postOrderIterator() { /* @@ -531,6 +536,7 @@ * child with a pre-order traversal of its children and finally visits this * node itself. */ + @SuppressWarnings({ "rawtypes", "unchecked" }) public Iterator preOrderIterator() { /* @@ -562,7 +568,9 @@ /* * Expand each child in turn. */ - protected Iterator expand(Object childObj) { + @Override + @SuppressWarnings("rawtypes") + protected Iterator expand(final Object childObj) { /* * A child of this node. @@ -603,7 +611,9 @@ /* * Expand each child in turn. */ - protected Iterator expand(Object childObj) { + @Override + @SuppressWarnings("rawtypes") + protected Iterator expand(final Object childObj) { /* * A child of this node. @@ -624,7 +634,8 @@ } - public ICounterNode getChild(String name) { + @Override + public ICounterNode getChild(final String name) { if (name == null) throw new IllegalArgumentException(); @@ -642,6 +653,7 @@ * * @return The {@link CounterSet} described by the path. */ + @Override synchronized public CounterSet makePath(String path) { if (path == null) { @@ -740,6 +752,7 @@ * The object that is used to take the measurements from which * the counter's value will be determined. 
*/ + @SuppressWarnings("rawtypes") synchronized public ICounter addCounter(final String path, final IInstrument instrument) { @@ -767,7 +780,7 @@ } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) private ICounter addCounter2(final String name, final IInstrument instrument) { if (name == null) @@ -831,12 +844,14 @@ * * @throws IOException */ + @Override public void asXML(Writer w, Pattern filter) throws IOException { XMLUtility.INSTANCE.writeXML(this, w, filter); } + @Override public void readXML(final InputStream is, final IInstrumentFactory instrumentFactory, final Pattern filter) throws IOException, ParserConfigurationException, SAXException { Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/format/CounterSetFormat.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -0,0 +1,214 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* +Portions of this code are: + +Copyright Aduna (http://www.aduna-software.com/) � 2001-2007 + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +/* + * Created on Jul 25, 2012 + */ +package com.bigdata.counters.format; + +import info.aduna.lang.FileFormat; + +import java.nio.charset.Charset; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.CopyOnWriteArraySet; + +import com.bigdata.counters.ICounterSet; + +/** + * Formats for {@link ICounterSet}s. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class CounterSetFormat extends FileFormat implements Iterable<CounterSetFormat> { + + /** + * All known/registered formats for this class. + */ + private static final CopyOnWriteArraySet<CounterSetFormat> formats = new CopyOnWriteArraySet<CounterSetFormat>(); + + /** + * A thread-safe iterator that will visit all known formats (declared by + * {@link Iterable}). + */ + @Override + public Iterator<CounterSetFormat> iterator() { + + return formats.iterator(); + + } + + /** + * Alternative static method signature. + */ + static public Iterator<CounterSetFormat> getFormats() { + + return formats.iterator(); + + } + + /** + * Text properties file using <code>text/plain</code> and + * <code>UTF-8</code>. + */ + public static final CounterSetFormat TEXT = new CounterSetFormat(// + "text/plain",// + Arrays.asList("text/plain"),// + Charset.forName("UTF-8"), // + Arrays.asList("counterSet")// + ); + + /** + * XML properties file using <code>application/xml</code> and + * <code>UTF-8</code>. + */ + public static final CounterSetFormat XML = new CounterSetFormat(// + "application/xml",// + Arrays.asList("application/xml"),// + Charset.forName("UTF-8"),// charset + Arrays.asList("xml")// known-file-extensions + ); + + /** + * XML properties file using <code>text/html</code> and <code>UTF-8</code>. + */ + public static final CounterSetFormat HTML = new CounterSetFormat(// + "text/html",// + Arrays.asList("text/html"),// + Charset.forName("UTF-8"),// charset + Arrays.asList("html")// known-file-extensions + ); + + /** + * Registers the specified format. + */ + public static void register(final CounterSetFormat format) { + + formats.add(format); + + } + + static { + + register(HTML); + register(TEXT); + register(XML); + + } + + /** + * Creates a new RDFFormat object. + * + * @param name + * The name of the RDF file format, e.g. "RDF/XML". + * @param mimeTypes + * The MIME types of the RDF file format, e.g. + * <tt>application/rdf+xml</tt> for the RDF/XML file format. + * The first item in the list is interpreted as the default + * MIME type for the format. + * @param charset + * The default character encoding of the RDF file format. + * Specify <tt>null</tt> if not applicable. + * @param fileExtensions + * The RDF format's file extensions, e.g. <tt>rdf</tt> for + * RDF/XML files. The first item in the list is interpreted + * as the default file extension for the format. + */ + public CounterSetFormat(final String name, + final Collection<String> mimeTypes, final Charset charset, + final Collection<String> fileExtensions) { + + super(name, mimeTypes, charset, fileExtensions); + + } + + /** + * Tries to determine the appropriate file format based on the a MIME type + * that describes the content type. + * + * @param mimeType + * A MIME type, e.g. "text/html". + * @return An {@link CounterSetFormat} object if the MIME type was + * recognized, or <tt>null</tt> otherwise. 
+ * @see #forMIMEType(String,PropertiesFormat) + * @see #getMIMETypes() + */ + public static CounterSetFormat forMIMEType(final String mimeType) { + + return forMIMEType(mimeType, null); + + } + + /** + * Tries to determine the appropriate file format based on the a MIME type + * that describes the content type. The supplied fallback format will be + * returned when the MIME type was not recognized. + * + * @param mimeType + * A file name. + * @return An {@link CounterSetFormat} that matches the MIME type, or the + * fallback format if the extension was not recognized. + * @see #forMIMEType(String) + * @see #getMIMETypes() + */ + public static CounterSetFormat forMIMEType(String mimeType, + CounterSetFormat fallback) { + + return matchMIMEType(mimeType, formats/* Iterable<FileFormat> */, + fallback); + + } + +} \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/IOStatCollector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -28,11 +28,11 @@ package com.bigdata.counters.osx; -import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; import com.bigdata.counters.AbstractProcessCollector; @@ -48,14 +48,13 @@ import com.bigdata.rawstore.Bytes; /** - * Collects some counters using <code>iostat</code>. Unfortunately, + * Collects some counters using <code>iostat</code> under OSX. Unfortunately, * <code>iostat</code> does not break down the reads and writes and does not * report IO Wait. This information is obviously available from OSX as it is * provided by the ActivityMonitor, but we can not get it from * <code>iostat</code>. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: VMStatCollector.java 4289 2011-03-10 21:22:30Z thompsonbry $ */ public class IOStatCollector extends AbstractProcessCollector implements ICounterHierarchy, IRequiredHostCounters, IHostCounters{ @@ -77,7 +76,7 @@ } - public I(String path) { + public I(final String path) { assert path != null; @@ -85,9 +84,10 @@ } + @Override public long lastModified() { - return lastModified; + return lastModified.get(); } @@ -95,7 +95,8 @@ * @throws UnsupportedOperationException * always. */ - public void setValue(T value, long timestamp) { + @Override + public void setValue(final T value, final long timestamp) { throw new UnsupportedOperationException(); @@ -114,7 +115,7 @@ DI(final String path) { - this(path,1d); + this(path, 1d); } @@ -126,7 +127,7 @@ } - + @Override public Double getValue() { final Double value = (Double) vals.get(path); @@ -146,14 +147,14 @@ /** * Map containing the current values for the configured counters. The keys * are paths into the {@link CounterSet}. The values are the data most - * recently read from <code>vmstat</code>. + * recently read from <code>iostat</code>. */ - final private Map<String, Object> vals = new HashMap<String, Object>(); + final private Map<String, Object> vals = new ConcurrentHashMap<String, Object>(); /** * The timestamp associated with the most recently collected values. 
*/ - private long lastModified = System.currentTimeMillis(); + private final AtomicLong lastModified = new AtomicLong(System.currentTimeMillis()); /** * The {@link Pattern} used to split apart the rows read from @@ -178,7 +179,8 @@ this.cpuStats = cpuStats; } - + + @Override public List<String> getCommand() { final List<String> command = new LinkedList<String>(); @@ -203,14 +205,13 @@ } - /** - * Declares the counters that we will collect - */ + @Override public CounterSet getCounters() { final CounterSet root = new CounterSet(); - inst = new LinkedList<I>(); + @SuppressWarnings("rawtypes") + final List<I> inst = new LinkedList<I>(); /* * Note: Counters are all declared as Double to facilitate aggregation. @@ -249,24 +250,22 @@ inst.add(new DI(IHostCounters.CPU_PercentUserTime, .01d)); // Note: column sy inst.add(new DI(IHostCounters.CPU_PercentSystemTime, .01d)); -// // Note: IO Wait is NOT reported by vmstat. +// // Note: IO Wait is NOT reported by iostat. // inst.add(new DI(IHostCounters.CPU_PercentIOWait, .01d)); } - for (Iterator<I> itr = inst.iterator(); itr.hasNext();) { + for (@SuppressWarnings("rawtypes") I i : inst) { - final I i = itr.next(); + root.addCounter(i.getPath(), i); - root.addCounter(i.getPath(), i); + } - } - return root; } - private List<I> inst = null; + @Override public AbstractProcessReader getProcessReader() { return new IOStatReader(); @@ -300,6 +299,7 @@ */ private class IOStatReader extends ProcessReaderHelper { + @Override protected ActiveProcess getActiveProcess() { if (activeProcess == null) @@ -427,7 +427,7 @@ try { // timestamp - lastModified = System.currentTimeMillis(); + lastModified.set(System.currentTimeMillis()); final String[] fields = pattern .split(data.trim(), 0/* limit */); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/VMStatCollector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/VMStatCollector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/osx/VMStatCollector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -28,11 +28,11 @@ package com.bigdata.counters.osx; -import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; import com.bigdata.counters.AbstractProcessCollector; @@ -72,17 +72,19 @@ } - public I(String path) { - - assert path != null; - + public I(final String path) { + + if (path == null) + throw new IllegalArgumentException(); + this.path = path; } + @Override public long lastModified() { - return lastModified; + return lastModified.get(); } @@ -90,6 +92,7 @@ * @throws UnsupportedOperationException * always. */ + @Override public void setValue(T value, long timestamp) { throw new UnsupportedOperationException(); @@ -108,20 +111,20 @@ protected final double scale; DI(final String path) { - - this(path,1d); + this(path, 1d); + } DI(final String path, final double scale) { - - super( path ); - + + super(path); + this.scale = scale; - + } - - + + @Override public Double getValue() { final Double value = (Double) vals.get(path); @@ -143,12 +146,13 @@ * are paths into the {@link CounterSet}. The values are the data most * recently read from <code>vmstat</code>. 
*/ - final private Map<String, Object> vals = new HashMap<String, Object>(); - + private final Map<String, Object> vals = new ConcurrentHashMap<String, Object>(); + /** * The timestamp associated with the most recently collected values. */ - private long lastModified = System.currentTimeMillis(); + private final AtomicLong lastModified = new AtomicLong( + System.currentTimeMillis()); /** * The {@link Pattern} used to split apart the rows read from @@ -166,7 +170,8 @@ super(interval); } - + + @Override public List<String> getCommand() { final List<String> command = new LinkedList<String>(); @@ -180,14 +185,13 @@ } - /** - * Declares the counters that we will collect - */ + @Override public CounterSet getCounters() { final CounterSet root = new CounterSet(); - inst = new LinkedList<I>(); + @SuppressWarnings("rawtypes") + final List<I> inst = new LinkedList<I>(); /* * Note: Counters are all declared as Double to facilitate aggregation. @@ -209,19 +213,17 @@ */ inst.add(new DI(IHostCounters.Memory_Bytes_Free)); - for (Iterator<I> itr = inst.iterator(); itr.hasNext();) { + for (@SuppressWarnings("rawtypes") I i : inst) { - final I i = itr.next(); + root.addCounter(i.getPath(), i); - root.addCounter(i.getPath(), i); + } - } - - return root; + return root; } - private List<I> inst = null; + @Override public AbstractProcessReader getProcessReader() { return new VMStatReader(); @@ -249,6 +251,7 @@ */ private class VMStatReader extends ProcessReaderHelper { + @Override protected ActiveProcess getActiveProcess() { if (activeProcess == null) @@ -357,7 +360,7 @@ try { // timestamp - lastModified = System.currentTimeMillis(); + lastModified.set(System.currentTimeMillis()); final String[] fields = pattern.split(data.trim(), 0/* limit */); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetBTreeSelector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetBTreeSelector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetBTreeSelector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -32,8 +32,6 @@ import java.util.Vector; import java.util.regex.Pattern; -import org.apache.log4j.Logger; - import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounter; import com.bigdata.counters.PeriodEnum; @@ -43,11 +41,10 @@ * Reads the relevant performance counter data from the store. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class CounterSetBTreeSelector implements ICounterSelector { - protected static final Logger log = Logger.getLogger(CounterSetBTreeSelector.class); +// private static final Logger log = Logger.getLogger(CounterSetBTreeSelector.class); private final CounterSetBTree btree; @@ -65,9 +62,12 @@ } + @Override + @SuppressWarnings("rawtypes") public ICounter[] selectCounters(final int depth, final Pattern pattern, - final long fromTime, final long toTime, final PeriodEnum period) { - + final long fromTime, final long toTime, final PeriodEnum period, + final boolean historyRequiredIsIgnored) { + final CounterSet counterSet = btree.rangeIterator(fromTime, toTime, period.toTimeUnit(), pattern, depth); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetSelector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetSelector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/CounterSetSelector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -43,11 +43,10 @@ * Reads counters from a {@link CounterSet}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class CounterSetSelector implements ICounterSelector { - protected static final Logger log = Logger.getLogger(CounterSetSelector.class); + private static final Logger log = Logger.getLogger(CounterSetSelector.class); private final CounterSet counterSet; @@ -70,10 +69,13 @@ * Note: logic was modified to no longer consider the relative depth, only * the absolute depth. * - * FIXME does not use [fromTime, toTime, or period]. + * FIXME does not use [fromTime, toTime, or period] (or model.path) */ + @Override + @SuppressWarnings("rawtypes") public ICounter[] selectCounters(final int depth, final Pattern pattern, - final long fromTime, final long toTime, final PeriodEnum period) { + final long fromTime, final long toTime, final PeriodEnum period, + final boolean historyRequired) { // // depth of the hierarchy at the point where we are starting. // final int ourDepth = counterSet.getDepth(); @@ -94,11 +96,15 @@ nscanned++; - if(!(c.getInstrument() instanceof HistoryInstrument)) { + if (log.isDebugEnabled()) + log.debug("considering: " + c.getPath()); + + if (historyRequired + && !(c.getInstrument() instanceof HistoryInstrument)) { // prune non-history counters. 
if (log.isDebugEnabled()) - log.debug("skipping: " + c.getPath()); + log.debug("skipping (history): " + c.getPath()); nskipped++; @@ -106,9 +112,6 @@ } - if (log.isDebugEnabled()) - log.debug("considering: " + c.getPath()); - if (depth != 0) { final int counterDepth = c.getDepth(); @@ -117,7 +120,7 @@ // prune by depth if (log.isDebugEnabled()) - log.debug("skipping: " + c.getPath()); + log.debug("skipping (depth): " + c.getPath()); nskipped++; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/ICounterSelector.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/ICounterSelector.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/ICounterSelector.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -30,6 +30,7 @@ import java.util.regex.Pattern; +import com.bigdata.counters.HistoryInstrument; import com.bigdata.counters.ICounter; import com.bigdata.counters.PeriodEnum; @@ -37,7 +38,6 @@ * Interface for selecting counters. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface ICounterSelector { @@ -57,10 +57,16 @@ * counter timestamps which will be selected. * @param period * The unit of aggregation for the selected performance counters. - * + * @param historyRequired + * When <code>true</code> the {@link ICounter} will be ignored + * unless it is associated with a {@link HistoryInstrument}. + * (This used to be the default behavior, but there are use cases + * where we do not need to have history.) + * * @return The selected performance counters. */ + @SuppressWarnings("rawtypes") ICounter[] selectCounters(int depth, Pattern pattern, long fromTime, - long toTime, PeriodEnum period); + long toTime, PeriodEnum period, boolean historyRequired); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/URLQueryModel.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/URLQueryModel.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/query/URLQueryModel.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -52,7 +52,6 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.log4j.Logger; import com.bigdata.counters.History; @@ -343,6 +342,70 @@ */ final public File file; + @Override + public String toString() { + + final StringBuilder sb = new StringBuilder(); + + sb.append(URLQueryModel.class.getName()); + + sb.append("{uri=" + uri); + + sb.append(",params=" + params); + + sb.append(",path=" + path); + + sb.append(",depth=" + depth); + + sb.append(",reportType=" + reportType); + + sb.append(",mimeType=" + mimeType); + + sb.append(",pattern=" + pattern); + + sb.append(",category=" + + (category == null ? 
"N/A" : Arrays.toString(category))); + + sb.append(",period=" + period); + + sb.append(",[fromTime=" + fromTime); + + sb.append(",toTime=" + toTime + "]"); + + sb.append(",flot=" + flot); + + if (eventOrderBy != null) { + sb.append(",eventOrderBy=["); + boolean first = true; + for (Field f : eventOrderBy) { + if (!first) + sb.append(","); + sb.append(f.getName()); + first = false; + } + sb.append("]"); + } + + if (eventFilters != null && !eventFilters.isEmpty()) { + sb.append(",eventFilters{"); + boolean first = true; + for (Map.Entry<Field, Pattern> e : eventFilters.entrySet()) { + if (!first) + sb.append(","); + sb.append(e.getKey().getName()); + sb.append("="); + sb.append(e.getValue()); + first = false; + } + sb.append("}"); + } + + sb.append("}"); + + return sb.toString(); + + } + /** * Factory for {@link NanoHTTPD} integration. * @@ -396,7 +459,10 @@ * * @param service * The service object IFF one was specified when - * {@link CounterSetHTTPD} was started. + * {@link CounterSetHTTPD} was started. If this implements the + * {@link IEventReportingService} interface, then events can also + * be requested. + * * @param req * The request. * @param resp @@ -412,7 +478,7 @@ final LinkedHashMap<String, Vector<String>> params = new LinkedHashMap<String, Vector<String>>(); - @SuppressWarnings("unchecked") +// @SuppressWarnings("unchecked") final Enumeration<String> enames = req.getParameterNames(); while (enames.hasMoreElements()) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/TextRenderer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/TextRenderer.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/TextRenderer.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -48,12 +48,12 @@ /** * Describes the state of the controller. */ - public final URLQueryModel model; + private final URLQueryModel model; /** * Selects the counters to be rendered. 
*/ - final ICounterSelector counterSelector; + private final ICounterSelector counterSelector; /** * @param model @@ -77,7 +77,8 @@ } - public void render(Writer w) throws IOException { + @Override + public void render(final Writer w) throws IOException { final IRenderer renderer; @@ -85,8 +86,10 @@ case correlated: { - final ICounter[] counters = counterSelector.selectCounters(model.depth, - model.pattern, model.fromTime, model.toTime, model.period); + @SuppressWarnings("rawtypes") + final ICounter[] counters = counterSelector.selectCounters( + model.depth, model.pattern, model.fromTime, model.toTime, + model.period, true/* historyRequired */); final HistoryTable historyTable = new HistoryTable(counters, model.period); @@ -100,9 +103,11 @@ case pivot: { - final ICounter[] counters = counterSelector.selectCounters(model.depth, - model.pattern, model.fromTime, model.toTime, model.period); - + @SuppressWarnings("rawtypes") + final ICounter[] counters = counterSelector.selectCounters( + model.depth, model.pattern, model.fromTime, model.toTime, + model.period, true/* historyRequired */); + final HistoryTable historyTable = new HistoryTable(counters, model.period); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XHTMLRenderer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XHTMLRenderer.java 2014-05-13 12:41:52 UTC (rev 8293) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/counters/render/XHTMLRenderer.java 2014-05-13 16:15:33 UTC (rev 8294) @@ -48,11 +48,10 @@ * name. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class XHTMLRenderer implements IRenderer { - final static protected Logger log = Logger.getLogger(XHTMLRenderer.class); + final static private Logger log = Logger.getLogger(XHTMLRenderer.class); final public static String ps = ICounterSet.pathSeparator; @@ -68,12 +67,12 @@ /** * Describes the state of the controller. */ - public final URLQueryModel model; + private final URLQueryModel model; /** * Selects the counters to be rendered. */ - public final ICounterSelector counterSelector; + private final ICounterSelector counterSelector; /** * @param model @@ -101,6 +100,7 @@ * @param w * @throws IOException */ + @Override public void render(final Writer w) throws IOException { writeXmlDecl(w); @@ -117,7 +117,7 @@ } - protected void writeXmlDecl(Writer w) throws IOException { + protected void writeXmlDecl(final Writer w) throws IOException { w.write("<?xml version=\"1.0\" encoding=\"" + encoding + "\"?>\n"); @@ -128,7 +128,7 @@ * @param w * @throws IOException */ - protected void writeDocType(Writer w) throws IOException { + protected void writeDocType(final Writer w) throws IOException { // if(true) return; @@ -143,7 +143,7 @@ } /** The start <code>html</code> tag. 
*/ - protected void writeHtml(Writer w) throws IOException { + protected void writeHtml(final Writer w) throws IOException { w.write("<html "); @@ -159,7 +159,7 @@ } - protected void writeHead(Writer w) throws IOException { + protected void writeHead(final Writer w) throws IOException { w.write("<head\n>"); @@ -170,13 +170,13 @@ w.write("</head\n>"); } - protected void writeTitle(Writer w) throws IOException { + protected void writeTitle(final Writer w) throws IOException { w.write("<title>bigdata(tm) telemetry : "+cdata(model.path)+"</title\n>"); } - protected void writeScripts(Writer w) throws IOException { + protected void writeScripts(final Writer w) throws IOException { if (model.flot) { @@ -195,7 +195,8 @@ protected void writeBody(final Writer w) throws IOException { w.write("<body\n>"); - + + // Navigate to the node selected by the path. final ICounterNode node = ((CounterSetSelector) counterSelector) .getRoot().getPath(model.path); @@ -221,7 +222,7 @@ if(node instanceof ICounter) { - writeCounter(w, (ICounter) node); + writeCounter(w, (ICounter<?>) node); } else { @@ -241,17 +242,17 @@ writeHistoryTable(w, counterSelector.selectCounters( model.depth, model.pattern, model.fromTime, - model.toTime, model.period), model.period, - model.timestampFormat); + model.toTime, model.period, true/* historyRequired */), + model.period, model.timestampFormat); break; case pivot: - writePivotTable(w, counterSelector.selectCounters( - model.depth, model.pattern, model.fromTime, - model.toTime, model.period)); - + writePivotTable(w, counterSelector.selectCounters(model.depth, + model.pattern, model.fromTime, model.toTime, + model.period, true/* historyRequired */)); + break; case events: @@ -280,7 +281,7 @@ * * @deprecated by refactor inside of a rendering object. */ - protected void writeFullPath(Writer w, String path) + protected void writeFullPath(final Writer w, final String path) throws IOException { writePath(w, path, 0/* root */); @@ -296,8 +297,8 @@ * * @deprecated by refactor inside of a rendering object. */ - protected void writePath(Writer w, String path, int rootDepth) - throws IOException { + protected void writePath(final Writer w, final String path, + final int rootDepth) throws IOException { final String[] a = path.split(ps); @@ -393,7 +394,7 @@ * {@link CounterSet} in a single table (this is the navigational view of * the counter set hierarchy). */ - protected void writeCounterSet(Writer w, final CounterSet counterSet, + protected void writeCounterSet(final Writer w, final CounterSet counterSet, final int depth) throws IOException { // depth of the hierarchy at the point where we are starting. @@ -480,7 +481,7 @@ } else { - final ICounter counter = (ICounter) node; + final ICounter<?> counter = (ICounter<?>) node; /* * write out values for the counter. @@ -502,7 +503,7 @@ * LBS. 
*/ - HistoryInstrument inst = (HistoryInstrument) counter + final HistoryInstrument<?> inst = (HistoryInstrument<?>) counter .getInstrument(); w.write(" <td>" + value(counter,inst.minutes.getAverage()) @@ -553,7 +554,8 @@ * * @throws IOException */ - protected void writeCounter(final Writer w, final ICounter counter) + protected void writeCounter(final Writer w, + @SuppressWarnings("rawtypes") final ICounter counter) throws IOException { if (counter.getInstrument() instanceof HistoryInstrument) { @@ -746,17 +748,16 @@ /** * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class HTMLValueFormatter extends ValueFormatter { - protected final URLQueryModel model; + private final URLQueryModel model; /** * * @param model */ - public HTMLValueFormatter(URLQueryModel model) { + public HTMLValueFormatter(final URLQueryModel model) { super(model); @@ -769,16 +770,19 @@ * for inclusion in a CDATA section (we do both operations together so that * we can format {@link IServiceCounters#LOCAL_HTTPD} as a link anchor. */ - public String value(final ICounter counter, final Object val) { + public String value( + @SuppressWarnings("rawtypes") final ICounter counter, + final Object val) { - return XHTMLRenderer.this.value(counter,val); + return XHTMLRenderer.this.value(counter, val); } /** * A clickable trail of the path from the root. */ - public void writeFullPath(Writer w, String path) + @Override + public void writeFullPath(final Writer w, final String path) throws IOException { writePath(w, path, 0/* root */); @@ -792,8 +796,9 @@ * The path components will be shown beginning at this depth - * ZERO (0) is the root. */ - public void writePath(Writer w, String path, int rootDepth) - throws IOException { + @Override + public void writePath(final Writer w, final String path, + final int rootDepth) throws IOException { final String[] a = path.split(ps); @@ -838,7 +843,8 @@ if(rootDepth!=0 && n==rootDepth) { w.write("<a href=\"" - + model.getRequestURL(new URLQueryParam[] { new URLQueryParam(URLQueryModel.PATH, prefix) }) + "\">"); + + model.getRequestURL(new URLQueryParam[] { new URLQueryParam( + URLQueryModel.PATH, prefix) }) + "\">"); w.write("..."); @@ -851,8 +857,9 @@ w.write(" "); w.write("<a href=\"" - + model.getRequestURL(new URLQueryParam[] { new URLQueryParam(URLQueryModel.PATH, sb - .toString()) }) + "\">"); + + model.getRequestURL(new URLQueryParam[] { new URLQueryParam( + URLQueryModel.PATH, sb.toString()) }) + + "\">"); // current path component. w.write(cdata(name)); @@ -889,9 +896,11 @@ * if any element of <i>a</i> does not use a * {@link HistoryInstrument}. * - * @todo review use of basePeriod - this is {@link URLQueryModel#period}, right? + * @todo review use of basePeriod - this is {@link URLQueryModel#period}, + * right? */ - protected void writeHistoryTable(final Writer w, final ICounter[] a, + protected void writeHistoryTable(final Writer w, + @SuppressWarnings("rawtypes") final ICo... [truncated message content] |
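The CounterSetFormat class added in this revision follows the standard Sesame FileFormat pattern: the TEXT, XML, and HTML constants self-register in a static CopyOnWriteArraySet and are then resolved by MIME type. The following is a minimal sketch of that lookup behavior, assuming only the constants and static methods shown in the diff above; the wrapper class and main() here are illustrative and are not part of the commit.

import com.bigdata.counters.format.CounterSetFormat;

public class CounterSetFormatSketch {

    public static void main(final String[] args) {

        // A registered MIME type resolves to its format (TEXT, XML, and
        // HTML are registered by the static initializer).
        final CounterSetFormat html = CounterSetFormat.forMIMEType("text/html");
        System.out.println(html); // resolves to CounterSetFormat.HTML

        // An unrecognized MIME type resolves to null...
        System.out.println(CounterSetFormat.forMIMEType("text/csv")); // null

        // ...unless a fallback format is supplied.
        System.out.println(CounterSetFormat.forMIMEType("text/csv",
                CounterSetFormat.TEXT/* fallback */)); // CounterSetFormat.TEXT
    }
}
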
From: <mrp...@us...> - 2014-05-13 19:32:16
|
Revision: 8298 http://sourceforge.net/p/bigdata/code/8298 Author: mrpersonick Date: 2014-05-13 19:32:10 +0000 (Tue, 13 May 2014) Log Message: ----------- full blueprints integration commit Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/AbstractTestBigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/BigdataSailNSSWrapper.java branches/BIGDATA_RELEASE_1_3_0/build.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edge.rq 
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edges.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/edgesByProperty.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/vertex.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java branches/BIGDATA_RELEASE_1_3_0/build.xml Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 18:15:26 UTC (rev 8297) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -1,107 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.blueprints; - -import java.util.Arrays; -import java.util.List; - -import org.openrdf.model.Statement; -import org.openrdf.model.URI; -import org.openrdf.model.vocabulary.RDFS; - -import com.tinkerpop.blueprints.Direction; -import com.tinkerpop.blueprints.Edge; -import com.tinkerpop.blueprints.Vertex; - -/** - * Edge implementation that wraps an Edge statement and points to a - * {@link BigdataGraph} instance. - * - * @author mikepersonick - * - */ -public class BigdataEdge extends BigdataElement implements Edge { - - private static final List<String> blacklist = Arrays.asList(new String[] { - "id", "", "label" - }); - - protected final Statement stmt; - - public BigdataEdge(final Statement stmt, final BigdataGraph graph) { - super(stmt.getPredicate(), graph); - - this.stmt = stmt; - } - - @Override - public Object getId() { - return graph.factory.fromEdgeURI(uri); - } - - @Override - public void remove() { - graph.removeEdge(this); - } - - @Override - public String getLabel() { - return (String) graph.getProperty(uri, RDFS.LABEL); - } - - @Override - public Vertex getVertex(final Direction dir) throws IllegalArgumentException { - - if (dir == Direction.BOTH) { - throw new IllegalArgumentException(); - } - - final URI uri = (URI) - (dir == Direction.OUT ? 
stmt.getSubject() : stmt.getObject()); - - final String id = graph.factory.fromVertexURI(uri); - - return graph.getVertex(id); - - } - - @Override - public void setProperty(final String property, final Object val) { - - if (property == null || blacklist.contains(property)) { - throw new IllegalArgumentException(); - } - - super.setProperty(property, val); - - } - - @Override - public String toString() { - final URI s = (URI) stmt.getSubject(); - final URI p = (URI) stmt.getPredicate(); - final URI o = (URI) stmt.getObject(); - return "e["+p.getLocalName()+"]["+s.getLocalName()+"->"+o.getLocalName()+"]"; - } - -} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -0,0 +1,115 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import java.util.Arrays; +import java.util.List; + +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.vocabulary.RDFS; + +import com.tinkerpop.blueprints.Direction; +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Vertex; + +/** + * Edge implementation that wraps an Edge statement and points to a + * {@link BigdataGraph} instance. + * + * @author mikepersonick + * + */ +public class BigdataEdge extends BigdataElement implements Edge { + + private static final List<String> blacklist = Arrays.asList(new String[] { + "id", "", "label" + }); + + protected final Statement stmt; + + public BigdataEdge(final Statement stmt, final BigdataGraph graph) { + super(stmt.getPredicate(), graph); + + this.stmt = stmt; + } + + @Override + public Object getId() { + + return graph.factory.fromEdgeURI(uri); + + } + + @Override + public void remove() { + + graph.removeEdge(this); + + } + + @Override + public String getLabel() { + + return (String) graph.getProperty(uri, RDFS.LABEL); + + } + + @Override + public Vertex getVertex(final Direction dir) throws IllegalArgumentException { + + if (dir == Direction.BOTH) { + throw new IllegalArgumentException(); + } + + final URI uri = (URI) + (dir == Direction.OUT ? 
stmt.getSubject() : stmt.getObject()); + + final String id = graph.factory.fromVertexURI(uri); + + return graph.getVertex(id); + + } + + @Override + public void setProperty(final String prop, final Object val) { + + if (prop == null || blacklist.contains(prop)) { + throw new IllegalArgumentException(); + } + + super.setProperty(prop, val); + + } + + @Override + public String toString() { + + final URI s = (URI) stmt.getSubject(); + final URI p = (URI) stmt.getPredicate(); + final URI o = (URI) stmt.getObject(); + return "e["+p.getLocalName()+"]["+s.getLocalName()+"->"+o.getLocalName()+"]"; + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 18:15:26 UTC (rev 8297) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -1,134 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.blueprints; - -import java.util.Arrays; -import java.util.List; -import java.util.Set; - -import org.openrdf.model.Literal; -import org.openrdf.model.URI; - -import com.tinkerpop.blueprints.Element; - -/** - * Base class for {@link BigdataVertex} and {@link BigdataEdge}. Handles - * property-related methods. 
- * - * @author mikepersonick - * - */ -public abstract class BigdataElement implements Element { - - private static final List<String> blacklist = Arrays.asList(new String[] { - "id", "" - }); - - protected final URI uri; - protected final BigdataGraph graph; - - public BigdataElement(final URI uri, final BigdataGraph graph) { - this.uri = uri; - this.graph = graph; - } - - @Override - @SuppressWarnings("unchecked") - public <T> T getProperty(final String property) { - - final URI p = graph.factory.toPropertyURI(property); - - return (T) graph.getProperty(uri, p); - - } - - @Override - public Set<String> getPropertyKeys() { - - return graph.getPropertyKeys(uri); - - } - - @Override - @SuppressWarnings("unchecked") - public <T> T removeProperty(final String property) { - - final URI p = graph.factory.toPropertyURI(property); - - return (T) graph.removeProperty(uri, p); - - } - - @Override - public void setProperty(final String property, final Object val) { - - if (property == null || blacklist.contains(property)) { - throw new IllegalArgumentException(); - } - - final URI p = graph.factory.toPropertyURI(property); - - final Literal o = graph.factory.toLiteral(val); - - graph.setProperty(uri, p, o); - - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((graph == null) ? 0 : graph.hashCode()); - result = prime * result + ((uri == null) ? 0 : uri.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - BigdataElement other = (BigdataElement) obj; - if (graph == null) { - if (other.graph != null) - return false; - } else if (!graph.equals(other.graph)) - return false; - if (uri == null) { - if (other.uri != null) - return false; - } else if (!uri.equals(other.uri)) - return false; - return true; - } - - @Override - public String toString() { - return uri.toString(); - } - - -} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -0,0 +1,154 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.openrdf.model.URI; + +import com.tinkerpop.blueprints.Element; + +/** + * Base class for {@link BigdataVertex} and {@link BigdataEdge}. 
Handles + * property-related methods. + * + * @author mikepersonick + * + */ +public abstract class BigdataElement implements Element { + + private static final List<String> blacklist = Arrays.asList(new String[] { + "id", "" + }); + + protected final URI uri; + protected final BigdataGraph graph; + + public BigdataElement(final URI uri, final BigdataGraph graph) { + this.uri = uri; + this.graph = graph; + } + + @Override + @SuppressWarnings("unchecked") + public <T> T getProperty(final String property) { + + return (T) graph.getProperty(uri, property); + + } + + @Override + public Set<String> getPropertyKeys() { + + return graph.getPropertyKeys(uri); + + } + + @Override + @SuppressWarnings("unchecked") + public <T> T removeProperty(final String property) { + + return (T) graph.removeProperty(uri, property); + + } + + @Override + public void setProperty(final String prop, final Object val) { + + if (prop == null || blacklist.contains(prop)) { + throw new IllegalArgumentException(); + } + + graph.setProperty(uri, prop, val); + + } + + /** + * Simple extension for multi-valued properties. + */ + public void addProperty(final String prop, final Object val) { + + if (prop == null || blacklist.contains(prop)) { + throw new IllegalArgumentException(); + } + + graph.addProperty(uri, prop, val); + + } + + /** + * Simple extension for multi-valued properties. + */ + @SuppressWarnings("unchecked") + public <T> List<T> getProperties(final String property) { + + return (List<T>) graph.getProperties(uri, property); + + } + + /** + * Generated code. + */ + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((graph == null) ? 0 : graph.hashCode()); + result = prime * result + ((uri == null) ? 0 : uri.hashCode()); + return result; + } + + /** + * Generated code. + */ + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + BigdataElement other = (BigdataElement) obj; + if (graph == null) { + if (other.graph != null) + return false; + } else if (!graph.equals(other.graph)) + return false; + if (uri == null) { + if (other.uri != null) + return false; + } else if (!uri.equals(other.uri)) + return false; + return true; + } + + @Override + public String toString() { + return uri.toString(); + } + + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-13 18:15:26 UTC (rev 8297) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -1,851 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.blueprints; - -import info.aduna.iteration.CloseableIteration; - -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -import org.apache.commons.io.IOUtils; -import org.openrdf.OpenRDFException; -import org.openrdf.model.Literal; -import org.openrdf.model.Statement; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.model.impl.StatementImpl; -import org.openrdf.model.impl.URIImpl; -import org.openrdf.model.vocabulary.RDF; -import org.openrdf.model.vocabulary.RDFS; -import org.openrdf.query.GraphQueryResult; -import org.openrdf.query.QueryLanguage; -import org.openrdf.repository.RepositoryConnection; -import org.openrdf.repository.RepositoryResult; - -import com.bigdata.rdf.store.BD; -import com.tinkerpop.blueprints.Direction; -import com.tinkerpop.blueprints.Edge; -import com.tinkerpop.blueprints.Features; -import com.tinkerpop.blueprints.Graph; -import com.tinkerpop.blueprints.GraphQuery; -import com.tinkerpop.blueprints.Vertex; -import com.tinkerpop.blueprints.util.DefaultGraphQuery; -import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; - -/** - * A base class for a Blueprints wrapper around a bigdata back-end. - * - * @author mikepersonick - * - */ -public abstract class BigdataGraph implements Graph { - - public static final URI VERTEX = new URIImpl(BD.NAMESPACE + "Vertex"); - - public static final URI EDGE = new URIImpl(BD.NAMESPACE + "Edge"); - -// final BigdataSailRepository repo; -// -// transient BigdataSailRepositoryConnection cxn; - - final BlueprintsRDFFactory factory; - -// public BigdataGraph(final BigdataSailRepository repo) { -// this(repo, BigdataRDFFactory.INSTANCE); -// } - - public BigdataGraph(//final BigdataSailRepository repo, - final BlueprintsRDFFactory factory) { -// try { -// this.repo = repo; -// this.cxn = repo.getUnisolatedConnection(); -// this.cxn.setAutoCommit(false); - this.factory = factory; -// } catch (RepositoryException ex) { -// throw new RuntimeException(ex); -// } - } - - public String toString() { - return getClass().getSimpleName().toLowerCase(); - } - - /** - * Post a GraphML file to the remote server. (Bulk-upload operation.) 
- */ - public void loadGraphML(final String file) throws Exception { - GraphMLReader.inputGraph(this, file); - } - - protected abstract RepositoryConnection cxn() throws Exception; - -// public BigdataSailRepositoryConnection getConnection() { -// return this.cxn; -// } -// -// public BlueprintsRDFFactory getFactory() { -// return this.factory; -// } - -// public Value getValue(final URI s, final URI p) { -// -// try { -// -// final RepositoryResult<Statement> result = -// cxn.getStatements(s, p, null, false); -// -// if (result.hasNext()) { -// -// final Value o = result.next().getObject(); -// -// if (result.hasNext()) { -// throw new RuntimeException(s -// + ": more than one value for p: " + p -// + ", did you mean to call getValues()?"); -// } -// -// return o; -// -// } -// -// return null; -// -// } catch (Exception ex) { -// throw new RuntimeException(ex); -// } -// -// } - - public Object getProperty(final URI s, final URI p) { - - try { - - final RepositoryResult<Statement> result = - cxn().getStatements(s, p, null, false); - - if (result.hasNext()) { - - final Value value = result.next().getObject(); - - if (result.hasNext()) { - throw new RuntimeException(s - + ": more than one value for p: " + p - + ", did you mean to call getValues()?"); - } - - if (!(value instanceof Literal)) { - throw new RuntimeException("not a property: " + value); - } - - final Literal lit = (Literal) value; - - return factory.fromLiteral(lit); - - } - - return null; - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - -// public List<Value> getValues(final URI s, final URI p) { -// -// try { -// -// final RepositoryResult<Statement> result = -// cxn().getStatements(s, p, null, false); -// -// final List<Value> values = new LinkedList<Value>(); -// -// while (result.hasNext()) { -// -// final Value o = result.next().getObject(); -// -// values.add(o); -// -// } -// -// return values; -// -// } catch (Exception ex) { -// throw new RuntimeException(ex); -// } -// -// } - - public List<Object> getProperties(final URI s, final URI p) { - - try { - - final RepositoryResult<Statement> result = - cxn().getStatements(s, p, null, false); - - final List<Object> props = new LinkedList<Object>(); - - while (result.hasNext()) { - - final Value value = result.next().getObject(); - - if (!(value instanceof Literal)) { - throw new RuntimeException("not a property: " + value); - } - - final Literal lit = (Literal) value; - - props.add(factory.fromLiteral(lit)); - - } - - return props; - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - public Set<String> getPropertyKeys(final URI s) { - - try { - - final RepositoryResult<Statement> result = - cxn().getStatements(s, null, null, false); - - final Set<String> properties = new LinkedHashSet<String>(); - - while (result.hasNext()) { - - final Statement stmt = result.next(); - - if (!(stmt.getObject() instanceof Literal)) { - continue; - } - - if (stmt.getPredicate().equals(RDFS.LABEL)) { - continue; - } - - final String p = - factory.fromPropertyURI(stmt.getPredicate()); - - properties.add(p); - - } - - return properties; - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - public Object removeProperty(final URI s, final URI p) { - - try { - - final Object oldVal = getProperty(s, p); - - cxn().remove(s, p, null); - - return oldVal; - - } catch (Exception e) { - throw new RuntimeException(e); - } - - } - - public void setProperty(final URI s, final URI p, final Literal o) { - - try { - - cxn().remove(s, p, 
null); - - cxn().add(s, p, o); - - } catch (Exception e) { - throw new RuntimeException(e); - } - - } - - @Override - public Edge addEdge(final Object key, final Vertex from, final Vertex to, - final String label) { - - if (label == null) { - throw new IllegalArgumentException(); - } - - final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); - - final URI edgeURI = factory.toEdgeURI(eid); - - if (key != null) { - - final Edge edge = getEdge(key); - - if (edge != null) { - if (!(edge.getVertex(Direction.OUT).equals(from) && - (edge.getVertex(Direction.OUT).equals(to)))) { - throw new IllegalArgumentException("edge already exists: " + key); - } - } - - } - - try { - -// if (cxn().hasStatement(edgeURI, RDF.TYPE, EDGE, false)) { -// throw new IllegalArgumentException("edge " + eid + " already exists"); -// } - - final URI fromURI = factory.toVertexURI(from.getId().toString()); - final URI toURI = factory.toVertexURI(to.getId().toString()); - - cxn().add(fromURI, edgeURI, toURI); - cxn().add(edgeURI, RDF.TYPE, EDGE); - cxn().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); - - return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Vertex addVertex(final Object key) { - - try { - - final String vid = key != null ? - key.toString() : UUID.randomUUID().toString(); - - final URI uri = factory.toVertexURI(vid); - -// if (cxn().hasStatement(vertexURI, RDF.TYPE, VERTEX, false)) { -// throw new IllegalArgumentException("vertex " + vid + " already exists"); -// } - - cxn().add(uri, RDF.TYPE, VERTEX); - - return new BigdataVertex(uri, this); - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Edge getEdge(final Object key) { - - if (key == null) - throw new IllegalArgumentException(); - - try { - - final URI edge = factory.toEdgeURI(key.toString()); - - final RepositoryResult<Statement> result = - cxn().getStatements(null, edge, null, false); - - if (result.hasNext()) { - - final Statement stmt = result.next(); - - if (result.hasNext()) { - throw new RuntimeException( - "duplicate edge: " + key); - } - - return new BigdataEdge(stmt, this); - - } - - return null; - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Iterable<Edge> getEdges() { - - final URI wild = null; - return getEdges(wild, wild); - - } - - public Iterable<Edge> getEdges(final URI s, final URI o, final String... labels) { - - try { - -// final RepositoryResult<Statement> result = -// cxn().getStatements(s, p, o, false); -// -// return new EdgeIterable(result); - - final StringBuilder sb = new StringBuilder(); - sb.append("construct { ?from ?edge ?to . } where {\n"); - sb.append("?edge rdf:type bd:Edge . ?from ?edge ?to .\n"); - if (labels != null && labels.length > 0) { - if (labels.length == 1) { - sb.append("?edge rdfs:label \"").append(labels[0]).append("\" .\n"); - } else { - sb.append("?edge rdfs:label ?label .\n"); - sb.append("filter(?label in ("); - for (String label : labels) { - sb.append("\""+label+"\", "); - } - sb.setLength(sb.length()-2); - sb.append(")) .\n"); - } - } - sb.append("}"); - - final String queryStr = sb.toString() - .replace("?from", s != null ? "<"+s+">" : "?from") - .replace("?to", o != null ? 
"<"+o+">" : "?to"); - - final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); - - final GraphQueryResult stmts = query.evaluate(); - - return new EdgeIterable(stmts); - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - public Iterable<Vertex> getVertices(final URI s, final URI o, - final String... labels) { - - if (s != null && o != null) { - throw new IllegalArgumentException(); - } - - if (s == null && o == null) { - throw new IllegalArgumentException(); - } - - try { - -// final RepositoryResult<Statement> result = -// cxn().getStatements(s, null, o, false); -// -// return new VertexIterable(result, s == null); - - final StringBuilder sb = new StringBuilder(); - sb.append("construct { ?from ?edge ?to . } where {\n"); - sb.append("?edge rdf:type bd:Edge . ?from ?edge ?to .\n"); - if (labels != null && labels.length > 0) { - if (labels.length == 1) { - sb.append("?edge rdfs:label \"").append(labels[0]).append("\" .\n"); - } else { - sb.append("?edge rdfs:label ?label .\n"); - sb.append("filter(?label in ("); - for (String label : labels) { - sb.append("\""+label+"\", "); - } - sb.setLength(sb.length()-2); - sb.append(")) .\n"); - } - } - sb.append("}"); - - final String queryStr = sb.toString() - .replace("?from", s != null ? "<"+s+">" : "?from") - .replace("?to", o != null ? "<"+o+">" : "?to"); - - final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); - - final GraphQueryResult stmts = query.evaluate(); - - return new VertexIterable(stmts, s == null); - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - public final <T> Iterable<T> fuse(final Iterable<T>... args) { - - return new FusedIterable<T>(args); - } - - - @Override - public Iterable<Edge> getEdges(final String prop, final Object val) { - - final URI p = factory.toPropertyURI(prop); - final Literal o = factory.toLiteral(val); - - try { - - final String queryStr = IOUtils.toString( - getClass().getResourceAsStream("edgesByProperty.rq")) - .replace("?prop", "<"+p+">") - .replace("?val", o.toString()); - - final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); - - final GraphQueryResult stmts = query.evaluate(); - - return new EdgeIterable(stmts); - - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Features getFeatures() { - - return FEATURES; - - } - - @Override - public Vertex getVertex(final Object key) { - - if (key == null) - throw new IllegalArgumentException(); - - final URI uri = factory.toVertexURI(key.toString()); - try { - if (cxn().hasStatement(uri, RDF.TYPE, VERTEX, false)) { - return new BigdataVertex(uri, this); - } - return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Iterable<Vertex> getVertices() { - - try { - final RepositoryResult<Statement> result = - cxn().getStatements(null, RDF.TYPE, VERTEX, false); - return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public Iterable<Vertex> getVertices(String prop, Object val) { - - final URI p = factory.toPropertyURI(prop); - final Literal o = factory.toLiteral(val); - try { - final RepositoryResult<Statement> result = - cxn().getStatements(null, p, o, false); - return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - - } - - @Override - public GraphQuery query() 
{ - return new DefaultGraphQuery(this); - } - - @Override - public void removeEdge(final Edge edge) { - try { - final URI uri = factory.toURI(edge); - if (!cxn().hasStatement(uri, RDF.TYPE, EDGE, false)) { - throw new IllegalStateException(); - } - final URI wild = null; - // remove the edge statement - cxn().remove(wild, uri, wild); - // remove its properties - cxn().remove(uri, wild, wild); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - @Override - public void removeVertex(final Vertex vertex) { - try { - final URI uri = factory.toURI(vertex); - if (!cxn().hasStatement(uri, RDF.TYPE, VERTEX, false)) { - throw new IllegalStateException(); - } - final URI wild = null; - // remove outgoing links and properties - cxn().remove(uri, wild, wild); - // remove incoming links - cxn().remove(wild, wild, uri); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - -// @Override -// public void commit() { -// try { -// cxn().commit(); -// } catch (RepositoryException e) { -// throw new RuntimeException(e); -// } -// } -// -// @Override -// public void rollback() { -// try { -// cxn().rollback(); -// cxn.close(); -// cxn = repo.getUnisolatedConnection(); -// cxn.setAutoCommit(false); -// } catch (RepositoryException e) { -// throw new RuntimeException(e); -// } -// } -// -// @Override -// public void shutdown() { -// try { -// cxn.close(); -// repo.shutDown(); -// } catch (RepositoryException e) { -// throw new RuntimeException(e); -// } -// } -// -// @Override -// @Deprecated -// public void stopTransaction(Conclusion arg0) { -// } - - public class VertexIterable implements Iterable<Vertex>, Iterator<Vertex> { - - private final CloseableIteration<Statement, ? extends OpenRDFException> stmts; - - private final boolean subject; - - private final List<Vertex> cache; - - public VertexIterable( - final CloseableIteration<Statement, ? extends OpenRDFException> stmts, - final boolean subject) { - this.stmts = stmts; - this.subject = subject; - this.cache = new LinkedList<Vertex>(); - } - - @Override - public boolean hasNext() { - try { - return stmts.hasNext(); - } catch (OpenRDFException e) { - throw new RuntimeException(e); - } - } - - @Override - public Vertex next() { - try { - final Statement stmt = stmts.next(); - final URI v = (URI) - (subject ? stmt.getSubject() : stmt.getObject()); - if (!hasNext()) { - stmts.close(); - } - final Vertex vertex = new BigdataVertex(v, BigdataGraph.this); - cache.add(vertex); - return vertex; - } catch (OpenRDFException e) { - throw new RuntimeException(e); - } - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator<Vertex> iterator() { - return hasNext() ? this : cache.iterator(); - } - - } - - public class EdgeIterable implements Iterable<Edge>, Iterator<Edge> { - - private final CloseableIteration<Statement, ? extends OpenRDFException> stmts; - - private final List<Edge> cache; - - public EdgeIterable( - final CloseableIteration<Statement, ? 
extends OpenRDFException> stmts) { - this.stmts = stmts; - this.cache = new LinkedList<Edge>(); - } - - @Override - public boolean hasNext() { - try { - return stmts.hasNext(); - } catch (OpenRDFException e) { - throw new RuntimeException(e); - } - } - - @Override - public Edge next() { - try { - final Statement stmt = stmts.next(); - if (!hasNext()) { - stmts.close(); - } - final Edge edge = new BigdataEdge(stmt, BigdataGraph.this); - cache.add(edge); - return edge; - } catch (OpenRDFException e) { - throw new RuntimeException(e); - } - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator<Edge> iterator() { - return hasNext() ? this : cache.iterator(); - } - - } - - public class FusedIterable<T> implements Iterable<T>, Iterator<T> { - - private final Iterable<T>[] args; - - private transient int i = 0; - - private transient Iterator<T> curr; - - public FusedIterable(final Iterable<T>... args) { - this.args = args; - this.curr = args[0].iterator(); - } - - @Override - public boolean hasNext() { - if (curr.hasNext()) { - return true; - } - while (!curr.hasNext() && i < (args.length-1)) { - curr = args[++i].iterator(); - if (curr.hasNext()) { - return true; - } - } - return false; - } - - @Override - public T next() { - return curr.next(); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator<T> iterator() { - return this; - } - - } - - protected static final Features FEATURES = new Features(); - - static { - - FEATURES.supportsSerializableObjectProperty = false; - FEATURES.supportsBooleanProperty = true; - FEATURES.supportsDoubleProperty = true; - FEATURES.supportsFloatProperty = true; - FEATURES.supportsIntegerProperty = true; - FEATURES.supportsPrimitiveArrayProperty = false; - FEATURES.supportsUniformListProperty = false; - FEATURES.supportsMixedListProperty = false; - FEATURES.supportsLongProperty = true; - FEATURES.supportsMapProperty = false; - FEATURES.supportsStringProperty = true; - - FEATURES.supportsDuplicateEdges = true; - FEATURES.supportsSelfLoops = true; - FEATURES.isPersistent = true; - FEATURES.isWrapper = false; - FEATURES.supportsVertexIteration = true; - FEATURES.supportsEdgeIteration = true; - FEATURES.supportsVertexIndex = false; - FEATURES.supportsEdgeIndex = false; - FEATURES.ignoresSuppliedIds = true; - FEATURES.supportsTransactions = false; - FEATURES.supportsIndices = true; - FEATURES.supportsKeyIndices = true; - FEATURES.supportsVertexKeyIndex = true; - FEATURES.supportsEdgeKeyIndex = true; - FEATURES.supportsEdgeRetrieval = true; - FEATURES.supportsVertexProperties = true; - FEATURES.supportsEdgeProperties = true; - FEATURES.supportsThreadedTransactions = false; - } - -} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-13 19:32:10 UTC (rev 8298) @@ -0,0 +1,1017 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import info.aduna.iteration.CloseableIteration; + +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import org.openrdf.OpenRDFException; +import org.openrdf.model.Literal; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.impl.StatementImpl; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.model.vocabulary.RDF; +import org.openrdf.model.vocabulary.RDFS; +import org.openrdf.query.GraphQueryResult; +import org.openrdf.query.QueryLanguage; +import org.openrdf.repository.RepositoryConnection; +import org.openrdf.repository.RepositoryResult; + +import com.bigdata.rdf.store.BD; +import com.tinkerpop.blueprints.Direction; +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Features; +import com.tinkerpop.blueprints.Graph; +import com.tinkerpop.blueprints.GraphQuery; +import com.tinkerpop.blueprints.Vertex; +import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; + +/** + * A base class for a Blueprints wrapper around a bigdata back-end. + * + * @author mikepersonick + * + */ +public abstract class BigdataGraph implements Graph { + + /** + * URI used to represent a Vertex. + */ + public static final URI VERTEX = new URIImpl(BD.NAMESPACE + "Vertex"); + + /** + * URI used to represent a Edge. + */ + public static final URI EDGE = new URIImpl(BD.NAMESPACE + "Edge"); + + /** + * Factory for round-tripping between Blueprints data and RDF data. + */ + final BlueprintsRDFFactory factory; + + public BigdataGraph(final BlueprintsRDFFactory factory) { + + this.factory = factory; + + } + + /** + * For some reason this is part of the specification (i.e. part of the + * Blueprints test suite). + */ + public String toString() { + + return getClass().getSimpleName().toLowerCase(); + + } + + /** + * Different implementations will return different types of connections + * depending on the mode (client/server, embedded, read-only, etc.) + */ + protected abstract RepositoryConnection cxn() throws Exception; + + /** + * Return a single-valued property for an edge or vertex. + * + * @see {@link BigdataElement} + */ + public Object getProperty(final URI uri, final String prop) { + + return getProperty(uri, factory.toPropertyURI(prop)); + + } + + /** + * Return a single-valued property for an edge or vertex. 
+ * + * @see {@link BigdataElement} + */ + public Object getProperty(final URI uri, final URI prop) { + + try { + + final RepositoryResult<Statement> result = + cxn().getStatements(uri, prop, null, false); + + if (result.hasNext()) { + + final Value value = result.next().getObject(); + + if (result.hasNext()) { + throw new RuntimeException(uri + + ": more than one value for p: " + prop + + ", did you mean to call getProperties()?"); + } + + if (!(value instanceof Literal)) { + throw new RuntimeException("not a property: " + value); + } + + final Literal lit = (Literal) value; + + return factory.fromLiteral(lit); + + } + + return null; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Return a multi-valued property for an edge or vertex. + * + * @see {@link BigdataElement} + */ + public List<Object> getProperties(final URI uri, final String prop) { + + return getProperties(uri, factory.toPropertyURI(prop)); + + } + + + /** + * Return a multi-valued property for an edge or vertex. + * + * @see {@link BigdataElement} + */ + public List<Object> getProperties(final URI uri, final URI prop) { + + try { + + final RepositoryResult<Statement> result = + cxn().getStatements(uri, prop, null, false); + + final List<Object> props = new LinkedList<Object>(); + + while (result.hasNext()) { + + final Value value = result.next().getObject(); + + if (!(value instanceof Literal)) { + throw new RuntimeException("not a property: " + value); + } + + final Literal lit = (Literal) value; + + props.add(factory.fromLiteral(lit)); + + } + + return props; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Return the property names for an edge or vertex. + * + * @see {@link BigdataElement} + */ + public Set<String> getPropertyKeys(final URI uri) { + + try { + + final RepositoryResult<Statement> result = + cxn().getStatements(uri, null, null, false); + + final Set<String> properties = new LinkedHashSet<String>(); + + while (result.hasNext()) { + + final Statement stmt = result.next(); + + if (!(stmt.getObject() instanceof Literal)) { + continue; + } + + if (stmt.getPredicate().equals(RDFS.LABEL)) { + continue; + } + + final String p = + factory.fromPropertyURI(stmt.getPredicate()); + + properties.add(p); + + } + + return properties; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Remove all values for a particular property on an edge or vertex. + * + * @see {@link BigdataElement} + */ + public Object removeProperty(final URI uri, final String prop) { + + return removeProperty(uri, factory.toPropertyURI(prop)); + + } + + /** + * Remove all values for a particular property on an edge or vertex. + * + * @see {@link BigdataElement} + */ + public Object removeProperty(final URI uri, final URI prop) { + + try { + + final Object oldVal = getProperty(uri, prop); + + cxn().remove(uri, prop, null); + + return oldVal; + + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + + /** + * Set a single-value property on an edge or vertex (remove the old + * value first). + * + * @see {@link BigdataElement} + */ + public void setProperty(final URI uri, final String prop, final Object val) { + + setProperty(uri, factory.toPropertyURI(prop), factory.toLiteral(val)); + + } + + /** + * Set a single-value property on an edge or vertex (remove the old + * value first). 
+ * + * @see {@link BigdataElement} + */ + public void setProperty(final URI uri, final URI prop, final Literal val) { + + try { + + cxn().remove(uri, prop, null); + + cxn().add(uri, prop, val); + + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + + /** + * Add a property on an edge or vertex (multi-value property extension). + * + * @see {@link BigdataElement} + */ + public void addProperty(final URI uri, final String prop, final Object val) { + + setProperty(uri, factory.toPropertyURI(prop), factory.toLiteral(val)); + + } + + /** + * Add a property on an edge or vertex (multi-value property extension). + * + * @see {@link BigdataElement} + */ + public void addProperty(final URI uri, final URI prop, final Literal val) { + + try { + + cxn().add(uri, prop, val); + + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + + /** + * Post a GraphML file to the remote server. (Bulk-upload operation.) + */ + public void loadGraphML(final String file) throws Exception { + + GraphMLReader.inputGraph(this, file); + + } + + /** + * Add an edge. + */ + @Override + public Edge addEdge(final Object key, final Vertex from, final Vertex to, + final String label) { + + if (label == null) { + throw new IllegalArgumentException(); + } + + final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); + + final URI edgeURI = factory.toEdgeURI(eid); + + if (key != null) { + + final Edge edge = getEdge(key); + + if (edge != null) { + if (!(edge.getVertex(Direction.OUT).equals(from) && + (edge.getVertex(Direction.OUT).equals(to)))) { + throw new IllegalArgumentException("edge already exists: " + key); + } + } + + } + + try { + + // do we need to check this? +// if (cxn().hasStatement(edgeURI, RDF.TYPE, EDGE, false)) { +// throw new IllegalArgumentException("edge " + eid + " already exists"); +// } + + final URI fromURI = factory.toVertexURI(from.getId().toString()); + final URI toURI = factory.toVertexURI(to.getId().toString()); + + cxn().add(fromURI, edgeURI, toURI); + cxn().add(edgeURI, RDF.TYPE, EDGE); + cxn().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); + + return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Add a vertex. + */ + @Override + public Vertex addVertex(final Object key) { + + try { + + final String vid = key != null ? + key.toString() : UUID.randomUUID().toString(); + + final URI uri = factory.toVertexURI(vid); + + // do we need to check this? +// if (cxn().hasStatement(vertexURI, RDF.TYPE, VERTEX, false)) { +// throw new IllegalArgumentException("vertex " + vid + " already exists"); +// } + + cxn().add(uri, RDF.TYPE, VERTEX); + + return new BigdataVertex(uri, this); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Lookup an edge. + */ + @Override + public Edge getEdge(final Object key) { + + if (key == null) + throw new IllegalArgumentException(); + + try { + + final URI edge = factory.toEdgeURI(key.toString()); + + final RepositoryResult<Statement> result = + cxn().getStatements(null, edge, null, false); + + if (result.hasNext()) { + + final Statement stmt = result.next(); + + if (result.hasNext()) { + throw new RuntimeException( + "duplicate edge: " + key); + } + + return new BigdataEdge(stmt, this); + + } + + return null; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Iterate all edges. 
+ */ + @Override + public Iterable<Edge> getEdges() { + + final URI wild = null; + return getEdges(wild, wild); + + } + + /** + * Find edges based on the from and to vertices and the edge labels, all + * optional parameters (can be null). The edge labels can be null to include + * all labels. + * <p> + * + * @param from + * the from vertex (null for wildcard) + * @param to + * the to vertex (null for wildcard) + * @param labels + * the edge labels to consider (optional) + * @return the edges matching the supplied criteria + */ + Iterable<Edge> getEdges(final URI from, final URI to, final String... labels) { + + final GraphQueryResult stmts = getElements(from, to, labels); + + return new EdgeIterable(stmts); + + } + + /** + * Translates the request to a high-performance SPARQL query: + * + * construct { + * ?from ?edge ?to . + * } where { + * ?edge rdf:type <Edge> . + * + * ?from ?edge ?to . + * + * # filter by edge label + * ?edge rdfs:label ?label . + * filter(?label in ("label1", "label2", ...)) . + * } + */ + protected GraphQueryResult getElements(final URI from, final URI to, + final String... labels) { + + final StringBuilder sb = new StringBuilder(); + sb.append("construct { ?from ?edge ?to . } where {\n"); + sb.append(" ?edge rdf:type bd:Edge .\n"); + sb.append(" ?from ?edge ?to .\n"); + if (labels != null && labels.length > 0) { + if (labels.length == 1) { + sb.append(" ?edge rdfs:label \"").append(labels[0]).append("\" .\n"); + } else { + sb.append(" ?edge rdfs:label ?label .\n"); + sb.append(" filter(?label in ("); + for (String label : labels) { + sb.append("\""+label+"\", "); + } + sb.setLength(sb.length()-2); + sb.append(")) .\n"); + } + } + sb.append("}"); + + // bind the from and/or to + final String queryStr = sb.toString() + .replace("?from", from != null ? "<"+from+">" : "?from") + .replace("?to", to != null ? "<"+to+">" : "?to"); + + try { + + final org.openrdf.query.GraphQuery query = + cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + + final GraphQueryResult stmts = query.evaluate(); + + return stmts; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Find edges based on a SPARQL construct query. The query MUST construct + * edge statements: + * <p> + * construct { ?from ?edge ?to } where { ... } + * + * @see {@link BigdataGraphQuery} + */ + Iterable<Edge> getEdges(final String queryStr) { + + try { + + final org.openrdf.query.GraphQuery query = + cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + + final GraphQueryResult stmts = query.evaluate(); + + return new EdgeIterable(stmts); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Find vertices based on the supplied from and to vertices and the edge + * labels. One or the other (from and to) must be null (wildcard), but not + * both. Use getEdges() for wildcards on both the from and to. The edge + * labels can be null to include all labels. + * + * @param from + * the from vertex (null for wildcard) + * @param to + * the to vertex (null for wildcard) + * @param labels + * the edge labels to consider (optional) + * @return + * the vertices matching the supplied criteria + */ + Iterable<Vertex> getVertices(final URI from, final URI to, + final String... 
labels) { + + if (from != null && to != null) { + throw new IllegalArgumentException(); + } + + if (from == null && to == null) { + throw new IllegalArgumentException(); + } + + final GraphQueryResult stmts = getElements(from, to, labels); + + return new VertexIterable(stmts, from == null); + + } + + /** + * Find vertices based on a SPARQL construct query. If the subject parameter + * is true, the vertices will be taken from the subject position of the + * constructed statements, otherwise they will be taken from the object + * position. + * + * @see {@link BigdataGraphQuery} + */ + Iterable<Vertex> getVertices(final String queryStr, final boolean subject) { + + try { + + final org.openrdf.query.GraphQuery query = + cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + + final GraphQueryResult stmts = query.evaluate(); + + return new VertexIterable(stmts, subject); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Find edges with the supplied property value. + * + * construct { + * ?from ?edge ?to . + * } + * where { + * ?edge <prop> <val> . + * ?from ?edge ?to . + * } + */ + @Override + public Iterable<Edge> getEdges(final String prop, final Object val) { + + final URI p = factory.toPropertyURI(prop); + final Literal o = factory.toLiteral(val); + + try { + + final StringBuilder sb = new StringBuilder(); + sb.append("construct { ?from ?edge ?to . } where {\n"); + sb.append(" ?edge <"+p+"> "+o+" .\n"); + sb.append(" ?from ?edge ?to .\n"); + sb.append("}"); + + final String queryStr = sb.toString(); + + return getEdges(queryStr); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Lookup a vertex. + */ + @Override + public Vertex getVertex(final Object key) { + + if (key == null) + throw new IllegalArgumentException(); + + final URI uri = factory.toVertexURI(key.toString()); + + try { + + if (cxn().hasStatement(uri, RDF.TYPE, VERTEX, false)) { + return new BigdataVertex(uri, this); + } + + return null; + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + + /** + * Iterate all vertices. + */ + @Override + public Iterable<Vertex> getVertices() { + + try { + + final RepositoryResult<Statement> result = + cxn().getStatements(null, RDF.TYPE, VERTEX, false); + + return new VertexIterable(result, true); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Find vertices with the supplied property value. + */ + @Override + public Iterable<Vertex> getVertices(final String prop, final Object val) { + + final URI p = factory.toPropertyURI(prop); + final Literal o = factory.toLiteral(val); + + try { + + final RepositoryResult<Statement> result = + cxn().getStatements(null, p, o, false); + + return new VertexIterable(result, true); + + } catch (Exception ex) { + throw new RuntimeException(ex); + } + + } + + /** + * Providing an override implementation for our GraphQuery to avoid the + * low-performance scan and filter paradigm. See {@link BigdataGraphQuery}. + */ + @Override + public GraphQuery query() { +// return new DefaultGraphQuery(this); + return new BigdataGraphQuery(this); + } + + /** + * Remove an edge and its properties. 
+ */ + @Override + public void removeEdge(final Edge edge) { + + try { + + final URI uri = factory.toURI(edge); + + if (!cxn().hasStatement(uri, RDF.TYPE, EDGE, false)) { + throw new IllegalStateException(); + } + + final URI wild = null; + + // remove the edge statement + cxn().remove(wild, uri, wild); + + // remove its properties + cxn().remove(uri, wild, wild); + + } catch (Exception e) { + throw new RuntimeException(e); + ... [truncated message content] |
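To make the string-substitution in getElements() above concrete: with a hypothetical vertex URI <s:v1>, a call like getEdges(v1, null, "knows") should expand to roughly the following query (assuming the rdf:, rdfs: and bd: prefixes resolve as they do elsewhere in this code; the URI and label are invented for illustration):

    construct { <s:v1> ?edge ?to . } where {
      ?edge rdf:type bd:Edge .
      <s:v1> ?edge ?to .
      ?edge rdfs:label "knows" .
    }

With more than one label the builder instead emits ?edge rdfs:label ?label together with a filter(?label in ("knows", "likes")) clause, per the multi-label branch shown above.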
From: <tho...@us...> - 2014-05-14 15:32:59
Revision: 8314
          http://sourceforge.net/p/bigdata/code/8314
Author:   thompsonbry
Date:     2014-05-14 15:32:55 +0000 (Wed, 14 May 2014)
Log Message:
-----------
LBS policy fix. Moved the JVM_OPTS into /etc/default/bigdataHA. This is the more standard practice. See #624 (HA LBS)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
    branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
    branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java	2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java	2014-05-14 15:32:55 UTC (rev 8314)
@@ -802,7 +802,7 @@
         double sum = 0d;
         for (HostScore tmp : hostScores) {
             hostScore = tmp;
-            sum += (1d - hostScore.getScore());
+            sum += hostScore.getScore();
             if (sum >= d) {
                 // found desired host.
                 break;

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-14 15:32:55 UTC (rev 8314)
@@ -21,17 +21,6 @@
 pidFile=$lockDir/pid
 
 ##
-# ServiceStarter JVM options.
-#
-# The ServiceStarter is launched as a JVM with the following JVM options.
-# The other services (including the HAJournalServer) will run inside of
-# this JVM. This is where you specify the size of the Java heap and the
-# size of the direct memory heap (used for the write cache buffers and
-# some related things).
-##
-export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
-
-##
 # HAJournalServer configuration parameter overrides (see HAJournal.config).
 #
 # The bigdata HAJournal.config file may be heavily parameterized through

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA	2014-05-14 15:16:27 UTC (rev 8313)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA	2014-05-14 15:32:55 UTC (rev 8314)
@@ -14,6 +14,18 @@
 #pidFile=
 
 ##
+# ServiceStarter JVM options.
+#
+# The ServiceStarter is launched as a JVM with the following JVM options.
+# The other services (including the HAJournalServer) will run inside of
+# this JVM. This is where you specify the size of the Java heap and the
+# size of the direct memory heap (used for the write cache buffers and
+# some related things).
+##
+export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
+#export JVM_OPTS="-server -Xmx4G -XX:MaxDirectMemorySize=3000m -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1046"
+
+##
 # The following variables configure the startHAServices script, which
 # passes them through to HAJournal.config.
 ##
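The one-line change to AbstractHostLBSPolicy is subtle: getHost() draws a random number d in [0,1) and walks the host scores until the running sum reaches d, so each host is chosen with probability equal to its score. Once the scores are pre-normalized availabilities (see r8315 below), summing (1d - score) appears to invert the intent, weighting selection toward the least available hosts. A minimal self-contained sketch of the corrected selection, using a simplified stand-in for HostScore (not the real class):

    import java.util.Random;

    public class WeightedHostSelection {

        /** Simplified stand-in for the LBS policy's HostScore. */
        static class HostScore {
            private final String hostname;
            private final double score; // normalized: all scores sum to 1.0
            HostScore(final String hostname, final double score) {
                this.hostname = hostname;
                this.score = score;
            }
            String getHostname() { return hostname; }
            double getScore() { return score; }
        }

        /** Walk the cumulative scores until they cross d in [0,1). */
        static HostScore getHost(final double d, final HostScore[] hostScores) {
            HostScore hostScore = null;
            double sum = 0d;
            for (HostScore tmp : hostScores) {
                hostScore = tmp;
                sum += hostScore.getScore(); // the r8314 fix: not (1d - score)
                if (sum >= d) {
                    break; // found desired host.
                }
            }
            return hostScore; // last host absorbs any floating point remainder
        }

        public static void main(final String[] args) {
            final HostScore[] hosts = {
                new HostScore("hostA", 0.7), // should receive ~70% of requests
                new HostScore("hostB", 0.3)  // should receive ~30% of requests
            };
            final Random r = new Random();
            for (int i = 0; i < 10; i++)
                System.out.println(getHost(r.nextDouble(), hosts).getHostname());
        }
    }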
From: <tho...@us...> - 2014-05-14 15:48:19
Revision: 8315
          http://sourceforge.net/p/bigdata/code/8315
Author:   thompsonbry
Date:     2014-05-14 15:48:14 +0000 (Wed, 14 May 2014)
Log Message:
-----------
Added worksheet for the HA LBS LOAD => AVAILABILITY normalization logic and link to worksheet in the HA LBS code. See #624 (HA LBS)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
===================================================================
(Binary files differ)

Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx	2014-05-14 15:32:55 UTC (rev 8314)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx	2014-05-14 15:48:14 UTC (rev 8315)

Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/architecture/HA_LBS.xlsx
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java	2014-05-14 15:32:55 UTC (rev 8314)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/lbs/AbstractHostLBSPolicy.java	2014-05-14 15:48:14 UTC (rev 8315)
@@ -762,6 +762,8 @@
      *         be proxied -or- <code>null</code> if the request should not be
      *         proxied (because we lack enough information to identify a target
      *         host).
+     *
+     * @see bigdata/src/resources/architecture/HA_LBS.xls
      */
     static HostScore getHost(//
             final double d, //
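The worksheet itself is binary, so the LOAD => AVAILABILITY normalization it documents cannot be quoted here; the sketch below is a plausible reconstruction from the commit messages, not the worksheet's exact formula. The key property is that the normalized scores sum to 1.0, which is why getHost() in r8314 above can accumulate them directly:

    public class LoadNormalization {

        /**
         * Hypothetical LOAD => AVAILABILITY normalization: treat availability
         * as the complement of load, then rescale so the scores sum to 1.0.
         */
        static double[] loadToAvailability(final double[] load /* each in [0,1] */) {
            final double[] score = new double[load.length];
            double sum = 0d;
            for (int i = 0; i < load.length; i++) {
                score[i] = 1d - load[i]; // availability of host i
                sum += score[i];
            }
            for (int i = 0; i < score.length; i++) {
                // if every host is saturated, fall back to a uniform split
                score[i] = (sum == 0d) ? 1d / score.length : score[i] / sum;
            }
            return score;
        }

        public static void main(final String[] args) {
            // loads of 0.3 and 0.9 normalize to scores 0.875 and 0.125
            for (double s : loadToAvailability(new double[] { 0.3, 0.9 })) {
                System.out.println(s);
            }
        }
    }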
From: <tho...@us...> - 2014-05-15 18:39:41
Revision: 8340
          http://sourceforge.net/p/bigdata/code/8340
Author:   thompsonbry
Date:     2014-05-15 18:39:35 +0000 (Thu, 15 May 2014)
Log Message:
-----------
Modified startHAServices to pass along environment variables to control the jetty thread pool.

Modified jetty.xml to unpack the war per webtide guidance. This only happens if necessary.

Modified NanoSparqlServer to detect a failure to start and throw an exception.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
    branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java	2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java	2014-05-15 18:39:35 UTC (rev 8340)
@@ -28,6 +28,8 @@
 import java.net.URL;
 import java.util.LinkedHashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import javax.servlet.ServletContextListener;
 
@@ -106,6 +108,14 @@
      */
     String DEFAULT_JETTY_XML = "jetty.xml";
 
+    /**
+     * The timeout in seconds that we will await the start of the jetty
+     * {@link Server} (default {@value #DEFAULT_JETTY_STARTUP_TIMEOUT}).
+     */
+    String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout";
+
+    String DEFAULT_JETTY_STARTUP_TIMEOUT = "10";
+
 }
 
 /**
@@ -328,26 +338,12 @@
         initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS,
                 servletContextListenerClass);
 
-        final Server server;
+        final long jettyStartTimeout = Long.parseLong(System.getProperty(
+                SystemProperties.JETTY_STARTUP_TIMEOUT,
+                SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT));
 
-        boolean ok = false;
-        try {
-            // Create the service.
-            server = NanoSparqlServer.newInstance(port, jettyXml,
-                    null/* indexManager */, initParams);
-            // Start Server.
-            server.start();
-            // Await running.
-            while (server.isStarting() && !server.isRunning()) {
-                Thread.sleep(100/* ms */);
-            }
-            ok = true;
-        } finally {
-            if (!ok) {
-                // Complain if Server did not start.
-                System.err.println("Server did not start.");
-            }
-        }
+        final Server server = awaitServerStart(port, jettyXml, initParams,
+                jettyStartTimeout, TimeUnit.SECONDS);
 
         /*
          * Report *an* effective URL of this service.
@@ -384,6 +380,68 @@
     }
 
     /**
+     * Await a {@link Server} start up to a timeout.
+     *
+     * @param port
+     *            The port (maybe ZERO for a random port).
+     * @param jettyXml
+     *            The location of the <code>jetty.xml</code> file.
+     * @param initParams
+     *            The init-param overrides.
+     * @param timeout
+     *            The timeout.
+     * @param units
+     *
+     * @return The server iff the server started before the timeout.
+     *
+     * @throws InterruptedException
+     * @throws TimeoutException
+     * @throws Exception
+     */
+    private static Server awaitServerStart(final int port,
+            final String jettyXml, final Map<String, String> initParams,
+            final long timeout, final TimeUnit units)
+            throws InterruptedException, TimeoutException, Exception {
+
+        Server server = null;
+        boolean ok = false;
+        final long begin = System.nanoTime();
+        final long nanos = units.toNanos(timeout);
+        long remaining = nanos;
+        try {
+            // Create the service.
+            server = NanoSparqlServer.newInstance(port, jettyXml,
+                    null/* indexManager */, initParams);
+            // Start Server.
+            server.start();
+            // Await running.
+            remaining = nanos - (System.nanoTime() - begin);
+            while (server.isStarting() && !server.isRunning() && remaining > 0) {
+                Thread.sleep(100/* ms */);
+                // remaining = nanos - (now - begin) [aka elapsed]
+                remaining = nanos - (System.nanoTime() - begin);
+            }
+            if (remaining < 0) {
+                throw new TimeoutException();
+            }
+            ok = true;
+        } finally {
+            if (!ok) {
+                // Complain if Server did not start.
+                final String msg = "Server did not start.";
+                System.err.println(msg);
+                log.fatal(msg);
+                if (server != null) {
+                    server.stop();
+                    server.destroy();
+                }
+            }
+        }
+        return server;
+
+    }
+
+    /**
      * Start the embedded {@link Server}.
      * <p>
      * Note: The port override argument given here is applied by setting the

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml	2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml	2014-05-15 18:39:35 UTC (rev 8340)
@@ -149,7 +149,7 @@
           <Set name="contextPath">/bigdata</Set>
           <Set name="descriptor">WEB-INF/web.xml</Set>
           <Set name="parentLoaderPriority">true</Set>
-          <Set name="extractWAR">false</Set>
+          <Set name="extractWAR">true</Set>
         </New>
       </Arg>
     </Call>

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-15 18:38:16 UTC (rev 8339)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices	2014-05-15 18:39:35 UTC (rev 8340)
@@ -73,6 +73,9 @@
     -DHA_PORT=${HA_PORT}\
     "-Dcom.bigdata.hostname=${BIGDATA_HOSTNAME}"\
     "-Djetty.port=${JETTY_PORT}"\
+    "-Djetty.threads.min=${JETTY_THREADS_MIN}"\
+    "-Djetty.threads.max=${JETTY_THREADS_MAX}"\
+    "-Djetty.threads.timeout=${JETTY_THREADS_TIMEOUT}"\
     "-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\
     "-DJETTY_XML=${JETTY_XML}"\
     -DCOLLECT_QUEUE_STATISTICS=${COLLECT_QUEUE_STATISTICS}\
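Since r8314 moved operator-tunable settings into /etc/default/bigdataHA, the three new jetty thread-pool variables consumed by startHAServices would naturally be exported from that same file. An illustrative excerpt (the variable names come from the diff above; the values are examples, not recommended defaults):

    # /etc/default/bigdataHA (illustrative excerpt)
    #
    # jetty thread pool, passed through startHAServices as the
    # jetty.threads.min / jetty.threads.max / jetty.threads.timeout
    # system properties.
    export JETTY_THREADS_MIN=10        # smallest pool size to maintain (example)
    export JETTY_THREADS_MAX=64        # upper bound on pooled threads (example)
    export JETTY_THREADS_TIMEOUT=60000 # idle thread timeout in ms (example)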
From: <tho...@us...> - 2014-05-15 19:38:11
Revision: 8341 http://sourceforge.net/p/bigdata/code/8341 Author: thompsonbry Date: 2014-05-15 19:38:04 +0000 (Thu, 15 May 2014) Log Message: ----------- Working to chase down a problem with locating bigdata-war/src in the JAR when running the NSS from the command line. Refactored the logic to await the NSS start up to a timeout into the three main invocations of the NSS. This also places the code to interpret jetty.dump.start into each of these code paths in order to provide additional information on the startup contexts. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -29,7 +29,6 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.net.InetSocketAddress; -import java.net.URL; import java.nio.ByteBuffer; import java.nio.channels.ClosedByInterruptException; import java.rmi.Remote; @@ -105,6 +104,7 @@ import com.bigdata.rdf.sail.webapp.ConfigParams; import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer.SystemProperties; import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractHATransactionService; @@ -114,7 +114,6 @@ import com.bigdata.util.StackInfoReport; import com.bigdata.util.concurrent.LatchedExecutor; import com.bigdata.util.concurrent.MonitoredFutureTask; -import com.bigdata.util.config.NicUtil; import com.sun.jini.start.LifeCycle; /** @@ -4544,55 +4543,9 @@ jettyServer = NanoSparqlServer .newInstance(jettyXml, journal, null/* initParams */); - log.warn("Starting NSS"); - - // Start the server. - jettyServer.start(); + // Wait until the server starts (up to a timeout). + NanoSparqlServer.awaitServerStart(jettyServer); - if (Boolean.getBoolean("jetty.dump.start")) { - - // Support the jetty dump-after-start semantics. - log.warn(jettyServer.dump()); - - } - - /* - * Report *an* effective URL of this service. - * - * Note: This is an effective local URL (and only one of them, and - * even then only one for the first connector). It does not reflect - * any knowledge about the desired external deployment URL for the - * service end point. 
- */ - final String serviceURL; - { - - final int actualPort = getNSSPort(); -// final int actualPort = jettyServer.getConnectors()[0] -// .getLocalPort(); - - String hostAddr = NicUtil.getIpAddress("default.nic", - "default", true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - final String msg = "logicalServiceZPath: " - + logicalServiceZPath + "\n" + "serviceURL: " - + serviceURL; - - System.out.println(msg); - if (log.isInfoEnabled()) - log.warn(msg); - - } - } catch (Exception e1) { // Log and ignore. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -115,6 +115,12 @@ String JETTY_STARTUP_TIMEOUT = "jetty.start.timeout"; String DEFAULT_JETTY_STARTUP_TIMEOUT = "10"; + + /** + * When <code>true</code>, the state of jetty will be dumped onto a + * logger after the server start. + */ + String JETTY_DUMP_START = "jetty.dump.start"; } @@ -338,42 +344,12 @@ initParams.put(ConfigParams.SERVLET_CONTEXT_LISTENER_CLASS, servletContextListenerClass); - final long jettyStartTimeout = Long.parseLong(System.getProperty( - SystemProperties.JETTY_STARTUP_TIMEOUT, - SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); + // Create the service. + final Server server = NanoSparqlServer.newInstance(port, jettyXml, + null/* indexManager */, initParams); - final Server server = awaitServerStart(port, jettyXml, initParams, - jettyStartTimeout, TimeUnit.SECONDS); + awaitServerStart(server); - /* - * Report *an* effective URL of this service. - * - * Note: This is an effective local URL (and only one of them, and - * even then only one for the first connector). It does not reflect - * any knowledge about the desired external deployment URL for the - * service end point. - */ - final String serviceURL; - { - - final int actualPort = getLocalPort(server); - - String hostAddr = NicUtil.getIpAddress("default.nic", "default", - true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - System.out.println("serviceURL: " + serviceURL); - - } - // Wait for the service to terminate. server.join(); @@ -382,37 +358,25 @@ /** * Await a {@link Server} start up to a timeout. * - * @param port - * The port (maybe ZERO for a random port). - * @param jettyXml - * The location of the <code>jetty.xml</code> file. - * @param initParams - * The init-param overrides. - * @param timeout - * The timeout. - * @param units - * - * @return The server iff the server started before the timeout. - * + * @parma server The {@link Server} to start. 
* @throws InterruptedException * @throws TimeoutException * @throws Exception */ - private static Server awaitServerStart(final int port, - final String jettyXml, final Map<String, String> initParams, - final long timeout, final TimeUnit units) + public static void awaitServerStart(final Server server) throws InterruptedException, TimeoutException, Exception { - Server server = null; + final long timeout = Long.parseLong(System.getProperty( + SystemProperties.JETTY_STARTUP_TIMEOUT, + SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); + boolean ok = false; final long begin = System.nanoTime(); - final long nanos = units.toNanos(timeout); + final long nanos = TimeUnit.SECONDS.toNanos(timeout); long remaining = nanos; try { - // Create the service. - server = NanoSparqlServer.newInstance(port, jettyXml, - null/* indexManager */, initParams); // Start Server. + log.warn("Starting NSS"); server.start(); // Await running. remaining = nanos - (System.nanoTime() - begin); @@ -432,13 +396,59 @@ System.err.println(msg); log.fatal(msg); if (server != null) { + /* + * Support the jetty dump-after-start semantics. + */ + if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) { + log.warn(server.dump()); + } server.stop(); server.destroy(); } } } - return server; + /* + * Support the jetty dump-after-start semantics. + */ + if (Boolean.getBoolean(SystemProperties.JETTY_DUMP_START)) { + log.warn(server.dump()); + } + + /* + * Report *an* effective URL of this service. + * + * Note: This is an effective local URL (and only one of them, and even + * then only one for the first connector). It does not reflect any + * knowledge about the desired external deployment URL for the service + * end point. + */ + final String serviceURL; + { + + final int actualPort = getLocalPort(server); + + String hostAddr = NicUtil.getIpAddress("default.nic", "default", + true/* loopbackOk */); + + if (hostAddr == null) { + + hostAddr = "localhost"; + + } + + serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) + .toExternalForm(); + + final String msg = "serviceURL: " + serviceURL; + + System.out.println(msg); + + if (log.isInfoEnabled()) + log.warn(msg); + + } + } /** @@ -528,9 +538,7 @@ } /** - * Variant used when you already have the {@link IIndexManager} on hand and - * want to use <code>web.xml</code> to configure the {@link WebAppContext} - * and <code>jetty.xml</code> to configure the jetty {@link Server}. + * Variant used when you already have the {@link IIndexManager}. * <p> * When the optional {@link IIndexManager} argument is specified, it will be * set as an attribute on the {@link WebAppContext}. 
This will cause the @@ -563,9 +571,11 @@ * Allow configuration of embedded NSS jetty server using jetty-web.xml * </a> */ - static public Server newInstance(final String jettyXml, - final IIndexManager indexManager, - final Map<String, String> initParams) throws Exception { + static public Server newInstance(// + final String jettyXml,// + final IIndexManager indexManager,// + final Map<String, String> initParams// + ) throws Exception { if (jettyXml == null) throw new IllegalArgumentException(); @@ -676,10 +686,12 @@ */ if (initParams != null) { - wac.setAttribute(BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, initParams); + wac.setAttribute( + BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, + initParams); } - + } return server; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 18:39:35 UTC (rev 8340) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/samples/com/bigdata/samples/NSSEmbeddedExample.java 2014-05-15 19:38:04 UTC (rev 8341) @@ -1,6 +1,5 @@ package com.bigdata.samples; -import java.net.URL; import java.util.LinkedHashMap; import java.util.Map; @@ -10,7 +9,6 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; -import com.bigdata.util.config.NicUtil; /** * Class demonstrates how to start the {@link NanoSparqlServer} from within @@ -56,24 +54,8 @@ server = NanoSparqlServer.newInstance(port, indexManager, initParams); - server.start(); + NanoSparqlServer.awaitServerStart(server); - final int actualPort = NanoSparqlServer.getLocalPort(server); - - String hostAddr = NicUtil.getIpAddress("default.nic", - "default", true/* loopbackOk */); - - if (hostAddr == null) { - - hostAddr = "localhost"; - - } - - final String serviceURL = new URL("http", hostAddr, actualPort, ""/* file */) - .toExternalForm(); - - System.out.println("serviceURL: " + serviceURL); - // Block and wait. The NSS is running. server.join(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
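With awaitServerStart(Server) now public, all three launch paths share the same start/await/report sequence, and embedding the NSS reduces to a few calls, as the revised NSSEmbeddedExample shows. A condensed sketch of that usage (the IIndexManager is assumed to be obtained as in the sample, e.g. from a Journal; pass -Djetty.start.timeout=<seconds> or -Djetty.dump.start=true on the command line to tune or trace startup):

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.eclipse.jetty.server.Server;

    import com.bigdata.journal.IIndexManager;
    import com.bigdata.rdf.sail.webapp.NanoSparqlServer;

    public class EmbeddedNssLauncher {

        public static void launch(final int port, final IIndexManager indexManager)
                throws Exception {

            final Map<String, String> initParams = new LinkedHashMap<String, String>();

            // Create the service (port 0 selects a random port).
            final Server server = NanoSparqlServer.newInstance(port, indexManager,
                    initParams);

            // Starts the server, awaits it up to jetty.start.timeout seconds,
            // and reports an effective serviceURL; throws TimeoutException if
            // the server does not come up in time.
            NanoSparqlServer.awaitServerStart(server);

            // Block and wait. The NSS is running.
            server.join();
        }
    }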
From: <mrp...@us...> - 2014-05-15 21:22:00
Revision: 8342 http://sourceforge.net/p/bigdata/code/8342 Author: mrpersonick Date: 2014-05-15 21:21:54 +0000 (Thu, 15 May 2014) Log Message: ----------- upgraded to blueprints 2.5.0. added a rexster 2.5.0 dependency. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/.classpath branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/pom.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-05-15 21:21:54 UTC (rev 8342) @@ -94,8 +94,10 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.2.3.jar"/> - <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.4.0.jar"/> - <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.4.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/jettison-1.3.3.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-core-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/> + <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Depends.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -277,6 +277,10 @@ "https://github.com/tinkerpop/blueprints", "https://github.com/tinkerpop/blueprints/blob/master/LICENSE.txt"); + private final static Dep rexsterCore = new Dep("rexster-core", + "https://github.com/tinkerpop/rexster", + 
"https://github.com/tinkerpop/rexster/blob/master/LICENSE.txt"); + static private final Dep[] depends; static { depends = new Dep[] { // @@ -306,6 +310,7 @@ servletApi,// jacksonCore,// blueprintsCore,// + rexsterCore,// bigdataGanglia,// // scale-out jini,// Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/apache-commons.txt ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,24 @@ +Copyright (c) 2009-Infinity, TinkerPop [http://tinkerpop.com] +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the TinkerPop nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL TINKERPOP BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/LEGAL/rexster-license.txt ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.4.0.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-core-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.4.0.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/blueprints-test-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/commons-configuration-1.10.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar =================================================================== --- 
branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar 2014-05-15 21:21:54 UTC (rev 8342) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/lib/rexster-core-2.5.0.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -1,5 +1,5 @@ /** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. Contact: SYSTAP, LLC Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,146 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import org.apache.commons.configuration.Configuration; + +import com.tinkerpop.rexster.config.GraphConfiguration; +import com.tinkerpop.rexster.config.GraphConfigurationContext; +import com.tinkerpop.rexster.config.GraphConfigurationException; + +/** + * Create and configure a BigdataGraph for Rexster. + * + * @author mikepersonick + * + */ +public class BigdataGraphConfiguration implements GraphConfiguration { + + public interface Options { + + /** + * Specify the type of bigdata instance to use - embedded or remote. + */ + String TYPE = "properties.type"; + + /** + * Specifies that an embedded bigdata instance should be used. + */ + String TYPE_EMBEDDED = "embedded"; + + /** + * Specifies that a remote bigdata instance should be used. + */ + String TYPE_REMOTE = "remote"; + + /** + * Journal file for an embedded bigdata instance. + */ + String FILE = "properties.file"; + + /** + * Host for a remote bigdata instance. + */ + String HOST = "properties.host"; + + /** + * Port for a remote bigdata instance. 
+ */ + String PORT = "properties.port"; + + } + + /** + * Configure and return a BigdataGraph based on the supplied configuration + * parameters. + * + * @see {@link Options} + * @see com.tinkerpop.rexster.config.GraphConfiguration#configureGraphInstance(com.tinkerpop.rexster.config.GraphConfigurationContext) + */ + @Override + public BigdataGraph configureGraphInstance(final GraphConfigurationContext context) + throws GraphConfigurationException { + + try { + + return configure(context); + + } catch (Exception ex) { + + throw new GraphConfigurationException(ex); + + } + + } + + protected BigdataGraph configure(final GraphConfigurationContext context) + throws Exception { + + final Configuration config = context.getProperties(); + + if (!config.containsKey(Options.TYPE)) { + throw new GraphConfigurationException("missing required parameter: " + Options.TYPE); + } + + final String type = config.getString(Options.TYPE).toLowerCase(); + + if (Options.TYPE_EMBEDDED.equals(type)) { + + if (config.containsKey(Options.FILE)) { + + final String journal = config.getString(Options.FILE); + + return BigdataGraphFactory.create(journal); + + } else { + + return BigdataGraphFactory.create(); + + } + + } else if (Options.TYPE_REMOTE.equals(type)) { + + if (!config.containsKey(Options.HOST)) { + throw new GraphConfigurationException("missing required parameter: " + Options.HOST); + } + + if (!config.containsKey(Options.PORT)) { + throw new GraphConfigurationException("missing required parameter: " + Options.PORT); + } + + final String host = config.getString(Options.HOST); + + final int port = config.getInt(Options.PORT); + + return BigdataGraphFactory.connect(host, port); + + } else { + + throw new GraphConfigurationException("unrecognized value for " + + Options.TYPE + ": " + type); + + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,113 @@ +<?xml version="1.0" encoding="UTF-8"?> +<rexster> + <http> + <server-port>8182</server-port> + <server-host>0.0.0.0</server-host> + <base-uri>http://localhost</base-uri> + <web-root>public</web-root> + <character-set>UTF-8</character-set> + <enable-jmx>false</enable-jmx> + <enable-doghouse>true</enable-doghouse> + <max-post-size>2097152</max-post-size> + <max-header-size>8192</max-header-size> + <upload-timeout-millis>30000</upload-timeout-millis> + <thread-pool> + <worker> + <core-size>8</core-size> + <max-size>8</max-size> + </worker> + <kernal> + <core-size>4</core-size> + <max-size>4</max-size> + </kernal> + </thread-pool> + <io-strategy>leader-follower</io-strategy> + </http> + <rexpro> + <server-port>8184</server-port> + <server-host>0.0.0.0</server-host> + <session-max-idle>1790000</session-max-idle> + <session-check-interval>3000000</session-check-interval> + <connection-max-idle>180000</connection-max-idle> + <connection-check-interval>3000000</connection-check-interval> + <read-buffer>65536</read-buffer> + <enable-jmx>false</enable-jmx> + <thread-pool> + 
<worker> + <core-size>8</core-size> + <max-size>8</max-size> + </worker> + <kernal> + <core-size>4</core-size> + <max-size>4</max-size> + </kernal> + </thread-pool> + <io-strategy>leader-follower</io-strategy> + </rexpro> + <shutdown-port>8183</shutdown-port> + <shutdown-host>127.0.0.1</shutdown-host> + <config-check-interval>10000</config-check-interval> + <script-engines> + <script-engine> + <name>gremlin-groovy</name> + <reset-threshold>-1</reset-threshold> + <init-scripts>config/init.groovy</init-scripts> + <imports>com.tinkerpop.rexster.client.*</imports> + <static-imports>java.lang.Math.PI</static-imports> + </script-engine> + </script-engines> +<!-- + <security> + <authentication> + <type>none</type> + <configuration> + <users> + <user> + <username>rexster</username> + <password>rexster</password> + </user> + </users> + </configuration> + </authentication> + </security> + <metrics> + <reporter> + <type>jmx</type> + </reporter> + <reporter> + <type>http</type> + </reporter> + <reporter> + <type>console</type> + <properties> + <rates-time-unit>SECONDS</rates-time-unit> + <duration-time-unit>SECONDS</duration-time-unit> + <report-period>10</report-period> + <report-time-unit>MINUTES</report-time-unit> + <includes>http.rest.*</includes> + <excludes>http.rest.*.delete</excludes> + </properties> + </reporter> + </metrics> +--> + <graphs> + <graph> + <graph-name>bigdata</graph-name> + <graph-type>com.bigdata.blueprints.BigdataGraphConfiguration</graph-type> + <properties> +<!-- + <type>embedded</type> + <file>/tmp/bigdata.jnl</file> +--> + <type>remote</type> + <host>localhost</host> + <port>9999</port> + </properties> + <extensions> + <allows> + <allow>tp:gremlin</allow> + </allows> + </extensions> + </graph> + </graphs> +</rexster> Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/resources/rexster.xml ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-15 21:21:54 UTC (rev 8342) @@ -72,8 +72,9 @@ ganglia-version=1.0.4 gas-version=0.1.0 jackson-version=2.2.3 -blueprints.version=2.4.0 +blueprints.version=2.5.0 jettison.version=1.3.3 +rexster.version=2.5.0 # Set to false to NOT start services (zookeeper, lookup server, class server, etc). # When false, tests which depend on those services will not run. 
(This can also be Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -457,7 +457,7 @@ <include name="**/*.jar" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-blueprints/lib"> - <include name="blueprints-core-${blueprints.version}.jar" /> + <include name="**/*.jar" /> </fileset> <fileset dir="${bigdata.dir}/bigdata-gom/lib"> <include name="**/*.jar" /> @@ -2520,7 +2520,7 @@ <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" failonerror="true" fork="true" logerror="true"> <classpath refid="runtime.classpath" /> <jvmarg value="-server"/> - <jvmarg value="-Xmx1G"/> + <jvmarg value="-Xmx4G"/> <jvmarg value="-Dlog4j.configuration=bigdata-war/src/WEB-INF/classes/log4j.properties"/> <arg value="9999"/> <arg value="kb"/> @@ -2550,36 +2550,8 @@ <include name="semargl-rdf-0.4.jar"/> <include name="semargl-rdfa-0.4.jar"/> <include name="semargl-sesame-0.4.jar"/> - <include name="sesame-http-client-2.7.10.jar"/> - <include name="sesame-http-protocol-2.7.10.jar"/> - <include name="sesame-model-2.7.10.jar"/> - <include name="sesame-query-2.7.10.jar"/> - <include name="sesame-queryalgebra-evaluation-2.7.10.jar"/> - <include name="sesame-queryalgebra-model-2.7.10.jar"/> - <include name="sesame-queryparser-api-2.7.10.jar"/> - <include name="sesame-queryparser-serql-2.7.10.jar"/> - <include name="sesame-queryparser-sparql-2.7.10.jar"/> - <include name="sesame-queryresultio-api-2.7.10.jar"/> - <include name="sesame-queryresultio-sparqlxml-2.7.10.jar"/> - <include name="sesame-repository-api-2.7.10.jar"/> - <include name="sesame-repository-sparql-2.7.10.jar"/> - <include name="sesame-rio-api-2.7.10.jar"/> - <include name="sesame-rio-binary-2.7.10.jar"/> - <include name="sesame-rio-datatypes-2.7.10.jar"/> - <include name="sesame-rio-languages-2.7.10.jar"/> - <include name="sesame-rio-n3-2.7.10.jar"/> - <include name="sesame-rio-nquads-2.7.10.jar"/> - <include name="sesame-rio-ntriples-2.7.10.jar"/> - <include name="sesame-rio-rdfjson-2.7.10.jar"/> - <include name="sesame-rio-rdfxml-2.7.10.jar"/> - <include name="sesame-rio-trig-2.7.10.jar"/> - <include name="sesame-rio-trix-2.7.10.jar"/> - <include name="sesame-rio-turtle-2.7.10.jar"/> - <include name="sesame-sail-api-2.7.10.jar"/> - <include name="sesame-sail-inferencer-2.7.10.jar"/> - <include name="sesame-sail-memory-2.7.10.jar"/> - <include name="sesame-sail-nativerdf-2.7.10.jar"/> - <include name="sesame-util-2.7.10.jar"/> + <include name="sesame-*.jar"/> + <include name="neo4j-*.jar"/> <include name="bigdata-*.jar"/> </fileset> </delete> @@ -2617,4 +2589,63 @@ <target name="gremlin" depends="fetch-gremlin,install-gremlin"> </target> + <target name="fetch-rexster" depends="prepare,compile,jar"> + <echo>Installing Rexster...</echo> + <get + src="http://www.tinkerpop.com/downloads/gremlin/rexster-console-2.5.0.zip" + dest="${build.dir}/rexster-console-2.5.0.zip"/> + <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/> + <delete file="${build.dir}/rexster-console-2.5.0.zip"/> + </target> + + <target name="install-rexster" depends="prepare,compile,jar,bundle"> + <delete> + <fileset dir="${build.dir}/rexster-server-2.5.0/lib"> + <include name="blueprints-sail-graph-2.5.0.jar"/> + <include name="jsonld-java-0.3.jar"/> + <include name="jsonld-java-sesame-0.3.jar"/> + <include 
name="linked-data-sail-1.1.jar"/> + <include name="repository-sail-1.8.jar"/> + <include name="semargl-core-0.4.jar"/> + <include name="semargl-rdf-0.4.jar"/> + <include name="semargl-rdfa-0.4.jar"/> + <include name="semargl-sesame-0.4.jar"/> + <include name="sesame-*.jar"/> + <include name="neo4j-*.jar"/> + <include name="bigdata-*.jar"/> + </fileset> + </delete> + <copy toDir="${build.dir}/rexster-server-2.5.0/lib" flatten="true"> + <!-- + <fileset dir="${bigdata.dir}/bigdata-rdf/lib"> + <include name="openrdf-sesame-${sesame.version}-onejar.jar" /> + </fileset> + <fileset dir="${bigdata.dir}/bigdata-sails/lib/httpcomponents"> + <include name="httpmime-${apache.httpmime.version}.jar" /> + </fileset> + --> + <fileset dir="${build.dir}/lib"> + <include name="*.jar" /> + </fileset> + <fileset dir="${build.dir}"> + <include name="${version}.jar" /> + </fileset> + </copy> + <copy toDir="${build.dir}/rexster-server-2.5.0/config/" + file="${bigdata.dir}/bigdata-blueprints/src/resources/rexster.xml" + overwrite="true"/> + <chmod file="${build.dir}/rexster-server-2.5.0/bin/rexster.sh" perm="+x"/> + <echo>Rexster installation complete. +0. Make sure the bigdata server is running: + > ant start-bigdata +1. Start the rexster server: + > ./${build.dir}/rexster-server-2.5.0/bin/rexster.sh -s -c ${build.dir}/rexster-server-2.5.0/config/rexster.xml +2. Open the DogHouse UI: http://localhost:8182/doghouse + </echo> + + </target> + + <target name="rexster" depends="fetch-rexster,install-rexster"> + </target> + </project> Added: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -0,0 +1,54 @@ +<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <key id="weight" for="edge" attr.name="weight" attr.type="float"/> + <key id="name" for="node" attr.name="name" attr.type="string"/> + <key id="age" for="node" attr.name="age" attr.type="int"/> + <key id="lang" for="node" attr.name="lang" attr.type="string"/> + <graph id="G" edgedefault="directed"> + <node id="1"> + <data key="name">marko</data> + <data key="age">29</data> + </node> + <node id="2"> + <data key="name">vadas</data> + <data key="age">27</data> + </node> + <node id="3"> + <data key="name">lop</data> + <data key="lang">java</data> + </node> + <node id="4"> + <data key="name">josh</data> + <data key="age">32</data> + </node> + <node id="5"> + <data key="name">ripple</data> + <data key="lang">java</data> + </node> + <node id="6"> + <data key="name">peter</data> + <data key="age">35</data> + </node> + <edge id="7" source="1" target="2" label="knows"> + <data key="weight">0.5</data> + </edge> + <edge id="8" source="1" target="4" label="knows"> + <data key="weight">1.0</data> + </edge> + <edge id="9" source="1" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="10" source="4" target="5" label="created"> + <data key="weight">1.0</data> + </edge> + <edge id="11" source="4" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="12" source="6" target="3" label="created"> + <data key="weight">0.2</data> + </edge> + </graph> +</graphml> \ No newline at end of file Property 
changes on: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-15 19:38:04 UTC (rev 8341) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-05-15 21:21:54 UTC (rev 8342) @@ -99,8 +99,9 @@ <lgplutils.version>1.0.7-270114</lgplutils.version> <bigdata.ganglia.version>1.0.4</bigdata.ganglia.version> <jackson.version>2.2.3</jackson.version> - <blueprints.version>2.4.0</blueprints.version> + <blueprints.version>2.5.0</blueprints.version> <jettison.version>1.3.3</jettison.version> + <rexster.version>2.5.0</rexster.version> </properties> <!-- TODO Can we declare the versions of the dependencies here as properties and have them be substituted in for us? Can we pick @@ -321,6 +322,11 @@ <artifactId>blueprints-core</artifactId> <version>${blueprints.version}</version> </dependency> + <dependency> + <groupId>com.tinkerpop.rexster</groupId> + <artifactId>rexster-core</artifactId> + <version>${rexster.version}</version> + </dependency> <!-- --> <!-- artifacts that we publish (because they are not readily --> <!-- available) but we do not maintain. --> |
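The new BigdataGraphConfiguration and the bundled rexster.xml in this revision cooperate as follows: Rexster reads the <properties> element of a <graph> block and hands it to configureGraphInstance(...) under keys such as properties.type, properties.file, properties.host and properties.port, which then select between BigdataGraphFactory.create(...) (embedded journal) and BigdataGraphFactory.connect(...) (remote NanoSparqlServer). The remote path can also be exercised directly from Java; a minimal sketch, assuming a NanoSparqlServer is already listening and noting that the host and port below are example values, not taken from the commit:

    import com.bigdata.blueprints.BigdataGraph;
    import com.bigdata.blueprints.BigdataGraphFactory;
    import com.tinkerpop.blueprints.Vertex;

    public class RemoteGraphExample {
        public static void main(final String[] args) throws Exception {
            // Equivalent to <type>remote</type>, <host>localhost</host>,
            // <port>9999</port> in the rexster.xml <properties> block.
            final BigdataGraph graph = BigdataGraphFactory.connect("localhost", 9999);
            try {
                final Vertex v = graph.addVertex(null); // plain Blueprints API
                System.out.println("created vertex: " + v.getId());
            } finally {
                graph.shutdown(); // release the thin-client connection
            }
        }
    }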
From: <mrp...@us...> - 2014-05-15 21:43:24
|
Revision: 8344 http://sourceforge.net/p/bigdata/code/8344 Author: mrpersonick Date: 2014-05-15 21:43:20 +0000 (Thu, 15 May 2014) Log Message: ----------- again, fixed the rexster URL for the fetch-rexster task Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:39:21 UTC (rev 8343) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-15 21:43:20 UTC (rev 8344) @@ -2592,10 +2592,10 @@ <target name="fetch-rexster" depends="prepare,compile,jar"> <echo>Installing Rexster...</echo> <get - src="http://www.tinkerpop.com/downloads/rexster/rexster-console-2.5.0.zip" - dest="${build.dir}/rexster-console-2.5.0.zip"/> - <unzip src="${build.dir}/rexster-console-2.5.0.zip" dest="${build.dir}/"/> - <delete file="${build.dir}/rexster-console-2.5.0.zip"/> + src="http://www.tinkerpop.com/downloads/rexster/rexster-server-2.5.0.zip" + dest="${build.dir}/rexster-server-2.5.0.zip"/> + <unzip src="${build.dir}/rexster-server-2.5.0.zip" dest="${build.dir}/"/> + <delete file="${build.dir}/rexster-server-2.5.0.zip"/> </target> <target name="install-rexster" depends="prepare,compile,jar,bundle"> Deleted: branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:39:21 UTC (rev 8343) +++ branches/BIGDATA_RELEASE_1_3_0/graph-example-1.xml 2014-05-15 21:43:20 UTC (rev 8344) @@ -1,54 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<graphml xmlns="http://graphml.graphdrawing.org/xmlns" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns - http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> - <key id="weight" for="edge" attr.name="weight" attr.type="float"/> - <key id="name" for="node" attr.name="name" attr.type="string"/> - <key id="age" for="node" attr.name="age" attr.type="int"/> - <key id="lang" for="node" attr.name="lang" attr.type="string"/> - <graph id="G" edgedefault="directed"> - <node id="1"> - <data key="name">marko</data> - <data key="age">29</data> - </node> - <node id="2"> - <data key="name">vadas</data> - <data key="age">27</data> - </node> - <node id="3"> - <data key="name">lop</data> - <data key="lang">java</data> - </node> - <node id="4"> - <data key="name">josh</data> - <data key="age">32</data> - </node> - <node id="5"> - <data key="name">ripple</data> - <data key="lang">java</data> - </node> - <node id="6"> - <data key="name">peter</data> - <data key="age">35</data> - </node> - <edge id="7" source="1" target="2" label="knows"> - <data key="weight">0.5</data> - </edge> - <edge id="8" source="1" target="4" label="knows"> - <data key="weight">1.0</data> - </edge> - <edge id="9" source="1" target="3" label="created"> - <data key="weight">0.4</data> - </edge> - <edge id="10" source="4" target="5" label="created"> - <data key="weight">1.0</data> - </edge> - <edge id="11" source="4" target="3" label="created"> - <data key="weight">0.4</data> - </edge> - <edge id="12" source="6" target="3" label="created"> - <data key="weight">0.2</data> - </edge> - </graph> -</graphml> \ No newline at end of file |
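The deleted graph-example-1.xml was the standard TinkerPop toy graph (marko, vadas, lop, josh, ripple, peter) in GraphML form. A file like it can be loaded into any Blueprints graph, including a BigdataGraph, with the stock GraphML reader; a short sketch, assuming blueprints-core is on the classpath and treating the file path as an example value:

    import java.io.FileInputStream;
    import java.io.InputStream;

    import com.tinkerpop.blueprints.Graph;
    import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;

    public class LoadGraphML {
        public static void load(final Graph graph) throws Exception {
            final InputStream in = new FileInputStream("graph-example-1.xml");
            try {
                // Parses the GraphML document and adds its vertices and
                // edges to the supplied Blueprints graph.
                GraphMLReader.inputGraph(graph, in);
            } finally {
                in.close();
            }
        }
    }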
From: <mrp...@us...> - 2014-05-16 18:36:37
|
Revision: 8348 http://sourceforge.net/p/bigdata/code/8348 Author: mrpersonick Date: 2014-05-16 18:36:34 +0000 (Fri, 16 May 2014) Log Message: ----------- fixed some Blueprints CI errors related to the 2.5.0 upgrade Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -144,9 +144,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -192,9 +194,11 @@ return props; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -233,9 +237,11 @@ return properties; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -297,9 +303,11 @@ cxn().add(uri, prop, val); - } catch (Exception e) { - throw new RuntimeException(e); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -325,6 +333,8 @@ cxn().add(uri, prop, val); + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } @@ -384,9 +394,11 @@ return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -412,9 +424,11 @@ return new BigdataVertex(uri, this); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -449,9 +463,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -539,8 +555,10 @@ return stmts; - } catch (Exception ex) { - throw new RuntimeException(ex); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); } } @@ -564,9 +582,11 @@ return new EdgeIterable(stmts); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -621,9 +641,11 @@ return new VertexIterable(stmts, subject); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + 
throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -656,9 +678,11 @@ return getEdges(queryStr); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -681,9 +705,11 @@ return null; - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -701,9 +727,11 @@ return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -723,9 +751,11 @@ return new VertexIterable(result, true); - } catch (Exception ex) { - throw new RuntimeException(ex); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -761,9 +791,11 @@ // remove its properties cxn().remove(uri, wild, wild); - } catch (Exception e) { - throw new RuntimeException(e); - } + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } } @@ -789,7 +821,9 @@ // remove incoming edges cxn().remove(wild, wild, uri); - } catch (Exception e) { + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { throw new RuntimeException(e); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -29,19 +29,19 @@ /** * This is a thin-client implementation of a Blueprints wrapper around the - * client library that interacts with the NanoSparqlServer. This is a functional + * client library that interacts with the NanoSparqlServer. This is a functional * implementation suitable for writing POCs - it is not a high performance - * implementation by any means (currently does not support caching, batched - * update, or Blueprints query re-writes). Does have a single "bulk upload" - * operation that wraps a method on RemoteRepository that will POST a graphml - * file to the blueprints layer of the bigdata server. + * implementation by any means (currently does not support caching or batched + * update). Does have a single "bulk upload" operation that wraps a method on + * RemoteRepository that will POST a graphml file to the blueprints layer of the + * bigdata server. 
* * @see {@link BigdataSailRemoteRepository} * @see {@link BigdataSailRemoteRepositoryConnection} * @see {@link RemoteRepository} * * @author mikepersonick - * + * */ public class BigdataGraphClient extends BigdataGraph { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -30,6 +30,7 @@ import org.openrdf.model.URI; import org.openrdf.model.ValueFactory; import org.openrdf.model.impl.ValueFactoryImpl; +import org.openrdf.model.vocabulary.RDFS; import com.bigdata.rdf.internal.XSD; import com.tinkerpop.blueprints.Edge; @@ -138,7 +139,19 @@ try { - return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8")); + if (property.equals("label")) { + + /* + * Label is a reserved property for edge labels, we use + * rdfs:label for that. + */ + return RDFS.LABEL; + + } else { + + return vf.createURI(GRAPH_NAMESPACE, URLEncoder.encode(property, "UTF-8")); + + } } catch (UnsupportedEncodingException e) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/test/com/bigdata/blueprints/TestBigdataGraphEmbedded.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -68,7 +68,8 @@ test.doTestSuite(new TransactionalGraphTestSuite(test)); GraphTest.printTestPerformance("TransactionalGraphTestSuite", test.stopWatch()); - } + } + // public void testGraphSuite() throws Exception { // final GraphTest test = newBigdataGraphTest(); // test.stopWatch(); @@ -77,12 +78,12 @@ //} -// public void testTransactionIsolationCommitCheck() throws Exception { +// public void testGetEdgesByLabel() throws Exception { // final BigdataGraphTest test = new BigdataGraphTest(); // test.stopWatch(); // final BigdataTestSuite testSuite = new BigdataTestSuite(test); // try { -// testSuite.testTransactionIsolationCommitCheck(); +// testSuite.testGetEdgesByLabel(); // } finally { // test.shutdown(); // } @@ -95,71 +96,25 @@ super(graphTest); } - public void testTransactionIsolationCommitCheck() throws Exception { - // the purpose of this test is to simulate rexster access to a graph instance, where one thread modifies - // the graph and a separate thread cannot affect the transaction of the first - final TransactionalGraph graph = (TransactionalGraph) graphTest.generateGraph(); - - final CountDownLatch latchCommittedInOtherThread = new CountDownLatch(1); - final CountDownLatch latchCommitInOtherThread = new CountDownLatch(1); - - // this thread starts a transaction then waits while the second thread tries to commit it. 
- final Thread threadTxStarter = new Thread() { - public void run() { - System.err.println(Thread.currentThread().getId() + ": 1"); - final Vertex v = graph.addVertex(null); - - // System.out.println("added vertex"); - - System.err.println(Thread.currentThread().getId() + ": 2"); - latchCommitInOtherThread.countDown(); - - System.err.println(Thread.currentThread().getId() + ": 3"); - try { - latchCommittedInOtherThread.await(); - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } - - System.err.println(Thread.currentThread().getId() + ": 4"); - graph.rollback(); - - System.err.println(Thread.currentThread().getId() + ": 5"); - // there should be no vertices here - // System.out.println("reading vertex before tx"); - assertFalse(graph.getVertices().iterator().hasNext()); - // System.out.println("read vertex before tx"); - } - }; - - threadTxStarter.start(); - - // this thread tries to commit the transaction started in the first thread above. - final Thread threadTryCommitTx = new Thread() { - public void run() { - System.err.println(Thread.currentThread().getId() + ": 6"); - try { - latchCommitInOtherThread.await(); - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } - - System.err.println(Thread.currentThread().getId() + ": 7"); - // try to commit the other transaction - graph.commit(); - - System.err.println(Thread.currentThread().getId() + ": 8"); - latchCommittedInOtherThread.countDown(); - System.err.println(Thread.currentThread().getId() + ": 9"); - } - }; - - threadTryCommitTx.start(); - - threadTxStarter.join(); - threadTryCommitTx.join(); - graph.shutdown(); - + public void testGetEdgesByLabel() { + Graph graph = graphTest.generateGraph(); + if (graph.getFeatures().supportsEdgeIteration) { + Vertex v1 = graph.addVertex(null); + Vertex v2 = graph.addVertex(null); + Vertex v3 = graph.addVertex(null); + + Edge e1 = graph.addEdge(null, v1, v2, graphTest.convertLabel("test1")); + Edge e2 = graph.addEdge(null, v2, v3, graphTest.convertLabel("test2")); + Edge e3 = graph.addEdge(null, v3, v1, graphTest.convertLabel("test3")); + + assertEquals(e1, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test1")).edges())); + assertEquals(e2, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test2")).edges())); + assertEquals(e3, getOnlyElement(graph.query().has("label", graphTest.convertLabel("test3")).edges())); + + assertEquals(e1, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test1")))); + assertEquals(e2, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test2")))); + assertEquals(e3, getOnlyElement(graph.getEdges("label", graphTest.convertLabel("test3")))); + } } @@ -173,6 +128,7 @@ private class BigdataGraphTest extends GraphTest { private List<String> exclude = Arrays.asList(new String[] { + // this one creates a deadlock, no way around it "testTransactionIsolationCommitCheck" }); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 14:59:23 UTC (rev 8347) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-05-16 18:36:34 UTC (rev 8348) @@ -646,7 +646,7 @@ public void add(final Statement stmt, final Resource... 
c) throws RepositoryException { - log.warn("single statement updates not recommended"); +// log.warn("single statement updates not recommended"); final Graph g = new GraphImpl(); g.add(stmt); |
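Most of the BigdataGraph diff above applies one mechanical idiom to every catch block: a RuntimeException is rethrown unchanged rather than being wrapped a second time, so callers still see the original unchecked exception type, and only checked exceptions get wrapped. Condensed, the idiom is equivalent to the following helper (illustrative only; no such method exists in the codebase):

    // Illustrative only -- captures the pattern repeated in each catch
    // block of BigdataGraph after this revision.
    private static RuntimeException launder(final Exception e) {
        if (e instanceof RuntimeException) {
            // Preserve the original unchecked exception and its type.
            return (RuntimeException) e;
        }
        // Wrap checked exceptions exactly once.
        return new RuntimeException(e);
    }

Without the extra catch clause, an unchecked exception thrown inside cxn() would be re-wrapped, hiding its type from any caller that matches on a specific exception class.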
From: <tho...@us...> - 2014-05-17 16:08:26
|
Revision: 8351 http://sourceforge.net/p/bigdata/code/8351 Author: thompsonbry Date: 2014-05-17 16:08:21 +0000 (Sat, 17 May 2014) Log Message: ----------- Working on #939 (NSS does not start from command line: bigdata-war/src not found). NanoSparqlServer: code has been modified to explicitly search (if jetty.resourceBase is not defined) (a) the local file system; (b) the classpath; and then (c) default to whatever is the default value in jetty.xml for the jetty.resourceBase property. TestNSSHealthCheck: added a basic test suite for checking the health of an NSS instance once deployed. This is a starting point for CI based tests of the various deployment models. build.xml: modified to illustrate a possible way of performing the CI deployment tests. More needs to be done here! Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 16:08:21 UTC (rev 8351) @@ -25,6 +25,7 @@ import java.io.File; import java.io.InputStream; +import java.net.MalformedURLException; import java.net.URL; import java.util.LinkedHashMap; import java.util.Map; @@ -122,6 +123,60 @@ */ String JETTY_DUMP_START = "jetty.dump.start"; + /** + * This property specifies the resource path for the web application. In + * order for this mechanism to work, the <code>jetty.xml</code> file + * MUST contain a line which allows the resourceBase of the web + * application to be set from an environment variable. For example: + * + * <pre> + * <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> + * </pre> + * + * The <code>jetty.resourceBase</code> variable may identify either a + * file or a resource on the class path. To force the use of the web + * application embedded within the <code>bigdata.jar</code> you need to + * specify a JAR URL along the following lines (using the appropriate + * file path and jar name and version: + * + * <pre> + * jar:file:../lib/bigdata-1.3.0.jar!/bigdata-war/src + * </pre> + * + * The use of absolute file paths are recommended for reliable + * resolution. + * <p> + * The order of preference is: + * <ol> + * <li><code>jetty.resourceBase</code> is specified. The value of this + * environment variable will be used to locate the web application.</li> + * <li> + * <code>jetty.resourceBase</code> is not specified (either + * <code>null</code> or whitespace). An attempt is made to locate the + * <code>bigdata-war/src</code> resource in the file system (relative to + * the current working directory). If found, the + * <code>jetty.resourceBase</code> environment variable is set to this + * resource using a <code>file:</code> style URL. This will cause jetty + * to use the web application directory in the file system. 
+ * <p> + * If the resource is not found in the file system, then an attempt is + * made to locate that resource using the classpath. If found, the the + * <code>jetty.resourceBase</code> is set to the URL for the located + * resource. This will cause jetty to use the web application resource + * on the classpath. If there are multiple such resources on the + * classpath, the first such resource will be discovered and used.</li> + * <li> + * Otherwise, the <code>jetty.resourceBase</code> environment variable + * is not modified and the default location specified in the + * <code>jetty.xml</code> file will be used. If jetty is unable to + * resolve that resource, then the web application will not start.</li> + * </ol> + * + * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not + * start from command line: bigdata-war/src not found </a> + */ + String JETTY_RESOURCE_BASE = "jetty.resourceBase"; + } /** @@ -163,7 +218,7 @@ * <dt>-jettyXml</dt> * <dd>The location of the jetty.xml resource that will be used * to start the {@link Server} (default is the file in the JAR). - * * The default will locate the <code>jetty.xml</code> resource + * The default will locate the <code>jetty.xml</code> resource * that is bundled with the JAR. This preserves the historical * behavior. If you want to use a different * <code>jetty.xml</code> file, just override this property on @@ -216,7 +271,11 @@ * use a different jetty.xml file, just override this property on the * command line. */ - String jettyXml = "bigdata-war/src/jetty.xml"; + String jettyXml = System.getProperty(// + SystemProperties.JETTY_XML,// + "bigdata-war/src/jetty.xml"// +// SystemProperties.DEFAULT_JETTY_XML + ); /* * Handle all arguments starting with "-". These should appear before @@ -589,45 +648,26 @@ /* * Configure the jetty Server using a jetty.xml file. In turn, the * jetty.xml file configures the webapp using a web.xml file. The caller - * can override the location of the jetty.xml file if they need to - * change the way in which either jetty or the webapp are configured. - * You can also override many of the properties in the jetty.xml file - * using environment variables. + * can override the location of the jetty.xml file using the [jetty.xml] + * environment variable if they need to change the way in which either + * jetty or the webapp are configured. You can also override many of the + * properties in the [jetty.xml] file using environment variables. For + * example, they can also override the location of the web application + * (including the web.xml file) using the [jetty.resourceBase] + * environment variable. */ final Server server; { - // Locate jetty.xml. - final URL jettyXmlUrl; - if (new File(jettyXml).exists()) { + // Find the effective jetty.xml URL. + final URL jettyXmlURL = getEffectiveJettyXmlURL(classLoader, + jettyXml); - // Check the file system. -// jettyXmlUrl = new File(jettyXml).toURI(); - jettyXmlUrl = new URL("file:" + jettyXml); - - } else { - - // Check the classpath. - jettyXmlUrl = classLoader.getResource(jettyXml); -// jettyXmlUrl = classLoader.getResource("bigdata-war/src/jetty.xml"); - - } - - if (jettyXmlUrl == null) { - - throw new RuntimeException("Not found: " + jettyXml); - - } - - if (log.isInfoEnabled()) - log.info("jetty configuration: jettyXml=" + jettyXml - + ", jettyXmlUrl=" + jettyXmlUrl); - - // Build configuration from that resource. + // Build the server configuration from that jetty.xml resource. final XmlConfiguration configuration; { // Open jetty.xml resource. 
- final Resource jettyConfig = Resource.newResource(jettyXmlUrl); + final Resource jettyConfig = Resource.newResource(jettyXmlURL); InputStream is = null; try { is = jettyConfig.getInputStream(); @@ -639,65 +679,208 @@ } } } - + + // Configure/apply jetty.resourceBase overrides. + configureEffectiveResourceBase(classLoader); + // Configure the jetty server. server = (Server) configuration.configure(); } /* - * Configure the webapp (overrides, IIndexManager, etc.) + * Configure any overrides for the web application init-params. */ - { + configureWebAppOverrides(server, indexManager, initParams); - final WebAppContext wac = getWebApp(server); + return server; + + } - if (wac == null) { + private static URL getEffectiveJettyXmlURL(final ClassLoader classLoader, + final String jettyXml) throws MalformedURLException { - /* - * This is a fatal error. If we can not set the IIndexManager, - * the NSS will try to interpret the propertyFile in web.xml - * rather than using the one that is already open and specified - * by the caller. Among other things, that breaks the - * HAJournalServer startup. - */ + // Locate jetty.xml. + final URL jettyXmlUrl; + boolean isFile = false; + boolean isClassPath = false; + if (new File(jettyXml).exists()) { - throw new RuntimeException("Could not locate " - + WebAppContext.class.getName()); + // Check the file system. + // jettyXmlUrl = new File(jettyXml).toURI(); + jettyXmlUrl = new URL("file:" + jettyXml); + isFile = true; - } + } else { + // Check the classpath. + jettyXmlUrl = classLoader.getResource(jettyXml); + // jettyXmlUrl = + // classLoader.getResource("bigdata-war/src/jetty.xml"); + isClassPath = true; + + } + + if (jettyXmlUrl == null) { + + throw new RuntimeException("Not found: " + jettyXml); + + } + + if (log.isInfoEnabled()) + log.info("jetty configuration: jettyXml=" + jettyXml + ", isFile=" + + isFile + ", isClassPath=" + isClassPath + + ", jettyXmlUrl=" + jettyXmlUrl); + + return jettyXmlUrl; + + } + + /** + * Search (a) the local file system; and (b) the classpath for the web + * application. If the resource is located, then set the + * [jetty.resourceBase] property. This search sequence gives preference to + * the local file system and then searches the classpath (which jetty does + * not known how to do by itself.) + * + * @throws MalformedURLException + * + * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not start + * from command line: bigdata-war/src not found </a> + */ + private static void configureEffectiveResourceBase( + final ClassLoader classLoader) throws MalformedURLException { + + // Check the environment variable. + String resourceBaseStr = System + .getProperty(SystemProperties.JETTY_RESOURCE_BASE); + + // true iff declared as an environment variable. + final boolean isDeclared = resourceBaseStr != null + && resourceBaseStr.trim().length() > 0; + boolean isFile = false; // iff found in local file system. + boolean isClassPath = false; // iff found on classpath. + + if (!isDeclared) { + /* - * Force the use of the caller's IIndexManager. This is how we get the - * NSS to use the already open Journal for the HAJournalServer. + * jetty.resourceBase not declared in the environment. */ - if (indexManager != null) { - // Set the IIndexManager attribute on the WebAppContext. 
- wac.setAttribute(IIndexManager.class.getName(), indexManager); - + // default location: TODO To DEFAULT_JETTY_RESOURCE_BASE + resourceBaseStr = "./bigdata-war/src"; + + final URL resourceBaseURL; + if (new File(resourceBaseStr).exists()) { + + // Check the file system. + resourceBaseURL = new URL("file:" + resourceBaseStr); + isFile = true; + + } else { + + // Check the classpath. + resourceBaseURL = classLoader.getResource(resourceBaseStr); + isClassPath = resourceBaseURL != null; + } - - /* - * Note: You simply can not override the init parameters specified - * in web.xml. Therefore, this sets the overrides on an attribute. - * The attribute is then consulted when the web app starts and its - * the override values are used if given. - */ - if (initParams != null) { - wac.setAttribute( - BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, - initParams); + if (resourceBaseURL != null) { + /* + * We found the resource either in the file system or in the + * classpath. + * + * Explicitly set the discovered value on the jetty.resourceBase + * property. This will cause jetty to use the version of that + * resource that we discovered above. + * + * Note: If we did not find the resource, then the default value + * from the jetty.xml SystemProperty expression will be used by + * jetty. If it can not find a resource using that default + * value, then the startup will fail. We leave this final check + * to jetty itself since it will interpret the jetty.xml file + * itself. + */ + System.setProperty(SystemProperties.JETTY_RESOURCE_BASE, + resourceBaseURL.toExternalForm()); + } } - return server; + if (log.isInfoEnabled()) + log.info("jetty configuration"// + + ": resourceBaseStr=" + resourceBaseStr + + ", isDeclared=" + + isDeclared + ", isFile=" + isFile + + ", isClassPath=" + + isClassPath + + ", jetty.resourceBase(effective)=" + + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE)); } + + /** + * Configure the webapp (overrides, IIndexManager, etc.) + * <p> + * Note: These overrides are achieved by setting the {@link WebAppContext} + * attribute named + * {@link BigdataRDFServletContextListener#INIT_PARAM_OVERRIDES}. The + * {@link BigdataRDFServletContextListener} then consults the attribute when + * reporting the effective value of the init-params. This convoluted + * mechanism is required because you can not otherwise override the + * init-params without editing <code>web.xml</code>. + */ + private static void configureWebAppOverrides(// + final Server server,// + final IIndexManager indexManager,// + final Map<String, String> initParams// + ) { + final WebAppContext wac = getWebApp(server); + + if (wac == null) { + + /* + * This is a fatal error. If we can not set the IIndexManager, the + * NSS will try to interpret the propertyFile in web.xml rather than + * using the one that is already open and specified by the caller. + * Among other things, that breaks the HAJournalServer startup. + */ + + throw new RuntimeException("Could not locate " + + WebAppContext.class.getName()); + + } + + /* + * Force the use of the caller's IIndexManager. This is how we get the + * NSS to use the already open Journal for the HAJournalServer. + */ + if (indexManager != null) { + + // Set the IIndexManager attribute on the WebAppContext. + wac.setAttribute(IIndexManager.class.getName(), indexManager); + + } + + /* + * Note: You simply can not override the init parameters specified in + * web.xml. Therefore, this sets the overrides on an attribute. 
The + * attribute is then consulted when the web app starts and its the + * override values are used if given. + */ + if (initParams != null) { + + wac.setAttribute( + BigdataRDFServletContextListener.INIT_PARAM_OVERRIDES, + initParams); + + } + + } + /** * Return the {@link WebAppContext} for the {@link Server}. * Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-05-17 16:08:21 UTC (rev 8351) @@ -0,0 +1,642 @@ +/** +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp.health; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import junit.framework.AssertionFailedError; +import junit.framework.Test; +import junit.framework.TestCase2; +import junit.framework.TestListener; +import junit.framework.TestResult; +import junit.framework.TestSuite; +import junit.textui.ResultPrinter; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.conn.ClientConnectionManager; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.impl.client.DefaultRedirectStrategy; +import org.apache.http.util.EntityUtils; + +import com.bigdata.BigdataStatics; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; +import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; +import com.bigdata.rdf.sail.webapp.client.HttpException; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; +import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager; +import com.bigdata.util.concurrent.DaemonThreadFactory; + +/** + * Utility test suite provides a health check for a deployed instance. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestNSSHealthCheck extends TestCase2 { + + /** + * A marker placed into index.html so we can recognize when that page is + * served. + */ + private static final String JUNIT_TEST_MARKER_INDEX_HTML = "junit test marker: index.html"; + + /** + * The executor used by the http client. 
+ */ + private ExecutorService executorService; + + /** + * The {@link ClientConnectionManager} for the {@link HttpClient} used by + * the {@link RemoteRepository}. This is used when we tear down the + * {@link RemoteRepository}. + */ + private ClientConnectionManager m_cm; + + /** + * Exposed to tests that do direct HTTP GET/POST operations. + */ + protected HttpClient m_httpClient; + + /** + * The client-API wrapper to the NSS. + */ + protected RemoteRepositoryManager m_repo; + + /** + * The effective {@link NanoSparqlServer} http end point (including the + * ContextPath). + * <pre> + * http://localhost:8080/bigdata -- webapp URL (includes "/bigdata" context path. + * </pre> + */ + protected String m_serviceURL; + + /** + * The URL of the root of the web application server. This does NOT include + * the ContextPath for the webapp. + * + * <pre> + * http://localhost:8080 -- root URL + * </pre> + */ + protected String m_rootURL; + + public TestNSSHealthCheck(final String name) {//, final String requestURI) { + + super(name); + +// m_requestURI = requestURI; + + } + + /** + * FIXME hacked in test suite constructor. + */ + private static String requestURI; + + @Override + protected void setUp() throws Exception { + + super.setUp(); + + m_rootURL = requestURI; + + m_serviceURL = m_rootURL + BigdataStatics.getContextPath(); + + m_cm = DefaultClientConnectionManagerFactory.getInstance() + .newInstance(); + + final DefaultHttpClient httpClient = new DefaultHttpClient(m_cm); + m_httpClient = httpClient; + + /* + * Ensure that the client follows redirects using a standard policy. + * + * Note: This is necessary for tests of the webapp structure since the + * container may respond with a redirect (302) to the location of the + * webapp when the client requests the root URL. + */ + httpClient.setRedirectStrategy(new DefaultRedirectStrategy()); + + executorService = Executors.newCachedThreadPool(DaemonThreadFactory + .defaultThreadFactory()); + + m_repo = new RemoteRepositoryManager(m_serviceURL, m_httpClient, + executorService); + + } + + @Override + protected void tearDown() throws Exception { + + m_rootURL = null; + m_serviceURL = null; + + if (m_cm != null) { + m_cm.shutdown(); + m_cm = null; + } + + m_httpClient = null; + m_repo = null; + + if (executorService != null) { + executorService.shutdownNow(); + executorService = null; + } + + super.tearDown(); + + } + + static class HealthCheckTestSuite extends TestSuite { + + /** + * The URL of the bigdata web application. + */ + @SuppressWarnings("unused") + private final String requestURI; + + /** + * + * @param name + * @param requestURI + * The URL of the bigdata web application. + */ + private HealthCheckTestSuite(final String name, final String requestURI) { + + super(name); + + this.requestURI = requestURI; + + // FIXME Hacked through static field. + TestNSSHealthCheck.requestURI = requestURI; + + } + + } + + static HealthCheckTestSuite createTestSuite(final String name, + final String requestURI) { + + final HealthCheckTestSuite suite = new HealthCheckTestSuite(name, + requestURI); + + suite.addTestSuite(TestNSSHealthCheck.class); + + return suite; + + } + + /** + * bare URL of the server + * + * <pre> + * http://localhost:8080 + * </pre> + * + * The response is should be <code>index.html</code> since we want the + * bigdata webapp to respond for the top-level context. + * + * <p> + * Note: You must ensure that the client follows redirects using a standard + * policy. 
This is necessary for tests of the webapp structure since the + * container may respond with a redirect (302) to the location of the webapp + * when the client requests the root URL. + */ + public void test_webapp_structure_rootURL() throws Exception { + + final String content = doGET(m_rootURL); + + assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML)); + + } + + /** + * URL with correct context path + * + * <pre> + * http://localhost:8080/bigdata + * </pre> + * + * The response is should be <code>index.html</code>, which is specified + * through the welcome files list. + */ + public void test_webapp_structure_contextPath() throws Exception { + + final String content = doGET(m_serviceURL); + + assertTrue(content.contains(JUNIT_TEST_MARKER_INDEX_HTML)); + } + + /** + * URL with context path and index.html reference + * + * <pre> + * http://localhost:8080/bigdata/index.html + * </pre> + * + * This URL does NOT get mapped to anything (404). + */ + public void test_webapp_structure_contextPath_indexHtml() throws Exception { + + try { + + doGET(m_serviceURL + "/index.html"); + + } catch (HttpException ex) { + + assertEquals(404, ex.getStatusCode()); + + } + + } + + /** + * The <code>favicon.ico</code> file. + * + * @see <a href="http://www.w3.org/2005/10/howto-favicon"> How to add a + * favicon </a> + */ + public void test_webapp_structure_favicon() throws Exception { + + doGET(m_serviceURL + "/html/favicon.ico"); + + } + + /** + * The <code>/status</code> servlet responds. + */ + public void test_webapp_structure_status() throws Exception { + + doGET(m_serviceURL + "/status"); + + } + + /** + * The <code>/counters</code> servlet responds. + */ + public void test_webapp_structure_counters() throws Exception { + + doGET(m_serviceURL + "/counters"); + + } + +// /** +// * The <code>/namespace/</code> servlet responds (multi-tenancy API). +// */ +// public void test_webapp_structure_namespace() throws Exception { +// +// doGET(m_serviceURL + "/namespace/"); +// +// } + + /** + * The fully qualified URL for <code>index.html</code> + * + * <pre> + * http://localhost:8080/bigdata/html/index.html + * </pre> + * + * The response is should be <code>index.html</code>, which is specified + * through the welcome files list. + */ + public void test_webapp_structure_contextPath_html_indexHtml() throws Exception { + + doGET(m_serviceURL + "/html/index.html"); + } + + private String doGET(final String url) throws Exception { + + HttpResponse response = null; + HttpEntity entity = null; + + try { + + final ConnectOptions opts = new ConnectOptions(url); + opts.method = "GET"; + + response = doConnect(opts); + + checkResponseCode(url, response); + + entity = response.getEntity(); + + final String content = EntityUtils.toString(entity); + + return content; + + } finally { + + try { + EntityUtils.consume(entity); + } catch (IOException ex) { + log.warn(ex, ex); + } + + } + + } + + /** + * Connect to a SPARQL end point (GET or POST query only). + * + * @param opts + * The connection options. + * + * @return The connection. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619"> + * RemoteRepository class should use application/x-www-form-urlencoded + * for large POST requests </a> + */ + private HttpResponse doConnect(final ConnectOptions opts) throws Exception { + + /* + * Generate the fully formed and encoded URL. 
+ */ + + final StringBuilder urlString = new StringBuilder(opts.serviceURL); + + ConnectOptions.addQueryParams(urlString, opts.requestParams); + + final boolean isLongRequestURL = urlString.length() > 1024; + + if (isLongRequestURL && opts.method.equals("POST") + && opts.entity == null) { + + /* + * URL is too long. Reset the URL to just the service endpoint and + * use application/x-www-form-urlencoded entity instead. Only in + * cases where there is not already a request entity (SPARQL query + * and SPARQL update). + */ + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + + } else if (isLongRequestURL && opts.method.equals("GET") + && opts.entity == null) { + + /* + * Convert automatically to a POST if the request URL is too long. + * + * Note: [opts.entity == null] should always be true for a GET so + * this bit is a paranoia check. + */ + + opts.method = "POST"; + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + + } + + if (log.isDebugEnabled()) { + log.debug("*** Request ***"); + log.debug(opts.serviceURL); + log.debug(opts.method); + log.debug("query=" + opts.getRequestParam("query")); + log.debug(urlString.toString()); + } + + HttpUriRequest request = null; + try { + + request = RemoteRepository.newRequest(urlString.toString(), opts.method); + + if (opts.requestHeaders != null) { + + for (Map.Entry<String, String> e : opts.requestHeaders + .entrySet()) { + + request.addHeader(e.getKey(), e.getValue()); + + if (log.isDebugEnabled()) + log.debug(e.getKey() + ": " + e.getValue()); + + } + + } + +// // conn = doConnect(urlString.toString(), opts.method); +// final URL url = new URL(urlString.toString()); +// conn = (HttpURLConnection) url.openConnection(); +// conn.setRequestMethod(opts.method); +// conn.setDoOutput(true); +// conn.setDoInput(true); +// conn.setUseCaches(false); +// conn.setReadTimeout(opts.timeout); +// conn.setRequestProperty("Accept", opts.acceptHeader); +// if (log.isDebugEnabled()) +// log.debug("Accept: " + opts.acceptHeader); + + if (opts.entity != null) { + +// if (opts.data == null) +// throw new AssertionError(); + +// final String contentLength = Integer.toString(opts.data.length); + +// conn.setRequestProperty("Content-Type", opts.contentType); +// conn.setRequestProperty("Content-Length", contentLength); + +// if (log.isDebugEnabled()) { +// log.debug("Content-Type: " + opts.contentType); +// log.debug("Content-Length: " + contentLength); +// } + +// final ByteArrayEntity entity = new ByteArrayEntity(opts.data); +// entity.setContentType(opts.contentType); + + ((HttpEntityEnclosingRequestBase) request).setEntity(opts.entity); + +// final OutputStream os = conn.getOutputStream(); +// try { +// os.write(opts.data); +// os.flush(); +// } finally { +// os.close(); +// } + + } + + final HttpResponse response = m_httpClient.execute(request); + + return response; + +// // connect. +// conn.connect(); +// +// return conn; + + } catch (Throwable t) { + /* + * If something goes wrong, then close the http connection. + * Otherwise, the connection will be closed by the caller. + */ + try { + + if (request != null) + request.abort(); + +// // clean up the connection resources +// if (conn != null) +// conn.disconnect(); + + } catch (Throwable t2) { + // ignored. 
+ } + throw new RuntimeException(opts.serviceURL + " : " + t, t); + } + + } + + /** + * Throw an exception if the status code does not indicate success. + * + * @param response + * The response. + * + * @return The response. + * + * @throws IOException + */ + private static HttpResponse checkResponseCode(final String url, + final HttpResponse response) throws IOException { + + final int rc = response.getStatusLine().getStatusCode(); + + if (rc < 200 || rc >= 300) { + throw new HttpException(rc, "StatusCode=" + rc + ", StatusLine=" + + response.getStatusLine() + ", headers=" + + Arrays.toString(response.getAllHeaders()) + + ", ResponseBody=" + + EntityUtils.toString(response.getEntity())); + + } + + if (log.isDebugEnabled()) { + /* + * write out the status list, headers, etc. + */ + log.debug("*** Response ***"); + log.debug("Status Line: " + response.getStatusLine()); + } + + return response; + + } + + /** + * Connect to the NSS end point and run a test suite designed to verify the + * health of that instance. + * + * @param args + * URL + * + * @throws MalformedURLException + * + * TODO Support HA health checks as well. + */ + public static void main(final String[] args) throws MalformedURLException { + + if (args.length < 1) { + System.err.println("usage: <cmd> Request-URI"); + System.exit(1); + } + + final String requestURI = args[0]; + + // Setup test result. + final TestResult result = new TestResult(); + + // Setup listener, which will write the result on System.out + result.addListener(new ResultPrinter(System.out)); + + result.addListener(new TestListener() { + + @Override + public void startTest(Test arg0) { + log.info(arg0); + } + + @Override + public void endTest(Test arg0) { + log.info(arg0); + } + + @Override + public void addFailure(Test arg0, AssertionFailedError arg1) { + log.error(arg0, arg1); + } + + @Override + public void addError(Test arg0, Throwable arg1) { + log.error(arg0, arg1); + } + }); + + try { + + // Setup test suite + final Test test = createTestSuite(null/* name */, requestURI); + + // Run the test suite. + test.run(result); + + } finally { + + } + + final String msg = "nerrors=" + result.errorCount() + ", nfailures=" + + result.failureCount() + ", nrun=" + result.runCount(); + + System.out.println(msg); + + if (result.errorCount() > 0 || result.failureCount() > 0) { + + // At least one test failed. + System.exit(1); + + } + + // All green. + System.exit(0); + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-17 16:08:21 UTC (rev 8351) @@ -149,7 +149,7 @@ <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">true</Set> + <Set name="extractWAR">false</Set> </New> </Arg> </Call> Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-17 16:04:33 UTC (rev 8350) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-17 16:08:21 UTC (rev 8351) @@ -1955,6 +1955,9 @@ <property name="app.home" location="${bigdata.dir}" /> + <!-- port for test NSS deployments. 
--> + <property name="test.NSS.port" value="24444" /> + <property name="test.codebase.port" value="23333" /> <property name="test.codebase.dir" value="${dist.lib.dl}" /> <property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar" /> @@ -2225,6 +2228,9 @@ <isset property="testName" /> </condition> + <!-- test various deployment models. --> + <antcall target="test-deployments" /> + <!-- Note: timeout is milliseconds per forked JVM if specified. --> <!-- We have some long running unit tests so the timeout needs --> <!-- to be more than a minute if you do specify this property. --> @@ -2433,6 +2439,33 @@ </delete> </target> + <!-- --> + <!-- FIXME Ant targets for testing a variety of deployment models. --> + <!-- --> + <target name="test-deployments" description="Validate deployment models."> + <!-- antcall target="test-nss-default" / --> + </target> + + <!-- TODO Test with -Djetty.xml override. --> + <!-- TODO Test with -Djetty.resourceBase override. --> + <!-- TODO Test with CWD == ${dist.var.jetty} so it will locate the web app in the file system. --> + <target name="test-nss-default"> + <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" + failonerror="true" fork="true" logerror="true"> + <classpath path="${run.class.path}"/> + <jvmarg value="-server"/> + <jvmarg value="-Xmx1G"/> + <jvmarg value="-Dlog4j.configuration=${dist.var.config.logging}log4j.properties"/> + <arg value="${test.NSS.port}"/> + <arg value="kb"/> + <arg value="${dist.var.jetty}/WEB-INF/RWStore.properties"/> + </java> + <!-- TODO Get the PID --> + <!-- TODO Run junit test suite for validation (and integrate into total CI junit summary). --> + <!-- TODO Sure kill using the PID (or use the jetty command to stop the process). --> + <!-- TODO We also need a reliable way to handle this for a remote deploy. --> + </target> + <!-- --> <!-- SESAME SERVER TARGETS --> <!-- --> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
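For reference, the health check utility above can be run from the command line once the bigdata jars are on the classpath; the wildcard classpath below is an assumption about a staged layout, not part of the commit:

    # Hypothetical invocation; adjust the classpath for your deployment.
    java -cp 'dist/bigdata/lib/*' \
        com.bigdata.rdf.sail.webapp.health.TestNSSHealthCheck \
        http://localhost:8080

Per the main() method, the process exits with status 0 when all checks pass and with status 1 if any check fails or errors, which makes it usable from monitoring scripts.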
From: <tho...@us...> - 2014-05-18 13:35:44
Revision: 8353 http://sourceforge.net/p/bigdata/code/8353 Author: thompsonbry Date: 2014-05-18 13:35:40 +0000 (Sun, 18 May 2014) Log Message: ----------- Added test for #887 - ticket is closed. Problem cannot be demonstrated against the current code base. Suspect it was fixed for the 1.3.0 release (heisenbug). Javadoc update for jetty.resourceBase for the NSS. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-17 17:16:32 UTC (rev 8352) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -128,6 +128,7 @@ // Test suite for SPARQL 1.1 BINDINGS clause suite.addTestSuite(TestBindings.class); suite.addTestSuite(TestBindHeisenbug708.class); + suite.addTestSuite(TestTicket887.class); // Complex queries. suite.addTestSuite(TestComplexQuery.class); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket887.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,78 @@ +/** + +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.eval; + + +/** + * Test suite for a heisenbug involving BIND. Unlike the other issues this + * sometimes happens, and is sometimes OK, so we run the test in a loop 20 + * times.
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/708"> + * Heisenbug </a> + * + * @version $Id$ + */ +public class TestTicket887 extends AbstractDataDrivenSPARQLTestCase { + + public TestTicket887() { + } + + public TestTicket887(String name) { + super(name); + } + + /** + * <pre> + * SELECT * + * WHERE { + * + * GRAPH ?g { + * + * BIND( "hello" as ?hello ) . + * BIND( CONCAT(?hello, " world") as ?helloWorld ) . + * + * ?member a ?class . + * + * } + * + * } + * LIMIT 1 + * </pre> + * + * @see <a href="http://trac.bigdata.com/ticket/887" > BIND is leaving a + * variable unbound </a> + */ + public void test_ticket_887_bind() throws Exception { + + new TestHelper( + "ticket_887_bind", // testURI, + "ticket_887_bind.rq",// queryFileURL + "ticket_887_bind.trig",// dataFileURL + "ticket_887_bind.srx"// resultFileURL + ).runTest(); + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.rq 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,14 @@ +SELECT * +WHERE { + + GRAPH ?g { + + BIND( "hello" as ?hello ) . + BIND( CONCAT(?hello, " world") as ?helloWorld ) . + + ?member a ?class . + + } + +} +LIMIT 1 \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.srx 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,32 @@ +<?xml version="1.0"?> +<sparql + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" + xmlns="http://www.w3.org/2005/sparql-results#" > + <head> + <variable name="?hello"/> + <variable name="?helloWorld"/> + <variable name="?member"/> + <variable name="?class"/> + <variable name="?g"/> + </head> + <results> + <result> + <binding name="hello"> + <literal>hello</literal> + </binding> + <binding name="helloWorld"> + <literal>hello world</literal> + </binding> + <binding name="member"> + <uri>http://www.bigdata.com/member</uri> + </binding> + <binding name="class"> + <uri>http://www.bigdata.com/cls</uri> + </binding> + <binding name="g"> + <uri>http://www.bigdata.com/</uri> + </binding> + </result> + </results> +</sparql> \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_887_bind.trig 2014-05-18 13:35:40 UTC (rev 8353) @@ -0,0 +1,6 @@ +@prefix : <http://www.bigdata.com/> . +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . 
+ +: { + :member a :cls +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-17 17:16:32 UTC (rev 8352) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 13:35:40 UTC (rev 8353) @@ -152,25 +152,39 @@ * environment variable will be used to locate the web application.</li> * <li> * <code>jetty.resourceBase</code> is not specified (either - * <code>null</code> or whitespace). An attempt is made to locate the - * <code>bigdata-war/src</code> resource in the file system (relative to - * the current working directory). If found, the - * <code>jetty.resourceBase</code> environment variable is set to this - * resource using a <code>file:</code> style URL. This will cause jetty - * to use the web application directory in the file system. - * <p> - * If the resource is not found in the file system, then an attempt is - * made to locate that resource using the classpath. If found, the the - * <code>jetty.resourceBase</code> is set to the URL for the located - * resource. This will cause jetty to use the web application resource - * on the classpath. If there are multiple such resources on the - * classpath, the first such resource will be discovered and used.</li> + * <code>null</code> or whitespace). + * <ol> + * <li>An attempt is made to locate the <code>bigdata-war/src</code> + * resource in the file system (relative to the current working + * directory). If found, the <code>jetty.resourceBase</code> environment + * variable is set to this resource using a <code>file:</code> style + * URL. This will cause jetty to use the web application directory in + * the file system.</li> + * <li> + * An attempt is made to locate the resource + * <code>/WEB-INF/web.xml</code> using the classpath (this handles the + * case when running under the eclipse IDE). If found, the + * <code>jetty.resourceBase</code> is set to the URL formed by removing + * the trailing <code>WEB-INF/web.xml</code> for the located resource. + * This will cause jetty to use the web application resource on the + * classpath. If there are multiple such resources on the classpath, the + * first such resource will be discovered and used.</li> + * <li>An attempt is made to locate the resource + * <code>bigdata-war/src/WEB-INF/web.xml</code> using the classpath + * (this handles the case when running from the command line using a + * bigdata JAR). If found, the <code>jetty.resourceBase</code> is + * set to the URL formed by removing the trailing <code>WEB-INF/web.xml</code> + * for the located resource. This will cause jetty to use the web + * application resource on the classpath. If there are multiple such + * resources on the classpath, the first such resource will be + * discovered and used.</li> + * <li> + * Otherwise, the <code>jetty.resourceBase</code> environment variable * is not modified and the default location specified in the * <code>jetty.xml</code> file will be used.
If jetty is unable to * resolve that resource, then the web application will not start.</li> * </ol> + * </ol> * * @see <a href="http://trac.bigdata.com/ticket/939" > NSS does not * start from command line: bigdata-war/src not found </a> @@ -825,7 +839,7 @@ } if (tmp != null) { if (src != null) { - if (log.isInfoEnabled()) + if(log.isInfoEnabled()) log.info("Found: src=" + src + ", url=" + tmp); } final String s = tmp.toExternalForm(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
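As the javadoc above describes, the file-system and classpath probes run only when jetty.resourceBase is unset, so the search can be bypassed by supplying the property explicitly. A minimal sketch, with the port, namespace, and properties-file arguments mirroring the ant test-nss-default target from the earlier commit (all paths are placeholders):

    # Hypothetical launch with an explicit resource base.
    java -Djetty.resourceBase=bigdata-war/src \
        com.bigdata.rdf.sail.webapp.NanoSparqlServer \
        8080 kb bigdata-war/src/WEB-INF/RWStore.properties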
From: <tho...@us...> - 2014-05-19 14:26:16
Revision: 8359 http://sourceforge.net/p/bigdata/code/8359 Author: thompsonbry Date: 2014-05-19 14:26:12 +0000 (Mon, 19 May 2014) Log Message: ----------- Bug fix for #940 (HA LBS breaks tomcat deployment). The root cause is that the ProxyServlet is not available under tomcat (or anything else besides jetty). Therefore it cannot be configured from the same web.xml file that is used for other platforms. To address this, I extracted the HA LBS configuration into a new override-web.xml file and then modified the NanoSparqlServer to locate that resource. The HA test suite also needed to be modified to explicitly locate this resource. See #940 (HA LBS breaks tomcat deployment). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-A.config 2014-05-19 14:26:12 UTC (rev 8359) @@ -233,10 +233,10 @@ serviceDir = bigdata.serviceDir; + logicalServiceId = bigdata.logicalServiceId; + haLogDir = bigdata.logDir; - logicalServiceId = bigdata.logicalServiceId; - writePipelineAddr = new InetSocketAddress("localhost",bigdata.haPort); /* Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal-C.config 2014-05-19 14:26:12 UTC (rev 8359) @@ -61,7 +61,7 @@ private static fedname = "benchmark"; // The RMI port for the HAGlue interface (may be ZERO for a random port). - private static rmiPort = ConfigMath.add(9080,1); + private static rmiPort = ConfigMath.add(9080,2); // write replication pipeline port (listener).
private static haPort = ConfigMath.add(9090,2); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-19 14:26:12 UTC (rev 8359) @@ -95,6 +95,7 @@ import com.bigdata.quorum.QuorumException; import com.bigdata.quorum.zk.ZKQuorumClient; import com.bigdata.quorum.zk.ZKQuorumImpl; +import com.bigdata.rdf.sail.webapp.NanoSparqlServer; import com.bigdata.rdf.sail.webapp.client.HttpException; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.service.jini.JiniClientConfig; @@ -135,6 +136,7 @@ */ static class ServiceListener implements IServiceListener { + @SuppressWarnings("unused") private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; private volatile boolean dead = false; @@ -2226,7 +2228,7 @@ * Used to override the port at which jetty sets up the http * connection. */ - private final String TEST_JETTY_PORT = "jetty.port"; + private final String TEST_JETTY_PORT = NanoSparqlServer.SystemProperties.JETTY_PORT; /** * The path in the local file system to the root of the web @@ -2234,13 +2236,15 @@ * code, but the webapp gets deployed to the serviceDir for this * test suite. */ - private final String JETTY_RESOURCE_BASE = "jetty.resourceBase"; - + private final String JETTY_RESOURCE_BASE = NanoSparqlServer.SystemProperties.JETTY_RESOURCE_BASE; + + private final String JETTY_OVERRIDE_WEB_XML = NanoSparqlServer.SystemProperties.JETTY_OVERRIDE_WEB_XML; + /** * Used to override the <code>jetty.dump.start</code> environment * property. */ - private final String TEST_JETTY_DUMP_START = "jetty.dump.start"; + private final String TEST_JETTY_DUMP_START = NanoSparqlServer.SystemProperties.JETTY_DUMP_START; /** * The absolute effective path of the service directory. This is @@ -2290,6 +2294,9 @@ // Override the location of the webapp as deployed. cmds.add("-D" + JETTY_RESOURCE_BASE + "=."); + // Override the location of the override-web.xml file as deployed. + cmds.add("-D" + JETTY_OVERRIDE_WEB_XML + "=./WEB-INF/override-web.xml"); + // Override the jetty.dump.start. cmds.add("-D" + TEST_JETTY_DUMP_START + "=" + jettyDumpStart); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/NanoSparqlServer.java 2014-05-19 14:26:12 UTC (rev 8359) @@ -191,6 +191,23 @@ */ String JETTY_RESOURCE_BASE = "jetty.resourceBase"; + /** + * The location of the <code>override-web.xml</code> resource. The + * default is given in <code>jetty.xml</code> and serves to locate the + * resource when deployed under an IDE. If not explicitly given, value + * of the environment variable is set by the same logic that sets the + * {@link #JETTY_RESOURCE_BASE} environment variable. 
This allows the + * <code>override-web.xml</code> resource to be found in its default + * location (which is the same directory / package as the + * <code>web.xml</code> file) while still preserving the ability to + * override the location of that resource explicitly by setting the + * environment variable before starting the server. + * + * @see <a href="http://trac.bigdata.com/ticket/940" > ProxyServlet in + * web.xml breaks tomcat WAR (HA LBS) </a> + */ + String JETTY_OVERRIDE_WEB_XML = "jetty.overrideWebXml"; + } /** @@ -439,10 +456,17 @@ public static void awaitServerStart(final Server server) throws InterruptedException, TimeoutException, Exception { +// Note: Does not appear to help. +// +// final WebAppContext wac = getWebApp(server); +// +// if (wac == null) +// throw new Exception("WebApp is not available?"); + final long timeout = Long.parseLong(System.getProperty( SystemProperties.JETTY_STARTUP_TIMEOUT, SystemProperties.DEFAULT_JETTY_STARTUP_TIMEOUT)); - + boolean ok = false; final long begin = System.nanoTime(); final long nanos = TimeUnit.SECONDS.toNanos(timeout); @@ -453,7 +477,8 @@ server.start(); // Await running. remaining = nanos - (System.nanoTime() - begin); - while (server.isStarting() && !server.isRunning() && remaining > 0) { + while (server.isStarting() && !server.isRunning() + /* && !wac.isRunning() */ && remaining > 0) { Thread.sleep(100/* ms */); // remaining = nanos - (now - begin) [aka elapsed] remaining = nanos - (System.nanoTime() - begin); @@ -461,6 +486,8 @@ if (remaining < 0) { throw new TimeoutException(); } +// if (!wac.isRunning()) +// throw new Exception("WebApp is not running?"); ok = true; } finally { if (!ok) { @@ -870,9 +897,18 @@ * to jetty itself since it will interpret the jetty.xml file * itself. */ + final String tmp = resourceBaseURL.toExternalForm(); + System.setProperty(SystemProperties.JETTY_RESOURCE_BASE, - resourceBaseURL.toExternalForm()); + tmp); + final URL overrideWebXmlURL = new URL(tmp + + (tmp.endsWith("/") ? "" : "/") + + "WEB-INF/override-web.xml"); + + System.setProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML, + overrideWebXmlURL.toExternalForm()); + } } @@ -885,7 +921,9 @@ + ", isClassPath=" + isClassPath + ", jetty.resourceBase(effective)=" - + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE)); + + System.getProperty(SystemProperties.JETTY_RESOURCE_BASE) + + ", jetty.overrideWebXml(effective)=" + + System.getProperty(SystemProperties.JETTY_OVERRIDE_WEB_XML)); } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/override-web.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -0,0 +1,100 @@ +<?xml version="1.0" encoding="UTF-8"?> +<web-app xmlns="http://java.sun.com/xml/ns/javaee" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_1.xsd" + version="3.1"> + <servlet> + <servlet-name>Load Balancer</servlet-name> + <description> + The HA Load Balancer servlet provides a transparent proxy for + requests arriving its configured URL pattern (the "external" + interface for the load balancer) to the root of the web + application. + + The use of the load balancer is entirely optional. If the + security rules permit, then clients MAY make requests directly + against a specific service. 
Thus, no specific provision exists + to disable the load balancer servlet, but you may choose not to + deploy it. + + When successfully deployed, requests having prefix corresponding to + the URL pattern for the load balancer are automatically redirected + to a joined service in the met quorum based on the configured load + balancer policy. + + Requests directed to /bigdata/LBS/leader are proxied to the quorum + leader - this URL must be used for non-idempotent requests + (updates). + + Requests directed to /bigdata/LBS/read are load balanced over the + services joined with the met quourm. This URL may only be used + with idempotent requests (reads). + + For non-HA deployments, requests are simply forwarded to the local + service after stripping off the /LBS/leader or /LBS/read prefix. + Thus, it is always safe to use the LBS request URLs. + + The load balancer policies are "HA aware." They will always + redirect update requests to the quorum leader. The default + polices will load balance read requests over the leader and + followers in a manner that reflects the CPU, IO Wait, and GC + Time associated with each service. The PlatformStatsPlugIn + and GangliaPlugIn MUST be enabled for the default load + balancer policy to operate. It depends on those plugins to + maintain a model of the load on the HA replication cluster. + The GangliaPlugIn should be run only as a listener if you are + are running the real gmond process on the host. If you are + not running gmond, then the GangliaPlugIn should be configured + as both a listener and a sender. + </description> + <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class> + <load-on-startup>1</load-on-startup> + <async-supported>true</async-supported> + <init-param> + <param-name>policy</param-name> + <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value> + <description> + The load balancer policy. This must be an instance of the + IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is + used when no value is specified. + + The policies differ ONLY in how they handle READ requests. All policies + proxy updates to the leader. If you do not want update proxying, then + use a URL that does not address the HALoadBalancerServlet. + + The following policies are pre-defined: + + com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy: + + Does not load balance read requests. + + com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy: + + Round robin for read requests. + + com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy: + + Load based proxying for read requests using the build in http + service for reporting performance counters. This policy requires + the PlatformStatsPlugIn and may also require platform specific + metrics collection dependencies, e.g., sysstat. + + com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy: + + Load based proxying for read requests using ganglia. This policy + requires the requires the PlatformStatsPlugIn. In addition, either + gmond must be installed on each node or the embedded GangliaService + must be enabled such that performance metrics are collected and + reported. + + Some of these policies can be further configured using additional + init-param elements that they understand. See the javadoc for the + individual policies for more information. 
+ </description> + </init-param> + </servlet> + <servlet-mapping> + <servlet-name>Load Balancer</servlet-name> + <url-pattern>/LBS/*</url-pattern> + </servlet-mapping> +</web-app> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/web.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -89,102 +89,8 @@ <description>Performance counters.</description> <servlet-class>com.bigdata.rdf.sail.webapp.CountersServlet</servlet-class> <async-supported>true</async-supported> - </servlet><!-- --> - <servlet> - <servlet-name>Load Balancer</servlet-name> - <description> - The HA Load Balancer servlet provides a transparent proxy for - requests arriving its configured URL pattern (the "external" - interface for the load balancer) to the root of the web - application. - - The use of the load balancer is entirely optional. If the - security rules permit, then clients MAY make requests directly - against a specific service. Thus, no specific provision exists - to disable the load balancer servlet, but you may choose not to - deploy it. - - When successfully deployed, requests having prefix corresponding to - the URL pattern for the load balancer are automatically redirected - to a joined service in the met quorum based on the configured load - balancer policy. - - Requests directed to /bigdata/LBS/leader are proxied to the quorum - leader - this URL must be used for non-idempotent requests - (updates). - - Requests directed to /bigdata/LBS/read are load balanced over the - services joined with the met quourm. This URL may only be used - with idempotent requests (reads). - - For non-HA deployments, requests are simply forwarded to the local - service after stripping off the /LBS/leader or /LBS/read prefix. - Thus, it is always safe to use the LBS request URLs. - - The load balancer policies are "HA aware." They will always - redirect update requests to the quorum leader. The default - polices will load balance read requests over the leader and - followers in a manner that reflects the CPU, IO Wait, and GC - Time associated with each service. The PlatformStatsPlugIn - and GangliaPlugIn MUST be enabled for the default load - balancer policy to operate. It depends on those plugins to - maintain a model of the load on the HA replication cluster. - The GangliaPlugIn should be run only as a listener if you are - are running the real gmond process on the host. If you are - not running gmond, then the GangliaPlugIn should be configured - as both a listener and a sender. - </description> - <servlet-class>com.bigdata.rdf.sail.webapp.HALoadBalancerServlet</servlet-class> - <load-on-startup>1</load-on-startup> - <async-supported>true</async-supported> - <init-param> - <param-name>policy</param-name> - <param-value>com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy</param-value> - <description> - The load balancer policy. This must be an instance of the - IHALoadBalancerPolicy interface. A default policy (NOPLBSPolicy) is - used when no value is specified. - - The policies differ ONLY in how they handle READ requests. All policies - proxy updates to the leader. If you do not want update proxying, then - use a URL that does not address the HALoadBalancerServlet. 
- - The following policies are pre-defined: - - com.bigdata.rdf.sail.webapp.lbs.policy.NOPLBSPolicy: - - Does not load balance read requests. - - com.bigdata.rdf.sail.webapp.lbs.policy.RoundRobinLBSPolicy: - - Round robin for read requests. - - com.bigdata.rdf.sail.webapp.lbs.policy.counters.CountersLBSPolicy: - - Load based proxying for read requests using the build in http - service for reporting performance counters. This policy requires - the PlatformStatsPlugIn and may also require platform specific - metrics collection dependencies, e.g., sysstat. - - com.bigdata.rdf.sail.webapp.lbs.policy.ganglia.GangliaLBSPolicy: - - Load based proxying for read requests using ganglia. This policy - requires the requires the PlatformStatsPlugIn. In addition, either - gmond must be installed on each node or the embedded GangliaService - must be enabled such that performance metrics are collected and - reported. - - Some of these policies can be further configured using additional - init-param elements that they understand. See the javadoc for the - individual policies for more information. - </description> - </init-param> </servlet> - <servlet-mapping> - <servlet-name>Load Balancer</servlet-name> - <url-pattern>/LBS/*</url-pattern> - </servlet-mapping> - <!-- --> + <!-- Note: The HALoadBalancerServlet is deployed from override-web.xml --> <!-- Serve anything under /html/* as a simple file. --> <servlet-mapping> <servlet-name>default</servlet-name> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-18 15:49:11 UTC (rev 8358) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-19 14:26:12 UTC (rev 8359) @@ -142,14 +142,12 @@ <Arg> <!-- This is the bigdata web application. --> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"> - <!-- The location of the top-level of the bigdata webapp. --> - <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> - </Set> - <Set name="contextPath">/bigdata</Set> + <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> + <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
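Since the override descriptor is now resolved through the jetty.overrideWebXml system property, a deployment that relocates the webapp can point jetty at the file directly; the paths below are placeholders, not values taken from the commit:

    # Hypothetical override of the descriptor location.
    java -Djetty.resourceBase=/opt/bigdata/var/jetty \
        -Djetty.overrideWebXml=/opt/bigdata/var/jetty/WEB-INF/override-web.xml \
        com.bigdata.rdf.sail.webapp.NanoSparqlServer 8080 kb RWStore.properties

Under tomcat, where the jetty ProxyServlet is unavailable, the override-web.xml resource is simply not deployed, so the WAR's web.xml no longer drags in the HALoadBalancerServlet.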
From: <tho...@us...> - 2014-05-20 13:47:55
Revision: 8377 http://sourceforge.net/p/bigdata/code/8377 Author: thompsonbry Date: 2014-05-20 13:47:52 +0000 (Tue, 20 May 2014) Log Message: ----------- Modified to stage the javadoc as well. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-20 13:28:29 UTC (rev 8376) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-20 13:47:52 UTC (rev 8377) @@ -408,7 +408,7 @@ encoding="utf-8" private="false" > - <arg value="-J-Xmx1000m" /> + <arg value="-J-Xmx2000m" /> <arg value="-quiet" /> <packageset dir="${bigdata.dir}/bigdata/src/java" /> <packageset dir="${bigdata.dir}/bigdata/src/samples" /> @@ -917,7 +917,7 @@ <!-- --> <target name="stage" description="stages resources (jar, config, policy, logging files) needed to package or execute the bigdata distribution." - depends="jar"> + depends="jar, javadoc"> <!-- Create staging directories --> <property name="dist.dir" location="${bigdata.dir}/dist/bigdata" /> @@ -933,6 +933,7 @@ <property name="dist.var.config.jini" location="${dist.var.config}/jini" /> <property name="dist.var.jetty" location="${dist.var}/jetty" /> <property name="dist.doc" location="${dist.dir}/doc" /> + <property name="dist.doc.api" location="${dist.dir}/doc/api" /> <property name="dist.doc.legal" location="${dist.dir}/doc/LEGAL" /> <delete dir="${dist.dir}" quiet="true" /> @@ -947,6 +948,7 @@ <mkdir dir="${dist.var.config.logging}" /> <mkdir dir="${dist.var.config.jini}" /> <mkdir dir="${dist.doc}" /> + <mkdir dir="${dist.doc.api}" /> <mkdir dir="${dist.doc.legal}" /> <mkdir dir="${dist.dir}/etc" /> <mkdir dir="${dist.dir}/etc/init.d" /> @@ -1232,12 +1234,18 @@ </fileset> </copy> - <!-- Stage top-level license file and copyright NOTICE file. --> + <!-- Stage top-level license file and copyright NOTICE file. --> <copy toDir="${dist.doc}"> <fileset file="${bigdata.dir}/LICENSE.txt"/> <fileset file="${bigdata.dir}/NOTICE"/> </copy> + <!-- Stage javadoc (iff generated). --> + <copy toDir="${dist.doc.api}" failonerror="false"> + <fileset dir="${build.dir}/docs/api"> + </fileset> + </copy> + <!-- Stage license files for dependencies (LEGAL). --> <copy toDir="${dist.doc.legal}" flatten="true"> <fileset dir="${bigdata.dir}"> @@ -1245,11 +1253,11 @@ </fileset> </copy> - <!-- Stage README. --> + <!-- Stage README. --> <copy file="${src.resources}/HAJournal/README" todir="${dist.dir}/doc" /> - <!-- Stage documentation from the wiki. --> + <!-- Stage documentation from the wiki. --> <get dest="${dist.doc}/HAJournalServer.html" ignoreerrors="true" src="http://wiki.bigdata.com/wiki/index.php/HAJournalServer?printable=yes" /> @@ -1299,10 +1307,11 @@ bigdata/var/jetty - the webapp. bigdata/var/jetty/jetty.xml - jetty server configuration. bigdata/var/jetty/bigdata/WEB-INF/web.xml - webapp configuration. + bigdata/doc - documentation bigdata/doc/LEGAL - license files for dependencies. bigdata/doc/LICENSE.txt - bigdata license file. bigdata/doc/NOTICE - copyright NOTICE files. - bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page) + bigdata/doc/api - javadoc bigdata/etc/init.d/bigdataHA - HA services start/stop script. bigdata/etc/default/bigdataHA - HA services required config file. 
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2014-05-20 13:28:29 UTC (rev 8376) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2014-05-20 13:47:52 UTC (rev 8377) @@ -1,5 +1,7 @@ Bigdata Highly Available Replication Cluster +*** See the HAJournalServer on the wiki for more information *** + ========== INSTALL ========== 0. The nodes MUST have synchronized clocks, both for logging and to This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
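A quick local smoke test of this change, assuming a working checkout with ant on the PATH (the output location follows the dist.doc.api property defined in the stage target above):

    ant stage
    ls dist/bigdata/doc/api

If javadoc generation succeeded, dist/bigdata/doc/api will contain the generated API documentation; because the copy task specifies failonerror="false", staging still completes when the javadoc output is absent.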
From: <mrp...@us...> - 2014-05-20 18:55:47
Revision: 8386 http://sourceforge.net/p/bigdata/code/8386 Author: mrpersonick Date: 2014-05-20 18:55:44 +0000 (Tue, 20 May 2014) Log Message: ----------- added a means of just running the AST optimizers without actually running the query Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-05-20 17:13:40 UTC (rev 8385) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2014-05-20 18:55:44 UTC (rev 8386) @@ -411,7 +411,53 @@ } } + + /** + * Optimize a SELECT query. + * + * @param store + * The {@link AbstractTripleStore} having the data. + * @param queryPlan + * The {@link ASTContainer}. + * @param bs + * The initial solution to kick things off. + * + * @return An optimized AST. + * + * @throws QueryEvaluationException + */ + static public QueryRoot optimizeTupleQuery( + final AbstractTripleStore store, final ASTContainer astContainer, + final QueryBindingSet bs) throws QueryEvaluationException { + final AST2BOpContext context = new AST2BOpContext(astContainer, store); + + // Clear the optimized AST. + astContainer.clearOptimizedAST(); + + // Batch resolve Values to IVs and convert to bigdata binding set. + final IBindingSet[] bindingSets = mergeBindingSets(astContainer, + batchResolveIVs(store, bs)); + + // Convert the query (generates an optimized AST as a side-effect). + AST2BOpUtility.convert(context, bindingSets); + + // Get the projection for the query. + final IVariable<?>[] projected = astContainer.getOptimizedAST() + .getProjection().getProjectionVars(); + + final List<String> projectedSet = new LinkedList<String>(); + + for (IVariable<?> var : projected) + projectedSet.add(var.getName()); + + // The optimized AST. + final QueryRoot optimizedQuery = astContainer.getOptimizedAST(); + + return optimizedQuery; + + } + /** * Evaluate a CONSTRUCT/DESCRIBE query. 
* <p> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2014-05-20 17:13:40 UTC (rev 8385) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2014-05-20 18:55:44 UTC (rev 8386) @@ -98,4 +98,32 @@ } + public QueryRoot optimize() throws QueryEvaluationException { + + return optimize((BindingsClause) null); + + } + + public QueryRoot optimize(final BindingsClause bc) + throws QueryEvaluationException { + + final QueryRoot originalQuery = astContainer.getOriginalAST(); + + if (bc != null) + originalQuery.setBindingsClause(bc); + + if (getMaxQueryTime() > 0) + originalQuery.setTimeout(TimeUnit.SECONDS + .toMillis(getMaxQueryTime())); + + originalQuery.setIncludeInferred(getIncludeInferred()); + + final QueryRoot optimized = ASTEvalHelper.optimizeTupleQuery( + getTripleStore(), astContainer, new QueryBindingSet( + getBindings())); + + return optimized; + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
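A sketch of how the new hook might be used; the connection setup is assumed rather than taken from the commit, and the cast relies on prepareTupleQuery() returning the bigdata implementation when called on a BigdataSailRepositoryConnection:

    // Illustrative only: "cxn" is assumed to be an open
    // BigdataSailRepositoryConnection.
    final BigdataSailTupleQuery query = (BigdataSailTupleQuery) cxn
            .prepareTupleQuery(QueryLanguage.SPARQL,
                    "SELECT * WHERE { ?s ?p ?o } LIMIT 1");

    // Run the AST optimizers only; the query itself is never evaluated.
    final QueryRoot optimizedAST = query.optimize();

    System.out.println(optimizedAST);

This makes it possible to inspect (or assert against, in tests) the optimized AST without paying the cost of query evaluation.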
From: <tho...@us...> - 2014-05-23 15:29:44
Revision: 8414 http://sourceforge.net/p/bigdata/code/8414 Author: thompsonbry Date: 2014-05-23 15:29:40 +0000 (Fri, 23 May 2014) Log Message: ----------- See #941 (merge deployments branch to main branch). - HARestore.sh: You can not safely rely on the limited classpath that is used in this script. This is very likely to break based merely on the imports into the HARestore, Journal, AbstractJournal and related classes. At a minimum, we would need to test this classpath for each release or in CI. I would prefer that we had a means to assemble a better classpath. The startHAServices script has a similar problem. The classpath is currently hacked there using the incantation export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` - What is the purpose of the "src/resources/deployment" directory? Is this the "single-server, non-HA" NSS deployment? - /bigdata/deployment - we put all of this stuff under /src/resources NOT /bigdata. - I have deleted /bigdata/deployment entirely from branches/BIGDATA_RELEASE_1_3_0. - I have copied the files (but not the SVN folders) from the DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment into /src/resources/deployment. - jetty.xml: copied from the DEPLOYMENTS branch. - /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh - This has been removed. The src/resources/deployment/nss directory has similar scripts. It is Ok to add an ant task to start the nss for developers, but deployments should be based on the "ant stage" pattern. - src/resources/deployment/nss/WEB-INF/RWStore.properties should be removed. The brew script should replace the following line in the version from bigdata-war/src/WEB-INF/RWStore.properties with an absolute filename. com.bigdata.journal.AbstractJournal.file=ZZZZ - src/resources/deployment/nss/WEB-INF/log4j.properties should be removed. The brew script should replace the following lines in the version from dist/var/config/logging/log4j.properties in order to setup (a) logging to a file; and (b) to specify the absolution location of that file. log4j.rootCategory=XXXX log4j.appender.file.File=YYYY Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,9 +1,11 @@ # Default log4j configuration. 
See the individual classes for the # specific loggers, but generally they are named for the class in # which they are defined. - -# Default log4j configuration for testing purposes. # +# This configuration gets used by the bigdata.war artifact when deployed +# into a servlet container. It also might be used by the bigdata webapp +# if -Dlog4j.configuration is not specified when starting bigdata. +# # You probably want to set the default log level to ERROR. # log4j.rootCategory=WARN, dest1 @@ -36,7 +38,7 @@ ## # Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -29,46 +29,46 @@ <Set name="detailedDump">false</Set> </Get> - <!-- =========================================================== --> - <!-- Get the platform mbean server --> - <!-- =========================================================== --> - <Call id="MBeanServer" class="java.lang.management.ManagementFactory" - name="getPlatformMBeanServer" /> - - <!-- =========================================================== --> - <!-- Initialize the Jetty MBean container --> - <!-- =========================================================== --> - <!-- Note: This breaks CI if it is enabled - <Call name="addBean"> - <Arg> - <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> - <Arg> - <Ref refid="MBeanServer" /> - </Arg> - </New> - </Arg> - </Call>--> - - <!-- Add the static log to the MBean server. - <Call name="addBean"> - <Arg> - <New class="org.eclipse.jetty.util.log.Log" /> - </Arg> - </Call>--> + <!-- =========================================================== --> + <!-- Get the platform mbean server --> + <!-- =========================================================== --> + <Call id="MBeanServer" class="java.lang.management.ManagementFactory" + name="getPlatformMBeanServer" /> + + <!-- =========================================================== --> + <!-- Initialize the Jetty MBean container --> + <!-- =========================================================== --> + <!-- Note: This breaks CI if it is enabled + <Call name="addBean"> + <Arg> + <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> + <Arg> + <Ref refid="MBeanServer" /> + </Arg> + </New> + </Arg> + </Call>--> + + <!-- Add the static log to the MBean server. 
+ <Call name="addBean"> + <Arg> + <New class="org.eclipse.jetty.util.log.Log" /> + </Arg> + </Call>--> - <!-- For remote MBean access (optional) - <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> - <Arg> - <New class="javax.management.remote.JMXServiceURL"> - <Arg type="java.lang.String">rmi</Arg> - <Arg type="java.lang.String" /> - <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> - <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> - </New> - </Arg> - <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> - <Call name="start" /> - </New>--> + <!-- For remote MBean access (optional) + <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> + <Arg> + <New class="javax.management.remote.JMXServiceURL"> + <Arg type="java.lang.String">rmi</Arg> + <Arg type="java.lang.String" /> + <Arg type="java.lang.Integer"><Property name="jetty.jmxrmiport" default="1090"/></Arg> + <Arg type="java.lang.String">/jndi/rmi://<Property name="jetty.jmxrmihost" default="localhost"/>:<Property name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> + </New> + </Arg> + <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> + <Call name="start" /> + </New>--> <!-- =========================================================== --> <!-- Http Configuration. --> @@ -97,25 +97,25 @@ </New> <!-- Configure the HTTP endpoint. --> - <Call name="addConnector"> - <Arg> - <New class="org.eclipse.jetty.server.ServerConnector"> - <Arg name="server"><Ref refid="Server" /></Arg> - <Arg name="factories"> - <Array type="org.eclipse.jetty.server.ConnectionFactory"> - <Item> - <New class="org.eclipse.jetty.server.HttpConnectionFactory"> - <Arg name="config"><Ref refid="httpConfig" /></Arg> - </New> - </Item> - </Array> - </Arg> - <Set name="host"><SystemProperty name="jetty.host" /></Set> - <Set name="port"><SystemProperty name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><SystemProperty name="http.timeout" default="30000"/></Set> - </New> - </Arg> - </Call> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.ServerConnector"> + <Arg name="server"><Ref refid="Server" /></Arg> + <Arg name="factories"> + <Array type="org.eclipse.jetty.server.ConnectionFactory"> + <Item> + <New class="org.eclipse.jetty.server.HttpConnectionFactory"> + <Arg name="config"><Ref refid="httpConfig" /></Arg> + </New> + </Item> + </Array> + </Arg> + <Set name="host"><Property name="jetty.host" /></Set> + <Set name="port"><Property name="jetty.port" default="8080" /></Set> + <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> + </New> + </Arg> + </Call> <!-- =========================================================== --> <!-- Set handler Collection Structure --> @@ -142,12 +142,12 @@ <Arg> <!-- This is the bigdata web application. 
--> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="war"><Property name="jetty.resourceBase" default="bigdata-war/src"/></Set> <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> - <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> + <Set name="overrideDescriptor"><Property name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> @@ -166,4 +166,4 @@ <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> -</Configure> \ No newline at end of file +</Configure> Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -935,6 +935,9 @@ <property name="dist.doc" location="${dist.dir}/doc" /> <property name="dist.doc.api" location="${dist.dir}/doc/api" /> <property name="dist.doc.legal" location="${dist.dir}/doc/LEGAL" /> + <!-- deployment directories having stuff to be staged. --> + <property name="deploy" location="src/resources/deployment"/> + <property name="deploy.nss" location="${deploy}/nss"/> <delete dir="${dist.dir}" quiet="true" /> <mkdir dir="${dist.dir}" /> @@ -966,7 +969,7 @@ <property name="bigdata-jini.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> <property name="bigdata-rdf.lib" location="${bigdata.dir}/bigdata-rdf/lib" /> <property name="bigdata-sails.lib" location="${bigdata.dir}/bigdata-sails/lib" /> - <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" /> + <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" /> <property name="bigdata-gom.lib" location="${bigdata.dir}/bigdata-gom/lib" /> <property name="bigdata-jetty.lib" location="${bigdata.dir}/bigdata/lib/jetty" /> <property name="bigdata-http.lib" location="${bigdata.dir}/bigdata-sails/lib/httpcomponents" /> @@ -1265,6 +1268,30 @@ src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes" /> + <!-- Stage files specific to NSS deployments provided by Brew and Chef. --> + <chmod file="${dist.bin}/bigdata" perm="755" /> + <copy file="${deploy.nss}/bin/bigdataNSS" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/bigdata" perm="755" /> + <copy file="${deploy.nss}/bin/startNSS" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/startNSS" perm="755" /> +<!-- +TODO These lines were removed per #951 (Deployments branch merge). They +break the other deployment models by introducing metavariables for regex +substitutions. + + bigdata-war/src/WEB-INF/RWStore.properties (staged into bigdata/var/jetty/bigdata/WEB-INF/RWStore.properties) + + and + + bigdata/src/resources/log4j.properties (staged into dist/var/config/logging/log4j.properties). 
+ <copy file="${deploy.nss}/WEB-INF/RWStore.properties" + todir="${dist.var.jetty}/WEB-INF" overwrite="true" /> + <copy file="${deploy.nss}/WEB-INF/classes/log4j.properties" + todir="${dist.var.jetty}/WEB-INF/classes" overwrite="true" /> +--> + </target> <!-- --> @@ -1344,8 +1371,41 @@ </target> + <target name="package-nss-brew" depends="clean, stage" + description="Create compressed tar file for Jetty based deployment via Brew and Chef installers."> - <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> + <tar destfile="${bigdata.dir}/REL-NSS.${version}.tgz" + compression="gzip"> + + <tarfileset dir="${bigdata.dir}/dist"> + <include name="bigdata/doc/**" /> + <exclude name="bigdata/doc/api/**" /> + <exclude name="bigdata/doc/HAJournalServer.html" /> + <include name="bigdata/lib/**" /> + <exclude name="bigdata/lib/bigdata-ganglia.jar" /> + <exclude name="bigdata/lib/browser.jar" /> + <exclude name="bigdata/lib/reggie.jar" /> + <exclude name="bigdata/lib/zookeeper.jar" /> + <exclude name="bigdata/lib/jsk-*.jar" /> + <exclude name="bigdata/lib-dl" /> + <exclude name="bigdata/lib-ext" /> + <include name="bigdata/var/jetty/**" /> + <include name="bigdata/var/config/logging/logging.properties" /> + <exclude name="bigdata/var/jetty/html/new.html" /> + <exclude name="bigdata/var/jetty/html/old.html" /> + </tarfileset> + + <!-- Add scripts separately, making them executable --> + + <tarfileset dir="${bigdata.dir}/dist" filemode="755"> + <include name="bigdata/bin/bigdataNSS" /> + <include name="bigdata/bin/startNSS" /> + </tarfileset> + </tar> + + </target> + + <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> <!-- Note: can require 'rpm' and 'rpm-build. --> <!-- TODO: We do not need both this and "deploy-artifact". --> <target name="rpm" depends="prepare" description="Build RPM installer."> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,10 +1,31 @@ #!/bin/bash +# +# This script has been developed for the "systap-aws-bigdata-ha" cluster +# deployment package. +# +# The HARestore script will recreate the Bigdata HA journal file as of +# the most recent commit point from log and snapshot files. The +# intended use of the script is to restore a journal file that resides +# on an ephemeral storage media (especially, an SSD instance disk) +# from a combination of full backups and transaction logs on durable +# media (e.g., EBS) following a system reboot. The script should not +# be executed while Bigdata is running (it requires exclusive access +# to the journal and will not be able to run if bigdata is already +# running). +# +# HARestore takes no arguments and assumes the Bigdata journal filename\ +# convention: "bigdata-ha.jnl". 
+# + source /etc/default/bigdataHA SERVICE_DIR="$FED_DIR/$FEDNAME/$LOGICAL_SERVICE_ID/HAJournalServer" LIB_DIR="$FED_DIR/lib" -java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar -Dlog4j.configuration=file:var/config/logging/log4j.properties com.bigdata.journal.jini.ha.HARestore -o $DATA_DIR/bigdata-ha.jnl $SERVICE_DIR/snapshot $SERVICE_DIR/HALog - - +java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar\ + -Dlog4j.configuration=file:var/config/logging/log4j.properties\ + com.bigdata.journal.jini.ha.HARestore\ + -o $DATA_DIR/bigdata-ha.jnl\ + $SERVICE_DIR/snapshot\ + $SERVICE_DIR/HALog Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,61 +0,0 @@ -#!/bin/bash - -# Start the services and put the JVM in the background. All services will -# run in a single JVM. See Apache River com.sun.jini.start.ServiceStarter -# for more details. The services are configured in the accompanying -# startHAServices.config file. Specific configuration options for each -# service are defined in the documentation for that service. -# -# Note: One drawback with running each service in the same JVM is that the -# GC load of all services is combined and all services would be suspended -# at the same time by a Full GC pass. If this is a problem, then you can -# break out the river services (ClassServer and Reggie) into a separate -# ServiceStarter instance from the HAJournalServer. - -# The top-level of the installation. -pushd `dirname $0` > /dev/null;cd ..;INSTALL_DIR=`pwd`;popd > /dev/null - -## -# HAJournalServer configuration parameter overrides (see HAJournal.config). -# -# The bigdata HAJournal.config file may be heavily parameterized through -# environment variables that get passed through into the JVM started by -# this script and are thus made available to the HAJournalServer when it -# interprets the contents of the HAJournal.config file. See HAJournal.config -# for the meaning of these environment variables. -# -# Note: Many of these properties have defaults. -## - -export JETTY_XML="${INSTALL_DIR}/var/jetty/jetty.xml" -export JETTY_RESOURCE_BASE="${INSTALL_DIR}/var/jetty" -export LIB_DIR=${INSTALL_DIR}/lib -export CONFIG_DIR=${INSTALL_DIR}/var/config -export LOG4J_CONFIG=${CONFIG_DIR}/logging/log4j.properties - -# TODO Explicitly enumerate JARs so we can control order if necessary and -# deploy on OS without find and tr. -export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` - -export JAVA_OPTS="\ - -server -Xmx4G\ - -Dlog4j.configuration=${LOG4J_CONFIG}\ - -Djetty.resourceBase=${JETTY_RESOURCE_BASE}\ - -DJETTY_XML=${JETTY_XML}\ -" - -cmd="java ${JAVA_OPTS} \ - -server -Xmx4G \ - -cp ${HAJOURNAL_CLASSPATH} \ - com.bigdata.rdf.sail.webapp.NanoSparqlServer \ - 9999 kb \ - ${INSTALL_DIR}/var/jetty/WEB-INF/GraphStore.properties \ -" -echo "Running: $cmd" -$cmd& -pid=$! 
-# echo "PID=$pid" -echo "kill $pid" > stop.sh -chmod +w stop.sh - -# Note: To obtain the pid, do: read pid < "$pidFile" Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt 2014-05-23 15:29:40 UTC (rev 8414) @@ -0,0 +1,26 @@ +brew - homebrew installer. installation is the NSS using jetty. No HA features. + +chef - cook book has recipes for bigdata under tomcat; bigdata HA; MapGraph; + NSS using jetty. + +nss - NSS using jetty. The directory contains shell scripts to (a) control + the run state of bigdata in an init.d style script; and (b) start the + NSS using jetty. + +vagrant - HA cluster launcher for AWS; MapGraph launcher; NSS using jetty + launcher; tomcat + bigdata.war install. + +====== Maintenance ====== + +TODO Rename these things to be less ambiguous once we agree on names. + +TODO Document how things are structured from a support and maintenance +perspective. + +TODO Document on the wiki what these various deployments are, how to +choose the right one, and where to get it. See the following tickets. +Also capture the deployment matrix that Daniel has sent by email. + +#926 Add Wiki Entry for Brew Deployment +#925 Add Wiki Entry for Vagrant Deployments +#924 Add Wiki Entry for Chef Cookbooks Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414) @@ -53,7 +53,7 @@ ## # Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,109 +0,0 @@ -#!/bin/bash - -# init.d style script for bigdata HA services. The script can be used -# to 'start' or 'stop' services. -# -# Environment: -# -# binDir - The directory containing the installed scripts. -# pidFile - The pid is written on this file. -# -# Misc. -# -# See http://tldp.org/LDP/abs/html/index.html -# -# Note: Blank lines are significant in shell scripts. -# -# Note: Children must do "exit 0" to indicate success. -# -# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix - -# Source function library (just used for 'action'). If you don't have this -# it SHOULD automatically use the inline definition for "action()". - -# -# the following template line will be replaced by a deployer application (e.g. 
brew, chef) -# -export INSTALL_TYPE="<%= INSTALL_TYPE %>" -export BD_HOME="<%= BD_HOME %>" -pidFile=${BD_HOME}/var/lock/pid -binDir=${BD_HOME}/bin - - -# -# See how we were called. -# -case "$1" in - start) -# -# Start the ServiceStarter and child services if not running. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - fi - fi - if [ ! -f "$pidFile" ]; then - echo -ne $"`date` : `hostname` : bringing bigdata services up ... " - $binDir/startNSS - echo "done!" - else - echo $"`date` : `hostname` : running as $pid" - fi - ;; - stop) -# -# Stop the ServiceStarter and all child services. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - else - echo -ne $"`date` : `hostname` : bringing bigdata service down ... " - kill $pid - rm -f "$pidFile" - echo "done!" - fi - fi - ;; - status) -# -# Report status for the ServicesManager (up or down). -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then - echo $"`date` : `hostname` : process died? pid=$pid." - else - echo $"`date` : `hostname` : running as $pid." - fi - else - echo $"`date` : `hostname` : not running." - fi - ;; -# -# Simply stop then start. -# - restart) - $0 stop - $0 start - ;; - *) -# -# Usage -# - me=`basename $0` - echo $"Usage: $0 {start|stop|status|restart}" - exit 1 -esac - -exit 0 Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS 2014-05-23 15:29:40 UTC (rev 8414) @@ -0,0 +1,109 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). If you don't have this +# it SHOULD automatically use the inline definition for "action()". + +# +# the following template line will be replaced by a deployer application (e.g. brew, chef) +# +export INSTALL_TYPE="<%= INSTALL_TYPE %>" +export BD_HOME="<%= BD_HOME %>" +pidFile=${BD_HOME}/var/lock/pid +binDir=${BD_HOME}/bin + + +# +# See how we were called. +# +case "$1" in + start) +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + echo -ne $"`date` : `hostname` : bringing bigdata services up ... 
" + $binDir/startNSS + echo "done!" + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + echo -ne $"`date` : `hostname` : bringing bigdata service down ... " + kill $pid + rm -f "$pidFile" + echo "done!" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; +# +# Simply stop then start. +# + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + me=`basename $0` + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-23 15:29:40 UTC (rev 8414) @@ -2,9 +2,9 @@ export INSTALL_DIR=${BD_HOME} if [ $INSTALL_TYPE == "BREW" ]; then - export LIB_DIR=${INSTALL_DIR}/libexec + export LIB_DIR=${INSTALL_DIR}/libexec else - export LIB_DIR=${INSTALL_DIR}/lib + export LIB_DIR=${INSTALL_DIR}/lib fi export JETTY_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` export JETTY_DIR=${INSTALL_DIR}/var/jetty @@ -21,7 +21,7 @@ export DATA_DIR=${BD_HOME}/var/data if [ ! -d $DATA_DIR ]; then - mkdir -p $DATA_DIR + mkdir -p $DATA_DIR fi export NSS="com.bigdata.rdf.sail.webapp.NanoSparqlServer" @@ -34,7 +34,7 @@ export JETTY_PORT="8080" fi if [ -z "${JETTY_XML}" ]; then - export JETTY_XML="${JETTY_DIR}/etc/jetty.xml" + export JETTY_XML="${JETTY_DIR}/jetty.xml" fi if [ -z "${JETTY_RESOURCE_BASE}" ]; then export JETTY_RESOURCE_BASE="${JETTY_DIR}" @@ -57,7 +57,7 @@ # Setup the directory for the pid of the ServiceStarter process. lockDir=${INSTALL_DIR}/var/lock if [ ! 
-d $lockDir ]; then - mkdir -p $lockDir + mkdir -p $lockDir fi pidFile=$lockDir/pid Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,133 +0,0 @@ -<?xml version="1.0"?> -<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> -<!-- See http://www.eclipse.org/jetty/documentation/current/ --> -<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> -<Configure id="Server" class="org.eclipse.jetty.server.Server"> - - <!-- =========================================================== --> - <!-- Configure the Server Thread Pool. --> - <!-- The server holds a common thread pool which is used by --> - <!-- default as the executor used by all connectors and servlet --> - <!-- dispatches. --> - <!-- --> - <!-- Configuring a fixed thread pool is vital to controlling the --> - <!-- maximal memory footprint of the server and is a key tuning --> - <!-- parameter for tuning. In an application that rarely blocks --> - <!-- then maximal threads may be close to the number of 5*CPUs. --> - <!-- In an application that frequently blocks, then maximal --> - <!-- threads should be set as high as possible given the memory --> - <!-- available. --> - <!-- --> - <!-- Consult the javadoc of o.e.j.util.thread.QueuedThreadPool --> - <!-- for all configuration that may be set here. --> - <!-- =========================================================== --> - <!-- uncomment to change type of threadpool --> - <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg> - <!-- --> - <Get name="ThreadPool"> - <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set> - <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set> - <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set> - <Set name="detailedDump">false</Set> - </Get> - - <!-- =========================================================== --> - <!-- Http Configuration. --> - <!-- This is a common configuration instance used by all --> - <!-- connectors that can carry HTTP semantics (HTTP, HTTPS, SPDY)--> - <!-- It configures the non wire protocol aspects of the HTTP --> - <!-- semantic. --> - <!-- --> - <!-- Consult the javadoc of o.e.j.server.HttpConfiguration --> - <!-- for all configuration that may be set here. 
--> - <!-- =========================================================== --> - <New id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration"> - <Set name="secureScheme">https</Set> - <Set name="securePort"><Property name="jetty.secure.port" default="8443" /></Set> - <Set name="outputBufferSize"><Property name="jetty.output.buffer.size" default="32768" /></Set> - <Set name="requestHeaderSize"><Property name="jetty.request.header.size" default="8192" /></Set> - <Set name="responseHeaderSize"><Property name="jetty.response.header.size" default="8192" /></Set> - <Set name="sendServerVersion"><Property name="jetty.send.server.version" default="true" /></Set> - <Set name="sendDateHeader"><Property name="jetty.send.date.header" default="false" /></Set> - <Set name="headerCacheSize">512</Set> - <!-- Uncomment to enable handling of X-Forwarded- style headers - <Call name="addCustomizer"> - <Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg> - </Call> - --> - </New> - - <!-- Configure the HTTP endpoint. --> - <Call name="addConnector"> - <Arg> - <New class="org.eclipse.jetty.server.ServerConnector"> - <Arg name="server"><Ref refid="Server" /></Arg> - <Arg name="factories"> - <Array type="org.eclipse.jetty.server.ConnectionFactory"> - <Item> - <New class="org.eclipse.jetty.server.HttpConnectionFactory"> - <Arg name="config"><Ref refid="httpConfig" /></Arg> - </New> - </Item> - </Array> - </Arg> - <Set name="host"><Property name="jetty.host" /></Set> - <Set name="port"><Property name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> - </New> - </Arg> - </Call> - - <!-- =========================================================== --> - <!-- Set handler Collection Structure --> - <!-- =========================================================== --> - <Set name="handler"> - <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> - <Set name="handlers"> - <Array type="org.eclipse.jetty.server.Handler"> - <Item> - <!-- This is the bigdata web application. --> - <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. --> - <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> - </Set> - <Set name="contextPath">/bigdata</Set> - <Set name="descriptor"><%= JETTY_DIR %>/WEB-INF/web.xml</Set> - <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">false</Set> - </New> - </Item> - <Item> - <!-- This appears to be necessary in addition to the above. --> - <!-- Without this, it will not resolve http://localhost:8080/ --> - <!-- and can fail to deliver some of the static content. --> - <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. 
--> - <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> - </Set> - <Set name="welcomeFiles"> - <Array type="java.lang.String"> - <Item>html/index.html</Item> - </Array> - </Set> - </New> - </Item> - <!-- <Item> - <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New> - </Item> --> - </Array> - </Set> - </New> - </Set> - - <!-- =========================================================== --> - <!-- extra server options --> - <!-- =========================================================== --> - <Set name="stopAtShutdown">true</Set> - <Set name="stopTimeout">5000</Set> - <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> - <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> - -</Configure> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-05-23 15:29:40 UTC (rev 8414) @@ -95,7 +95,7 @@ fi fi if [ ! -f "$pidFile" ]; then - action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP bash -c "source /etc/default/bigdataHA ; $binDir/startHAServices" + action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP bash -c "source /etc/default/bigdataHA ; $binDir/startHAServices > /dev/null 2>&1 &" else echo $"`date` : `hostname` : running as $pid" fi This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-27 13:28:11
|
Revision: 8422 http://sourceforge.net/p/bigdata/code/8422
Author: thompsonbry
Date: 2014-05-27 13:28:02 +0000 (Tue, 27 May 2014)
Log Message:
-----------
- Declared an interface that exposes a post-constructor Callable to initialize a service. This will be used for the SnapshotManager, HALogNexus, and HAJournal.

- Modified the SnapshotManager to use a parallel scan and the new IServiceInit interface.

- Added a test to verify that snapshots are located after a service restart.

- Defined, exposed, and tested a variety of constants for the CommitCounterUtility. These were added to support a parallel scan of the files in a leaf directory.

- Declared a "startupThreads" parameter that controls the number of parallel scans for the HAJournal startup processes.

Snapshot test suites are green locally.

See #775 (HAJournal.start() - optimization)

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -1752,6 +1752,7 @@ } + @Override final public File getFile() { final IBufferStrategy tmp = getBufferStrategy(); @@ -1915,6 +1916,7 @@ * @exception IllegalStateException * if the journal is open. */ + @Override public void deleteResources() { if (isOpen()) @@ -2307,12 +2309,14 @@ } + @Override final public UUID getUUID() { return journalMetadata.get().getUUID(); } + @Override final public IResourceMetadata getResourceMetadata() { return journalMetadata.get();

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -37,6 +37,17 @@ /** * Utility class for operations on files that are named using a commit counter. + * <p> + * The commit counter based files are arranged in a hierarchical directory + * structure with 3 digits per directory and 7 directory levels. These levels + * are labeled with depths <code>[0..6]</code>.
The root directory is at depth + * ZERO (0). Each directory contains up to <code>1000</code> children. The + * children in the non-leaf directories are subdirectories labeled + * <code>0..999</code>. The leaf directories are at depth SIX (6). Leaf + * directories contain files. Each file in a leaf directory is labeled with a + * <code>21</code> digit base name and some purpose specific file extension. + * Each such file has data for the specific commit point encoded by the basename + * of the file. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ @@ -46,6 +57,89 @@ .getLogger(CommitCounterUtility.class); /** + * The number of base-10 digits per directory level. This allows children + * having labels <code>000...999</code>. Thus there are <code>1000</code> + * children per directory. + */ + private static final int DIGITS_PER_DIR = 3; + + /** The number of files per directory. */ + private static final int FILES_PER_DIR = 1000; + + /** The depth of the root directory. */ + private static final int ROOT_DIR_DEPTH = 0; + + /** The depth of a leaf directory. */ + private static final int LEAF_DIR_DEPTH = 6; + + /** + * The #of digits (21) in the base file name for a commit counter as + * formatted by {@link #getCommitCounterStr(long)}. + * <p> + * Note: 21 := (leafDirDepth+1) * digitsPerDir + */ + private static final int BASENAME_DIGITS = 21; + + /** + * The {@link Formatter} string that is used to generate the base name of + * the files in the leaf directories. This string represents the commit + * counter value with leading zeros. The leading zeros are relied upon to + * impose an ordering over the base names of the files using a sort. + */ + private static final String FORMAT_STR = "%0" + BASENAME_DIGITS + "d"; + + /** + * The #of digits (21) in the base file name for a commit counter as + * formatted by {@link #getCommitCounterStr(long)}. + * <p> + * Note: 21 := (leafDirDepth+1) * digitsPerDir + */ + public static int getBasenameDigits() { + + return BASENAME_DIGITS; + + } + + /** + * The number of base-10 digits per directory level ( + * {@value #DIGITS_PER_DIR}). This allows children having labels + * <code>000...999</code>. Thus there are <code>1000</code> children per + * directory. + */ + public static int getDigitsPerDirectory() { + + return DIGITS_PER_DIR; + + } + + /** + * The number of files per directory ({@value #FILES_PER_DIR}). + */ + public static int getFilesPerDirectory() { + + return FILES_PER_DIR; + + } + + /** + * The depth of the root directory ({@value #ROOT_DIR_DEPTH}). + */ + public static int getRootDirectoryDepth() { + + return ROOT_DIR_DEPTH; + + } + + /** + * The depth of a leaf directory ({@value #LEAF_DIR_DEPTH}). + */ + public static int getLeafDirectoryDepth() { + + return LEAF_DIR_DEPTH; + + } + + /** * Return the name of the {@link File} associated with the commitCounter. * * @param dir @@ -79,15 +173,11 @@ * Now figure out the recursive directory name. 
*/ File t = dir; + + for (int i = 0; i < (BASENAME_DIGITS - DIGITS_PER_DIR); i += DIGITS_PER_DIR) { - if (true) { + t = new File(t, basename.substring(i, i + DIGITS_PER_DIR)); - for (int i = 0; i < (21 - 3); i += 3) { - - t = new File(t, basename.substring(i, i + 3)); - - } - } final File file = new File(t, basename + ext); @@ -108,11 +198,11 @@ */ public static String getCommitCounterStr(final long commitCounter) { - final StringBuilder sb = new StringBuilder(21); + final StringBuilder sb = new StringBuilder(BASENAME_DIGITS); final Formatter f = new Formatter(sb); - f.format("%021d", commitCounter); + f.format(FORMAT_STR, commitCounter); f.flush(); f.close();

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -0,0 +1,46 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 27th, 2014 + */ +package com.bigdata.service; + +import java.util.concurrent.Callable; + +/** + * Interface for post-constructor initialization. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @param <T> + * The generic type of the value to which the initialization task + * evaluates. + */ +public interface IServiceInit<T> { + + /** + * Return a task that must be used to initialize the service. + */ + Callable<T> init(); +}

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -42,10 +42,35 @@ public TestCommitCounterUtility() { } - public TestCommitCounterUtility(String name) { + public TestCommitCounterUtility(final String name) { super(name); } + /** + * Verify the value of specific constants. These constants must not be + * modified since they define the hierarchical structure of the durable data + * and are relied upon to generate and parse the fully qualified names of the + * files within a managed commit counter based directory system.
+ */ + public void test_constants() { + + assertEquals("filesPerDirectory", 1000, + CommitCounterUtility.getFilesPerDirectory()); + + assertEquals("digitsPerDirectory", 3, + CommitCounterUtility.getDigitsPerDirectory()); + + assertEquals("basenameDigits", 21, + CommitCounterUtility.getBasenameDigits()); + + assertEquals("rootDirectoryDepth", 0, + CommitCounterUtility.getRootDirectoryDepth()); + + assertEquals("leafDirectoryDepth", 6, + CommitCounterUtility.getLeafDirectoryDepth()); + + } + public void test01() { final File dir = new File("/tmp");

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -39,6 +39,8 @@ import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.locks.Lock; @@ -420,6 +422,16 @@ // Snapshot manager. snapshotManager = new SnapshotManager(server, this, config); + try { + getExecutorService().submit(snapshotManager.init()).get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); // TODO Do not wrap. + } catch (CancellationException e) { + throw e; + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + } /**

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:14:23 UTC (rev 8421)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:28:02 UTC (rev 8422)
@@ -383,6 +383,19 @@ String DEFAULT_SNAPSHOT_DIR = "snapshot"; /** + * The number of threads that will be used for a parallel scan of the + * files in the {@link #HA_LOG_DIR} and {@link #SNAPSHOT_DIR} in order + * to accelerate the service start. The minimum is ONE (1). The default + * is {@value #DEFAULT_STARTUP_THREADS}. + * + * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start() + * (optimization) </a> + */ + String STARTUP_THREADS = "startupThreads"; + + int DEFAULT_STARTUP_THREADS = 20; + + /** * The policy that specifies when a new snapshot will be taken. The * decision to take a snapshot is a local decision and the snapshot is * assumed to be written to local disk. However, offsite replication of @@ -871,6 +884,36 @@ * {@inheritDoc} * <p> * Note: called from {@link AbstractServer#run()} + * + * FIXME We should be able to start the NSS while still reading the HALog + * files from the disk. The action to start the {@link HAQuorumService} + * should await a {@link Future} for the journal start. Thus, the + * {@link HAJournal} start needs to be turned into a {@link Callable} or + * {@link Runnable}. + * <p> + * In fact, the journal open is very fast. The slow part is building an + * index over the HALogs and (to a lesser extent) over the snapshots.
Those + * index builds can run in parallel, but we need to have a critical section + * in which we check some necessary conditions, especially whether the last + * HALog is valid. + * <p> + * We need to push a start() computation into both the {@link HALogNexus} + * and the {@link SnapshotManager}. This could be done with an interface + * that is also shared by the {@link HAJournal}. The interface could provide + * some reporting on the startup process, but most critical is that it + * provides a {@link Future} for evaluating that process. + * <p> + * The {@link Future} can evaluate to the outcome of that startup procedure. + * <p> + * The startup procedure should use multiple threads (or async IO) to reduce + * the startup latency. It could use the executor on the journal for this. + * <p> + * We could parallelize the HALog and snapshot startup then enter a critical + * section in which we validate the consistency of those resources with + * respect to the HAJournal's current root block. + * + * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start() + * (optimization) </a> */ @Override protected void startUpHook() { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -128,11 +128,11 @@ */ volatile IHAWriteMessage lastLiveHAWriteMessage = null; - /* - * Set to protect log files against deletion while a digest is - * computed. This is checked by deleteHALogs. + /** + * Set to protect log files against deletion while a digest is computed. + * This is checked by {@link #deleteHALogs(long, long)}. 
*/ - private final AtomicInteger logAccessors = new AtomicInteger(); + private final AtomicInteger logAccessors = new AtomicInteger(); /** * Filter visits all HALog files <strong>except</strong> the current HALog @@ -1042,23 +1042,26 @@ /** * Protects logs from removal while a digest is being computed - * @param earliestDigest */ void addAccessor() { - if (logAccessors.incrementAndGet() == 1) { - if (log.isInfoEnabled()) - log.info("Access protection added"); - } + if (logAccessors.incrementAndGet() == 1) { + if (log.isDebugEnabled()) + log.debug("Access protection added"); + } } - + /** * Releases current protection against log removal */ void releaseAccessor() { - if (logAccessors.decrementAndGet() == 0) { - if (log.isInfoEnabled()) - log.info("Access protection removed"); - } + final long tmp; + if ((tmp = logAccessors.decrementAndGet()) == 0) { + if (log.isDebugEnabled()) + log.debug("Access protection removed"); + } + if (tmp < 0) + throw new RuntimeException("Decremented to a negative value: " + + tmp); } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -36,10 +36,13 @@ import java.nio.ByteBuffer; import java.security.DigestException; import java.security.MessageDigest; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.zip.GZIPInputStream; @@ -73,17 +76,19 @@ import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; import com.bigdata.rawstore.Bytes; +import com.bigdata.service.IServiceInit; import com.bigdata.striterator.Resolver; import com.bigdata.striterator.Striterator; import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; +import com.bigdata.util.concurrent.LatchedExecutor; /** * Class to manage the snapshot files. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class SnapshotManager { +public class SnapshotManager implements IServiceInit<Void> { private static final Logger log = Logger.getLogger(SnapshotManager.class); @@ -185,6 +190,11 @@ private final IRestorePolicy restorePolicy; /** + * @see HAJournalServer.ConfigurationOptions#STARTUP_THREADS + */ + private final int startupThreads; + + /** * An in memory index over the last commit time of each snapshot. This is * populated when the {@link HAJournal} starts from the file system and * maintained as snapshots are taken or destroyed. 
@@ -299,62 +309,241 @@ IRestorePolicy.class, // HAJournalServer.ConfigurationOptions.DEFAULT_RESTORE_POLICY); + { + + startupThreads = (Integer) config + .getEntry( + HAJournalServer.ConfigurationOptions.COMPONENT, + HAJournalServer.ConfigurationOptions.STARTUP_THREADS, + Integer.TYPE, + HAJournalServer.ConfigurationOptions.DEFAULT_STARTUP_THREADS); + + if (startupThreads <= 0) { + throw new ConfigurationException( + HAJournalServer.ConfigurationOptions.STARTUP_THREADS + + "=" + startupThreads + " : must be GT ZERO"); + } + + } + snapshotIndex = SnapshotIndex.createTransient(); - /* - * Delete any temporary files that were left lying around in the - * snapshot directory. - */ - CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, - getSnapshotDir(), TEMP_FILE_FILTER); + } - // Make sure the snapshot directory exists. - ensureSnapshotDirExists(); + @Override + public Callable<Void> init() { - // Populate the snapshotIndex from the snapshotDir. - populateIndexRecursive(getSnapshotDir(), SNAPSHOT_FILTER); + return new InitTask(); - // Initialize the snapshot policy. It can self-schedule. - snapshotPolicy.init(journal); - } - private void ensureSnapshotDirExists() throws IOException { + /** + * Task that is used to initialize the {@link SnapshotManager}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private class InitTask implements Callable<Void> { - if (!snapshotDir.exists()) { + @Override + public Void call() throws Exception { - // Create the directory. - if (!snapshotDir.mkdirs()) - throw new IOException("Could not create directory: " - + snapshotDir); + lock.lock(); + + try { + + doRunWithLock(); + + // Done. + return (Void) null; + + } finally { + + lock.unlock(); + + } + + } + private void doRunWithLock() throws IOException, InterruptedException, + ExecutionException { + + if (log.isInfoEnabled()) + log.info("Starting cleanup."); + + /* + * Delete any temporary files that were left lying around in the + * snapshot directory. + * + * TODO This may be relatively lengthy. It would be better to + * combine this with the scan in which we read the root blocks and + * index the snapshots. However, this will require another refactor + * of the parallel scan logic. For now, I am merely reporting out + * the times for these different scans so I can get a better sense + * of the latencies involved. + */ + CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, + getSnapshotDir(), TEMP_FILE_FILTER); + + // Make sure the snapshot directory exists. + ensureSnapshotDirExists(); + + if (log.isInfoEnabled()) + log.info("Starting scan."); + + final LatchedExecutor executor = new LatchedExecutor( + journal.getExecutorService(), startupThreads); + + // Populate the snapshotIndex from the snapshotDir. + populateIndexRecursive(// + executor,// + getSnapshotDir(), // + SNAPSHOT_FILTER, // + 0 // depth@root + ); + + if (log.isInfoEnabled()) + log.info("Starting policy."); + + // Initialize the snapshot policy. It can self-schedule. + snapshotPolicy.init(journal); + + if (log.isInfoEnabled()) + log.info("Done."); + } - } - - /** - * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex} - * from the root blocks in snapshot files found in that directory. - * - * @throws IOException - */ - private void populateIndexRecursive(final File f, - final FileFilter fileFilter) throws IOException { + /** + * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex} + * from the root blocks in snapshot files found in that directory. 
+ * + * @throws IOException + * @throws ExecutionException + * @throws InterruptedException + */ + private void populateIndexRecursive(final LatchedExecutor executor, + final File f, final FileFilter fileFilter, final int depth) + throws IOException, InterruptedException, ExecutionException { - if (f.isDirectory()) { + if (depth == CommitCounterUtility.getLeafDirectoryDepth()) { - final File[] children = f.listFiles(fileFilter); + /* + * Leaf directory. + */ + + final File[] children = f.listFiles(fileFilter); - for (int i = 0; i < children.length; i++) { + /* + * Setup tasks for parallel threads to read the commit record from + * each file. + */ + final List<FutureTask<SnapshotRecord>> futures = new ArrayList<FutureTask<SnapshotRecord>>( + children.length); - populateIndexRecursive(children[i], fileFilter); + for (int i = 0; i < children.length; i++) { + final File child = children[i]; + + final FutureTask<SnapshotRecord> ft = new FutureTask<SnapshotRecord>( + + new Callable<SnapshotRecord>() { + + @Override + public SnapshotRecord call() throws Exception { + + return getSnapshotRecord(child); + + } + + }); + + futures.add(ft); + + } + + try { + + /* + * Schedule all futures. + */ + for (FutureTask<SnapshotRecord> ft : futures) { + + executor.execute(ft); + + } + + /* + * Await futures, obtaining snapshot records for the current + * leaf directory. + */ + final List<SnapshotRecord> records = new ArrayList<SnapshotRecord>( + children.length); + + for (int i = 0; i < children.length; i++) { + + final Future<SnapshotRecord> ft = futures.get(i); + + final SnapshotRecord r = ft.get(); + + records.add(r); + + } + + // Add all records in the caller's thread. + for (SnapshotRecord r : records) { + + snapshotIndex.add(r); + + } + + } finally { + + /* + * Ensure tasks are terminated. + */ + + for (Future<SnapshotRecord> ft : futures) { + + ft.cancel(true/* mayInterruptIfRunning */); + + } + + } + + } else if (f.isDirectory()) { + + /* + * Sequential recursion into a child directory. + */ + + final File[] children = f.listFiles(fileFilter); + + for (int i = 0; i < children.length; i++) { + + final File child = children[i]; + + populateIndexRecursive(executor, child, fileFilter, depth + 1); + + } + + } else { + + log.warn("Ignoring file in non-leaf directory: " + f); + } - } else { + } - addSnapshot(f); + } + + private void ensureSnapshotDirExists() throws IOException { + if (!snapshotDir.exists()) { + + // Create the directory. + if (!snapshotDir.mkdirs()) + throw new IOException("Could not create directory: " + + snapshotDir); + } } @@ -434,7 +623,26 @@ * if the file can not be read. * @throws ChecksumError * if there is a checksum problem with the root blocks. + */ + private void addSnapshot(final File file) throws IOException { + + snapshotIndex.add(getSnapshotRecord(file)); + + } + + /** + * Create a {@link SnapshotRecord} from a file. * + * @param file + * The snapshot file. + * + * @throws IllegalArgumentException + * if argument is <code>null</code>. + * @throws IOException + * if the file can not be read. + * @throws ChecksumError + * if there is a checksum problem with the root blocks. + * * TODO If the root blocks are bad, then this will throw an * IOException and that will prevent the startup of the * HAJournalServer. However, if we start up the server with a @@ -449,8 +657,8 @@ * with that HALog file unless it also happens to correspond to * a snapshot. 
*/ - private void addSnapshot(final File file) throws IOException { - + private SnapshotRecord getSnapshotRecord(final File file) throws IOException { + if (file == null) throw new IllegalArgumentException(); @@ -459,10 +667,10 @@ final long sizeOnDisk = file.length(); - snapshotIndex.add(new SnapshotRecord(currentRootBlock, sizeOnDisk)); - + return new SnapshotRecord(currentRootBlock, sizeOnDisk); + } - + /** * Remove an snapshot from the file system and the {@link #snapshotIndex}. * @@ -1164,6 +1372,7 @@ } + @Override public IHASnapshotResponse call() throws Exception { // The quorum token (must remain valid through this operation). Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -85,6 +85,7 @@ import com.bigdata.jini.start.process.ProcessHelper; import com.bigdata.jini.util.ConfigMath; import com.bigdata.jini.util.JiniUtil; +import com.bigdata.journal.CommitCounterUtility; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.StoreState; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; @@ -1529,6 +1530,7 @@ } // Quorum that can be used to monitor the distributed quorum state. + @SuppressWarnings({ "unchecked", "rawtypes" }) final Quorum<HAGlue, QuorumClient<HAGlue>> quorum = (Quorum) new ZKQuorumImpl<HAGlue, ZKQuorumClient<HAGlue>>( replicationFactor);//, zka, acl); @@ -3154,6 +3156,7 @@ awaitNSSAndHAReady(haGlue); // Wait until self-reports RunMet. assertCondition(new Runnable() { + @Override public void run() { try { final String extendedRunState = haGlue.getExtendedRunState(); @@ -3169,6 +3172,22 @@ } /** + * Assert that a snapshot exists for the specific commit point. + * + * @param snapshotDir + * The snapshot directory for the service. + * @param commitCounter + * The commit point. + */ + protected void assertSnapshotExists(final File snapshotDir, + long commitCounter) { + + assertTrue(CommitCounterUtility.getCommitCounterFile(snapshotDir, + commitCounter, SnapshotManager.SNAPSHOT_EXT).exists()); + + } + + /** * Await the specified snapshot. * * @param server @@ -3183,6 +3202,7 @@ // Wait until self-reports RunMet. assertCondition(new Runnable() { + @Override public void run() { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -115,6 +115,7 @@ final HAGlue serverB = startB(); // Await quorum meet. + @SuppressWarnings("unused") final long token = awaitMetQuorum(); // Wait until both services are ready. @@ -130,14 +131,38 @@ // Verify/await snapshot on A. awaitSnapshotExists(serverA, commitCounter); + // Verify existence of the snapshot file. + assertSnapshotExists(getSnapshotDirA(), commitCounter); + // Verify/await snapshot on B. 
awaitSnapshotExists(serverB, commitCounter); + // Verify existence of the snapshot file. + assertSnapshotExists(getSnapshotDirB(), commitCounter); + + /* + * Restart B and verify that the service is aware of the snapshot + * after a restart. + */ + restartB(); + + // Verify existence of the snapshot file after restart. + assertSnapshotExists(getSnapshotDirB(), commitCounter); + + /* + * Restart A and verify that the service is aware of the snapshot + * after a restart. + */ + restartA(); + + // Verify existence of the snapshot file after restart. + assertSnapshotExists(getSnapshotDirA(), commitCounter); + } /** * Verify that C snapshots the journal when it enters RunMet after - * resynchronizing from A+B. (This can just be start A+B, await quorum meet, + * resynchronizing from A+B. (This can be just start A+B, await quorum meet, * then start C. C will resync from the leader. The snapshot should be taken * when resync is done and we enter RunMet.) */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
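[Editor's note: a minimal sketch of the IServiceInit pattern introduced in r8422 above. The constructor stays cheap and only wires references, while the potentially slow startup work (here, the parallel HALog/snapshot scans) is deferred to the Callable returned by init(), which the owning service submits to its executor and awaits. This is an illustrative sketch, not the project's code: ExampleManager and InitPatternDemo are hypothetical stand-ins for SnapshotManager and HAJournal, and the real implementation runs the leaf-directory scan on a LatchedExecutor bounded by the startupThreads option.]

{{{
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Same contract as com.bigdata.service.IServiceInit in r8422. */
interface IServiceInit<T> {

    /** Return a task that must be used to initialize the service. */
    Callable<T> init();

}

/** Hypothetical service: the constructor does no slow work. */
class ExampleManager implements IServiceInit<Void> {

    @Override
    public Callable<Void> init() {
        return new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // Slow post-constructor work goes here, e.g. scanning a
                // directory tree and populating an in-memory index.
                return null;
            }
        };
    }

}

public class InitPatternDemo {

    public static void main(final String[] args) throws Exception {
        final ExecutorService exec = Executors.newCachedThreadPool();
        try {
            final ExampleManager mgr = new ExampleManager(); // cheap ctor
            // The owner schedules and awaits the initialization task, as
            // HAJournal does for its SnapshotManager in this commit. Any
            // failure surfaces through Future.get() as ExecutionException.
            exec.submit(mgr.init()).get();
        } finally {
            exec.shutdown();
        }
    }

}
}}}

[The same shape is what lets the FIXME in HAJournalServer.startUpHook() talk about running the HALog and snapshot index builds in parallel and then entering a critical section: each component exposes its startup as a Future that the server can compose and await.]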
From: <tho...@us...> - 2014-06-02 16:44:02
|
Revision: 8436 http://sourceforge.net/p/bigdata/code/8436
Author: thompsonbry
Date: 2014-06-02 16:43:56 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
See #966 (Failed to get namespace list under concurrent update)

Martyn and I worked through the REST API transaction semantics and have found and fixed a few issues. He is going to continue a review to normalize:

- use of launderThrowable()
- patterns for try/finally for methods that perform mutations.

The desired pattern looks like this:

{{{
} finally {
    if (conn != null) {
        if (!success)
            conn.rollback();
        conn.close();
    }
}
} catch (Throwable t) {
    throw BigdataRDFServlet.launderThrowable(t, resp, ""/*summary-of-REST_API_CALL*/);
}
}}}

This commit includes the following changes:

- DefaultResourceLocator: identified and marked a possible hotspot.

- GlobalRowStoreHelper: get(timestamp) now invokes getGlobalRowStore() when timestamp==ITx.UNISOLATED. getGlobalRowStore() has implicit creation semantics for the GRS. This way the two methods have the same semantics for that timestamp.

- AbstractTripleStore: @Override annotations.

- TestLocalTripleStoreDestroy: modified to check post-conditions after calling tripleStore.commit().

- BigdataSail.createLTS(): fixed issues with some abnormal code paths which could leave the global semaphore or the write lock held and thus block further updates against the DB/SAIL.

Webapp:

- MultiTenancyServlet: fixed a failure to hold a transaction open across the operation, which was the root cause of this ticket.

- Documentation and throwable handling fixes to several servlets. Martyn will continue to work on this aspect of the ticket.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:41:49 UTC (rev 8435)
+++
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -482,7 +482,7 @@ protected Properties locateResource(final String namespace, final long timestamp, final AtomicReference<IIndexManager> foundOn) { - synchronized (seeAlso) { + synchronized (seeAlso) { // FIXME Probably a read/write lock since [seeAlso] normally empty. for (IIndexManager indexManager : seeAlso.keySet()) { @@ -1126,7 +1126,7 @@ * * @see #locateResource(String) */ - public void add(IIndexManager indexManager) { + public void add(final IIndexManager indexManager) { if (indexManager == null) throw new IllegalArgumentException(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -178,6 +178,13 @@ if (log.isInfoEnabled()) log.info(TimestampUtility.toString(timestamp)); + if (timestamp == ITx.UNISOLATED) { + + /* This version does an implicit create if the GRS does not exist. */ + return getGlobalRowStore(); + + } + final IIndex ndx; /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -1730,7 +1730,8 @@ } - public void destroy() { + @Override + final public void destroy() { assertWritable(); @@ -2142,6 +2143,7 @@ * @throws IllegalStateException * if the view is read only. */ + @Override public long commit() { if (isReadOnly()) @@ -2163,6 +2165,7 @@ } + @Override final public long getTermCount() { long rangeCount = 0L; @@ -2175,6 +2178,7 @@ } + @Override final public long getURICount() { long rangeCount = 0L; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -41,6 +41,7 @@ import com.bigdata.relation.RelationSchema; import com.bigdata.relation.locator.DefaultResourceLocator; import com.bigdata.sparse.ITPS; +import com.bigdata.sparse.SparseRowStore; /** * Test suite to verify the semantics of destroying a {@link LocalTripleStore}, @@ -94,12 +95,19 @@ try { + final long lastCommitTime = store.getIndexManager().getLastCommitTime(); + // Note: Will be in lexical order for Unicode. - final String[] namespaces = getNamespaces(indexManager).toArray( - new String[] {}); + assertEquals( + new String[] { namespace }, + getNamespaces(indexManager, ITx.UNISOLATED).toArray( + new String[] {})); + // Note found before the create. 
+ assertEquals( + new String[] {}, + getNamespaces(indexManager, lastCommitTime - 1).toArray( + new String[] {})); - assertEquals(new String[] { namespace }, namespaces); - assertTrue(store == indexManager.getResourceLocator().locate( store.getNamespace(), ITx.UNISOLATED)); assertTrue(store.getLexiconRelation() == indexManager @@ -118,9 +126,16 @@ */ store.destroy(); + // Did not go through a commit on the LTS. + assertEquals(lastCommitTime, store.getIndexManager() + .getLastCommitTime()); + // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager, ITx.UNISOLATED).isEmpty()); + // but not in the last commited view. + assertFalse(getNamespaces(indexManager, lastCommitTime).isEmpty()); + // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( namespace, ITx.UNISOLATED)); @@ -134,7 +149,19 @@ ITx.UNISOLATED)); assertNull(indexManager.getIndex(primaryStatementIndexName, ITx.UNISOLATED)); + // but not at the last commit time. + assertNotNull(indexManager.getIndex(primaryStatementIndexName, + lastCommitTime)); + + /* + * Commit. + */ + store.commit(); + // No longer present at the last commit time. + assertTrue(getNamespaces(indexManager, + store.getIndexManager().getLastCommitTime()).isEmpty()); + } finally { indexManager.destroy(); @@ -175,8 +202,8 @@ store.addTerm(store.getValueFactory().createLiteral("bigdata")); // Note: Will be in lexical order for Unicode. - final String[] namespaces = getNamespaces(indexManager).toArray( - new String[] {}); + final String[] namespaces = getNamespaces(indexManager, + ITx.UNISOLATED).toArray(new String[] {}); assertEquals(new String[] { namespace }, namespaces); @@ -202,7 +229,7 @@ store.destroy(); // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( @@ -222,6 +249,32 @@ assertNotNull(indexManager.getResourceLocator().locate(namespace, commitTime-1)); + /* + * Commit the destroy. + */ + store.commit(); + + + // global row store entry is gone. + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); + + // resources can not be located. + assertTrue(null == indexManager.getResourceLocator().locate( + namespace, ITx.UNISOLATED)); + assertTrue(null == indexManager.getResourceLocator().locate( + namespaceLexiconRelation, ITx.UNISOLATED)); + assertTrue(null == indexManager.getResourceLocator().locate( + namespaceSPORelation, ITx.UNISOLATED)); + + // indicies are gone. + assertNull(indexManager.getIndex(lexiconRelationIndexName, + ITx.UNISOLATED)); + assertNull(indexManager.getIndex(primaryStatementIndexName, + ITx.UNISOLATED)); + + // The committed version of the triple store remains visible. + assertNotNull(indexManager.getResourceLocator().locate(namespace, + commitTime-1)); } finally { indexManager.destroy(); @@ -234,15 +287,24 @@ * Return a list of the namespaces for the {@link AbstractTripleStore}s * registered against the bigdata instance. */ - static private List<String> getNamespaces(final IIndexManager indexManager) { + static private List<String> getNamespaces(final IIndexManager indexManager, + final long timestamp) { // the triple store namespaces. 
final List<String> namespaces = new LinkedList<String>(); + final SparseRowStore grs = indexManager.getGlobalRowStore(timestamp); + + if (grs == null) { + + return namespaces; + + } + // scan the relation schema in the global row store. @SuppressWarnings("unchecked") - final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager - .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE); + final Iterator<ITPS> itr = (Iterator<ITPS>) grs + .rangeIterator(RelationSchema.INSTANCE); while (itr.hasNext()) { @@ -348,7 +410,7 @@ * * Note: Will be in lexical order for Unicode. */ - final String[] namespaces = getNamespaces(indexManager) + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED) .toArray(new String[] {}); assertEquals(new String[] { namespace, namespace1 }, namespaces); @@ -404,7 +466,7 @@ kb.destroy(); // global row store entry is gone. - final String[] namespaces = getNamespaces(indexManager).toArray( + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray( new String[] {}); assertEquals(new String[] { namespace1 }, namespaces); @@ -438,7 +500,7 @@ * * Note: Will be in lexical order for Unicode. */ - final String[] namespaces = getNamespaces(indexManager).toArray( + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray( new String[] {}); assertEquals(new String[] { namespace1 }, namespaces); @@ -477,7 +539,7 @@ kb1.destroy(); // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -698,23 +698,16 @@ * during the middle of a BigdataSailConnection level operation (or visa * versa). */ + boolean acquiredConnection = false; try { - // acquire the unisolated connection permit. - journal.acquireUnisolatedConnection(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - try { + try { + // acquire the unisolated connection permit. + journal.acquireUnisolatedConnection(); + acquiredConnection = true; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } -// final boolean create; -// final long tx0 = txService.newTx(ITx.READ_COMMITTED); -// try { -// // verify kb does not exist (can not be located). -// create = journal.getResourceLocator().locate(namespace, tx0) == null; -// } finally { -// txService.abort(tx0); -// } - // Check for pre-existing instance. { @@ -730,29 +723,50 @@ } // Create a new instance. -// if (create) { - final LocalTripleStore lts = new LocalTripleStore( - journal, namespace, ITx.UNISOLATED, properties); - if (Boolean.parseBoolean(properties.getProperty( BigdataSail.Options.ISOLATABLE_INDICES, BigdataSail.Options.DEFAULT_ISOLATABLE_INDICES))) { + /* + * Isolatable indices: requires the use of a tx to create + * the KB instance. 
+ */ + final long txCreate = txService.newTx(ITx.UNISOLATED); - - final AbstractTripleStore txCreateView = new LocalTripleStore( - journal, namespace, Long.valueOf(txCreate), properties); - - // create the kb instance within the tx. - txCreateView.create(); - - // commit the tx. - txService.commit(txCreate); + + boolean ok = false; + try { + + final AbstractTripleStore txCreateView = new LocalTripleStore( + journal, namespace, Long.valueOf(txCreate), + properties); + + // create the kb instance within the tx. + txCreateView.create(); + + // commit the tx. + txService.commit(txCreate); + + ok = true; + + } finally { + + if (!ok) + txService.abort(txCreate); + + } } else { + /* + * Create KB without isolatable indices. + */ + + final LocalTripleStore lts = new LocalTripleStore( + journal, namespace, ITx.UNISOLATED, properties); + lts.create(); } @@ -790,7 +804,8 @@ } finally { - journal.releaseUnisolatedConnection(); + if (acquiredConnection) + journal.releaseUnisolatedConnection(); } @@ -1314,22 +1329,40 @@ "UNISOLATED connection is not reentrant."); } - if (getDatabase().getIndexManager() instanceof Journal) { - // acquire permit from Journal. - ((Journal) getDatabase().getIndexManager()) - .acquireUnisolatedConnection(); - } + boolean acquiredConnection = false; + Lock writeLock = null; + BigdataSailConnection conn = null; + try { + if (getDatabase().getIndexManager() instanceof Journal) { + // acquire permit from Journal. + ((Journal) getDatabase().getIndexManager()) + .acquireUnisolatedConnection(); + acquiredConnection = true; + } - // acquire the write lock. - final Lock writeLock = lock.writeLock(); - writeLock.lock(); + // acquire the write lock. + writeLock = lock.writeLock(); + writeLock.lock(); - // new writable connection. - final BigdataSailConnection conn = new BigdataSailConnection(database, - writeLock, true/* unisolated */).startConn(); + // new writable connection. + conn = new BigdataSailConnection(database, writeLock, true/* unisolated */) + .startConn(); + } finally { + if (conn == null) { + // Did not obtain connection. + if (writeLock != null) { + // release write lock. + writeLock.unlock(); + } + if (acquiredConnection) { + // release permit. + ((Journal) getDatabase().getIndexManager()) + .releaseUnisolatedConnection(); + } + } + } + return conn; - return conn; - } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -2167,7 +2167,7 @@ * @param namespace * The namespace. * @param timestamp - * The timestamp. + * A timestamp -or- a tx identifier. * * @return The {@link AbstractTripleStore} -or- <code>null</code> if none is * found for that namespace and timestamp. @@ -2205,7 +2205,7 @@ * * @throws RepositoryException */ - public BigdataSailRepositoryConnection getUnisolatedConnection( + public BigdataSailRepositoryConnection getUnisolatedConnection( // FIXME REVIEW CALLERS final String namespace) throws SailException, RepositoryException { // resolve the default namespace. 
@@ -2247,7 +2247,7 @@ try { - return getNamespaces(timestamp, tx); + return getNamespacesTx(tx); } finally { @@ -2257,25 +2257,25 @@ } - private List<String> getNamespaces(long timestamp, final long tx) { + /*package*/ List<String> getNamespacesTx(final long tx) { - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. - timestamp = getIndexManager().getLastCommitTime(); - - } - // the triple store namespaces. final List<String> namespaces = new LinkedList<String>(); final SparseRowStore grs = getIndexManager().getGlobalRowStore( - timestamp); + tx); if (grs == null) { - log.warn("No GRS @ timestamp=" - + TimestampUtility.toString(timestamp)); + log.warn("No GRS @ tx=" + + TimestampUtility.toString(tx)); // Empty. return namespaces; @@ -2346,6 +2346,7 @@ long tx = timestamp; // use dirty reads unless Journal. if (getIndexManager() instanceof Journal) { + final ITransactionService txs = ((Journal) getIndexManager()) .getLocalTransactionManager().getTransactionService(); @@ -2368,12 +2369,9 @@ * The transaction identifier. */ public void abortTx(final long tx) { - if (getIndexManager() instanceof Journal) { -// if (!TimestampUtility.isReadWriteTx(tx)) { -// // Not a transaction. -// throw new IllegalStateException(); -// } + if (getIndexManager() instanceof Journal) { + final ITransactionService txs = ((Journal) getIndexManager()) .getLocalTransactionManager().getTransactionService(); @@ -2388,4 +2386,22 @@ } +// public void commitTx(final long tx) { +// +// if (getIndexManager() instanceof Journal) { +// +// final ITransactionService txs = ((Journal) getIndexManager()) +// .getLocalTransactionManager().getTransactionService(); +// +// try { +// txs.commit(tx); +// } catch (IOException e) { +// // Note: Local operation. Will not throw IOException. +// throw new RuntimeException(e); +// } +// +// } +// +// } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -146,17 +146,35 @@ * client's response. This code path should be used iff we have already * begun writing the response. Otherwise, an HTTP error status should be * used instead. + * <p> + * This method is invoked as follows: * + * <pre> + * throw launderThrowable(...) + * </pre> + * + * This keeps the compiler happy since it will understand that the caller's + * method always exits with a thrown cause. + * * @param t * The thrown error. * @param os * The stream on which the response will be written. * @param queryStr - * The SPARQL Query -or- SPARQL Update command (if available). + * The SPARQL Query -or- SPARQL Update command (if available) + * -or- a summary of the REST API command -or- an empty string if + * nothing else is more appropriate. * - * @return The laundered exception. + * @return Nothing. The pattern of the returned throwable is used to make + * the compiler happy. * - * @throws Exception + * @throws IOException + * if the cause was an {@link IOException} + * @throws Error + * if the cause was an {@link Error}. 
+ * @throws RuntimeException + * if the cause was a {@link RuntimeException} or anything not + * declared to be thrown by this method. */ protected static RuntimeException launderThrowable(final Throwable t, final HttpServletResponse resp, final String queryStr) @@ -217,7 +235,7 @@ } } if (t instanceof RuntimeException) { - return (RuntimeException) t; + throw (RuntimeException) t; } else if (t instanceof Error) { throw (Error) t; } else if (t instanceof IOException) { @@ -239,10 +257,12 @@ * namespace (or it should be configured for each graph explicitly, or * we should bundle the (namespace,timestamp) together as a single * object). + * + * @see QueryServlet#ATTR_TIMESTAMP; */ protected long getTimestamp(final HttpServletRequest req) { - final String timestamp = req.getParameter("timestamp"); + final String timestamp = req.getParameter(QueryServlet.ATTR_TIMESTAMP); if (timestamp == null) { @@ -342,7 +362,7 @@ protected void reportModifiedCount(final HttpServletResponse resp, final long nmodified, final long elapsed) throws IOException { - final StringWriter w = new StringWriter(); + final StringWriter w = new StringWriter(); final XMLBuilder t = new XMLBuilder(w); @@ -422,40 +442,37 @@ /* * CONNEG for the MIME type. */ - { + final String acceptStr = req.getHeader("Accept"); - final String acceptStr = req.getHeader("Accept"); + final ConnegUtil util = new ConnegUtil(acceptStr); - final ConnegUtil util = new ConnegUtil(acceptStr); + // The best RDFFormat for that Accept header. + RDFFormat format = util.getRDFFormat(); - // The best RDFFormat for that Accept header. - RDFFormat format = util.getRDFFormat(); - - if (format == null) - format = RDFFormat.RDFXML; + if (format == null) + format = RDFFormat.RDFXML; - resp.setStatus(HTTP_OK); + resp.setStatus(HTTP_OK); - resp.setContentType(format.getDefaultMIMEType()); + resp.setContentType(format.getDefaultMIMEType()); - final OutputStream os = resp.getOutputStream(); - try { - final RDFWriter writer = RDFWriterRegistry.getInstance() - .get(format).getWriter(os); - writer.startRDF(); - final Iterator<Statement> itr = g.iterator(); - while (itr.hasNext()) { - final Statement stmt = itr.next(); - writer.handleStatement(stmt); - } - writer.endRDF(); - os.flush(); - } catch (RDFHandlerException e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); - } finally { - os.close(); + final OutputStream os = resp.getOutputStream(); + try { + final RDFWriter writer = RDFWriterRegistry.getInstance() + .get(format).getWriter(os); + writer.startRDF(); + final Iterator<Statement> itr = g.iterator(); + while (itr.hasNext()) { + final Statement stmt = itr.next(); + writer.handleStatement(stmt); } + writer.endRDF(); + os.flush(); + } catch (RDFHandlerException e) { + // log.error(e, e); + throw launderThrowable(e, resp, ""); + } finally { + os.close(); } } @@ -471,34 +488,31 @@ /* * CONNEG for the MIME type. */ - { + final String acceptStr = req.getHeader("Accept"); - final String acceptStr = req.getHeader("Accept"); + final ConnegUtil util = new ConnegUtil(acceptStr); - final ConnegUtil util = new ConnegUtil(acceptStr); + // The best format for that Accept header. + PropertiesFormat format = util.getPropertiesFormat(); - // The best format for that Accept header. 
- PropertiesFormat format = util.getPropertiesFormat(); - - if (format == null) - format = PropertiesFormat.XML; + if (format == null) + format = PropertiesFormat.XML; - resp.setStatus(HTTP_OK); + resp.setStatus(HTTP_OK); - resp.setContentType(format.getDefaultMIMEType()); + resp.setContentType(format.getDefaultMIMEType()); - final OutputStream os = resp.getOutputStream(); - try { - final PropertiesWriter writer = PropertiesWriterRegistry.getInstance() - .get(format).getWriter(os); - writer.write(properties); - os.flush(); - } catch (IOException e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); - } finally { - os.close(); - } + final OutputStream os = resp.getOutputStream(); + try { + final PropertiesWriter writer = PropertiesWriterRegistry + .getInstance().get(format).getWriter(os); + writer.write(properties); + os.flush(); + } catch (IOException e) { + // log.error(e, e); + throw launderThrowable(e, resp, ""); + } finally { + os.close(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -138,10 +138,9 @@ } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -77,35 +77,6 @@ public CountersServlet() { } -// /** -// * Access to the {@link CounterSet} exposed by this service. -// */ -// private final ICounterSetAccess accessor; -// -// /** -// * The service reference iff one one specified to the ctor (may be null). -// */ -// private final IService service; -// -// /** -// * The minimum time before a client can force the re-materialization of the -// * {@link CounterSet}. This is designed to limit the impact of the client on -// * the service. -// * -// * TODO Configuration parameter for {@link #minUpdateLatency} -// */ -// private final long minUpdateLatency = 5000; -// -// /** -// * The last materialized {@link CounterSet}. -// */ -// private volatile CounterSet counterSet = null; -// -// /** -// * The timestamp of the last materialized {@link CounterSet}. -// */ -// private volatile long lastTimestamp = 0L; - /** * Performance counters * <pre> @@ -115,48 +86,9 @@ @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + + try { -// final ByteArrayOutputStream baos = new ByteArrayOutputStream( -// 2 * Bytes.kilobyte32); -// -// final InputStream is; - -// /* -// * If the request uri is one of the pre-declared resources then we send -// * that resource. -// */ -// final DeclaredResource decl = allowedClassPathResources.get(req.uri); -// -// if (decl != null) { -// -// // send that resource. 
-// return sendClasspathResource(decl); -// -// } - - /* - * Materialization the CounterSet iff necessary or stale. - * - * Note: This bit needs to be single threaded to avoid concurrent - * requests causing concurrent materialization of the counter set. - */ -// final ICounterSelector counterSelector; -// synchronized(this) { -// -// final long now = System.currentTimeMillis(); -// -// final long elapsed = now - lastTimestamp; -// -// if (counterSet == null || elapsed > minUpdateLatency/* ms */) { -// -// counterSet = accessor.getCounters(); -// -// } -// -// counterSelector = new CounterSetSelector(counterSet); -// -// } - // TODO Hook this how? (NSS does not define an IService right now) final IService service = null; @@ -255,6 +187,12 @@ if (log.isTraceEnabled()) log.trace("done"); + + } catch (Throwable t) { + + throw BigdataRDFServlet.launderThrowable(t, resp, ""); + + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -131,7 +131,6 @@ */ final PipedOutputStream os = new PipedOutputStream(); final InputStream is = newPipedInputStream(os); - try { // Use this format for the query results. final RDFFormat format = RDFFormat.NTRIPLES; @@ -215,17 +214,10 @@ } - } catch (Throwable t) { + } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, queryStr); + throw BigdataRDFServlet.launderThrowable(t, resp, queryStr); - } - - } catch (Exception ex) { - - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); - } } @@ -382,10 +374,9 @@ } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } @@ -480,8 +471,6 @@ try { - try { - BigdataSailRepositoryConnection conn = null; try { @@ -528,19 +517,20 @@ } - } catch (Throwable t) { + } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, ""); + throw BigdataRDFServlet.launderThrowable(t, resp, "s=" + s + ",p=" + + p + ",o=" + o + ",c=" + c); - } + } - } catch (Exception ex) { +// } catch (Exception ex) { +// +// // Will be rendered as an INTERNAL_ERROR. +// throw new RuntimeException(ex); +// +// } - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); - - } - } // static private transient final Resource[] nullArray = new Resource[]{}; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -369,14 +369,14 @@ os.flush(); } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, "DESCRIBE" // queryStr // TODO Report as "DESCRIBE uri(s)". 
); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -140,7 +140,11 @@ final String namespace = getNamespace(req); final String contentType = req.getContentType(); - if(contentType==null) buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "Content-Type not specified."); + + if (contentType == null) + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "Content-Type not specified."); + if (log.isInfoEnabled()) log.info("Request body: " + contentType); @@ -220,6 +224,7 @@ final AtomicLong nmodified = new AtomicLong(0L); BigdataSailRepositoryConnection conn = null; + boolean success = false; try { conn = getBigdataRDFContext() @@ -256,26 +261,26 @@ reportModifiedCount(resp, nmodified.get(), elapsed); + success = true; + return; - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + + } } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -45,7 +45,6 @@ import com.bigdata.rdf.properties.PropertiesParserFactory; import com.bigdata.rdf.properties.PropertiesParserRegistry; import com.bigdata.rdf.sail.BigdataSail; -import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.ScaleOutTripleStore; @@ -346,19 +345,23 @@ BigdataSail.Options.NAMESPACE, BigdataSail.Options.DEFAULT_NAMESPACE); - final long timestamp = ITx.UNISOLATED; + { - // resolve the namespace. - final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() - .getResourceLocator().locate(namespace, timestamp); + final long timestamp = ITx.UNISOLATED; + + // resolve the namespace. + final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() + .getResourceLocator().locate(namespace, timestamp); - if (tripleStore != null) { - /* - * Already exists. - */ - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "EXISTS: " - + namespace); - return; + if (tripleStore != null) { + /* + * Already exists. 
+ */ + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "EXISTS: " + namespace); + return; + } + } try { @@ -397,8 +400,6 @@ } catch (Throwable e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); } @@ -419,54 +420,105 @@ final long timestamp = ITx.UNISOLATED; - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { - /* - * There is no such triple/quad store instance. - */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } - + boolean acquiredConnection = false; try { + + if (getIndexManager() instanceof Journal) { + // acquire permit from Journal. + ((Journal) getIndexManager()).acquireUnisolatedConnection(); + acquiredConnection = true; + } - final BigdataSail sail = new BigdataSail(tripleStore); + final AbstractTripleStore tripleStore = getBigdataRDFContext() + .getTripleStore(namespace, timestamp); - BigdataSailConnection con = null; - - try { - - sail.initialize(); - // This basically puts a lock on the KB instance. - con = sail.getUnisolatedConnection(); - // Destroy the KB instance. - tripleStore.destroy(); - // Commit. - con.commit(); - - } finally { - - if (con != null) - con.close(); - - sail.shutDown(); - + if (tripleStore == null) { + /* + * There is no such triple/quad store instance. + */ + buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); + return; } + // Destroy the KB instance. + tripleStore.destroy(); + + tripleStore.commit(); + buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN, "DELETED: " + namespace); } catch (Throwable e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); + + } finally { + + if (acquiredConnection) { + ((Journal) getIndexManager()).releaseUnisolatedConnection(); + + } + } } + +// private void doDeleteNamespace(final HttpServletRequest req, +// final HttpServletResponse resp) throws IOException { +// +// final String namespace = getNamespace(req); +// +// final long timestamp = ITx.UNISOLATED; +// +// final AbstractTripleStore tripleStore = getBigdataRDFContext() +// .getTripleStore(namespace, timestamp); +// +// if (tripleStore == null) { +// /* +// * There is no such triple/quad store instance. +// */ +// buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); +// return; +// } +// +// try { +// +// final BigdataSail sail = new BigdataSail(tripleStore); +// +// BigdataSailConnection con = null; +// +// try { +// +// sail.initialize(); +// // This basically puts a lock on the KB instance. +// con = sail.getUnisolatedConnection(); +// // Destroy the KB instance. +// tripleStore.destroy(); +// // Commit. +// con.commit(); +// +// } finally { +// +// if (con != null) +// con.close(); +// +// sail.shutDown(); +// +// } +// +// buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN, "DELETED: " +// + namespace); +// +// } catch (Throwable e) { +// +// log.error(e, e); +// +// throw launderThrowable(e, resp, ""); +// +// } +// +// } /** * Send the configuration properties for the addressed KB instance. @@ -480,21 +532,21 @@ final String namespace = getNamespace(req); - long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. 
- timestamp = getIndexManager().getLastCommitTime(); - - } - final long tx = getBigdataRDFContext().newTx(timestamp); try { final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); + .getTripleStore(namespace, tx); if (tripleStore == null) { /* @@ -523,15 +575,15 @@ private void doDescribeNamespaces(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. - timestamp = getIndexManager().getLastCommitTime(); - - } - final boolean describeEachNamedGraph; { final String s = req.getParameter(DESCRIBE_EACH_NAMED_GRAPH); @@ -565,8 +617,8 @@ final String namespace = getBigdataRDFContext().getConfig().namespace; - describeNamespace(req, g, namespace, describeEachNamedGraph, - timestamp); + describeNamespaceTx(req, g, namespace, describeEachNamedGraph, + tx); } else { @@ -574,12 +626,12 @@ * The set of registered namespaces for KBs. */ final List<String> namespaces = getBigdataRDFContext() - .getNamespaces(timestamp); + .getNamespacesTx(tx); for (String namespace : namespaces) { - describeNamespace(req, g, namespace, - describeEachNamedGraph, timestamp); + describeNamespaceTx(req, g, namespace, + describeEachNamedGraph, tx); } @@ -598,14 +650,14 @@ /** * Describe a namespace into the supplied Graph object. */ - private void describeNamespace(final HttpServletRequest req, + private void describeNamespaceTx(final HttpServletRequest req, final Graph g, final String namespace, - final boolean describeEachNamedGraph, final long timestamp) + final boolean describeEachNamedGraph, final long tx) throws IOException { // Get a view onto that KB instance for that timestamp. final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); + .getTripleStore(namespace, tx); if (tripleStore == null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -133,6 +133,14 @@ */ static final transient String ATTR_UUID = "uuid"; + /** + * The name of the URL query parameter which indicates the timestamp against + * which an operation will be carried out. + * + * @see BigdataRDFServlet#getTimestamp(HttpServletRequest) + */ + static final transient String ATTR_TIMESTAMP = "timestamp"; + // /** // * The name of the request attribute for the {@link AbstractQueryTask}. // */ @@ -244,45 +252,57 @@ */ private void doServiceDescription(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + + /** + * Protect the entire operation with a transaction. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency + * problem with list namespaces and create namespace </a> + */ + final long tx = getBigdataRDFContext().newTx(getTimestamp(req)); - final String namespace = getNamespace(req); + try { + + final AbstractTripleStore tripleStore = getBigdataRDFContext() + .getTripleStore(getNamespace(req), tx); - final long timestamp = getTimestamp(req); + if (tripleStore == null) { + /* + * There is no such triple/quad store instance. + */ + buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); + return; + } - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { + // The serviceURIs for this graph. + final String[] serviceURI = BigdataServlet.getServiceURIs( + getServletContext(), req); + /* - * There is no such triple/quad store instance. + * TODO Resolve the SD class name and ctor via a configuration + * property for extensible descriptions. */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } + final Graph g = new GraphImpl(); + { - // The serviceURIs for this graph. - final String[] serviceURI = BigdataServlet.getServiceURIs( - getServletContext(), req); + final SD sd = new SD(g, tripleStore, serviceURI); - /* - * TODO Resolve the SD class name and ctor via a configuration property - * for extensible descriptions. - */ - final Graph g = new GraphImpl(); - { + final SparqlEndpointConfig config = getBigdataRDFContext() + .getConfig(); - final SD sd = new SD(g, tripleStore, serviceURI); + sd.describeService(true/* describeStatistics */, + config.describeEachNamedGraph); - final SparqlEndpointConfig config = getBigdataRDFContext() - .getConfig(); + } - sd.describeService(true/* describeStatistics */, - config.describeEachNamedGraph); + sendGraph(req, resp, g); + } finally { + + getBigdataRDFContext().abortTx(tx); + } - sendGraph(req, resp, g); - } /** @@ -386,11 +406,11 @@ ft.get(); } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, updateStr); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } @@ -627,11 +647,11 @@ } } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, queryStr); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -91,7 +91,7 @@ final String namespace = getNamespace(req); - final long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); // FIXME Use newTx(). Why does this even look for a KB instance? final AbstractTripleStore tripleStore = getBigdataRDFContext() .getTripleStore(namespace, timestamp); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
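The pattern fragment in the log message above is shorthand for two nested try blocks: an inner try/finally that rolls back and closes the connection, wrapped in an outer try/catch that reports the error via launderThrowable(). A minimal sketch of the expanded form, following the InsertServlet change in this revision; the getConnection() and doSomeMutation() helpers are hypothetical placeholders, not code from the commit:
{{{
try {
    BigdataSailRepositoryConnection conn = null;
    boolean success = false;
    try {
        conn = getConnection();   // hypothetical: obtain the mutable connection.
        doSomeMutation(conn);     // hypothetical: the REST API mutation.
        conn.commit();
        success = true;
    } finally {
        if (conn != null) {
            if (!success)
                conn.rollback();  // undo the partial mutation.
            conn.close();         // always release the connection.
        }
    }
} catch (Throwable t) {
    // launderThrowable() never returns normally; the throw form keeps the
    // compiler happy and ensures every abnormal path is reported once.
    throw BigdataRDFServlet.launderThrowable(t, resp,
            "" /* summary-of-REST_API_CALL */);
}
}}}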
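The other recurring fix in this revision, visible in QueryServlet.doServiceDescription() and MultiTenancyServlet above, brackets a multi-step read with a transaction so that a concurrent create or destroy of a namespace cannot shift the view mid-request. A condensed sketch using only calls that appear in the diff, with the loop body elided:
{{{
// Pin a single commit point for the whole operation (see #867).
final long tx = getBigdataRDFContext().newTx(getTimestamp(req));
try {
    for (String namespace : getBigdataRDFContext().getNamespacesTx(tx)) {
        // ... describe each namespace against the same tx view ...
    }
} finally {
    // Read-only operation: always abort; there is nothing to commit.
    getBigdataRDFContext().abortTx(tx);
}
}}}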
From: <tho...@us...> - 2014-06-11 13:13:24
|
Revision: 8466 http://sourceforge.net/p/bigdata/code/8466 Author: thompsonbry Date: 2014-06-11 13:13:13 +0000 (Wed, 11 Jun 2014) Log Message: ----------- Fix for Name2Addr prefix scan and improved correctness for LexiconRelation.prefixScan(). Key changes are to: - IKeyBuilderFactory - defines getPrimaryKeyBuilder() - LexiconRelation - uses the getPrimaryKeyBuilder() method. - Name2Addr - uses the getPrimaryKeyBuilder() method. Javadoc updates to PrefixFilter. Added @Override and final attributes to several classes that were touched by this fix. I have run through the TestLocalTripleStore and TestRWJournal test suites and everything is good. I am currently running TestBigdataSailWithQuads but do not anticipate any issues. I have verified that the existing tests for Name2Addr and the LexiconRelation prefix scans fail if the code uses the default collation strength rather than PRIMARY so we know that we have regression tests in place for those behaviors. See #974 (Name2Addr.indexNameScan(prefix) uses scan + filter) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -102,12 +102,14 @@ private IRabaCoder leafKeysCoder; private IRabaCoder leafValsCoder; + @Override final public IRabaCoder getLeafKeysCoder() { return leafKeysCoder; } + @Override final public IRabaCoder getLeafValuesCoder() { return leafValsCoder; @@ -213,6 +215,7 @@ } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -237,6 +240,7 @@ * that the specific configuration values are persisted, even when the * {@link DefaultTupleSerializer} is de-serialized on a different host. 
*/ + @Override final public IKeyBuilder getKeyBuilder() { if(threadLocalKeyBuilderFactory == null) { @@ -259,6 +263,30 @@ } + @Override + final public IKeyBuilder getPrimaryKeyBuilder() { + + if(threadLocalKeyBuilderFactory == null) { + + /* + * This can happen if you use the de-serialization ctor by mistake. + */ + + throw new IllegalStateException(); + + } + + /* + * TODO This should probably to a reset() before returning the object. + * However, we need to verify that no callers are assuming that it does + * NOT do a reset and implicitly relying on passing the intermediate key + * via the return value (which would be very bad style). + */ + return threadLocalKeyBuilderFactory.getPrimaryKeyBuilder(); + + } + + @Override public byte[] serializeKey(final Object obj) { if (obj == null) @@ -277,6 +305,7 @@ * @return The serialized representation of the object as a byte[] -or- * <code>null</code> if the reference is <code>null</code>. */ + @Override public byte[] serializeVal(final V obj) { return SerializerUtil.serialize(obj); @@ -287,6 +316,7 @@ * De-serializes an object from the {@link ITuple#getValue() value} stored * in the tuple (ignores the key stored in the tuple). */ + @Override public V deserialize(ITuple tuple) { if (tuple == null) @@ -308,6 +338,7 @@ * @throws UnsupportedOperationException * always. */ + @Override public K deserializeKey(ITuple tuple) { throw new UnsupportedOperationException(); @@ -327,6 +358,7 @@ */ private final static transient byte VERSION = VERSION0; + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -346,6 +378,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { out.writeByte(VERSION); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -2910,11 +2910,19 @@ * specified for <i>this</i> index. * </p> */ + @Override public IKeyBuilder getKeyBuilder() { return getTupleSerializer().getKeyBuilder(); } + + @Override + public IKeyBuilder getPrimaryKeyBuilder() { + + return getTupleSerializer().getPrimaryKeyBuilder(); + + } /** * @see Configuration#getProperty(IIndexManager, Properties, String, String, Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/filter/PrefixFilter.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -26,7 +26,7 @@ * </p> * <h4>WARNING</h4> * <p> - * <strong>The prefix keys MUST be formed with {@link StrengthEnum#Identical}. + * <strong>The prefix keys MUST be formed with {@link StrengthEnum#Primary}. * This is necessary in order to match all keys in the index since it causes the * secondary characteristics to NOT be included in the prefix key even if they * are present in the keys in the index.</strong> Using other @@ -55,20 +55,21 @@ * <p> * at IDENTICAL strength. 
The additional bytes for the IDENTICAL strength * reflect the Locale specific Unicode sort key encoding of secondary - * characteristics such as case. The successor of the PRIMARY strength byte[] is + * characteristics such as case. The successor of the IDENTICAL strength byte[] + * is * </p> * * <pre> - * [43, 75, 89, 41, 68] + * [43, 75, 89, 41, 67, 1, 9, 1, 143, 9] * </pre> * * <p> * (one was added to the last byte) which spans all keys of interest. However - * the successor of the IDENTICAL strength byte[] would + * the successor of the PRIMARY strength byte[] would * </p> * * <pre> - * [43, 75, 89, 41, 67, 1, 9, 1, 143, 9] + * [43, 75, 89, 41, 68] * </pre> * * <p> @@ -81,8 +82,8 @@ * <pre> * Properties properties = new Properties(); * - * properties.setProperty(KeyBuilder.Options.STRENGTH, StrengthEnum.Primary - * .toString()); + * properties.setProperty(KeyBuilder.Options.STRENGTH, + * StrengthEnum.Primary.toString()); * * prefixKeyBuilder = KeyBuilder.newUnicodeInstance(properties); * </pre> @@ -104,7 +105,9 @@ * partition.... * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ + * + * @see <a href="http://trac.bigdata.com/ticket/974" > + * Name2Addr.indexNameScan(prefix) uses scan + filter </a> */ public class PrefixFilter<E> extends FilterBase implements ITupleFilter<E> { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ASCIIKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -39,7 +39,6 @@ * Factory for instances that do NOT support Unicode. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class ASCIIKeyBuilderFactory implements IKeyBuilderFactory, Externalizable { @@ -59,6 +58,7 @@ /** * Representation includes all aspects of the {@link Serializable} state. */ + @Override public String toString() { StringBuilder sb = new StringBuilder(getClass().getName()); @@ -87,19 +87,35 @@ } + @Override public IKeyBuilder getKeyBuilder() { return KeyBuilder.newInstance(initialCapacity); } - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + /** + * {@inheritDoc} + * <p> + * Note: The PRIMARY is identical to the as-configured {@link IKeyBuilder} + * for ASCII. 
+ */ + @Override + public IKeyBuilder getPrimaryKeyBuilder() { + return getKeyBuilder(); + + } + + @Override + public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { + initialCapacity = in.readInt(); } - public void writeExternal(ObjectOutput out) throws IOException { + @Override + public void writeExternal(final ObjectOutput out) throws IOException { out.writeInt(initialCapacity); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -409,6 +409,7 @@ } + @Override public IKeyBuilder getKeyBuilder() { if(log.isDebugEnabled()) { @@ -422,6 +423,20 @@ } + @Override + public IKeyBuilder getPrimaryKeyBuilder() { + + if(log.isDebugEnabled()) { + + log.debug(toString()); + + } + + return KeyBuilder.newInstance(initialCapacity, collator, locale, + StrengthEnum.Primary, decompositionMode); + + } + /** * Text of the exception thrown when the ICU library is required but is not * available. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -32,7 +32,6 @@ * A factory for pre-configured {@link IKeyBuilder} instances. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface IKeyBuilderFactory { @@ -41,4 +40,15 @@ */ public IKeyBuilder getKeyBuilder(); + /** + * Return an instance of the configured {@link IKeyBuilder} that has been + * overridden to have {@link StrengthEnum#Primary} collation strength. This + * may be used to form successors for Unicode prefix scans without having + * the secondary sort ordering characteristics mucking things up. + * + * @see <a href="http://trac.bigdata.com/ticket/974" > + * Name2Addr.indexNameScan(prefix) uses scan + filter </a> + */ + public IKeyBuilder getPrimaryKeyBuilder(); + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/keys/ThreadLocalKeyBuilderFactory.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -31,8 +31,9 @@ import com.bigdata.btree.IIndex; /** + * A thread-local implementation. 
+ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class ThreadLocalKeyBuilderFactory implements IKeyBuilderFactory { @@ -58,6 +59,7 @@ */ private ThreadLocal<IKeyBuilder> threadLocalKeyBuilder = new ThreadLocal<IKeyBuilder>() { + @Override protected synchronized IKeyBuilder initialValue() { return delegate.getKeyBuilder(); @@ -67,13 +69,41 @@ }; /** + * {@inheritDoc} + * <p> * Return a {@link ThreadLocal} {@link IKeyBuilder} instance configured * using the {@link IKeyBuilderFactory} specified to the ctor. */ + @Override public IKeyBuilder getKeyBuilder() { return threadLocalKeyBuilder.get(); } + private ThreadLocal<IKeyBuilder> threadLocalPrimaryKeyBuilder = new ThreadLocal<IKeyBuilder>() { + + @Override + protected synchronized IKeyBuilder initialValue() { + + return delegate.getPrimaryKeyBuilder(); + + } + + }; + + /** + * {@inheritDoc} + * <p> + * Return a {@link ThreadLocal} {@link IKeyBuilder} instance configured + * using the {@link IKeyBuilderFactory} specified to the ctor but with the + * {@link StrengthEnum} overriden as {@link StrengthEnum#Primary}. + */ + @Override + public IKeyBuilder getPrimaryKeyBuilder() { + + return threadLocalPrimaryKeyBuilder.get(); + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -62,7 +62,6 @@ import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; import com.bigdata.btree.IndexMetadata; -import com.bigdata.btree.keys.CollatorEnum; import com.bigdata.btree.keys.DefaultKeyBuilderFactory; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.IKeyBuilderFactory; @@ -82,9 +81,7 @@ import com.bigdata.resources.IndexManager; import com.bigdata.resources.ResourceManager; import com.bigdata.util.concurrent.ExecutionExceptions; -import com.ibm.icu.text.Collator; -import cutthecrap.utils.striterators.Filter; import cutthecrap.utils.striterators.IStriterator; import cutthecrap.utils.striterators.Resolver; import cutthecrap.utils.striterators.Striterator; @@ -185,7 +182,6 @@ * reference to the index and we need both on hand to do the commit. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ private class DirtyListener implements IDirtyListener, Comparable<DirtyListener> { @@ -194,6 +190,7 @@ boolean needsCheckpoint; long checkpointAddr = 0L; + @Override public String toString() { return "DirtyListener{name=" @@ -204,7 +201,8 @@ } - private DirtyListener(String name, ICheckpointProtocol btree, boolean needsCheckpoint) { + private DirtyListener(final String name, + final ICheckpointProtocol btree, final boolean needsCheckpoint) { assert name!=null; @@ -253,6 +251,7 @@ * * @param btree */ + @Override public void dirtyEvent(final ICheckpointProtocol btree) { assert btree == this.btree; @@ -549,6 +548,7 @@ /** * @return <i>self</i> */ + @Override public CommitIndexTask call() throws Exception { if (log.isInfoEnabled()) @@ -666,6 +666,7 @@ * >Flush indices in parallel during checkpoint to reduce IO * latency</a> */ + @Override synchronized public long handleCommit(final long commitTime) { @@ -1394,6 +1395,7 @@ } + @Override public String toString() { return "Entry{name=" + name + ",checkpointAddr=" + checkpointAddr @@ -1558,6 +1560,7 @@ */ private final static transient byte VERSION = VERSION0; + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -1575,6 +1578,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); @@ -1596,34 +1600,11 @@ * * @return The names of the indices spanned by that prefix in that index. * - * FIXME There is a problem with the prefix scan. It appears that we - * are not able to generate the key for a prefix correctly. This - * problem is being worked around by scanning the entire - * {@link Name2Addr} index and then filter for those entries that - * start with the specified prefix. This is not very scalable. - * <p> - * If you change {@link Name2Addr} to use {@link CollatorEnum#ASCII} - * then the prefix scan works correctly without that filter. The - * problem is related to how the {@link Collator} is encoding the - * keys. Neither the ICU nor the JDK collators work for this right - * now. At least the ICU collator winds up with some additional - * bytes after the "end" of the prefix that do not appear when you - * encode the entire index name. For example, compare "kb" and - * "kb.red". See TestName2Addr for more about this issue. - * <p> - * Fixing this problem MIGHT require a data migration. Or we might - * be able to handle this entirely by using an appropriate - * {@link Name2Addr#getKey(String)} and - * {@link Name2AddrTupleSerializer#serializeKey(Object)} - * implementation (depending on how the keys are being encoded). - * <p> - * Update: See <a - * href="https://sourceforge.net/apps/trac/bigdata/ticket/743"> - * AbstractTripleStore.destroy() does not filter for correct prefix - * </a> as well. Maybe the problem is just that we need to have the - * "." appended to the namespace. This could be something that is - * done automatically if the caller does not take care of it - * themselves. 
+ * @see <a href="http://trac.bigdata.com/ticket/974" > + * Name2Addr.indexNameScan(prefix) uses scan + filter </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/743"> + * AbstractTripleStore.destroy() does not filter for correct prefix + * </a> */ public static final Iterator<String> indexNameScan(final String prefix, final IIndex n2a) { @@ -1631,27 +1612,37 @@ final byte[] fromKey; final byte[] toKey; final boolean hasPrefix = prefix != null && prefix.length() > 0; - final boolean restrictScan = false; +// final boolean restrictScan = true; - if (hasPrefix && restrictScan) { + if (hasPrefix ) //&& restrictScan) + { /* * When the namespace prefix was given, generate the toKey as the * fixed length successor of the fromKey. + * + * Note: We MUST use StrengthEnum:=PRIMARY for the prefix scan in + * order to avoid the secondary collation ordering effects. */ - log.error("prefix=" + prefix); +// final IKeyBuilder keyBuilder = n2a.getIndexMetadata() +// .getTupleSerializer().getKeyBuilder(); +// final Properties properties = new Properties(); +// +// properties.setProperty(KeyBuilder.Options.STRENGTH, +// StrengthEnum.Primary.toString()); +// +// final IKeyBuilder keyBuilder = new DefaultKeyBuilderFactory( +// properties).getKeyBuilder(); final IKeyBuilder keyBuilder = n2a.getIndexMetadata() - .getTupleSerializer().getKeyBuilder(); - + .getPrimaryKeyBuilder(); + fromKey = keyBuilder.reset().append(prefix).getKey(); - // toKey = - // keyBuilder.reset().append(prefix).appendNul().getKey(); toKey = SuccessorUtil.successor(fromKey.clone()); - if (true || log.isDebugEnabled()) { + if (log.isDebugEnabled()) { log.error("fromKey=" + BytesUtil.toString(fromKey)); @@ -1670,6 +1661,9 @@ @SuppressWarnings("unchecked") final ITupleIterator<Entry> itr = n2a.rangeIterator(fromKey, toKey); + /* + * Add resolver from the tuple to the name of the index. + */ IStriterator sitr = new Striterator(itr); sitr = sitr.addFilter(new Resolver() { @@ -1686,38 +1680,63 @@ }); - if (hasPrefix && !restrictScan) { +// if (hasPrefix && !restrictScan) { +// +// /* +// * Only report the names that match the prefix. +// * +// * Note: For the moment, the filter is hacked by examining the +// * de-serialized Entry objects and only reporting those that start +// * with the [prefix]. +// */ +// +// sitr = sitr.addFilter(new Filter() { +// +// private static final long serialVersionUID = 1L; +// +// @Override +// public boolean isValid(final Object obj) { +// +// final String name = (String) obj; +// +// if (name.startsWith(prefix)) { +// +// // acceptable. +// return true; +// } +// return false; +// } +// }); +// +// } - /* - * Only report the names that match the prefix. - * - * Note: For the moment, the filter is hacked by examining the - * de-serialized Entry objects and only reporting those that start - * with the [prefix]. - */ - - sitr = sitr.addFilter(new Filter() { - - private static final long serialVersionUID = 1L; - - @Override - public boolean isValid(final Object obj) { - - final String name = (String) obj; - - if (name.startsWith(prefix)) { - - // acceptable. - return true; - } - return false; - } - }); - - } - return sitr; } +// /** +// * The SuccessorUtil does not work with CollatedKeys since it bumps the "meta/control" data +// * at the end of the key, rather than the "value" data of the key. +// * +// * It has been observed that the key data is delimited with a 01 byte, followed by meta/control +// * data with the key itself delimited by a 00 byte. 
+// * +// * Note that this has only been analyzed for the ICU collator, the standard Java collator does include +// * 00 bytes in the key. However, it too appears to delimit the value key with a 01 byte so the +// * same method should work. +// * +// * @param src - original key +// * @return the next key +// */ +// private static byte[] successor(final byte[] src) { +// final byte[] nxt = src.clone(); +// for (int i = 1; i < nxt.length; i++) { +// if (nxt[i] == 01) { // end of data +// nxt[i-1]++; +// break; +// } +// } +// +// return nxt; +// } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -82,7 +82,6 @@ * Test suite for {@link BufferMode#DiskRW} journals. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestRWJournal extends AbstractJournalTestCase { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -71,11 +71,8 @@ import com.bigdata.btree.IndexTypeEnum; import com.bigdata.btree.filter.PrefixFilter; import com.bigdata.btree.filter.TupleFilter; -import com.bigdata.btree.keys.DefaultKeyBuilderFactory; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KVO; -import com.bigdata.btree.keys.KeyBuilder; -import com.bigdata.btree.keys.StrengthEnum; import com.bigdata.cache.ConcurrentWeakValueCacheWithBatchedUpdates; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IResourceLock; @@ -105,7 +102,6 @@ import com.bigdata.rdf.model.BigdataValueSerializer; import com.bigdata.rdf.rio.StatementBuffer; import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.vocab.NoVocabulary; import com.bigdata.rdf.vocab.Vocabulary; @@ -1421,27 +1417,32 @@ } - /* + /** * The KeyBuilder used to form the prefix keys. * - * Note: The prefix keys are formed with IDENTICAL strength. This is + * Note: The prefix keys are formed with PRIMARY strength. This is * necessary in order to match all keys in the index since it causes the * secondary characteristics to NOT be included in the prefix key even * if they are present in the keys in the index. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/974" > + * Name2Addr.indexNameScan(prefix) uses scan + filter </a> */ - final LexiconKeyBuilder keyBuilder; - { + final LexiconKeyBuilder keyBuilder = ((Term2IdTupleSerializer) getTerm2IdIndex() + .getIndexMetadata().getTupleSerializer()) + .getLexiconPrimaryKeyBuilder(); +// { +// +// final Properties properties = new Properties(); +// +// properties.setProperty(KeyBuilder.Options.STRENGTH, +// StrengthEnum.Primary.toString()); +// +// keyBuilder = new Term2IdTupleSerializer( +// new DefaultKeyBuilderFactory(properties)).getLexiconKeyBuilder(); +// +// } - final Properties properties = new Properties(); - - properties.setProperty(KeyBuilder.Options.STRENGTH, - StrengthEnum.Primary.toString()); - - keyBuilder = new Term2IdTupleSerializer( - new DefaultKeyBuilderFactory(properties)).getLexiconKeyBuilder(); - - } - /* * Formulate the keys[]. * Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdTupleSerializer.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -118,12 +118,30 @@ } /** + * Return a {@link LexiconKeyBuilder} that is setup with collation strength + * PRIMARY. + * + * @see <a href="http://trac.bigdata.com/ticket/974" > + * Name2Addr.indexNameScan(prefix) uses scan + filter </a> + */ + public LexiconKeyBuilder getLexiconPrimaryKeyBuilder() { + + /* + * FIXME We should save off a reference to this to reduce heap churn + * and then use that reference in this class. + */ + return new LexiconKeyBuilder(getPrimaryKeyBuilder()); + + } + + /** * You can not decode the term:id keys since they include Unicode sort keys * and that is a lossy transform. * * @throws UnsupportedOperationException * always */ + @Override public Object deserializeKey(ITuple tuple) { throw new UnsupportedOperationException(); @@ -136,6 +154,7 @@ * @param obj * The RDF {@link Value}. */ + @Override public byte[] serializeKey(Object obj) { return getLexiconKeyBuilder().value2Key((Value)obj); @@ -149,6 +168,7 @@ * @param obj * A term identifier expressed as a {@link TermId}. */ + @Override public byte[] serializeVal(final Object obj) { final IV<?,?> iv = (IV<?,?>) obj; @@ -169,6 +189,7 @@ * De-serializes the {@link ITuple} as a {@link IV} whose value is the * term identifier associated with the key. The key itself is not decodable. 
*/ + @Override public IV deserialize(final ITuple tuple) { final ByteArrayBuffer b = tuple.getValueBuffer(); @@ -187,6 +208,7 @@ */ private final static transient byte VERSION = VERSION0; + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -204,6 +226,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestCompletionScan.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -63,7 +63,6 @@ * {@link LexiconRelation#prefixScan(org.openrdf.model.Literal[])}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestCompletionScan extends AbstractTripleStoreTestCase { @@ -85,7 +84,7 @@ */ public void test_completionScan() { - AbstractTripleStore store = getStore(); + final AbstractTripleStore store = getStore(); try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTCK.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -38,7 +38,6 @@ * Test driver for debugging Sesame or DAWG manifest tests. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestTCK extends AbstractDataDrivenSPARQLTestCase { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStore.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStore.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -42,7 +42,6 @@ * various indices are NOT isolatable. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestLocalTripleStore extends AbstractTestCase { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2014-06-11 09:34:45 UTC (rev 8465) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2014-06-11 13:13:13 UTC (rev 8466) @@ -46,7 +46,6 @@ * the pipeline join algorithm. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestBigdataSailWithQuads extends AbstractBigdataSailTestCase {
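The collation detail behind this changeset (rev 8466), in brief: the removed FIXME notes that the ICU collator winds up with additional bytes after the "end" of the prefix key for "kb" that do not appear inside the key for "kb.red", so a successor computed over the full-strength key fails to bracket the extensions of the prefix. At PRIMARY strength those trailing secondary/tertiary weights are dropped and a fixed-length successor bounds the prefix correctly. Below is a minimal, self-contained sketch of that idea written directly against the ICU4J Collator; it assumes ICU4J on the classpath and is only an illustrative stand-in for the KeyBuilder/SuccessorUtil machinery actually used by the patch (class and method names here are invented for the example).
{{{
import java.util.Arrays;

import com.ibm.icu.text.Collator;

public class PrimaryStrengthPrefixSketch {

    /**
     * Encode a string as a collation key at PRIMARY strength, so no
     * secondary/tertiary weights trail the encoded prefix.
     */
    static byte[] primaryKey(final String s) {
        final Collator collator = Collator.getInstance();
        collator.setStrength(Collator.PRIMARY);
        final byte[] b = collator.getCollationKey(s).toByteArray();
        // toByteArray() is null-terminated; drop the terminator so the
        // successor below brackets all extensions of the prefix.
        return Arrays.copyOf(b, b.length - 1);
    }

    /**
     * Fixed-length successor: add one to the key treated as a big-endian
     * unsigned integer, carrying over 0xFF bytes.
     */
    static byte[] successor(final byte[] key) {
        final byte[] nxt = key.clone();
        for (int i = nxt.length - 1; i >= 0; i--) {
            if (nxt[i] != (byte) 0xff) {
                nxt[i]++;
                return nxt;
            }
            nxt[i] = 0; // carry
        }
        throw new IllegalStateException("no fixed-length successor");
    }

    public static void main(final String[] args) {
        final byte[] fromKey = primaryKey("kb");
        final byte[] toKey = successor(fromKey);
        // The half-open range [fromKey, toKey) now spans "kb", "kb.red",
        // etc., without the trailing-weight artifacts seen at full strength.
    }
}
}}}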
From: <tho...@us...> - 2014-06-11 16:35:58
|
Revision: 8472 http://sourceforge.net/p/bigdata/code/8472 Author: thompsonbry Date: 2014-06-11 16:35:53 +0000 (Wed, 11 Jun 2014)
Log Message:
-----------
1. Additional bug fixes for the REST API connection try/finally and launderThrowable patterns.
2. Merged in a refactoring to support group commit at the NSS based on hierarchical locking (using the Name2Addr prefix scan) and the ConcurrencyManager + AbstractTask mechanism. This refactoring is not complete, but much of the NSS test suite passes when group commit is enabled.

See #566 (NSS group commit)
See #966 (Failed to get namespace list under concurrent update)

Patched files:
- LocalTripleStore - getIndexManager() now returns IJournal (was Journal).
- QueryServlet - innocuous changes and a FIXME comment block for SPARQL UPDATE to support group commit.
- RestApiTask - new.
- RestApiTaskForIndexManager - new.
- RestApiTaskForJournal - new.
- UpdateServlet - adds fixes to the connection try/finally and launderThrowable patterns.
- AbstractTestNanoSparqlClient - conditional tripleStore.destroy() with a FIXME for group commit.
- AbstractTask - includes comments about how to create a hierarchical locking system using N2A scans.

Unpatched files:
- BigdataRDFServlet - no interesting changes.
- BigdataServlet - pulled in submitApiTask(), getKBLocks(), and OLD_EXECUTION_MODEL = true.
- DeleteServlet - reconciled; captures the REST API task pattern and adds fixes to the connection try/finally pattern that had been overlooked.
- InsertServlet - reconciled; captures the REST API task pattern and fixes to the connection try/finally and launderThrowable patterns.
- MultiTenancyServlet - change is incorrect (deals with ITx.READ_COMMITTED). This class still needs to be modified to use the new pattern.

Other files:
- BigdataStatics - added a global boolean that allows the NSS group commit feature to be enabled from a system property (com.bigdata.nssGroupCommit).
- BigdataRDFContext - modified call() to use the try/finally pattern for SPARQL QUERY and UPDATE.
- BlueprintsServlet - added the try/finally/launder pattern.
- WorkbenchServlet - modified to no longer access the AbstractTripleStore and to use a ValueFactoryImpl instead.
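The connection try/finally discipline named above recurs throughout the patched servlets in the diffs that follow (DeleteServlet, InsertServlet, BlueprintsServlet, BigdataRDFContext): the commit happens inside the try, a success flag records whether it was reached, and the finally block rolls back iff the flag is unset before unconditionally closing the connection. A minimal, self-contained sketch of that discipline; the Conn and ConnFactory interfaces are illustrative stand-ins for BigdataSailRepositoryConnection and the getUnisolatedConnection() factory seen in the patches.
{{{
public class ConnectionTryFinallySketch {

    /** Illustrative stand-in for BigdataSailRepositoryConnection. */
    interface Conn {
        void commit() throws Exception;
        void rollback() throws Exception;
        void close() throws Exception;
    }

    /** Illustrative stand-in for getUnisolatedConnection(). */
    interface ConnFactory {
        Conn getUnisolatedConnection() throws Exception;
    }

    static void mutate(final ConnFactory factory) throws Exception {
        Conn conn = null;
        boolean success = false;
        try {
            conn = factory.getUnisolatedConnection(); // may itself fail
            // ... apply the mutation using [conn] ...
            conn.commit(); // reached only if the mutation did not throw
            success = true;
        } finally {
            if (conn != null) {
                if (!success)
                    conn.rollback(); // a NOP if the commit already went through
                conn.close(); // always release the connection
            }
        }
    }
}
}}}
The point of the flag (rather than a catch block) is that the rollback decision and the close are taken on every exit path, including errors thrown while writing the response after a successful commit.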
Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractTestNanoSparqlClient.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForIndexManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForJournal.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -27,12 +27,14 @@ package com.bigdata; +import com.bigdata.journal.IIndexManager; +import com.bigdata.relation.AbstractRelation; + /** * A class for those few statics that it makes sense to reference from other * places. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class BigdataStatics { @@ -109,4 +111,21 @@ } + /** + * FIXME GROUP COMMIT : Disable/Enable group commit on the Journal from the + * NSS API. Some global flag should control this and also disable the + * journal's semaphore and should disable the wrapping of BTree as an + * UnisolatedReadWriteIndex ( + * {@link AbstractRelation#getIndex(IIndexManager, String, long)}, and + * should disable the calls to commit() or abort() from the LocalTripleStore + * to the Journal. 
+ * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA + * doLocalAbort() should interrupt NSS requests and AbstractTasks </a> + * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > + * Concurrent unisolated operations against multiple KBs </a> + */ + public static final boolean NSS_GROUP_COMMIT = Boolean + .getBoolean("com.bigdata.nssGroupCommit"); + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -1249,7 +1249,7 @@ * Flag is cleared if the task is aborted. This is used to refuse * access to resources for tasks that ignore interrupts. */ - boolean aborted = false; + volatile boolean aborted = false; /** * The {@link AbstractTask} increments various counters of interest to the @@ -1557,7 +1557,7 @@ /** * Return <code>true</code> iff the task declared this as a resource. * - * @param name + * @param theRequestedResource * The name of a resource. * * @return <code>true</code> iff <i>name</i> is a declared resource. @@ -1565,17 +1565,58 @@ * @throws IllegalArgumentException * if <i>name</i> is <code>null</code>. */ - public boolean isResource(String name) { - - if (name == null) + public boolean isResource(final String theRequestedResource) { + + if (theRequestedResource == null) throw new IllegalArgumentException(); - - for(String s : resource) { - - if(s.equals(name)) return true; - + + for (String theDeclaredResource : resource) { + + if (theDeclaredResource.equals(theRequestedResource)) { + /* + * Exact match. This resource was declared. + */ + return true; + } + + /** + * FIXME GROUP_COMMIT: Supporting this requires us to support + * efficient scans of the indices in Name2Addr having the prefix + * values declared by [resources] since getIndex(name) will fail if + * the Name2Addr entry has not been buffered within the [n2a] cache. + * + * @see <a + * href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > + * HA doLocalAbort() should interrupt NSS requests and + * AbstractTasks </a> + * @see <a + * href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" + * > Concurrent unisolated operations against multiple KBs </a> + */ +// if (theRequestedResource.startsWith(theDeclaredResource)) { +// +// // Possible prefix match. +// +// if (theRequestedResource.charAt(theDeclaredResource.length()) == '.') { +// +// /* +// * Prefix match. +// * +// * E.g., name:="kb.spo.osp" and the task declared the +// * resource "kb". In this case, "kb" is a PREFIX of the +// * declared resource and the next character is the separator +// * character for the resource names (this last point is +// * important to avoid unintended contention between +// * namespaces such as "kb" and "kb1"). 
+// */ +// return true; +// +// } +// +// } + } - + return false; } @@ -2085,46 +2126,53 @@ } + @Override public IResourceManager getResourceManager() { return delegate.getResourceManager(); } + @Override public IJournal getJournal() { return delegate.getJournal(); } + @Override public String[] getResource() { return delegate.getResource(); } + @Override public String getOnlyResource() { return delegate.getOnlyResource(); } + @Override public IIndex getIndex(String name) { return delegate.getIndex(name); } + @Override public TaskCounters getTaskCounters() { return delegate.getTaskCounters(); } + @Override public String toString() { - return getClass().getName()+"("+delegate.toString()+")"; - + return getClass().getName() + "(" + delegate.toString() + ")"; + } } @@ -2577,8 +2625,13 @@ } // read committed view IFF it exists otherwise [null] - return new GlobalRowStoreHelper(this).get(ITx.READ_COMMITTED); + // TODO Review. Make sure we have tx protection to avoid recycling of the view. + final long lastCommitTime = getLastCommitTime(); + return new GlobalRowStoreHelper(this).get(lastCommitTime); + + //return new GlobalRowStoreHelper(this).get(ITx.READ_COMMITTED); + } @Override @@ -2696,12 +2749,32 @@ * Disallowed methods (commit protocol and shutdown protocol). */ + /** + * {@inheritDoc} + * <p> + * Marks the task as aborted. The task will not commit. However, the + * task will continue to execute until control returns from its + * {@link AbstractTask#doTask()} method. + */ @Override public void abort() { - throw new UnsupportedOperationException(); + aborted = true; } + /** + * {@inheritDoc} + * <p> + * Overridden as NOP. Tasks do not directly invoke commit() on the + * Journal. + */ @Override + public long commit() { + if (aborted) + throw new IllegalStateException("aborted"); + return 0; + } + + @Override public void close() { throw new UnsupportedOperationException(); } @@ -2717,11 +2790,6 @@ } @Override - public long commit() { - throw new UnsupportedOperationException(); - } - - @Override public void setCommitter(int index, ICommitter committer) { throw new UnsupportedOperationException(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -33,6 +33,7 @@ import com.bigdata.btree.BTree; import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.IJournal; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; import com.bigdata.relation.locator.DefaultResourceLocator; @@ -55,13 +56,13 @@ final static private Logger log = Logger.getLogger(LocalTripleStore.class); - private final Journal store; + private final IJournal store; /** * The backing embedded database. 
*/ @Override - public Journal getIndexManager() { + public IJournal getIndexManager() { return store; @@ -160,7 +161,7 @@ super(indexManager, namespace, timestamp, properties); - store = (Journal) indexManager; + store = (IJournal) indexManager; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -1135,63 +1135,46 @@ abstract protected void doQuery(BigdataSailRepositoryConnection cxn, OutputStream os) throws Exception; + @Override final public Void call() throws Exception { BigdataSailRepositoryConnection cxn = null; + boolean success = false; try { + // Note: Will be UPDATE connection if UPDATE request!!! cxn = getQueryConnection(namespace, timestamp); if(log.isTraceEnabled()) log.trace("Query running..."); beginNanos = System.nanoTime(); -// try { - if (explain && !update) { - /* - * The data goes to a bit bucket and we send an - * "explanation" of the query evaluation back to the caller. - * - * Note: The trick is how to get hold of the IRunningQuery - * object. It is created deep within the Sail when we - * finally submit a query plan to the query engine. We have - * the queryId (on queryId2), so we can look up the - * IRunningQuery in [m_queries] while it is running, but - * once it is terminated the IRunningQuery will have been - * cleared from the internal map maintained by the - * QueryEngine, at which point we can not longer find it. - * - * Note: We can't do this for UPDATE since it would have - * a side-effect anyway. The way to "EXPLAIN" an UPDATE - * is to break it down into the component QUERY bits and - * execute those. - */ - doQuery(cxn, new NullOutputStream()); - } else { - doQuery(cxn, os); - os.flush(); - os.close(); - } - if(log.isTraceEnabled()) - log.trace("Query done."); -// } catch(Throwable t) { -// /* -// * Log the query and the exception together. -// */ -// log.error(t.getLocalizedMessage() + ":\n" + queryStr, t); -// } - return null; - } catch (Throwable t) { - log.error("Will abort: " + t, t); - if (cxn != null && !cxn.isReadOnly()) { + if (explain && !update) { /* - * Force rollback of the connection. + * The data goes to a bit bucket and we send an + * "explanation" of the query evaluation back to the caller. * - * Note: It is possible that the commit has already been - * processed, in which case this rollback() will be a NOP. - * This can happen when there is an IO error when - * communicating with the client, but the database has - * already gone through a commit. + * Note: The trick is how to get hold of the IRunningQuery + * object. It is created deep within the Sail when we + * finally submit a query plan to the query engine. We have + * the queryId (on queryId2), so we can look up the + * IRunningQuery in [m_queries] while it is running, but + * once it is terminated the IRunningQuery will have been + * cleared from the internal map maintained by the + * QueryEngine, at which point we can not longer find it. + * + * Note: We can't do this for UPDATE since it would have a + * side-effect anyway. The way to "EXPLAIN" an UPDATE is to + * break it down into the component QUERY bits and execute + * those. 
*/ - cxn.rollback(); + doQuery(cxn, new NullOutputStream()); + success = true; + } else { + doQuery(cxn, os); + success = true; + os.flush(); + os.close(); } - throw new Exception(t); + if (log.isTraceEnabled()) + log.trace("Query done."); + return null; } finally { endNanos = System.nanoTime(); m_queries.remove(queryId); @@ -1204,11 +1187,26 @@ // } // } if (cxn != null) { + if (!success && !cxn.isReadOnly()) { + /* + * Force rollback of the connection. + * + * Note: It is possible that the commit has already been + * processed, in which case this rollback() will be a + * NOP. This can happen when there is an IO error when + * communicating with the client, but the database has + * already gone through a commit. + */ + try { + // Force rollback of the connection. + cxn.rollback(); + } catch (Throwable t) { + log.error(t, t); + } + } try { // Force close of the connection. cxn.close(); - if(log.isTraceEnabled()) - log.trace("Connection closed."); } catch (Throwable t) { log.error(t, t); } @@ -1432,6 +1430,7 @@ * <p> * This executes the SPARQL UPDATE and formats the HTTP response. */ + @Override protected void doQuery(final BigdataSailRepositoryConnection cxn, final OutputStream os) throws Exception { @@ -1439,24 +1438,31 @@ * Setup a change listener. It will notice the #of mutations. */ final CAT mutationCount = new CAT(); + cxn.addChangeLog(new IChangeLog(){ + @Override public void changeEvent(final IChangeRecord record) { mutationCount.increment(); } + @Override public void transactionBegin() { } + @Override public void transactionPrepare() { } + @Override public void transactionCommited(long commitTime) { } + @Override public void transactionAborted() { - }}); - + } + }); + // Prepare the UPDATE request. final BigdataSailUpdate update = setupUpdate(cxn); @@ -2106,10 +2112,11 @@ } /** - * Return a connection transaction. When the timestamp is associated with a - * historical commit point, this will be a read-only connection. When it is - * associated with the {@link ITx#UNISOLATED} view or a read-write - * transaction, this will be a mutable connection. + * Return a connection transaction, which may be read-only or support + * update. When the timestamp is associated with a historical commit point, + * this will be a read-only connection. When it is associated with the + * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a + * mutable connection. * * @param namespace * The namespace. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -29,8 +29,11 @@ import java.io.InputStreamReader; import java.io.OutputStream; import java.io.Writer; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; +import java.util.Set; +import java.util.concurrent.Future; import javax.servlet.ServletContext; import javax.servlet.http.HttpServlet; @@ -39,12 +42,18 @@ import org.apache.log4j.Logger; +import com.bigdata.BigdataStatics; import com.bigdata.ha.HAStatusEnum; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IConcurrencyManager; import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.Journal; +import com.bigdata.journal.TimestampUtility; import com.bigdata.quorum.AbstractQuorum; import com.bigdata.rdf.sail.webapp.client.IMimeTypes; import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.service.IBigdataFederation; /** * Useful glue for implementing service actions, but does not directly implement @@ -190,6 +199,149 @@ } + /** + * Submit a task and return a {@link Future} for that task. The task will be + * run on the appropriate executor service depending on the nature of the + * backing database and the view required by the task. + * + * @param task + * The task. + * + * @return The {@link Future} for that task. + * + * @throws DatasetNotFoundException + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA + * doLocalAbort() should interrupt NSS requests and AbstractTasks </a> + * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > + * Concurrent unisolated operations against multiple KBs </a> + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + protected <T> Future<T> submitApiTask(final RestApiTask<T> task) + throws DatasetNotFoundException { + + final String namespace = task.getNamespace(); + + final long timestamp = task.getTimestamp(); + + final IIndexManager indexManager = getIndexManager(); + + if (!BigdataStatics.NSS_GROUP_COMMIT || indexManager instanceof IBigdataFederation + || TimestampUtility.isReadOnly(timestamp) + ) { + + /* + * Run on a normal executor service. + * + * Note: For scale-out, the operation will be applied using + * client-side global views of the indices. + * + * Note: This can be used for operations on read-only views (even on + * a Journal). This is helpful since we can avoid some overhead + * associated the AbstractTask lock declarations. + */ + + return indexManager.getExecutorService().submit( + new RestApiTaskForIndexManager(indexManager, task)); + + } else { + + /** + * Run on the ConcurrencyManager of the Journal. + * + * Mutation operations will be scheduled based on the pre-declared + * locks and will have exclusive access to the resources guarded by + * those locks when they run. + * + * FIXME GROUP COMMIT: The {@link AbstractTask} was written to + * require the exact set of resource lock declarations. However, for + * the REST API, we want to operate on all indices associated with a + * KB instance. 
This requires either: + * <p> + * (a) pre-resolving the names of those indices and passing them all + * into the AbstractTask; or + * <P> + * (b) allowing the caller to only declare the namespace and then to + * be granted access to all indices whose names are in that + * namespace. + * + * (b) is now possible with the fix to the Name2Addr prefix scan. + */ + + // Obtain the necessary locks for R/w access to KB indices. + final String[] locks = getLocksForKB((Journal) indexManager, + namespace); + + final IConcurrencyManager cc = ((Journal) indexManager) + .getConcurrencyManager(); + + // Submit task to ConcurrencyManager. Will acquire locks and run. + return cc.submit(new RestApiTaskForJournal(cc, task.getTimestamp(), + locks, task)); + + } + + } + + /** + * Acquire the locks for the named indices associated with the specified KB. + * + * @param indexManager + * The {@link Journal}. + * @param namespace + * The namespace of the KB instance. + * + * @return The locks for the named indices associated with that KB instance. + * + * @throws DatasetNotFoundException + * + * FIXME GROUP COMMIT : [This should be replaced by the use of + * the namespace and hierarchical locking support in + * AbstractTask.] This could fail to discover a recently create + * KB between the time when the KB is created and when the group + * commit for that create becomes visible. This data race exists + * because we are using [lastCommitTime] rather than the + * UNISOLATED view of the GRS. + * <p> + * Note: This data race MIGHT be closed by the default locator + * cache. If it records the new KB properties when they are + * created, then they should be visible. If they are not + * visible, then we have a data race. (But if it records them + * before the group commit for the KB create, then the actual KB + * indices will not be durable until the that group commit...). + * <p> + * Note: The problem can obviously be resolved by using the + * UNISOLATED index to obtain the KB properties, but that would + * serialize ALL updates. What we need is a suitable caching + * mechanism that (a) ensures that newly create KB instances are + * visible; and (b) has high concurrency for read-only requests + * for the properties for those KB instances. + */ + private static String[] getLocksForKB(final Journal indexManager, + final String namespace) throws DatasetNotFoundException { + + final long timestamp = indexManager.getLastCommitTime(); + + final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager + .getResourceLocator().locate(namespace, timestamp); + + if (tripleStore == null) + throw new DatasetNotFoundException("Not found: namespace=" + + namespace + ", timestamp=" + + TimestampUtility.toString(timestamp)); + + final Set<String> lockSet = new HashSet<String>(); + + lockSet.addAll(tripleStore.getSPORelation().getIndexNames()); + + lockSet.addAll(tripleStore.getLexiconRelation().getIndexNames()); + + final String[] locks = lockSet.toArray(new String[lockSet.size()]); + + return locks; + + } + // /** // * Return the {@link Quorum} -or- <code>null</code> if the // * {@link IIndexManager} is not participating in an HA {@link Quorum}. 
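For orientation before the servlet diffs that follow: the calling convention for the new task pattern is a one-liner in the servlet handler. Taken verbatim from the DeleteServlet patch below:
{{{
try {

    submitApiTask(
        new DeleteWithBodyTask(req, resp, namespace,
                ITx.UNISOLATED, baseURI, defaultContext,
                rdfParserFactory)).get();

} catch (Throwable t) {

    throw BigdataRDFServlet.launderThrowable(t, resp, "");

}
}}}
The Future.get() blocks the servlet thread until the task has either joined a commit point or failed, and launderThrowable() then renders the failure onto the HTTP response.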
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -105,6 +105,7 @@ try { BigdataSailRepositoryConnection conn = null; + boolean success = false; try { conn = getBigdataRDFContext() @@ -116,6 +117,8 @@ graph.commit(); + success = true; + final long nmodified = graph.getMutationCountLastCommit(); final long elapsed = System.currentTimeMillis() - begin; @@ -124,17 +127,16 @@ return; - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -48,6 +48,7 @@ import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; +import com.bigdata.rdf.sail.webapp.RestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; import com.bigdata.rdf.sail.webapp.client.MiniMime; @@ -105,6 +106,14 @@ * process deleting the statements. This is done while it is holding the * unisolated connection which prevents concurrent modifications. Therefore * the entire SELECT + DELETE operation is ACID. + * + * FIXME GROUP COMMIT : Again, a pattern where a query is run to produce + * solutions that are then deleted from the database. Can we rewrite this to + * be a SPARQL UPDATE? (DELETE WHERE). Note that the ACID semantics of this + * operation would be broken by group commit since other tasks could have + * updated the KB since the lastCommitTime and been checkpointed and hence + * be visible to an unisolated operation without there being an intervening + * commit point. */ private void doDeleteWithQuery(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { @@ -158,6 +167,7 @@ final AtomicLong nmodified = new AtomicLong(0L); BigdataSailRepositoryConnection conn = null; + boolean success = false; try { conn = getBigdataRDFContext().getUnisolatedConnection( @@ -196,22 +206,23 @@ // Commit the mutation. 
conn.commit(); + success = true; + final long elapsed = System.currentTimeMillis() - begin; reportModifiedCount(resp, nmodified.get(), elapsed); - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + } + } } catch (Throwable t) { @@ -258,8 +269,6 @@ private void doDeleteWithBody(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - final long begin = System.currentTimeMillis(); - final String baseURI = req.getRequestURL().toString(); final String namespace = getNamespace(req); @@ -325,16 +334,67 @@ } } - final RDFParser rdfParser = rdfParserFactory.getParser(); + submitApiTask( + new DeleteWithBodyTask(req, resp, namespace, + ITx.UNISOLATED, baseURI, defaultContext, + rdfParserFactory)).get(); + + } catch (Throwable t) { - final AtomicLong nmodified = new AtomicLong(0L); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); + + } + } + + private static class DeleteWithBodyTask extends RestApiMutationTask<Void> { + + private final String baseURI; + private final Resource[] defaultContext; + private final RDFParserFactory rdfParserFactory; + + /** + * + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. + * @param baseURI + * The base URI for the operation. + * @param defaultContext + * The context(s) for triples without an explicit named graph + * when the KB instance is operating in a quads mode. + * @param rdfParserFactory + * The factory for the {@link RDFParser}. This should have + * been chosen based on the caller's knowledge of the + * appropriate content type. + */ + public DeleteWithBodyTask(final HttpServletRequest req, + final HttpServletResponse resp, + final String namespace, final long timestamp, + final String baseURI, final Resource[] defaultContext, + final RDFParserFactory rdfParserFactory) { + super(req, resp, namespace, timestamp); + this.baseURI = baseURI; + this.defaultContext = defaultContext; + this.rdfParserFactory = rdfParserFactory; + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + BigdataSailRepositoryConnection conn = null; + boolean success = false; try { - conn = getBigdataRDFContext() - .getUnisolatedConnection(namespace); + conn = getUnisolatedConnection(); + final RDFParser rdfParser = rdfParserFactory.getParser(); + + final AtomicLong nmodified = new AtomicLong(0L); + rdfParser.setValueFactory(conn.getTripleStore() .getValueFactory()); @@ -356,32 +416,31 @@ // Commit the mutation. conn.commit(); + success = true; + final long elapsed = System.currentTimeMillis() - begin; - reportModifiedCount(resp, nmodified.get(), elapsed); + reportModifiedCount(nmodified.get(), elapsed); - } catch(Throwable t) { + return null; - if (conn != null) - conn.rollback(); - - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + } + } - } catch (Throwable t) { - - throw BigdataRDFServlet.launderThrowable(t, resp, ""); - } + + } - } - /** * Helper class removes statements from the sail as they are visited by a parser. 
*/ @@ -429,10 +488,10 @@ } if (c.length >= 2) { - // removed from more than one context - nmodified.addAndGet(c.length); + // removed from more than one context + nmodified.addAndGet(c.length); } else { - nmodified.incrementAndGet(); + nmodified.incrementAndGet(); } } @@ -445,8 +504,6 @@ private void doDeleteWithAccessPath(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - final long begin = System.currentTimeMillis(); - final String namespace = getNamespace(req); final Resource s; @@ -471,68 +528,109 @@ try { - BigdataSailRepositoryConnection conn = null; - try { + submitApiTask( + new DeleteWithAccessPathTask(req, resp, namespace, + ITx.UNISOLATED, s, p, o, c)).get(); - conn = getBigdataRDFContext().getUnisolatedConnection( - namespace); + } catch (Throwable t) { - // Remove all statements matching that access path. -// final long nmodified = conn.getSailConnection() -// .getBigdataSail().getDatabase() -// .removeStatements(s, p, o, c); - - // Remove all statements matching that access path. - long nmodified = 0; - if (c != null && c.length > 0) { - for (Resource r : c) { - nmodified += conn.getSailConnection() - .getBigdataSail().getDatabase() - .removeStatements(s, p, o, r); - } - } else { - nmodified += conn.getSailConnection() - .getBigdataSail().getDatabase() - .removeStatements(s, p, o, null); + throw BigdataRDFServlet.launderThrowable(t, resp, "s=" + s + ",p=" + + p + ",o=" + o + ",c=" + c); + + } + + } + +// static private transient final Resource[] nullArray = new Resource[]{}; + + private static class DeleteWithAccessPathTask extends RestApiMutationTask<Void> { + + private Resource s; + private URI p; + private final Value o; + private final Resource[] c; + + /** + * + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. + * @param baseURI + * The base URI for the operation. + * @param defaultContext + * The context(s) for triples without an explicit named graph + * when the KB instance is operating in a quads mode. + * @param rdfParserFactory + * The factory for the {@link RDFParser}. This should have + * been chosen based on the caller's knowledge of the + * appropriate content type. + */ + public DeleteWithAccessPathTask(final HttpServletRequest req, + final HttpServletResponse resp, // + final String namespace, final long timestamp,// + final Resource s, final URI p, final Value o, final Resource[] c) { + super(req, resp, namespace, timestamp); + this.s = s; + this.p = p; + this.o = o; + this.c = c; + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + + BigdataSailRepositoryConnection conn = null; + boolean success = false; + try { + + conn = getUnisolatedConnection(); + + // Remove all statements matching that access path. + // final long nmodified = conn.getSailConnection() + // .getBigdataSail().getDatabase() + // .removeStatements(s, p, o, c); + + // Remove all statements matching that access path. + long nmodified = 0; + if (c != null && c.length > 0) { + for (Resource r : c) { + nmodified += conn.getSailConnection().getBigdataSail() + .getDatabase().removeStatements(s, p, o, r); } - - // Commit the mutation. - conn.commit(); + } else { + nmodified += conn.getSailConnection().getBigdataSail() + .getDatabase().removeStatements(s, p, o, null); + } - final long elapsed = System.currentTimeMillis() - begin; - - reportModifiedCount(resp, nmodified, elapsed); + // Commit the mutation. 
+ conn.commit(); - } catch(Throwable t) { - - if(conn != null) + success = true; + + final long elapsed = System.currentTimeMillis() - begin; + + reportModifiedCount(nmodified, elapsed); + + return null; + + } finally { + + if (conn != null) { + + if (!success) conn.rollback(); - - throw new RuntimeException(t); - - } finally { - if (conn != null) - conn.close(); + conn.close(); } - } catch (Throwable t) { + } - throw BigdataRDFServlet.launderThrowable(t, resp, "s=" + s + ",p=" - + p + ",o=" + o + ",c=" + c); - } - -// } catch (Exception ex) { -// -// // Will be rendered as an INTERNAL_ERROR. -// throw new RuntimeException(ex); -// -// } - + } -// static private transient final Resource[] nullArray = new Resource[]{}; - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -45,8 +45,11 @@ import org.openrdf.rio.helpers.RDFHandlerBase; import org.openrdf.sail.SailException; +import com.bigdata.journal.ITx; +import com.bigdata.rdf.rio.IRDFParserOptions; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.webapp.RestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.MiniMime; /** @@ -132,8 +135,6 @@ */ private void doPostWithBody(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - - final long begin = System.currentTimeMillis(); final String baseURI = req.getRequestURL().toString(); @@ -221,14 +222,71 @@ try { + submitApiTask( + new InsertWithBodyTask(req, resp, namespace, ITx.UNISOLATED, + baseURI, defaultContext, rdfParserFactory)).get(); + + } catch (Throwable t) { + + throw BigdataRDFServlet.launderThrowable(t, resp, ""); + + } + + } + + /** + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * TODO The {@link IRDFParserOptions} defaults should be coming from + * the KB instance, right? What does the REST API say about this? + */ + private static class InsertWithBodyTask extends RestApiMutationTask<Void> { + + private final String baseURI; + private final Resource[] defaultContext; + private final RDFParserFactory rdfParserFactory; + + /** + * + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. + * @param baseURI + * The base URI for the operation. + * @param defaultContext + * The context(s) for triples without an explicit named graph + * when the KB instance is operating in a quads mode. + * @param rdfParserFactory + * The factory for the {@link RDFParser}. This should have + * been chosen based on the caller's knowledge of the + * appropriate content type. 
+ */ + public InsertWithBodyTask(final HttpServletRequest req, + final HttpServletResponse resp, + final String namespace, final long timestamp, + final String baseURI, final Resource[] defaultContext, + final RDFParserFactory rdfParserFactory) { + super(req, resp, namespace, timestamp); + this.baseURI = baseURI; + this.defaultContext = defaultContext; + this.rdfParserFactory = rdfParserFactory; + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + final AtomicLong nmodified = new AtomicLong(0L); BigdataSailRepositoryConnection conn = null; boolean success = false; try { - conn = getBigdataRDFContext() - .getUnisolatedConnection(namespace); + conn = getUnisolatedConnection(); /* * There is a request body, so let's try and parse it. @@ -258,35 +316,31 @@ conn.commit(); final long elapsed = System.currentTimeMillis() - begin; - - reportModifiedCount(resp, nmodified.get(), elapsed); - + + reportModifiedCount(nmodified.get(), elapsed); + success = true; - - return; + return (Void) null; + } finally { if (conn != null) { if (!success) conn.rollback(); - + conn.close(); } - + } - } catch (Throwable t) { - - throw BigdataRDFServlet.launderThrowable(t, resp, ""); - } - + } - /** + /** * POST with URIs of resources to be inserted (loads the referenced * resources). * @@ -371,25 +425,69 @@ try { - final AtomicLong nmodified = new AtomicLong(0L); + submitApiTask( + new InsertWithURLsTask(req, resp, namespace, + ITx.UNISOLATED, defaultContext, urls)).get(); + } catch (Throwable t) { + + throw launderThrowable(t, resp, "urls=" + urls); + + } + + } + + private static class InsertWithURLsTask extends RestApiMutationTask<Void> { + + private final Vector<URL> urls; + private final Resource[] defaultContext; + + /** + * + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. + * @param baseURI + * The base URI for the operation. + * @param defaultContext + * The context(s) for triples without an explicit named graph + * when the KB instance is operating in a quads mode. + * @param urls + * The {@link URL}s whose contents will be parsed and loaded + * into the target KB. + */ + public InsertWithURLsTask(final HttpServletRequest req, + final HttpServletResponse resp, final String namespace, + final long timestamp, final Resource[] defaultContext, + final Vector<URL> urls) { + super(req, resp, namespace, timestamp); + this.urls = urls; + this.defaultContext = defaultContext; + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + BigdataSailRepositoryConnection conn = null; + boolean success = false; try { - conn = getBigdataRDFContext().getUnisolatedConnection( - namespace); + conn = getUnisolatedConnection(); + final AtomicLong nmodified = new AtomicLong(0L); + for (URL url : urls) { - // Use the default context if one was given and otherwise - // the URI from which the data are being read. -// final Resource defactoContext = defaultContext == null ? new URIImpl( -// url.toExternalForm()) : defaultContext; - final Resource[] defactoContext = - defaultContext.length == 0 - ? new Resource[] { new URIImpl(url.toExternalForm()) } - : defaultContext; - + // Use the default context if one was given and otherwise + // the URI from which the data are being read. +// final Resource defactoContext = defaultContext == null ? 
new URIImpl( +// url.toExternalForm()) : defaultContext; + final Resource[] defactoContext = defaultContext.length == 0 ? new Resource[] { new URIImpl( + url.toExternalForm()) } : defaultContext; + URLConnection hconn = null; try { @@ -411,7 +509,7 @@ */ final String contentType = hconn.getContentType(); - + RDFFormat format = RDFFormat.forMIMEType(new MiniMime( contentType).getMimeType()); @@ -420,10 +518,24 @@ /* * Try to get the RDFFormat from the URL's file * path. + * + * FIXME GROUP COMMIT: There is a potential issue + * where the existing code commits the response and + * returns, e.g., from the InsertServlet. Any task + * that does not fail (thrown exception) will + * commit. This means that mutations operations that + * fail will still attempt to join a commit point. + * This is inappropriate and could cause resource + * leaks (e.g., if the operation failed after + * writing on the Journal). We really should throw + * out a typed exception, but in launderThrowable() + * ignore that typed exception if the response has + * already been committed. That way the task will + * not join a commit point. */ - + format = RDFFormat.forFileName(url.getFile()); - + } if (format == null) { @@ -433,7 +545,7 @@ "Content-Type not recognized as RDF: " + contentType); - return; + return null; } @@ -441,12 +553,12 @@ .getInstance().get(format); if (rdfParserFactory == null) { - buildResponse(resp, HTTP_INTERNALERROR, + buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, "Parser not found: Content-Type=" + contentType); - - return; + + return null; } final RDFParser rdfParser = rdfParserFactory @@ -462,66 +574,66 @@ rdfParser .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); - rdfParser.setRDFHandler(new AddStatementHandler(conn - .getSailConnection(), nmodified, defactoContext)); + rdfParser + .setRDFHandler(new AddStatementHandler(conn + .getSailConnection(), nmodified, + defactoContext)); /* * Run the parser, which will cause statements to be * inserted. */ - rdfParser.parse(hconn.getInputStream(), url - .toExternalForm()/* baseURL */); + rdfParser.parse(hconn.getInputStream(), + url.toExternalForm()/* baseURL */); } finally { if (hconn instanceof HttpURLConnection) { /* * Disconnect, but only after we have loaded all the - * URLs. Disconnect is optional for java.net. It is a - * hint that you will not be accessing more resources on - * the connected host. By disconnecting only after all - * resources have been loaded we are basically assuming - * that people are more likely to load from a single - * host. + * URLs. Disconnect is optional for java.net. It is + * a hint that you will not be accessing more + * resources on the connected host. By disconnecting + * only after all resources have been loaded we are + * basically assuming that people are more likely to + * load from a single host. */ ((HttpURLConnection) hconn).disconnect(); } } - - } // next URI. + } // next URI. + // Commit the mutation. conn.commit(); + success = true; + final long elapsed = System.currentTimeMillis() - begin; - reportModifiedCount(resp, nmodified.get(), elapsed); - - } catch(Throwable t) { + reportModifiedCount(nmodified.get(), elapsed); - if(conn != null) - conn.rollback(); + return null; - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + } + } - } catch (Exception ex) { - - // Will be rendered as an INTERNAL_ERROR. 
- throw new RuntimeException(ex); - } - + } - + /** * Helper class adds statements to the sail as they are visited by a parser. */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-11 15:52:14 UTC (rev 8471) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-11 16:35:53 UTC (rev 8472) @@ -343,7 +343,7 @@ } /* - * Setup task to execute the query. The task is executed on a thread + * Setup task to execute the request. The task is executed on a thread * pool. This bounds the possible concurrency of query execution (as * opposed to queries accepted for eventual execution). * @@ -353,13 +353,8 @@ */ try { - final OutputStream os = resp.getOutputStream(); - final BigdataRDFContext context = getBigdataRDFContext(); - // final boolean explain = - // req.getParameter(BigdataRDFContext.EXPLAIN) != null; - final UpdateTask updateTask; try { @@ -370,7 +365,7 @@ updateTask = (UpdateTask) context.getQueryTask(namespace, timestamp, updateStr, null/* acceptOverride */, req, - resp, os, true/* update */); + resp, resp.getOutputStream(), t... [truncated message content] |
From: <tho...@us...> - 2014-06-16 14:18:07
|
Revision: 8483 http://sourceforge.net/p/bigdata/code/8483
Author: thompsonbry
Date: 2014-06-16 14:17:57 +0000 (Mon, 16 Jun 2014)
Log Message:
-----------
It looks like the ArbitraryLengthPathOp could be more defensive to avoid an NPE at ArbitraryLengthPathOp.java line 778:
{{{
if (parentSolutionIn.isBound(gearing.outVar)) {
    // do this later now
    if (!bs.get(gearing.tVarOut).equals(parentSolutionIn.get(gearing.outVar))) {
}}}
Since we already know that there is a binding for gearing.outVar, this could be written as:
{{{
if (parentSolutionIn.isBound(gearing.outVar)) {
    // do this now: note already known to be bound per test above.
    final IConstant<?> poutVar = parentSolutionIn.get(gearing.outVar);
    if (!poutVar.equals(bs.get(gearing.tVarOut))) {
}}}
This was noticed when an NPE was observed because {{{bs.get(gearing.tVarOut)}}} evaluated to null. That NPE is not the root cause of the problem; I am still looking for that.

I have enabled the property-path test suite for the BigdataEmbeddedFederationSparqlTest. This test suite is not automatically run in CI due to resource leaks (which are documented on another ticket). However, you can now trivially recreate the problem by uncommenting the following line in BigdataSparqlTest and running the BigdataEmbeddedFederationSparqlTest.
{{{
static final Collection<String> testURIs = Arrays.asList(new String[] {
    // property paths
//  "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/syntax-sparql1/manifest#sparql11-collection-01",
}}}
When run locally, the test fails as follows. The failure is the same as the one documented above: the code is attempting to bind a null value onto a variable. The root cause is likely a failure to flow the solutions back to the query controller, such that the results from the sub-query appear as unbound on the query controller. It could also be a failure to run the sub-query from the query controller. I have not diagnosed this further. 
{{{ org.openrdf.query.QueryEvaluationException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at com.bigdata.rdf.sail.Bigdata2Sesame2BindingSetIterator.hasNext(Bigdata2Sesame2BindingSetIterator.java:188) at org.openrdf.query.impl.TupleQueryResultImpl.hasNext(TupleQueryResultImpl.java:90) at info.aduna.iteration.Iterations.addAll(Iterations.java:71) at org.openrdf.query.impl.MutableTupleQueryResult.<init>(MutableTupleQueryResult.java:86) at org.openrdf.query.impl.MutableTupleQueryResult.<init>(MutableTupleQueryResult.java:92) at org.openrdf.query.parser.sparql.SPARQLQueryTest.compareTupleQueryResults(SPARQLQueryTest.java:244) at org.openrdf.query.parser.sparql.SPARQLASTQueryTest.runTest(SPARQLASTQueryTest.java:196) at junit.framework.TestCase.runBare(TestCase.java:127) at junit.framework.TestResult$1.protect(TestResult.java:106) at junit.framework.TestResult.runProtected(TestResult.java:124) at junit.framework.TestResult.run(TestResult.java:109) at junit.framework.TestCase.run(TestCase.java:118) at junit.framework.TestSuite.runTest(TestSuite.java:208) at junit.framework.TestSuite.run(TestSuite.java:203) at org.eclipse.jdt.internal.junit.runner.junit3.JUnit3TestReference.run(JUnit3TestReference.java:130) at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197) Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.checkFuture(BlockingBuffer.java:1523) at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator._hasNext(BlockingBuffer.java:1710) at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.hasNext(BlockingBuffer.java:1563) at com.bigdata.striterator.AbstractChunkedResolverator._hasNext(AbstractChunkedResolverator.java:365) at com.bigdata.striterator.AbstractChunkedResolverator.hasNext(AbstractChunkedResolverator.java:341) at com.bigdata.rdf.sail.Bigdata2Sesame2BindingSetIterator.hasNext(Bigdata2Sesame2BindingSetIterator.java:134) ... 19 more Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at java.util.concurrent.FutureTask.report(FutureTask.java:122) at java.util.concurrent.FutureTask.get(FutureTask.java:188) at com.bigdata.relation.accesspath.BlockingBuffer$BlockingIterator.checkFuture(BlockingBuffer.java:1454) ... 
24 more Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at com.bigdata.rdf.sail.RunningQueryCloseableIterator.checkFuture(RunningQueryCloseableIterator.java:59) at com.bigdata.rdf.sail.RunningQueryCloseableIterator.close(RunningQueryCloseableIterator.java:73) at com.bigdata.striterator.ChunkedWrappedIterator.close(ChunkedWrappedIterator.java:180) at com.bigdata.striterator.AbstractChunkedResolverator$ChunkConsumerTask.call(AbstractChunkedResolverator.java:297) at com.bigdata.striterator.AbstractChunkedResolverator$ChunkConsumerTask.call(AbstractChunkedResolverator.java:1) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:745) Caused by: java.util.concurrent.ExecutionException: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at com.bigdata.util.concurrent.Haltable.get(Haltable.java:273) at com.bigdata.bop.engine.AbstractRunningQuery.get(AbstractRunningQuery.java:1477) at com.bigdata.bop.engine.AbstractRunningQuery.get(AbstractRunningQuery.java:1) at com.bigdata.rdf.sail.RunningQueryCloseableIterator.checkFuture(RunningQueryCloseableIterator.java:46) ... 8 more Caused by: java.lang.Exception: task=ChunkTask{query=eb7362c8-a987-4448-9113-99816a82311d,bopId=14,partitionId=-1,sinkId=17,altSinkId=null}, cause=java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1335) at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTaskWrapper.run(ChunkedRunningQuery.java:894) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at com.bigdata.concurrent.FutureTaskMon.run(FutureTaskMon.java:63) at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkFutureTask.run(ChunkedRunningQuery.java:789) ... 3 more Caused by: java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException at java.util.concurrent.FutureTask.report(FutureTask.java:122) at java.util.concurrent.FutureTask.get(FutureTask.java:188) at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1315) ... 8 more Caused by: java.lang.IllegalArgumentException at com.bigdata.bop.bindingSet.ListBindingSet.set(ListBindingSet.java:430) at com.bigdata.bop.ContextBindingSet.set(ContextBindingSet.java:74) at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.processChunk(ArbitraryLengthPathOp.java:816) at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.call(ArbitraryLengthPathOp.java:270) at com.bigdata.bop.paths.ArbitraryLengthPathOp$ArbitraryLengthPathTask.call(ArbitraryLengthPathOp.java:1) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.call(ChunkedRunningQuery.java:1314) ... 8 more }}} See #942 (Property path failures in scale-out). 
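The defensive rewrite above comes down to invoking equals() on the reference that the enclosing isBound() test has already proven non-null, so a null on the other side yields false instead of an NPE. A minimal, self-contained illustration of that pattern in plain Java (the bigdata binding-set types are not needed to show the point):
{{{
public class NullSafeEquals {

    /**
     * Compare a value known to be non-null against one that may be null.
     * Dispatching equals() on the known-bound value cannot NPE, whereas
     * the reverse call order throws when maybeNull is null.
     */
    static boolean matches(final Object knownBound, final Object maybeNull) {

        return knownBound.equals(maybeNull); // false when maybeNull == null

    }

    public static void main(final String[] args) {

        System.out.println(matches("out", null)); // prints false, no NPE

        System.out.println(matches("out", "out")); // prints true

    }

}
}}}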
Revision Links: -------------- http://sourceforge.net/p/bigdata/code/2 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2014-06-16 11:23:44 UTC (rev 8482) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2014-06-16 14:17:57 UTC (rev 8483) @@ -777,13 +777,14 @@ */ if (parentSolutionIn.isBound(gearing.outVar)) { - // do this later now - - if (!bs.get(gearing.tVarOut).equals(parentSolutionIn.get(gearing.outVar))) { - - if (log.isDebugEnabled()) { - log.debug("transitive output does not match incoming binding for output var, dropping"); - } + // do this now: note already known to be bound per test above. + final IConstant<?> poutVar = parentSolutionIn.get(gearing.outVar); + + if (!poutVar.equals(bs.get(gearing.tVarOut))) { + + if (log.isDebugEnabled()) { + log.debug("transitive output does not match incoming binding for output var, dropping"); + } continue; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2014-06-16 11:23:44 UTC (rev 8482) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2014-06-16 14:17:57 UTC (rev 8483) @@ -65,7 +65,6 @@ * {@link EmbeddedFederation}. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class BigdataEmbeddedFederationSparqlTest extends BigdataSparqlTest { @@ -110,7 +109,7 @@ if(hideDatasetTests) suite1 = filterOutTests(suite1,"dataset"); - suite1 = filterOutTests(suite1, "property-paths"); +// suite1 = filterOutTests(suite1, "property-paths"); /** * BSBM BI use case query 5 @@ -157,6 +156,7 @@ final Factory factory = new Factory() { + @Override public SPARQLQueryTest createSPARQLQueryTest(String testURI, String name, String queryFileURL, String resultFileURL, Dataset dataSet, boolean laxCardinality) { @@ -166,6 +166,7 @@ } + @Override public SPARQLQueryTest createSPARQLQueryTest(String testURI, String name, String queryFileURL, String resultFileURL, Dataset dataSet, boolean laxCardinality, boolean checkOrder) { @@ -173,6 +174,7 @@ return new BigdataEmbeddedFederationSparqlTest(testURI, name, queryFileURL, resultFileURL, dataSet, laxCardinality, checkOrder) { + @Override protected Properties getProperties() { final Properties p = new Properties(super @@ -295,7 +297,8 @@ } - protected void tearDownBackend(IIndexManager backend) { + @Override + protected void tearDownBackend(final IIndexManager backend) { backend.destroy(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-16 11:23:44 UTC (rev 8482) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-16 14:17:57 UTC (rev 8483) @@ -67,7 +67,6 @@ * a {@link Journal} without full read/write transaction support. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class BigdataSparqlTest //extends SPARQLQueryTest // Sesame TupleExpr based evaluation |
From: <tho...@us...> - 2014-06-17 13:28:15
|
Revision: 8494 http://sourceforge.net/p/bigdata/code/8494
Author: thompsonbry
Date: 2014-06-17 13:28:07 +0000 (Tue, 17 Jun 2014)
Log Message:
-----------
Fix for #965 (LBS does not work with HA1).

The root cause was the init() method on HALoadBalancerServlet: init() disabled the LBS unless the NSS was in an HA GT 1 deployment model. I have modified the code to always enable the LBS servlet. This allows it to correctly rewrite itself out of the request when in the HA1 or non-HA modes.

There are some code paths that need to be updated because either (a) they do not make an explicit choice about whether or not to use the LBS; (b) they do not parameterize the ContextPath of the web application; or (c) they are in the wrong package (client code should be in a separate package from server code).

Changes are to:
- HALoadBalancerServlet: init() always succeeds. This fixes the core problem for this ticket.
- TestHA1JournalServer: the test is linked to this ticket.
- RemoteServiceCallImpl: modified to use declared service configuration information to decide whether or not to use the LBS pattern for the remote service. The default is false, which works for all cases. The default may be overridden to true if the end point is known to expose the bigdata LBS pattern.
- IServiceOptions: added the isBigdataLBS() method.
- ServiceOptionsBase: added the isBigdataLBS() (default=false) and setBigdataLBS() methods.
- BigdataSailFactory: added a "FIXME" - this class does not support the HA load balancer pattern and does not parameterize the value of the ContextPath. Also, should this class be part of the "client" package?
- BigdataSailRemoteRepository: added a constructor variant that accepts a boolean useLBS argument (see the sketch after this log message). The version of the constructor without that argument now defaults to useLBS:=true. This changes the default behavior of the client(!).
- RemoteRepository: deprecated the constructor version that does not accept the useLBS parameter. This version of the constructor still defaults to useLBS:=false. It tends to be used from some less common code paths.
- RemoteRepositoryManager: modified to specify useLBS:=true by default.
- TestNSSHealthCheck: code cleanup. 
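A sketch of what the new client-side choice looks like, based on the constructor variants in the BigdataSailRemoteRepository diff below. The endpoint URL is illustrative, and shutDown() is assumed via the standard Sesame Repository lifecycle:
{{{
import com.bigdata.rdf.sail.remote.BigdataSailRemoteRepository;

public class LBSClientSketch {

    public static void main(final String[] args) throws Exception {

        final String sparqlEndpointURL = "http://localhost:8080/bigdata/sparql";

        // New default: requests are routed through the HA load balancer.
        final BigdataSailRemoteRepository viaLBS =
                new BigdataSailRemoteRepository(sparqlEndpointURL);

        // Explicit opt-out, e.g., for an end point without the LBS servlet.
        final BigdataSailRemoteRepository direct =
                new BigdataSailRemoteRepository(sparqlEndpointURL, false/* useLBS */);

        try {

            // ... obtain connections and issue queries/updates ...

        } finally {

            viaLBS.shutDown();
            direct.shutDown();

        }

    }

}
}}}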
Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -123,17 +123,24 @@ } + /** + * A simple transaction test against an HA1 mode server using the LBS. + * + * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in + * LBS mode with HA1 setup </a> + */ public void testSimpleTransactionLBS() throws Exception { - - doStartA(); - - serverA.awaitHAReady(2, TimeUnit.SECONDS); - - awaitCommitCounter(1, new HAGlue[] { serverA }); - - simpleTransactionLBS(); - - awaitCommitCounter(2, new HAGlue[] { serverA }); + + doStartA(); + + serverA.awaitHAReady(2, TimeUnit.SECONDS); + + awaitCommitCounter(1, new HAGlue[] { serverA }); + + simpleTransactionLBS(); + + awaitCommitCounter(2, new HAGlue[] { serverA }); + } public void testMultiTransaction() throws Exception { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/INativeServiceOptions.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -32,7 +32,6 @@ * Additional options for native services. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface INativeServiceOptions extends IServiceOptions { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IServiceOptions.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -37,7 +37,6 @@ * Options and metadata for service end points. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface IServiceOptions { @@ -78,5 +77,13 @@ * query planner has locked in the join evaluation order. */ boolean isRunFirst(); + + /** + * Return <code>true</code> if the remote service is known to be a bigdata + * service that exposes the HA load balancer servlet (default + * <code>false</code>). The default may be overridden iff the end point is + * known to expose the bigdata LBS pattern. + */ + boolean isBigdataLBS(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -29,7 +29,6 @@ import java.util.UUID; -import org.apache.http.HttpResponse; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.impl.client.DefaultRedirectStrategy; import org.openrdf.query.BindingSet; @@ -49,8 +48,6 @@ * adjusting the {@link RemoteServiceOptions} for the service URI. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: RemoteServiceCallImpl.java 6060 2012-03-02 16:07:38Z - * thompsonbry $ */ public class RemoteServiceCallImpl implements RemoteServiceCall { @@ -149,7 +146,9 @@ // Setup a standard strategy for following redirects. httpClient.setRedirectStrategy(new DefaultRedirectStrategy()); - final RemoteRepository repo = new RemoteRepository(uriStr,// + final RemoteRepository repo = new RemoteRepository(// + uriStr,// + params.getServiceOptions().isBigdataLBS(),// useLBS httpClient,// params.getTripleStore().getExecutorService() ); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCallCreateParams.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -37,7 +37,6 @@ * {@link ServiceCall} instance. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface ServiceCallCreateParams { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceOptionsBase.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -29,19 +29,27 @@ /** * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ abstract public class ServiceOptionsBase implements IServiceOptions { private boolean isRunFirst = false; - + private boolean useLBS = false; + @Override public boolean isRunFirst() { return isRunFirst; } - - public void setRunFirst(final boolean newValue) { + + public void setRunFirst(final boolean newValue) { this.isRunFirst = newValue; } + @Override + public boolean isBigdataLBS() { + return useLBS; + } + + public void setBigdataLBS(final boolean newValue) { + this.useLBS = newValue; + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -96,6 +96,10 @@ /** * Connect to a remote bigdata instance. + * + * FIXME This does not support the HA load balancer pattern and does not + * parameterize the value of the ContextPath. Also, should this class be + * part of the "client" package? */ public static BigdataSailRemoteRepository connect(final String serviceEndpoint) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepository.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -1,5 +1,3 @@ -package com.bigdata.rdf.sail.remote; - /** Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. @@ -24,6 +22,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +package com.bigdata.rdf.sail.remote; + import java.io.File; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -66,9 +66,30 @@ /** * Ctor that simply specifies an endpoint and lets this class manage the * ClientConnectionManager for the HTTP client and the manage the - * ExecutorService. More convenient. + * ExecutorService. More convenient, but does not account for whether or not + * to use the LBS. 
+ * + * @param sparqlEndpointURL + * The SPARQL end point URL */ - public BigdataSailRemoteRepository(final String sparqlEndpointURL) { + public BigdataSailRemoteRepository(final String sparqlEndpointURL) { + + this(sparqlEndpointURL, true/* useLBS */); + + } + + /** + * Ctor that simply specifies an endpoint and lets this class manage the + * ClientConnectionManager for the HTTP client and the manage the + * ExecutorService. + * + * @param sparqlEndpointURL + * The SPARQL end point URL + * @param useLBS + * <code>true</code> iff the LBS pattern should be used. + */ + public BigdataSailRemoteRepository(final String sparqlEndpointURL, + final boolean useLBS) { this.executor = Executors.newCachedThreadPool(); @@ -84,9 +105,9 @@ */ httpClient.setRedirectStrategy(new DefaultRedirectStrategy()); - this.nss = new RemoteRepository( - sparqlEndpointURL, httpClient, executor); - + this.nss = new RemoteRepository(sparqlEndpointURL, useLBS, httpClient, + executor); + } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -386,8 +386,15 @@ * {@inheritDoc} * <p> * Extended to setup the as-configured {@link IHALoadBalancerPolicy}. + * <p> + * Note: If the deployment is does not support HA replication (e.g., either + * not HA or HA with replicationFactor:=1), then we still want to be able to + * forward to the local service. * * @throws ServletException + * + * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in + * LBS mode with HA1 setup </a> */ @Override public void init() throws ServletException { @@ -405,42 +412,35 @@ final IIndexManager indexManager = BigdataServlet .getIndexManager(servletContext); - if (!(indexManager instanceof HAJournal)){ - // This is not an error, but the LBS is only for HA. - log.info("LBS Disabled - not HA"); - return; - } - if (indexManager instanceof AbstractJournal + if (indexManager instanceof HAJournal && ((AbstractJournal) indexManager).getQuorum() != null && ((AbstractJournal) indexManager).getQuorum() - .replicationFactor() == 1) { - // This is not an error, but the LBS is only for HA. - log.info("LBS Disabled - not HA"); - return; - } + .replicationFactor() > 1) { - { - // Get the as-configured policy. - final IHALoadBalancerPolicy policy = newInstance(// - servletConfig, // - HALoadBalancerServlet.class,// owningClass - IHALoadBalancerPolicy.class,// - InitParams.POLICY, InitParams.DEFAULT_POLICY); + { + // Get the as-configured policy. + final IHALoadBalancerPolicy policy = newInstance(// + servletConfig, // + HALoadBalancerServlet.class,// owningClass + IHALoadBalancerPolicy.class,// + InitParams.POLICY, InitParams.DEFAULT_POLICY); - // Set the as-configured policy. - setLBSPolicy(policy); + // Set the as-configured policy. 
+ setLBSPolicy(policy); - } - { + } + { - final IHARequestURIRewriter rewriter = newInstance(// - servletConfig,// - HALoadBalancerServlet.class, // owningClass - IHARequestURIRewriter.class,// - InitParams.REWRITER, InitParams.DEFAULT_REWRITER); + final IHARequestURIRewriter rewriter = newInstance(// + servletConfig,// + HALoadBalancerServlet.class, // owningClass + IHARequestURIRewriter.class,// + InitParams.REWRITER, InitParams.DEFAULT_REWRITER); - setRewriter(rewriter); + setRewriter(rewriter); + } + } servletContext.setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX, @@ -850,6 +850,10 @@ * the request to the servlet at the resulting requestURI. This forwarding * effectively disables the LBS but still allows requests which target the * LBS to succeed against the webapp on the same host. + * <p> + * Note: If the deployment is does not support HA replication (e.g., either + * not HA or HA with replicationFactor:=1), then we still want to be able to + * forward to the local service. * * @param request * The request. @@ -858,6 +862,9 @@ * * @throws IOException * @throws ServletException + * + * @see <a href="http://trac.bigdata.com/ticket/965" > Cannot run queries in + * LBS mode with HA1 setup </a> */ public void forwardToLocalService(// final boolean isLeaderRequest,// Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -307,6 +307,20 @@ } + /** + * + * @param sparqlEndpointURL + * @param httpClient + * @param executor + * + * @deprecated This version does not force the caller to decide whether or + * not the LBS pattern will be used. In general, it should be + * used if the end point is bigdata. This class is generally, + * but not always, used with a bigdata end point. The main + * exception is SPARQL Basic Federated Query. For that use case + * we can not assume that the end point is bigdata and thus we + * can not use the LBS prefix. + */ public RemoteRepository(final String sparqlEndpointURL, final HttpClient httpClient, final Executor executor) { @@ -865,7 +879,7 @@ } if (add.uri != null) { - // set the resource to load. + // set the resource to load : FIXME REST API allows multiple URIs, but RemoteRepository does not. 
opts.addRequestParam("uri", add.uri); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -89,7 +89,7 @@ public RemoteRepositoryManager(final String serviceURL, final HttpClient httpClient, final Executor executor) { - this(serviceURL, false/* useLBS */, httpClient, executor); + this(serviceURL, true/* useLBS */, httpClient, executor); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-06-17 12:17:24 UTC (rev 8493) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/health/TestNSSHealthCheck.java 2014-06-17 13:28:07 UTC (rev 8494) @@ -375,9 +375,9 @@ * * @return The connection. * - * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619"> - * RemoteRepository class should use application/x-www-form-urlencoded - * for large POST requests </a> + * @see <a href="http://trac.bigdata.com/ticket/619"> RemoteRepository class + * should use application/x-www-form-urlencoded for large POST requests + * </a> */ private HttpResponse doConnect(final ConnectOptions opts) throws Exception { @@ -452,57 +452,17 @@ } -// // conn = doConnect(urlString.toString(), opts.method); -// final URL url = new URL(urlString.toString()); -// conn = (HttpURLConnection) url.openConnection(); -// conn.setRequestMethod(opts.method); -// conn.setDoOutput(true); -// conn.setDoInput(true); -// conn.setUseCaches(false); -// conn.setReadTimeout(opts.timeout); -// conn.setRequestProperty("Accept", opts.acceptHeader); -// if (log.isDebugEnabled()) -// log.debug("Accept: " + opts.acceptHeader); - if (opts.entity != null) { -// if (opts.data == null) -// throw new AssertionError(); - -// final String contentLength = Integer.toString(opts.data.length); - -// conn.setRequestProperty("Content-Type", opts.contentType); -// conn.setRequestProperty("Content-Length", contentLength); - -// if (log.isDebugEnabled()) { -// log.debug("Content-Type: " + opts.contentType); -// log.debug("Content-Length: " + contentLength); -// } - -// final ByteArrayEntity entity = new ByteArrayEntity(opts.data); -// entity.setContentType(opts.contentType); - - ((HttpEntityEnclosingRequestBase) request).setEntity(opts.entity); + ((HttpEntityEnclosingRequestBase) request) + .setEntity(opts.entity); -// final OutputStream os = conn.getOutputStream(); -// try { -// os.write(opts.data); -// os.flush(); -// } finally { -// os.close(); -// } - } final HttpResponse response = m_httpClient.execute(request); return response; -// // connect. -// conn.connect(); -// -// return conn; - } catch (Throwable t) { /* * If something goes wrong, then close the http connection. @@ -513,10 +473,6 @@ if (request != null) request.abort(); -// // clean up the connection resources -// if (conn != null) -// conn.disconnect(); - } catch (Throwable t2) { // ignored. 
}
|
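For completeness, a sketch of the RemoteRepositoryManager change above: after this revision the three-argument constructor delegates with useLBS:=true, so callers that must bypass the LBS have to say so explicitly via the four-argument variant. DefaultHttpClient and the executor wiring follow the patterns used elsewhere in this diff; the service URL is illustrative.
{{{
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.http.impl.client.DefaultHttpClient;

import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;

public class ManagerSketch {

    public static void main(final String[] args) throws Exception {

        final ExecutorService executor = Executors.newCachedThreadPool();

        final String serviceURL = "http://localhost:8080/bigdata";

        // Now equivalent to specifying useLBS:=true.
        final RemoteRepositoryManager mgr = new RemoteRepositoryManager(
                serviceURL, new DefaultHttpClient(), executor);

        // Explicit opt-out via the four-argument constructor.
        final RemoteRepositoryManager direct = new RemoteRepositoryManager(
                serviceURL, false/* useLBS */, new DefaultHttpClient(),
                executor);

        // ... use mgr / direct against the end point ...

        executor.shutdown();

    }

}
}}}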