From: <mrp...@us...> - 2011-05-25 22:13:55
|
Revision: 4553 http://bigdata.svn.sourceforge.net/bigdata/?rev=4553&view=rev Author: mrpersonick Date: 2011-05-25 22:13:48 +0000 (Wed, 25 May 2011) Log Message: ----------- added a minRank feature and a count() feature for free text search Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hiterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITextIndexer.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -911,6 +911,7 @@ prefixMatch,// .4, // minCosine 1.0d, // maxCosine + 1, // minRank 10000, // maxRank false, // matchAllTerms this.timeout,// @@ -942,7 +943,7 @@ final double minCosine, final int maxRank) { return search(query, languageCode, false/* prefixMatch */, minCosine, - 1.0d, maxRank, false, this.timeout, TimeUnit.MILLISECONDS); + 1.0d, 1, maxRank, false, this.timeout, TimeUnit.MILLISECONDS); } @@ -982,8 +983,10 @@ * The minimum cosine that will be returned. * @param maxCosine * The maximum cosine that will be returned. 
- * @param maxRank - * The upper bound on the #of hits in the result set. + * @param minRank + * The min rank of the search result. + * @param maxRank + * The max rank of the search result. * @param prefixMatch * When <code>true</code>, the matches will be on tokens which * include the query tokens as a prefix. This includes exact @@ -1019,9 +1022,267 @@ public Hiterator<Hit> search(final String query, final String languageCode, final boolean prefixMatch, final double minCosine, final double maxCosine, - final int maxRank, final boolean matchAllTerms, + final int minRank, final int maxRank, final boolean matchAllTerms, long timeout, final TimeUnit unit) { +// final long begin = System.currentTimeMillis(); +// +//// if (languageCode == null) +//// throw new IllegalArgumentException(); +// +// if (query == null) +// throw new IllegalArgumentException(); +// +// if (minCosine < 0d || minCosine > 1d) +// throw new IllegalArgumentException(); +// +// if (minRank <= 0 || maxRank <= 0) +// throw new IllegalArgumentException(); +// +// if (minRank > maxRank) +// throw new IllegalArgumentException(); +// +// if (timeout < 0L) +// throw new IllegalArgumentException(); +// +// if (unit == null) +// throw new IllegalArgumentException(); +// +// if (log.isInfoEnabled()) +// log.info("languageCode=[" + languageCode + "], text=[" + query +// + "], minCosine=" + minCosine + ", maxRank=" + maxRank +// + ", matchAllTerms=" + matchAllTerms +// + ", timeout=" + timeout + ", unit=" + unit); +// +// if (timeout == 0L) { +// +// // treat ZERO as equivalent to MAX_LONG. +// timeout = Long.MAX_VALUE; +// +// } +// +// // tokenize the query. +// final TermFrequencyData qdata; +// { +// +// final TokenBuffer buffer = new TokenBuffer(1, this); +// +// /* +// * If we are using prefix match (* operator) then we don't want +// * to filter stopwords from the search query. 
+// */ +// final boolean filterStopwords = !prefixMatch; +// +// index(buffer, Long.MIN_VALUE/* docId */, +// Integer.MIN_VALUE/* fieldId */, languageCode, +// new StringReader(query), filterStopwords); +// +// if (buffer.size() == 0) { +// +// /* +// * There were no terms after stopword extration. +// */ +// +// log.warn("No terms after stopword extraction: query=" + query); +// +// final long elapsed = System.currentTimeMillis() - begin; +// +// return new Hiterator<Hit>(Arrays.asList(new Hit[] {}), elapsed, +// minCosine, maxRank); +// +// } +// +// qdata = buffer.get(0); +// +// qdata.normalize(); +// +// } +// +// final ConcurrentHashMap<Long/*docId*/,Hit> hits; +// { +// +// // @todo use size of collection as upper bound. +// final int initialCapacity = Math.min(maxRank,10000); +// +// hits = new ConcurrentHashMap<Long, Hit>(initialCapacity); +// +// } +// +// // run the queries. +// { +// +// final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>( +// qdata.distinctTermCount()); +// +// for (TermMetadata md : qdata.terms.values()) { +// +// tasks.add(new ReadIndexTask(md.termText(), prefixMatch, +// md.localTermWeight, this, hits)); +// +// } +// +// final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>( +// getExecutorService(), timeout, unit); +// +// try { +// +// executionHelper.submitTasks(tasks); +// +// } catch (InterruptedException ex) { +// +// log.warn("Interrupted - only partial results will be returned."); +// +// } catch (ExecutionException ex) { +// +// throw new RuntimeException(ex); +// +// } +// +// } +// +// /* +// * If match all is specified, remove any hits with a term count less +// * than the number of search tokens. 
+// */ +// if (matchAllTerms) { +// +// final int nterms = qdata.terms.size(); +// +// if (log.isInfoEnabled()) +// log.info("nterms: " + nterms); +// +// final Iterator<Map.Entry<Long,Hit>> it = hits.entrySet().iterator(); +// while (it.hasNext()) { +// final Hit hit = it.next().getValue(); +// if (log.isInfoEnabled()) +// log.info("hit terms: " + hit.getTermCount()); +// if (hit.getTermCount() != nterms) +// it.remove(); +// } +// +// } +// +// // #of hits. +// final int nhits = hits.size(); +// +// if (nhits == 0) { +// +// log.warn("No hits: languageCode=[" + languageCode + "], query=[" +// + query + "]"); +// +// } +// +// /* +// * Rank order the hits by relevance. +// * +// * @todo consider moving documents through a succession of N pools where +// * N is the #of distinct terms in the query. The read tasks would halt +// * if the size of the pool for N terms reached maxRank. This might (or +// * might not) help with triage since we could process hits by pool and +// * only compute the cosines for one pool at a time until we had enough +// * hits. 
+// */ +// +// if (log.isInfoEnabled()) +// log.info("Rank ordering "+nhits+" hits by relevance"); +// +// Hit[] a = hits.values().toArray(new Hit[nhits]); +// +// Arrays.sort(a); +// +// /* +// * If maxCosine is specified, prune the hits that are above the max +// */ +// if (maxCosine < 1.0d) { +// +// // find the first occurrence of a hit that is <= maxCosine +// int i = 0; +// for (Hit h : a) { +// if (h.getCosine() <= maxCosine) +// break; +// i++; +// } +// +// // no hits with relevance less than maxCosine +// if (i == a.length) { +// +// a = new Hit[0]; +// +// } else { +// +// // copy the hits from that first occurrence to the end +// final Hit[] tmp = new Hit[a.length - i]; +// System.arraycopy(a, i, tmp, 0, tmp.length); +// +// a = tmp; +// +// } +// +// } +// +// /* +// * If minRank is specified, prune the hits that below the min +// */ +// if (minRank > 1) { +// +// // no hits above minRank +// if (minRank > a.length) { +// +// a = new Hit[0]; +// +// } else { +// +// // copy the hits from the minRank to the end +// final Hit[] tmp = new Hit[a.length - (minRank-1)]; +// System.arraycopy(a, minRank-1, tmp, 0, tmp.length); +// +// a = tmp; +// +// } +// +// } +// +// final long elapsed = System.currentTimeMillis() - begin; +// +// if (log.isInfoEnabled()) +// log.info("Done: " + nhits + " hits in " + elapsed + "ms"); +// +// /* +// * Note: The caller will only see those documents which satisfy both +// * constraints (minCosine and maxRank). Results below a threshold will +// * be pruned. Any relevant results exceeding the maxRank will be pruned. 
+// */ +// return new Hiterator<Hit>(Arrays.asList(a), +// minCosine, maxRank-minRank+1); + + + final Hit[] a = _search(query, languageCode, prefixMatch, minCosine, + maxCosine, minRank, maxRank, matchAllTerms, timeout, unit); + + return new Hiterator<Hit>(Arrays.asList(a), 0.0d, Integer.MAX_VALUE); + + } + + public int count(final String query, final String languageCode, + final boolean prefixMatch, + final double minCosine, final double maxCosine, + final int minRank, final int maxRank, final boolean matchAllTerms, + long timeout, final TimeUnit unit) { + + final Hit[] a = _search(query, languageCode, prefixMatch, minCosine, + maxCosine, minRank, maxRank, matchAllTerms, timeout, unit); + + return a.length; + + } + + private Hit[] _search( + final String query, final String languageCode, + final boolean prefixMatch, + final double minCosine, final double maxCosine, + final int minRank, final int maxRank, + final boolean matchAllTerms, long timeout, final TimeUnit unit) { + final long begin = System.currentTimeMillis(); // if (languageCode == null) @@ -1033,10 +1294,10 @@ if (minCosine < 0d || minCosine > 1d) throw new IllegalArgumentException(); - if (maxCosine < 0d || maxCosine > 1d) + if (minRank <= 0 || maxRank <= 0) throw new IllegalArgumentException(); - if (maxRank <= 0) + if (minRank > maxRank) throw new IllegalArgumentException(); if (timeout < 0L) @@ -1047,7 +1308,10 @@ if (log.isInfoEnabled()) log.info("languageCode=[" + languageCode + "], text=[" + query - + "], minCosine=" + minCosine + ", maxRank=" + maxRank + + "], minCosine=" + minCosine + + ", maxCosine=" + maxCosine + + ", minRank=" + minRank + + ", maxRank=" + maxRank + ", matchAllTerms=" + matchAllTerms + ", timeout=" + timeout + ", unit=" + unit); @@ -1082,11 +1346,8 @@ log.warn("No terms after stopword extraction: query=" + query); - final long elapsed = System.currentTimeMillis() - begin; + return new Hit[] {}; - return new Hiterator<Hit>(Arrays.asList(new Hit[] {}), elapsed, - minCosine, 
maxRank); - } qdata = buffer.get(0); @@ -1146,15 +1407,22 @@ final int nterms = qdata.terms.size(); if (log.isInfoEnabled()) - log.info("nterms: " + nterms); + log.info("matchAll=true, nterms=" + nterms); final Iterator<Map.Entry<Long,Hit>> it = hits.entrySet().iterator(); + while (it.hasNext()) { + final Hit hit = it.next().getValue(); - if (log.isInfoEnabled()) + + if (log.isInfoEnabled()) { log.info("hit terms: " + hit.getTermCount()); - if (hit.getTermCount() != nterms) + } + + if (hit.getTermCount() != nterms) { it.remove(); + } + } } @@ -1167,6 +1435,8 @@ log.warn("No hits: languageCode=[" + languageCode + "], query=[" + query + "]"); + return new Hit[] {}; + } /* @@ -1203,7 +1473,7 @@ // no hits with relevance less than maxCosine if (i == a.length) { - a = new Hit[0]; + return new Hit[] {}; } else { @@ -1217,18 +1487,80 @@ } + /* + * If minCosine is specified, prune the hits that are below the min + */ + if (minCosine > 0.0d) { + + // find the first occurrence of a hit that is < minCosine + int i = 0; + for (Hit h : a) { + if (h.getCosine() < minCosine) + break; + i++; + } + + // no hits with relevance greater than minCosine + if (i == 0) { + + return new Hit[] {}; + + } else if (i < a.length) { + + // copy the hits from 0 up to that first occurrence + final Hit[] tmp = new Hit[i]; + System.arraycopy(a, 0, tmp, 0, tmp.length); + + a = tmp; + + } + + } + + /* + * If minRank is specified, prune the hits that rank higher than the min + */ + if (minRank > 1) { + + // no hits above minRank + if (minRank > a.length) { + + return new Hit[] {}; + + } else { + + // copy the hits from the minRank to the end + final Hit[] tmp = new Hit[a.length - (minRank-1)]; + System.arraycopy(a, minRank-1, tmp, 0, tmp.length); + + a = tmp; + + } + + } + + final int newMax = maxRank-minRank+1; + + /* + * If maxRank is specified, prune the hits that rank lower than the max + */ + if (newMax < a.length) { + + // copy the hits from the minRank to the end + final Hit[] tmp = new 
Hit[newMax]; + System.arraycopy(a, 0, tmp, 0, tmp.length); + + a = tmp; + + } + final long elapsed = System.currentTimeMillis() - begin; if (log.isInfoEnabled()) - log.info("Done: " + nhits + " hits in " + elapsed + "ms"); + log.info("Done: " + a.length + " hits in " + elapsed + "ms"); - /* - * Note: The caller will only see those documents which satisfy both - * constraints (minCosine and maxRank). Results below a threshold will - * be pruned. Any relevant results exceeding the maxRank will be pruned. - */ - return new Hiterator<Hit>(Arrays.asList(a), elapsed, minCosine, maxRank); - + return a; + } /* Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hiterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hiterator.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/Hiterator.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -14,7 +14,6 @@ final private Collection<A> hits; final private Iterator<A> src; - final private long elapsed; final private double minCosine; final private int maxRank; @@ -54,31 +53,18 @@ } /** - * The elapsed time in milliseconds required to compute the result set - * for the query. 
- */ - public long elapsed() { - - return elapsed; - - } - - /** * * @param hits * @param elapsed * @param minCosine * @param maxRank */ - public Hiterator(final Collection<A> hits, final long elapsed, + public Hiterator(final Collection<A> hits, final double minCosine, final int maxRank) { if (hits == null) throw new IllegalArgumentException(); - if (elapsed < 0) - throw new IllegalArgumentException(); - if (minCosine < 0d || minCosine > 1d) throw new IllegalArgumentException(); @@ -87,8 +73,6 @@ this.hits = hits; - this.elapsed = elapsed; - this.minCosine = minCosine; this.maxRank = maxRank; @@ -128,7 +112,7 @@ nextHit = src.next(); - if (rank + 1 >= maxRank || nextHit.getCosine() < minCosine) { + if (rank + 1 > maxRank || nextHit.getCosine() < minCosine) { exhausted = true; @@ -180,7 +164,7 @@ public String toString() { - return "Hiterator{elapsed=" + elapsed + ", minCosine=" + minCosine + return "Hiterator{minCosine=" + minCosine + ", maxRank=" + maxRank + ", nhits=" + hits.size() + "} : " + hits; Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITextIndexer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITextIndexer.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/ITextIndexer.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -103,8 +103,10 @@ * @param maxCosine * The maximum cosine that will be returned (in [minCosine:1.0]). * Useful for evaluating in relevance ranges. + * @param minRank + * The min rank of the search result. * @param maxRank - * The upper bound on the #of hits in the result set. + * The max rank of the search result. 
* @param matchAllTerms * if true, return only hits that match all search terms * @param timeout @@ -118,7 +120,51 @@ public Hiterator<A> search(final String query, final String languageCode, final boolean prefixMatch, final double minCosine, final double maxCosine, - final int maxRank, final boolean matchAllTerms, - long timeout, final TimeUnit unit); + final int minRank, final int maxRank, + final boolean matchAllTerms, long timeout, final TimeUnit unit); + /** + * Count free text search results. + * + * @param query + * The query (it will be parsed into tokens). + * @param languageCode + * The language code that should be used when tokenizing the + * query -or- <code>null</code> to use the default {@link Locale} + * ). + * @param prefixMatch + * When <code>true</code>, the matches will be on tokens which + * include the query tokens as a prefix. This includes exact + * matches as a special case when the prefix is the entire token, + * but it also allows longer matches. For example, + * <code>free</code> will be an exact match on <code>free</code> + * but a partial match on <code>freedom</code>. When + * <code>false</code>, only exact matches will be made. + * @param minCosine + * The minimum cosine that will be returned (in [0:maxCosine]). + * If you specify a minimum cosine of ZERO (0.0) you can drag in + * a lot of basically useless search results. + * @param maxCosine + * The maximum cosine that will be returned (in [minCosine:1.0]). + * Useful for evaluating in relevance ranges. + * @param minRank + * The min rank of the search result. + * @param maxRank + * The max rank of the search result. + * @param matchAllTerms + * if true, return only hits that match all search terms + * @param timeout + * The timeout -or- ZERO (0) for NO timeout (this is equivalent + * to using {@link Long#MAX_VALUE}). + * @param unit + * The unit in which the timeout is expressed. + * + * @return The result count. 
+ */ + public int count(final String query, final String languageCode, + final boolean prefixMatch, + final double minCosine, final double maxCosine, + final int minRank, final int maxRank, + final boolean matchAllTerms, long timeout, final TimeUnit unit); + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BD.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -161,20 +161,44 @@ * select ?s * where { * ?s bd:search "scale-out RDF triplestore" . - * ?s bd:maxHits "5"^^xsd:int . + * ?s bd:maxRank "5"^^xsd:int . * } * * </pre> * - * The default is {@value #DEFAULT_MAX_HITS}. + * The default is {@value #DEFAULT_MAX_RANK}. */ - final URI MAX_HITS = new URIImpl(SEARCH_NAMESPACE+"maxHits"); + final URI MAX_RANK = new URIImpl(SEARCH_NAMESPACE+"maxRank"); /** - * The default for {@link #MAX_HITS}. + * The default for {@link #MAX_RANK}. */ - final int DEFAULT_MAX_HITS = Integer.MAX_VALUE; + final int DEFAULT_MAX_RANK = Integer.MAX_VALUE; + /** + * Magic predicate used to query for free text search metadata. Use + * in conjunction with {@link #SEARCH} as follows: + * <p> + * <pre> + * + * select ?s + * where { + * ?s bd:search "scale-out RDF triplestore" . + * ?s bd:minRank "5"^^xsd:int . + * } + * + * </pre> + * + * The default is {@value #DEFAULT_MIN_RANK}. + */ + final URI MIN_RANK = new URIImpl(SEARCH_NAMESPACE+"minRank"); + + /** + * The default for {@link #MIN_RANK} is 1, full text search results will + * start with the #1 most relevant hit by default. + */ + final int DEFAULT_MIN_RANK = 1; + /** * Magic predicate used to query for free text search metadata. 
Use in * conjunction with {@link #SEARCH} as follows: Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestFullTextIndex.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -134,7 +134,7 @@ final Hiterator hitr = store.getLexiconRelation().getSearchEngine() .search(query, languageCode, false/* prefixMatch */, minCosine, 1.0d/* maxCosine */, - Integer.MAX_VALUE/* maxRank */, + 1/* minRank */, Integer.MAX_VALUE/* maxRank */, false/* matchAllTerms */, Long.MAX_VALUE,//2L/* timeout */, TimeUnit.MILLISECONDS// TimeUnit.SECONDS Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -681,7 +681,9 @@ final Value s = sp.getSubjectVar().getValue(); final Value p = sp.getPredicateVar().getValue(); if (s == null && p != null && - (BD.RELEVANCE.equals(p) || BD.MAX_HITS.equals(p) || + (BD.RELEVANCE.equals(p) || + BD.MIN_RANK.equals(p) || + BD.MAX_RANK.equals(p) || BD.MIN_RELEVANCE.equals(p) || BD.MAX_RELEVANCE.equals(p) || BD.MATCH_ALL_TERMS.equals(p))) { @@ -1693,7 +1695,8 @@ com.bigdata.bop.Var.var(subjVar.getName()); IVariableOrConstant<IV> relevance = new Constant(DummyIV.INSTANCE); - Literal maxHits = null; + Literal minRank = null; + Literal maxRank = null; Literal minRelevance = null; Literal maxRelevance = null; boolean 
matchAllTerms = false; @@ -1713,11 +1716,16 @@ throw new IllegalArgumentException("illegal metadata: " + meta); } relevance = com.bigdata.bop.Var.var(oVar.getName()); - } else if (BD.MAX_HITS.equals(pVal)) { + } else if (BD.MIN_RANK.equals(pVal)) { if (oVal == null || !(oVal instanceof Literal)) { throw new IllegalArgumentException("illegal metadata: " + meta); } - maxHits = (Literal) oVal; + minRank = (Literal) oVal; + } else if (BD.MAX_RANK.equals(pVal)) { + if (oVal == null || !(oVal instanceof Literal)) { + throw new IllegalArgumentException("illegal metadata: " + meta); + } + maxRank = (Literal) oVal; } else if (BD.MIN_RELEVANCE.equals(pVal)) { if (oVal == null || !(oVal instanceof Literal)) { throw new IllegalArgumentException("illegal metadata: " + meta); @@ -1738,7 +1746,7 @@ final IAccessPathExpander expander = new FreeTextSearchExpander(database, (Literal) objValue, - maxHits, minRelevance, maxRelevance, matchAllTerms); + minRank, maxRank, minRelevance, maxRelevance, matchAllTerms); // Decide on the correct arity for the predicate. 
final BOp[] vars = new BOp[] { @@ -2208,7 +2216,8 @@ BD.DEFAULT_PREFIX_MATCH,//false/* prefixMatch */, BD.DEFAULT_MIN_RELEVANCE,//0d/* minCosine */, BD.DEFAULT_MAX_RELEVANCE,//1.0d/* maxCosine */, - BD.DEFAULT_MAX_HITS,//Integer.MAX_VALUE/* maxRank */, + BD.DEFAULT_MIN_RANK,//1/* minRank */, + BD.DEFAULT_MAX_RANK,//Integer.MAX_VALUE/* maxRank */, BD.DEFAULT_MATCH_ALL_TERMS,//false/* matchAllTerms */, BD.DEFAULT_TIMEOUT,//0/* timeout */, TimeUnit.MILLISECONDS); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/FreeTextSearchExpander.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -59,7 +59,7 @@ */ private final AbstractTripleStore database; - private final Literal query, maxHits, minRelevance, maxRelevance; + private final Literal query, minRank, maxRank, minRelevance, maxRelevance; private final boolean matchAllTerms; @@ -69,12 +69,12 @@ public FreeTextSearchExpander(final AbstractTripleStore database, final Literal query) { - this(database, query, null, null, null, false); + this(database, query, null, null, null, null, false); } public FreeTextSearchExpander(final AbstractTripleStore database, - final Literal query, final Literal maxHits, + final Literal query, final Literal minRank, final Literal maxRank, final Literal minRelevance, final Literal maxRelevance, final boolean matchAllTerms) { @@ -88,8 +88,10 @@ this.query = query; - this.maxHits = maxHits; + this.minRank = minRank; + this.maxRank = maxRank; + this.minRelevance = minRelevance; this.maxRelevance = maxRelevance; @@ -174,7 +176,8 @@ prefixMatch, minRelevance == null ? BD.DEFAULT_MIN_RELEVANCE : minRelevance.doubleValue()/* minCosine */, maxRelevance == null ? 
BD.DEFAULT_MAX_RELEVANCE : maxRelevance.doubleValue()/* maxCosine */, - maxHits == null ? BD.DEFAULT_MAX_HITS/*Integer.MAX_VALUE*/ : maxHits.intValue()+1/* maxRank */, + minRank == null ? BD.DEFAULT_MIN_RANK/*1*/ : minRank.intValue()/* minRank */, + maxRank == null ? BD.DEFAULT_MAX_RANK/*Integer.MAX_VALUE*/ : maxRank.intValue()/* maxRank */, matchAllTerms, BD.DEFAULT_TIMEOUT/*0L*//* timeout */, TimeUnit.MILLISECONDS); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java 2011-05-25 22:04:05 UTC (rev 4552) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java 2011-05-25 22:13:48 UTC (rev 4553) @@ -805,7 +805,8 @@ BD.DEFAULT_PREFIX_MATCH,//false, // prefixMatch BD.DEFAULT_MIN_RELEVANCE,//0d, // minCosine BD.DEFAULT_MAX_RELEVANCE,//1.0d, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -841,8 +842,7 @@ " ?s <"+RDFS.LABEL+"> ?o . " + " ?o <"+BD.SEARCH+"> \""+searchQuery+"\" . " + " ?o <"+BD.RELEVANCE+"> ?score . " + -// " ?o <"+BD.MIN_RELEVANCE+"> \"0.6\" . " + - " ?o <"+BD.MAX_HITS+"> \""+maxHits+"\" . " + + " ?o <"+BD.MAX_RANK+"> \""+maxHits+"\" . 
" + "} " + "order by desc(?score)"; @@ -872,7 +872,8 @@ BD.DEFAULT_PREFIX_MATCH,//false, // prefixMatch BD.DEFAULT_MIN_RELEVANCE,//0d, // minCosine BD.DEFAULT_MAX_RELEVANCE,//1.0d, // maxCosine - maxHits+1, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + maxHits, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -941,7 +942,8 @@ BD.DEFAULT_PREFIX_MATCH,//false, // prefixMatch minRelevance, // minCosine maxRelevance, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -1014,7 +1016,8 @@ BD.DEFAULT_PREFIX_MATCH,//false, // prefixMatch minRelevance, // minCosine maxRelevance, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -1089,7 +1092,8 @@ true, // prefixMatch minRelevance, // minCosine maxRelevance, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -1162,7 +1166,8 @@ true, // prefixMatch minRelevance, // minCosine maxRelevance, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) BD.DEFAULT_MATCH_ALL_TERMS,//false, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -1231,7 +1236,8 @@ true, // prefixMatch minRelevance, // 
minCosine maxRelevance, // maxCosine - BD.DEFAULT_MAX_HITS,//10000, // maxRank (=maxResults + 1) + BD.DEFAULT_MIN_RANK,//1 + BD.DEFAULT_MAX_RANK,//10000, // maxRank (=maxResults + 1) true, // matchAllTerms BD.DEFAULT_TIMEOUT,//1000L, // timeout TimeUnit.MILLISECONDS // unit @@ -1254,6 +1260,105 @@ } + { // minRank, maxRank + + final String searchQuery = "how now brown cow"; + final int minRank = 2; + final int maxRank = 5; + final String query = + "select ?s ?o ?score " + + "where " + + "{ " + + " ?s <"+RDFS.LABEL+"> ?o . " + + " ?o <"+BD.SEARCH+"> \""+searchQuery+"\" . " + + " ?o <"+BD.RELEVANCE+"> ?score . " + + " ?o <"+BD.MIN_RANK+"> \""+minRank+"\" . " + + " ?o <"+BD.MAX_RANK+"> \""+maxRank+"\" . " + + "}"; + + if(log.isInfoEnabled()) + log.info("\n"+query); + + final TupleQuery tupleQuery = + cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + tupleQuery.setIncludeInferred(true /* includeInferred */); + TupleQueryResult result = tupleQuery.evaluate(); + + int i = 0; + while (result.hasNext()) { + final BindingSet tmp = result.next(); + if (log.isInfoEnabled()) + log.info(i + ": " + tmp.toString()); + i++; + } + assertTrue("wrong # of results: " + i, i == (maxRank-minRank+1)); + + result = tupleQuery.evaluate(); + + Collection<BindingSet> answer = new LinkedList<BindingSet>(); + + final ITextIndexer search = + sail.getDatabase().getLexiconRelation().getSearchEngine(); + final Hiterator<IHit> hits = + search.search(searchQuery, + null, // languageCode + true, // prefixMatch + BD.DEFAULT_MIN_RELEVANCE, // minCosine + BD.DEFAULT_MAX_RELEVANCE, // maxCosine + minRank,//1 + maxRank,//10000, // maxRank (=maxResults + 1) + false, // matchAllTerms + BD.DEFAULT_TIMEOUT,//1000L, // timeout + TimeUnit.MILLISECONDS // unit + ); + + while (hits.hasNext()) { + final IHit hit = hits.next(); + final IV id = new TermId(VTE.LITERAL, hit.getDocId()); + final Literal score = vf.createLiteral(hit.getCosine()); + final URI s = uris.get(id); + final Literal o = literals.get(id); + 
final BindingSet bs = createBindingSet( + new BindingImpl("s", s), + new BindingImpl("o", o), + new BindingImpl("score", score)); + if(log.isInfoEnabled()) + log.info(bs); + answer.add(bs); + } + + compare(result, answer); + + } + + { // countHits + + final String searchQuery = "how now brown cow"; + + final ITextIndexer search = + sail.getDatabase().getLexiconRelation().getSearchEngine(); + + final int i = search.count( + searchQuery, + null, // languageCode + true, // prefixMatch + BD.DEFAULT_MIN_RELEVANCE, // minCosine + BD.DEFAULT_MAX_RELEVANCE, // maxCosine + BD.DEFAULT_MIN_RANK, // minRank + BD.DEFAULT_MAX_RANK, // maxRank + false, // matchAllTerms + BD.DEFAULT_TIMEOUT,//1000L, // timeout + TimeUnit.MILLISECONDS // unit + ); + + if (log.isInfoEnabled()) { + log.info(i + " search results."); + } + + assertTrue("wrong # of results: " + i, i == 7); + + } + } finally { cxn.close(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-05-27 13:28:13
|
Revision: 4555 http://bigdata.svn.sourceforge.net/bigdata/?rev=4555&view=rev Author: thompsonbry Date: 2011-05-27 13:28:06 +0000 (Fri, 27 May 2011) Log Message: ----------- Removed support for the "asCommittedView" from AbstractTripleStore and the unit test for that support. This feature was not getting any use and had odd semantics. Added unit test for the DirectBufferPool to verify the acquire/release logic. Added assertion to the test suites to verify that acquired direct buffers are released. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/DirectBufferPoolTestHelper.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-26 13:25:06 UTC (rev 4554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -14,10 +14,11 @@ import org.apache.log4j.Logger; +import com.bigdata.counters.CAT; import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; import com.bigdata.counters.OneShotInstrument; -import com.bigdata.journal.DiskOnlyStrategy; +import com.bigdata.journal.IBufferStrategy; import com.bigdata.journal.TemporaryRawStore; import com.bigdata.journal.TransientBufferStrategy; import com.bigdata.rawstore.Bytes; @@ -36,7 +37,7 @@ * direct buffer for 
the operation which transfers the data from the * {@link TransientBufferStrategy} to disk. Therefore the data is copied into a * temporary buffer allocated from this pool and then the buffer is either - * handed off to the {@link DiskOnlyStrategy} for use as its write cache (in + * handed off to the {@link IBufferStrategy} for use as its write cache (in * which case the {@link TemporaryRawStore} holds a reference to the buffer and * releases it back to those pool when it is finalized) or the buffer is * immediately released back to this pool. @@ -65,8 +66,10 @@ * The name of the buffer pool. */ final private String name; - + /** + * A pool of direct {@link ByteBuffer}s which may be acquired. + * <p> * Note: This is NOT a weak reference collection since the JVM will leak * native memory. */ @@ -89,11 +92,22 @@ /** * The number {@link ByteBuffer}s allocated (must use {@link #lock} for - * updates or reads to be atomic). + * updates or reads to be atomic). This counter is incremented each time a + * buffer is allocated. Since we do not free buffers when they are released + * (to prevent an effective JVM memory leak) this counter is never + * decremented. */ private int size = 0; /** + * The #of {@link ByteBuffer}s which are currently acquired (must use + * {@link #lock} for updates or reads to be atomic). This counter is + * incremented when a buffer is acquired and decremented when a buffer + * is released. + */ + private int acquired = 0; + + /** * The maximum #of {@link ByteBuffer}s that will be allocated. */ private final int poolCapacity; @@ -114,11 +128,40 @@ private final Condition bufferRelease = lock.newCondition(); /** + * Package private counter of the total #of acquired buffers in all pools. + * This is used to check for memory leaks in the test suites. The value is + * reset before/after each test. + */ + static final CAT totalAcquireCount = new CAT(); + static final CAT totalReleaseCount = new CAT(); + + /** * The name of this buffer pool instance. 
*/ public String getName() { return name; } + + /** + * The #of {@link ByteBuffer}s which are currently acquired. This counter is + * incremented when a buffer is acquired and decremented when a buffer is + * released. + */ + public int getAcquiredBufferCount() { + + lock.lock(); + + try { + + return acquired; + + } finally { + + lock.unlock(); + + } + + } /** * The capacity of the buffer as specified to the ctor. @@ -355,7 +398,7 @@ // The TimeoutException should not be thrown. throw new AssertionError(e); - + } } @@ -401,6 +444,9 @@ // the head of the pool must exist. final ByteBuffer b = pool.take(); + acquired++; + totalAcquireCount.increment(); + assertOurBuffer(b); // limit -> capacity; pos-> 0; mark cleared. @@ -454,6 +500,9 @@ if(!pool.offer(b, timeout, units)) return false; + acquired--; + totalReleaseCount.increment(); + /* * Signal ONE thread that there is a buffer available. * @@ -631,6 +680,8 @@ int bufferPoolCount = 0; // #of buffers currently allocated across all buffer pools. int bufferInUseCount = 0; + // #of buffers currently acquired across all buffer pools. + int totalAcquired = 0; // #of bytes currently allocated across all buffer pools. final AtomicLong totalBytesUsed = new AtomicLong(0L); // For each buffer pool. @@ -643,11 +694,14 @@ final int poolCapacity = p.getPoolCapacity(); final int bufferCapacity = p.getBufferCapacity(); + + final int acquired = p.getAcquiredBufferCount(); final long bytesUsed = poolSize * bufferCapacity; bufferPoolCount++; bufferInUseCount += poolSize; + totalAcquired += acquired; totalBytesUsed.addAndGet(bytesUsed); c.addCounter("poolCapacity", new OneShotInstrument<Integer>( @@ -656,6 +710,12 @@ c.addCounter("bufferCapacity", new OneShotInstrument<Integer>( bufferCapacity)); + c.addCounter("acquired", new Instrument<Integer>() { + public void sample() { + setValue(acquired); + } + }); + c.addCounter("poolSize", new Instrument<Integer>() { public void sample() { setValue(poolSize); @@ -676,7 +736,10 @@ /* * Totals. 
*/ - + + tmp.addCounter("totalAcquired", new OneShotInstrument<Integer>( + totalAcquired)); + tmp.addCounter("bufferPoolCount", new OneShotInstrument<Integer>( bufferPoolCount)); Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/DirectBufferPoolTestHelper.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/DirectBufferPoolTestHelper.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/DirectBufferPoolTestHelper.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -0,0 +1,93 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 27, 2011 + */ + +package com.bigdata.io; + +import junit.extensions.proxy.IProxyTest; +import junit.framework.Assert; +import junit.framework.TestCase; + +/** + * Some helper methods for CI. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class DirectBufferPoolTestHelper { + + /** + * Verify that any buffers acquired by the test have been released. + * <p> + * Note: This clears the counter as a side effect to prevent a cascade + * of tests from being failed. 
+ */ + public static void checkBufferPools(final TestCase test) { + + checkBufferPools(test, null/*delegate*/); + + } + + /** + * Verify that any buffers acquired by the test have been released (variant + * when using an {@link IProxyTest}). + * <p> + * Note: This clears the counter as a side effect to prevent a cascade of + * tests from being failed. + * + * @param test + * The unit test instance. + * @param testClass + * The instance of the delegate test class for a proxy test + * suite. For example, TestWORMStrategy. + */ + public static void checkBufferPools(final TestCase test, + final TestCase testClass) { + + final long nacquired = DirectBufferPool.totalAcquireCount.get(); + final long nreleased = DirectBufferPool.totalReleaseCount.get(); + DirectBufferPool.totalAcquireCount.set(0L); + DirectBufferPool.totalReleaseCount.set(0L); + + if (nacquired != nreleased) { + + /* + * At least one buffer was acquired which was never released. + */ + + Assert.fail("Test did not release buffer(s)"// + + ": nacquired=" + nacquired // + + ", nreleased=" + nreleased // + + ", test=" + test.getClass() + "." + test.getName()// + + (testClass == null ? 
"" : ", testClass=" + + testClass.getClass().getName())// + ); + + } + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/DirectBufferPoolTestHelper.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-26 13:25:06 UTC (rev 4554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -28,6 +28,8 @@ package com.bigdata.io; +import java.nio.ByteBuffer; + import junit.framework.TestCase; /** @@ -51,11 +53,45 @@ super(arg0); } - /** @todo write tests. */ - public void test_nothing() { - -// fail("No tests written yet."); - + @Override + protected void setUp() throws Exception { + super.setUp(); + DirectBufferPoolTestHelper.checkBufferPools(this); } - + + @Override + protected void tearDown() throws Exception { + DirectBufferPoolTestHelper.checkBufferPools(this); + super.tearDown(); + } + + public void test_allocateRelease() throws InterruptedException { + + final int poolSizeBefore = DirectBufferPool.INSTANCE.getPoolSize(); + final int poolAcquiredBefore = DirectBufferPool.INSTANCE + .getAcquiredBufferCount(); + + final ByteBuffer b = DirectBufferPool.INSTANCE.acquire(); + + final int poolSizeDuring = DirectBufferPool.INSTANCE.getPoolSize(); + final int poolAcquiredDuring = DirectBufferPool.INSTANCE + .getAcquiredBufferCount(); + + assertEquals(poolSizeBefore + 1, poolSizeDuring); + assertEquals(poolAcquiredBefore + 1, poolAcquiredDuring); + + DirectBufferPool.INSTANCE.release(b); + + final int poolSizeAfter = DirectBufferPool.INSTANCE.getPoolSize(); + final int poolAcquiredAfter = DirectBufferPool.INSTANCE + 
.getAcquiredBufferCount(); + + // the pool size does not decrease. + assertEquals(poolSizeBefore + 1, poolSizeAfter); + + // the #of acquired buffers does decrease. + assertEquals(poolAcquiredBefore, poolAcquiredAfter); + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java 2011-05-26 13:25:06 UTC (rev 4554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -27,6 +27,8 @@ package com.bigdata.journal; +import com.bigdata.io.DirectBufferPoolTestHelper; + import junit.extensions.proxy.IProxyTest; import junit.framework.Assert; import junit.framework.TestCase; @@ -107,6 +109,10 @@ } + // Also check the direct buffer pools. + DirectBufferPoolTestHelper.checkBufferPools(test, testClass); + + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2011-05-26 13:25:06 UTC (rev 4554) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -1851,65 +1851,65 @@ if (isReadOnly()) throw new IllegalStateException(); - /* - * Clear the reference since it was as of the last commit point. - */ - readCommittedRef = null; +// /* +// * Clear the reference since it was as of the last commit point. +// */ +// readCommittedRef = null; return 0l; } - /** - * A factory returning a read-committed view of the database. - * <p> - * Note: There is a distinct instance <i>per commit time</i>. 
If an - * intervening commit has occurred, then you will get back a new instance - * providing a read-consistent view as of the now most recent commit point. - * - * FIXME The [per commit time] constraint is actually a function of the - * {@link ITx#READ_COMMITTED} semantics as implemented by the - * {@link IIndexManager}. If the indices are {@link IClientIndex}s then - * the instances remain valid since all requests are delegated through the - * {@link DataService} layer. However, if they are {@link BTree}s, then the - * instances are NOT valid. - * <p> - * Perhaps the best way to deal with this is to have a ReadCommittedBTree or - * to modify BTree to intrinsically understand read-committed semantics and - * to reload from the most recent checkpoint after each commit. That way the - * index references would always remain valid. - * <p> - * However, we have to be much more careful about read-consistent (choose a - * timestamp corresponding to the last commit point or the last closure - * point) vs read-committed (writes become immediately visible once they are - * committed). - */ - final public AbstractTripleStore asReadCommittedView() { - - if (getTimestamp() == ITx.READ_COMMITTED) { - - return this; - - } - - synchronized(this) { - - AbstractTripleStore view = readCommittedRef == null ? null - : readCommittedRef.get(); - - if(view == null) { - - view = (AbstractTripleStore) getIndexManager().getResourceLocator() - .locate(getNamespace(), ITx.READ_COMMITTED); - - readCommittedRef = new SoftReference<AbstractTripleStore>(view); - - } - - return view; - - } - - } - private SoftReference<AbstractTripleStore> readCommittedRef; +// /** +// * A factory returning a read-committed view of the database. +// * <p> +// * Note: There is a distinct instance <i>per commit time</i>. If an +// * intervening commit has occurred, then you will get back a new instance +// * providing a read-consistent view as of the now most recent commit point. 
+// * +// * FIXME The [per commit time] constraint is actually a function of the +// * {@link ITx#READ_COMMITTED} semantics as implemented by the +// * {@link IIndexManager}. If the indices are {@link IClientIndex}s then +// * the instances remain valid since all requests are delegated through the +// * {@link DataService} layer. However, if they are {@link BTree}s, then the +// * instances are NOT valid. +// * <p> +// * Perhaps the best way to deal with this is to have a ReadCommittedBTree or +// * to modify BTree to intrinsically understand read-committed semantics and +// * to reload from the most recent checkpoint after each commit. That way the +// * index references would always remain valid. +// * <p> +// * However, we have to be much more careful about read-consistent (choose a +// * timestamp corresponding to the last commit point or the last closure +// * point) vs read-committed (writes become immediately visible once they are +// * committed). +// */ +// final public AbstractTripleStore asReadCommittedView() { +// +// if (getTimestamp() == ITx.READ_COMMITTED) { +// +// return this; +// +// } +// +// synchronized(this) { +// +// AbstractTripleStore view = readCommittedRef == null ? 
null +// : readCommittedRef.get(); +// +// if(view == null) { +// +// view = (AbstractTripleStore) getIndexManager().getResourceLocator() +// .locate(getNamespace(), ITx.READ_COMMITTED); +// +// readCommittedRef = new SoftReference<AbstractTripleStore>(view); +// +// } +// +// return view; +// +// } +// +// } +// private SoftReference<AbstractTripleStore> readCommittedRef; final public long getJustificationCount() { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java 2011-05-26 13:25:06 UTC (rev 4554) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java 2011-05-27 13:28:06 UTC (rev 4555) @@ -58,64 +58,64 @@ } } - /** - * Test the commit semantics in the context of a read-committed view of the - * database. - */ - public void test_commit() { - - final LocalTripleStore store = (LocalTripleStore) getStore(); - - try { - - // read-committed view of the same database. - final AbstractTripleStore view = store.asReadCommittedView(); - - final IV s = new TermId(VTE.URI, 1); - final IV p = new TermId(VTE.URI, 2); - final IV o = new TermId(VTE.URI, 3); - - // add the statement. - store.addStatements(new SPO[] { // - new SPO(s, p, o, StatementEnum.Explicit) // - },// - 1); - - final boolean stmtInStore = store.hasStatement(s, p, o); - - if(log.isInfoEnabled()) log.info("stmtInStore: " + stmtInStore); - - final boolean stmtInView = view.hasStatement(s, p, o); - - if(log.isInfoEnabled()) log.info("stmtInView: " + stmtInView); - - // visible in the repo. - assertTrue(stmtInStore); - - // not visible in the view. - assertFalse(stmtInView); - - // commit the transaction. 
- store.commit(); - - // now visible in the view - /* - * Note: this will fail if the Journal#getIndex(name,timestamp) does - * not return an index view with read-committed (vs read-consistent) - * semantics. For the index view to have read-committed semantics - * the view MUST update if there is an intervening commit. This is - * currently handled by returning a ReadCommittedView for this case - * rather than a BTree. - */ - assertTrue(view.hasStatement(s, p, o)); - - } finally { - - store.__tearDownUnitTest(); - - } - - } +// /** +// * Test the commit semantics in the context of a read-committed view of the +// * database. +// */ +// public void test_commit() { +// +// final LocalTripleStore store = (LocalTripleStore) getStore(); +// +// try { +// +// // read-committed view of the same database. +// final AbstractTripleStore view = store.asReadCommittedView(); +// +// final IV s = new TermId(VTE.URI, 1); +// final IV p = new TermId(VTE.URI, 2); +// final IV o = new TermId(VTE.URI, 3); +// +// // add the statement. +// store.addStatements(new SPO[] { // +// new SPO(s, p, o, StatementEnum.Explicit) // +// },// +// 1); +// +// final boolean stmtInStore = store.hasStatement(s, p, o); +// +// if(log.isInfoEnabled()) log.info("stmtInStore: " + stmtInStore); +// +// final boolean stmtInView = view.hasStatement(s, p, o); +// +// if(log.isInfoEnabled()) log.info("stmtInView: " + stmtInView); +// +// // visible in the repo. +// assertTrue(stmtInStore); +// +// // not visible in the view. +// assertFalse(stmtInView); +// +// // commit the transaction. +// store.commit(); +// +// // now visible in the view +// /* +// * Note: this will fail if the Journal#getIndex(name,timestamp) does +// * not return an index view with read-committed (vs read-consistent) +// * semantics. For the index view to have read-committed semantics +// * the view MUST update if there is an intervening commit. 
This is +// * currently handled by returning a ReadCommittedView for this case +// * rather than a BTree. +// */ +// assertTrue(view.hasStatement(s, p, o)); +// +// } finally { +// +// store.__tearDownUnitTest(); +// +// } +// +// } /** * Test of abort semantics. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-05-28 13:08:55
|
Revision: 4562 http://bigdata.svn.sourceforge.net/bigdata/?rev=4562&view=rev Author: thompsonbry Date: 2011-05-28 13:08:48 +0000 (Sat, 28 May 2011) Log Message: ----------- Bug fixes to TestTruthMaintenance to ensure correct close() of the backing TemporaryStore instances allocated during many of the tests in that suite. Various 'final' declarations in TemporaryStore. Added @ TRACE logging of stack traces in DirectBufferPool to assist in locating acquires() and releases(). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestTruthMaintenance.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-28 12:39:31 UTC (rev 4561) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-28 13:08:48 UTC (rev 4562) @@ -522,6 +522,12 @@ // limit -> capacity; pos-> 0; mark cleared. 
state.buf.clear(); + if (log.isTraceEnabled()) { + final Throwable t = new RuntimeException( + "Stack trace of buffer acquisition"); + log.trace(t, t); + } + return state.buf; } finally { @@ -618,6 +624,12 @@ */ bufferRelease.signal(); + if (log.isTraceEnabled()) { + final Throwable t = new RuntimeException( + "Stack trace of buffer release"); + log.trace(t, t); + } + return true; } finally { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2011-05-28 12:39:31 UTC (rev 4561) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2011-05-28 13:08:48 UTC (rev 4562) @@ -376,7 +376,7 @@ return globalRowStoreHelper.getGlobalRowStore(); } - private GlobalRowStoreHelper globalRowStoreHelper = new GlobalRowStoreHelper(this); + final private GlobalRowStoreHelper globalRowStoreHelper = new GlobalRowStoreHelper(this); public BigdataFileSystem getGlobalFileSystem() { @@ -385,7 +385,7 @@ return globalFileSystemHelper.getGlobalFileSystem(); } - private GlobalFileSystemHelper globalFileSystemHelper = new GlobalFileSystemHelper(this); + final private GlobalFileSystemHelper globalFileSystemHelper = new GlobalFileSystemHelper(this); public DefaultResourceLocator getResourceLocator() { @@ -410,7 +410,7 @@ return resourceLockManager; } - private ResourceLockService resourceLockManager = new ResourceLockService(); + final private ResourceLockService resourceLockManager = new ResourceLockService(); public void close() { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestTruthMaintenance.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestTruthMaintenance.java 2011-05-28 12:39:31 UTC (rev 4561) +++ 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestTruthMaintenance.java 2011-05-28 13:08:48 UTC (rev 4562) @@ -34,6 +34,7 @@ import java.util.Properties; import java.util.Random; import java.util.Set; + import org.apache.log4j.MDC; import org.openrdf.model.URI; import org.openrdf.model.impl.URIImpl; @@ -41,6 +42,7 @@ import org.openrdf.model.vocabulary.RDF; import org.openrdf.model.vocabulary.RDFS; import org.openrdf.rio.RDFFormat; + import com.bigdata.rdf.inf.TruthMaintenance; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.model.BigdataStatement; @@ -99,6 +101,7 @@ */ public void test_filter_01() { + TempTripleStore focusStore = null; final AbstractTripleStore store = getStore(); try { @@ -147,7 +150,6 @@ /* * Setup a temporary store. */ - final TempTripleStore focusStore; { final Properties properties = store.getProperties(); @@ -216,6 +218,9 @@ assertEquals("#removed", 1, nremoved); } finally { + + if (focusStore != null) + focusStore.__tearDownUnitTest(); store.__tearDownUnitTest(); @@ -229,7 +234,8 @@ * verified (this is based on rdfs11). */ public void test_assertAll_01() { - + + TempTripleStore tempStore = null; final AbstractTripleStore store = getStore(); try { @@ -244,7 +250,7 @@ final BigdataURI rdfsSubClassOf = f.asValue(RDFS.SUBCLASSOF); - final TempTripleStore tempStore = tm.newTempTripleStore(); + tempStore = tm.newTempTripleStore(); // buffer writes on the tempStore. { @@ -282,6 +288,9 @@ assertTrue(store.hasStatement(U, rdfsSubClassOf, X)); } finally { + + if (tempStore != null) + tempStore.__tearDownUnitTest(); store.__tearDownUnitTest(); @@ -324,7 +333,7 @@ // add some assertions and verify aspects of their closure. 
{ - StatementBuffer assertionBuffer = new StatementBuffer(tm + final StatementBuffer assertionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); assertionBuffer.add(U, rdfsSubClassOf, V); @@ -355,7 +364,7 @@ */ { - StatementBuffer retractionBuffer = new StatementBuffer(tm + final StatementBuffer retractionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); retractionBuffer.add(V, rdfsSubClassOf, X); @@ -382,7 +391,7 @@ */ { - StatementBuffer assertionBuffer = new StatementBuffer(tm + final StatementBuffer assertionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); assertionBuffer.add(V, rdfsSubClassOf, X); @@ -408,8 +417,8 @@ */ { - StatementBuffer retractionBuffer = new StatementBuffer(tm.newTempTripleStore(),store, - 100/* capacity */); + final StatementBuffer retractionBuffer = new StatementBuffer(tm + .newTempTripleStore(), store, 100/* capacity */); retractionBuffer.add(U, rdfsSubClassOf, X); @@ -451,9 +460,9 @@ try { final TruthMaintenance tm = new TruthMaintenance(store.getInferenceEngine()); - + final BigdataValueFactory f = store.getValueFactory(); - + final BigdataURI U = f.createURI("http://www.bigdata.com/U"); final BigdataURI V = f.createURI("http://www.bigdata.com/V"); final BigdataURI X = f.createURI("http://www.bigdata.com/X"); @@ -462,8 +471,8 @@ { + // Note: new triple store on shared temporary store! final TempTripleStore tempStore = tm.newTempTripleStore(); - // buffer writes on the tempStore. { @@ -492,8 +501,9 @@ + tempStore.dumpStore(store, true, true, false, true)); - System.err.println("Doing asserts."); - + if (log.isInfoEnabled()) + log.info("Doing asserts."); + // perform closure and write on the database. 
tm.assertAll(tempStore); @@ -507,8 +517,8 @@ assertTrue(store.hasStatement(U, rdfsSubClassOf, V)); assertTrue(store.hasStatement(V, rdfsSubClassOf, X)); assertTrue(store.hasStatement(U, rdfsSubClassOf, X)); - - // and verify their statement type. + + // and verify their statement type. assertEquals(StatementEnum.Explicit, store.getStatement(U, rdfsSubClassOf, V).getStatementType()); assertEquals(StatementEnum.Explicit, store.getStatement(V, @@ -516,18 +526,19 @@ assertEquals(StatementEnum.Explicit, store.getStatement(U, rdfsSubClassOf, X).getStatementType()); - // now retract + // now retract { + // Note: new triple store on shared temporary store! final TempTripleStore tempStore = tm.newTempTripleStore(); - // buffer writes on the tempStore. { final StatementBuffer retractionBuffer = new StatementBuffer( tempStore, store, 10/* capacity */); - assertTrue(tempStore == retractionBuffer.getStatementStore()); + assertTrue(tempStore == retractionBuffer + .getStatementStore()); /* * Retract this statement. It is explicitly present in the @@ -546,11 +557,12 @@ + tempStore.dumpStore(store, true, true, false, true)); - System.err.println("Doing retraction."); - + if (log.isInfoEnabled()) + log.info("Doing retraction."); + // perform closure and write on the database. tm.retractAll(tempStore); - + } if (log.isInfoEnabled()) @@ -613,7 +625,7 @@ // add some assertions and verify aspects of their closure. { - StatementBuffer assertionBuffer = new StatementBuffer(tm + final StatementBuffer assertionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); // stmt a @@ -632,7 +644,8 @@ tm.assertAll((TempTripleStore)assertionBuffer.getStatementStore()); // dump after closure. - if(log.isInfoEnabled())log.info("\n"+store.dumpStore(true,true,false)); + if (log.isInfoEnabled()) + log.info("\n" + store.dumpStore(true, true, false)); // explicit. 
assertTrue(store.hasStatement(user, currentGraph, foo )); @@ -641,7 +654,7 @@ // verify that stmt c is marked as explicit in the kb. - BigdataStatement stmtC = (BigdataStatement) store + final BigdataStatement stmtC = (BigdataStatement) store .getStatement(foo, rdftype, graph); assertNotNull(stmtC); @@ -658,7 +671,7 @@ */ { - StatementBuffer retractionBuffer = new StatementBuffer(tm + final StatementBuffer retractionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); retractionBuffer.add(user, currentGraph, foo); @@ -680,7 +693,7 @@ // verify that stmt c is marked as explicit in the kb. - BigdataStatement stmtC = (BigdataStatement) store + final BigdataStatement stmtC = (BigdataStatement) store .getStatement(foo, rdftype, graph); assertNotNull(stmtC); @@ -773,12 +786,12 @@ final TempTripleStore controlStore = new TempTripleStore(store .getProperties()); - // Note: maintains closure on the controlStore. - final TruthMaintenance tmControlStore = new TruthMaintenance( - controlStore.getInferenceEngine()); - try { + // Note: maintains closure on the controlStore. + final TruthMaintenance tmControlStore = new TruthMaintenance( + controlStore.getInferenceEngine()); + final StatementBuffer assertionBuffer = new StatementBuffer( tmControlStore.newTempTripleStore(), controlStore, 100/* capacity */); @@ -822,29 +835,29 @@ // if(true) fail("re-enable this test"); - URI a = new URIImpl("http://www.bigdata.com/a"); - URI b = new URIImpl("http://www.bigdata.com/b"); - URI entity = new URIImpl("http://www.bigdata.com/Entity"); - URI sameAs = OWL.SAMEAS; + final URI a = new URIImpl("http://www.bigdata.com/a"); + final URI b = new URIImpl("http://www.bigdata.com/b"); + final URI entity = new URIImpl("http://www.bigdata.com/Entity"); + final URI sameAs = OWL.SAMEAS; // /* // * Note: not using rdf:type to avoid entailments about (x rdf:type // * Class) and (x rdfs:subClassOf y) that are not required by this test. 
// */ // URI rdfType = new URIImpl("http://www.bigdata.com/type"); - URI rdfType = RDF.TYPE; + final URI rdfType = RDF.TYPE; - AbstractTripleStore store = getStore(); + final AbstractTripleStore store = getStore(); try { - InferenceEngine inf = store.getInferenceEngine(); + final InferenceEngine inf = store.getInferenceEngine(); - TruthMaintenance tm = new TruthMaintenance(inf); + final TruthMaintenance tm = new TruthMaintenance(inf); // add some assertions and verify aspects of their closure. { - - StatementBuffer assertionBuffer = new StatementBuffer(tm + + final StatementBuffer assertionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); // stmt a @@ -856,12 +869,13 @@ // assert the sameas assertionBuffer.add(a, sameAs, b ); - + // flush statements to the tempStore. assertionBuffer.flush(); - + // perform closure and write on the database. - tm.assertAll( (TempTripleStore)assertionBuffer.getStatementStore() ); + tm.assertAll((TempTripleStore) assertionBuffer + .getStatementStore()); // dump after closure. if (log.isInfoEnabled()) @@ -869,7 +883,7 @@ + store.dumpStore(store, true, true, false, true)); } - + /* * retract stmt A and update the closure. * @@ -877,8 +891,8 @@ * other explicit statements were not touched. */ { - - StatementBuffer retractionBuffer = new StatementBuffer(tm + + final StatementBuffer retractionBuffer = new StatementBuffer(tm .newTempTripleStore(), store, 100/* capacity */); // retract the sameas @@ -886,21 +900,22 @@ // flush statements to the tempStore. retractionBuffer.flush(); - + // update the closure. - tm.retractAll( (TempTripleStore)retractionBuffer.getStatementStore() ); + tm.retractAll((TempTripleStore) retractionBuffer + .getStatementStore()); // dump after re-closure. 
if (log.isInfoEnabled()) log.info("\ndump after re-closure:\n" + store.dumpStore(store, true, true, false, true)); - + } - + } finally { - + store.__tearDownUnitTest(); - + } } @@ -955,6 +970,7 @@ properties.setProperty(DataLoader.Options.CLOSURE, ClosureEnum.None.toString()); + TempTripleStore tmp = null; final AbstractTripleStore store = getStore(properties); try { @@ -979,7 +995,6 @@ * Make a copy of the graph (statements only) that will serve as * ground truth. */ - final TempTripleStore tmp; { final Properties p = new Properties(properties); @@ -1012,6 +1027,9 @@ fail("Not expecting: "+ex, ex); } finally { + + if(tmp != null) + tmp.__tearDownUnitTest(); store.__tearDownUnitTest(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-02 12:53:05
|
Revision: 4600 http://bigdata.svn.sourceforge.net/bigdata/?rev=4600&view=rev Author: thompsonbry Date: 2011-06-02 12:52:57 +0000 (Thu, 02 Jun 2011) Log Message: ----------- Working to resolve [1]. I've added versions of this test with truth maintenance enabled (for triples and sids modes). I've modified TestSids?, which runs before this test in the TestBigdataSailWithSids? test suite, to improve its finally clauses handling the open connection and the open sail. I've also checked TestTxCreate?, which runs before this test in the TestBigdataSailWithSids? and the TestBigdataSailWithQuads? test suites. There were ambiguous invocations of BigdataSailConnection#isOpen?() in two derived classes. I have modified to those to test the 'open' field instead. I've added versions of this test with truth maintenance enabled (for triples and sids modes). I've modified TestSids?, which runs before this test in the TestBigdataSailWithSids? test suite, to improve its finally clauses handling the open connection and the open sail. I've also checked TestTxCreate?, which runs before this test in the TestBigdataSailWithSids? and the TestBigdataSailWithQuads? test suites. There were ambiguous invocations of BigdataSailConnection#isOpen?() in two derived classes. I have modified to those to test the 'open' field instead. Renamed 'open' fields on BigdataSail? and BigdataSailConnection? as openSail and openConn to help identify incorrect inheritance problems. Renamed assertOpen() methods as assertOpenConn() and assertOpenSail(). Renamed assertWritable() as assertWritableConn() Modified toString() on the connection classes to show whether the connection is open. Made the anonymous BigdataSailConnection? classes into named inner classes in an attempt to prevent future ambiguity in the inheritance patterns. Note: It would be far better if BigdataSailConnection? were a static class since that would remove the opportunity entirely for incorrect inheritance patterns. 
TestSearchQuery - the Journal was not closing the TemporaryStoreFactory for a normal close() but only in destroyResources(). I moved that code into _close(), which is now overridden by the Journal. TestSearchQuery#test_restart() - improved nested try/finally to close connections. [1] https://sourceforge.net/apps/trac/bigdata/ticket/315 (TestRollbacksTx does not release buffers/close stores) Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTx.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTM.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 
2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -41,7 +41,6 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.log4j.Logger; @@ -1188,16 +1187,32 @@ } - public void deleteResources() { - - super.deleteResources(); - +// public void deleteResources() { +// +// super.deleteResources(); +// +// // Note: can be null if error in ctor. +// if (tempStoreFactory != null) +// tempStoreFactory.closeAll(); +// +// } + + /** + * {@inheritDoc} + * <p> + * Overridden to close the {@link TemporaryStoreFactory}. + */ + @Override + protected void _close() { + + super._close(); + // Note: can be null if error in ctor. if (tempStoreFactory != null) tempStoreFactory.closeAll(); } - + public <T> Future<T> submit(AbstractTask<T> task) { return concurrencyManager.submit(task); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -62,10 +62,8 @@ */ public class TemporaryRawStore extends AbstractRawWormStore implements IMRMW { - protected static final Logger log = Logger.getLogger(TemporaryRawStore.class); + private static final Logger log = Logger.getLogger(TemporaryRawStore.class); -// protected static final boolean INFO = log.isInfoEnabled(); - /** * Note: various things must be synchronized on {@link #buf} in order to * serialize reads, writes, etc. 
This is because it is {@link #buf} on which Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -34,6 +34,8 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; + import com.bigdata.bfs.BigdataFileSystem; import com.bigdata.bfs.GlobalFileSystemHelper; import com.bigdata.btree.BTree; @@ -64,6 +66,8 @@ */ public class TemporaryStore extends TemporaryRawStore implements IBTreeManager { + private static final Logger log = Logger.getLogger(TemporaryStore.class); + /** * The size of the live index cache for the {@link Name2Addr} instance. * Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -545,7 +545,7 @@ * <code>true</code> iff the {@link BigdataSail} has been * {@link #initialize()}d and not {@link #shutDown()}. */ - private boolean open; + private boolean openSail; /** * Set <code>true</code> by ctor variants that open/create the database @@ -1066,9 +1066,9 @@ * if the {@link BigdataSail} has not been {@link #initialize()}d * or has been {@link #shutDown()}. 
*/ - protected void assertOpen() { + protected void assertOpenSail() { - if (!open) + if (!openSail) throw new IllegalStateException(); } @@ -1079,7 +1079,7 @@ */ public boolean isOpen() { - return open; + return openSail; } @@ -1090,7 +1090,7 @@ @Override protected void initializeInternal() throws SailException { - if (open) + if (openSail) throw new IllegalStateException(); /* @@ -1103,7 +1103,7 @@ } - open = true; + openSail = true; } @@ -1127,7 +1127,7 @@ public void shutDown() throws SailException { - assertOpen(); + assertOpenSail(); /* * Note: DO NOT shutdown the query engine. It is shared by all @@ -1146,7 +1146,7 @@ */ protected void shutDownInternal() throws SailException { - if (open) { + if (openSail) { try { @@ -1161,7 +1161,7 @@ } finally { - open = false; + openSail = false; } @@ -1368,102 +1368,24 @@ } } - + /** - * Return a read-only connection backed by a read-only transaction. The + * Return a read-only connection backed by a read-only transaction. The * transaction will be closed when the connection is closed. - * @param timestamp The timestamp. + * + * @param timestamp + * The timestamp. + * * @return The transaction. + * * @throws IOException * @see ITransactionService#newTx(long) */ - private BigdataSailConnection _getReadOnlyConnection(final long timestamp) throws IOException { - - final String namespace = database.getNamespace(); + private BigdataSailConnection _getReadOnlyConnection(final long timestamp) + throws IOException { - final IIndexManager indexManager = database.getIndexManager(); + return new BigdataSailReadOnlyConnection(timestamp); - final ITransactionService txService = getTxService(); - - return new BigdataSailConnection(null/*lock*/,false/*unisolated*/) { - - /** - * The transaction id. - */ - private long tx; - - /** - * Constructor starts a new transaction. 
- */ - { - newTx(); - } - - /** - * Obtain a new read-only transaction from the journal's - * transaction service, and attach this SAIL connection to the new - * view of the database. - */ - protected void newTx() throws IOException { - - this.tx = txService.newTx(timestamp); - - final AbstractTripleStore txView = (AbstractTripleStore) indexManager - .getResourceLocator().locate(namespace, tx); - - attach(txView); - - } - - /** - * NOP - */ - @Override - public synchronized void commit() throws SailException { - - // NOP. - - } - - /** - * NOP - */ - @Override - public synchronized void rollback() throws SailException { - - // NOP - - } - - /** - * A specialized close that will also abort the current read-only - * transaction. - */ - @Override - public synchronized void close() throws SailException { - - if (!isOpen()) { - - return; - - } - - super.close(); - - try { - - txService.abort(tx); - - } catch(IOException ex) { - - throw new SailException(ex); - - } - - } - - }; - } /** @@ -1491,149 +1413,10 @@ } - final ITransactionService txService = getTxService(); - - final String namespace = database.getNamespace(); - final Lock readLock = lock.readLock(); readLock.lock(); - - return new BigdataSailConnection(readLock,false/*unisolated*/) { - - /** - * The transaction id. - */ - private long tx; - /** - * Constructor starts a new transaction. - */ - { - newTx(); - } - - /** - * Obtain a new read/write transaction from the journal's - * transaction service, and attach this SAIL connection to the new - * view of the database. - */ - protected void newTx() throws IOException { - - this.tx = txService.newTx(ITx.UNISOLATED); - - final AbstractTripleStore txView = (AbstractTripleStore) indexManager - .getResourceLocator().locate(namespace, tx); - - attach(txView); - - } - - /** - * A specialized commit that goes through the transaction service - * available on the journal's transaction manager. 
Once the commit - * happens, a new read/write transaction is automatically started - * so that this connection can continue to absorb writes. - * <p> - * Note: writes to the lexicon without dirtying the isolated indices - * (i.e. writes to the SPO relation) will cause the writes to the - * lexicon to never be committed. Probably not a significant issue. - */ - @Override - public synchronized void commit() throws SailException { - - /* - * don't double commit, but make a note that writes to the lexicon without - * dirtying the isolated indices will cause the writes to the lexicon to never - * be committed - */ - - assertWritable(); - - /* - * Flush any pending writes. - * - * Note: This must be done before you compute the closure so that the - * pending writes will be read by the inference engine when it computes - * the closure. - */ - - flushStatementBuffers(true/* assertions */, true/* retractions */); - - try { - - txService.commit(tx); - - newTx(); - - } catch(IOException ex) { - - throw new SailException(ex); - - } - - } - - /** - * A specialized rollback that goes through the transaction service - * available on the journal's transaction manager. Once the abort - * happens, a new read/write transaction is automatically started - * so that this connection can continue to absorb writes. - */ - @Override - public synchronized void rollback() throws SailException { - - /* - * Note: DO NOT invoke super.rollback(). That will cause a - * database (Journal) level abort(). The Journal level abort() - * will discard the writes buffered on the unisolated indices - * (the lexicon indices). That will cause lost updates and break - * the eventually consistent design for the TERM2ID and ID2TERM - * indices. - */ -// super.rollback(); - - try { - - txService.abort(tx); - - newTx(); - - } catch(IOException ex) { - - throw new SailException(ex); - - } - - } - - /** - * A specialized close that will also abort the current read/write - * transaction. 
- */ - @Override - public synchronized void close() throws SailException { - - if (!isOpen()) { - - return; - - } - - super.close(); - - try { - - txService.abort(tx); - - } catch(IOException ex) { - - throw new SailException(ex); - - } - - } - - }; + return new BigdataSailRWTxConnection(readLock); } @@ -1691,7 +1474,7 @@ /** * True iff the {@link SailConnection} is open. */ - private boolean open; + protected boolean openConn; /** * non-<code>null</code> iff truth maintenance is being performed. @@ -1749,9 +1532,10 @@ public String toString() { - return getClass().getName() + "{timestamp=" - + TimestampUtility.toString(database.getTimestamp()) + "}"; - + return getClass().getName() + "{timestamp=" + + TimestampUtility.toString(database.getTimestamp()) + + ",open=" + openConn + "}"; + } public BigdataSail getBigdataSail() { @@ -1900,15 +1684,15 @@ * * @param database */ - public synchronized void attach(final AbstractTripleStore database) { + protected synchronized void attach(final AbstractTripleStore database) { - BigdataSail.this.assertOpen(); + BigdataSail.this.assertOpenSail(); this.database = database; readOnly = database.isReadOnly(); - open = true; + openConn = true; assertBuffer = null; @@ -2159,7 +1943,7 @@ public void setNamespace(final String prefix, final String namespace) throws SailException { - assertWritable(); + assertWritableConn(); // database.addNamespace(namespace,prefix); namespaces.put(prefix, namespace); @@ -2318,7 +2102,7 @@ } - assertWritable(); + assertWritableConn(); // flush any pending retractions first! flushStatementBuffers(false/* flushAssertBuffer */, true/* flushRetractBuffer */); @@ -2352,7 +2136,7 @@ if (log.isInfoEnabled()) log.info("contexts=" + Arrays.toString(contexts)); - assertWritable(); + assertWritableConn(); // discard any pending writes. 
clearBuffers(); @@ -2615,7 +2399,7 @@ private synchronized int removeStatements(final Resource s, final URI p, final Value o, final Resource c) throws SailException { - assertWritable(); + assertWritableConn(); flushStatementBuffers(true/* flushAssertBuffer */, false/* flushRetractBuffer */); @@ -2875,7 +2659,7 @@ */ public synchronized void rollback() throws SailException { - assertWritable(); + assertWritableConn(); // discard buffered assertions and/or retractions. clearBuffers(); @@ -2899,7 +2683,7 @@ */ public synchronized void commit() throws SailException { - assertWritable(); + assertWritableConn(); /* * Flush any pending writes. @@ -2938,7 +2722,7 @@ final public boolean isOpen() throws SailException { - return open; + return openConn; } @@ -2959,7 +2743,7 @@ // assertOpen(); - if (!open) { + if (!openConn) { return; @@ -3002,7 +2786,7 @@ ((Journal) getDatabase().getIndexManager()) .releaseUnisolatedConnection(); } - open = false; + openConn = false; } } @@ -3101,9 +2885,9 @@ } - protected void assertOpen() throws SailException { + protected void assertOpenConn() throws SailException { - if(!open) { + if(!openConn) { throw new SailException("Closed"); @@ -3111,9 +2895,9 @@ } - protected void assertWritable() throws SailException { + protected void assertWritableConn() throws SailException { - assertOpen(); + assertOpenConn(); if (readOnly) { @@ -3342,7 +3126,7 @@ */ public synchronized void computeClosure() throws SailException { - assertWritable(); + assertWritableConn(); flushStatementBuffers(true/* assertions */, true/* retractions */); @@ -3357,7 +3141,7 @@ */ public synchronized void removeAllEntailments() throws SailException { - assertWritable(); + assertWritableConn(); flushStatementBuffers(true/* assertions */, true/* retractions */); @@ -3423,7 +3207,7 @@ * native joins and the BigdataEvaluationStatistics rely on * this. 
*/ - Object[] newVals = replaceValues(dataset, tupleExpr, bindings); + final Object[] newVals = replaceValues(dataset, tupleExpr, bindings); dataset = (Dataset) newVals[0]; bindings = (BindingSet) newVals[1]; @@ -3455,9 +3239,10 @@ if (log.isInfoEnabled()) log.info("Optimized query: " + tupleExpr); - final Object[] newVals2 = replaceValues(dataset, tupleExpr, bindings); - dataset = (Dataset) newVals2[0]; - bindings = (BindingSet) newVals2[1]; + replaceValues(dataset, tupleExpr, bindings); +// final Object[] newVals2 = replaceValues(dataset, tupleExpr, bindings); +// dataset = (Dataset) newVals2[0]; +// bindings = (BindingSet) newVals2[1]; return tupleExpr; @@ -3543,7 +3328,7 @@ log.info("Optimized query: " + tupleExpr); final Object[] newVals2 = replaceValues(dataset, tupleExpr, bindings); - dataset = (Dataset) newVals2[0]; +// dataset = (Dataset) newVals2[0]; bindings = (BindingSet) newVals2[1]; // Note: evaluation begins with an empty binding set NOT the @@ -3610,6 +3395,302 @@ private IChangeLog changeLog; - } + } // class BigdataSailConnection + /** + * A connection backed by a read/write transaction. + */ + private class BigdataSailRWTxConnection extends BigdataSailConnection { + + /** + * The transaction service. + */ + private final ITransactionService txService; + + /** + * The transaction id. + */ + private long tx; + + /** + * Constructor starts a new transaction. + */ + public BigdataSailRWTxConnection(final Lock readLock) + throws IOException { + + super(readLock, false/* unisolated */); + + txService = getTxService(); + + newTx(); + + } + + /** + * Obtain a new read/write transaction from the journal's + * transaction service, and attach this SAIL connection to the new + * view of the database. + */ + protected void newTx() throws IOException { + + // The view of the database *outside* of this connection. + final AbstractTripleStore database = BigdataSail.this.database; + + // The namespace of the triple store. 
+ final String namespace = database.getNamespace(); + + // Open a new read/write transaction. + this.tx = txService.newTx(ITx.UNISOLATED); + + try { + + /* + * Locate a view of the triple store isolated by that + * transaction. + */ + final AbstractTripleStore txView = (AbstractTripleStore) database + .getIndexManager().getResourceLocator().locate( + namespace, tx); + + // Attach that transaction view to this SailConnection. + attach(txView); + + } catch (Throwable t) { + + try { + txService.abort(tx); + } catch (IOException ex) { + log.error(ex, ex); + } + + throw new RuntimeException(t); + + } + + } + + /** + * A specialized commit that goes through the transaction service + * available on the journal's transaction manager. Once the commit + * happens, a new read/write transaction is automatically started + * so that this connection can continue to absorb writes. + * <p> + * Note: writes to the lexicon without dirtying the isolated indices + * (i.e. writes to the SPO relation) will cause the writes to the + * lexicon to never be committed. Probably not a significant issue. + */ + @Override + public synchronized void commit() throws SailException { + + /* + * don't double commit, but make a note that writes to the lexicon + * without dirtying the isolated indices will cause the writes to + * the lexicon to never be committed + */ + + assertWritableConn(); + + /* + * Flush any pending writes. + * + * Note: This must be done before you compute the closure so that + * the pending writes will be read by the inference engine when it + * computes the closure. + */ + + flushStatementBuffers(true/* assertions */, true/* retractions */); + + try { + + txService.commit(tx); + + newTx(); + + } catch(IOException ex) { + + throw new SailException(ex); + + } + + } + + /** + * A specialized rollback that goes through the transaction service + * available on the journal's transaction manager. 
Once the abort + * happens, a new read/write transaction is automatically started + * so that this connection can continue to absorb writes. + */ + @Override + public synchronized void rollback() throws SailException { + + /* + * Note: DO NOT invoke super.rollback(). That will cause a + * database (Journal) level abort(). The Journal level abort() + * will discard the writes buffered on the unisolated indices + * (the lexicon indices). That will cause lost updates and break + * the eventually consistent design for the TERM2ID and ID2TERM + * indices. + */ +// super.rollback(); + + try { + + txService.abort(tx); + + newTx(); + + } catch(IOException ex) { + + throw new SailException(ex); + + } + + } + + /** + * A specialized close that will also abort the current read/write + * transaction. + */ + @Override + public synchronized void close() throws SailException { + + if (!openConn) { + + return; + + } + + super.close(); + + try { + + txService.abort(tx); + + } catch(IOException ex) { + + throw new SailException(ex); + + } + + } + + } // class BigdataSailReadWriteTxConnection + + private class BigdataSailReadOnlyConnection extends BigdataSailConnection { + + /** + * The transaction service. + */ + private final ITransactionService txService; + + /** + * The transaction id. + */ + private long tx; + + /** + * Constructor starts a new transaction. + */ + BigdataSailReadOnlyConnection(final long timestamp) throws IOException { + + super(null/* lock */, false/* unisolated */); + + txService = getTxService(); + + newTx(timestamp); + + } + + /** + * Obtain a new read-only transaction from the journal's transaction + * service, and attach this SAIL connection to the new view of the + * database. + */ + protected void newTx(final long timestamp) throws IOException { + + // The view of the database *outside* of this connection. + final AbstractTripleStore database = BigdataSail.this.database; + + // The namespace of the triple store. 
+ final String namespace = database.getNamespace(); + + // Obtain a new read-only transaction reading from that timestamp. + this.tx = txService.newTx(timestamp); + + try { + + /* + * Locate a view of the triple store isolated by that + * transaction. + */ + final AbstractTripleStore txView = (AbstractTripleStore) database + .getIndexManager().getResourceLocator().locate( + namespace, tx); + + // Attach that transaction view to this SailConnection. + attach(txView); + + } catch (Throwable t) { + + try { + txService.abort(tx); + } catch (IOException ex) { + log.error(ex, ex); + } + + throw new RuntimeException(t); + + } + + } + + /** + * NOP + */ + @Override + public synchronized void commit() throws SailException { + + // NOP. + + } + + /** + * NOP + */ + @Override + public synchronized void rollback() throws SailException { + + // NOP + + } + + /** + * A specialized close that will also abort the current read-only + * transaction. + */ + @Override + public synchronized void close() throws SailException { + + if (!openConn) { + + return; + + } + + super.close(); + + try { + + txService.abort(tx); + + } catch(IOException ex) { + + throw new SailException(ex); + + } + + } + + } // class BigdataSailReadOnlyConnection + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -94,6 +94,7 @@ suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacks.class); suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacksTx.class); + suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacksTM.class); return suite; Modified: 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -88,6 +88,7 @@ suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacks.class); suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacksTx.class); + suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacksTM.class); return suite; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -85,10 +85,10 @@ final BigdataSail sail = getSail(); - sail.initialize(); - try { + sail.initialize(); + if (!((BigdataSail) sail).database.getStatementIdentifiers()) { log.warn("Statement identifiers are not enabled"); @@ -234,7 +234,8 @@ final BindingSet solution = itr.next(); - System.out.println("solution[" + i + "] : " + solution); + if (log.isInfoEnabled()) + log.info("solution[" + i + "] : " + solution); final Value actual = solution.getValue("Y"); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestQuery.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestQuery.java 
2011-06-02 12:52:57 UTC (rev 4600) @@ -27,19 +27,19 @@ package com.bigdata.rdf.sail; import info.aduna.iteration.CloseableIteration; + import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.util.HashSet; import java.util.Set; + import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.model.impl.URIImpl; import org.openrdf.model.vocabulary.RDF; import org.openrdf.query.BindingSet; import org.openrdf.query.QueryEvaluationException; -import org.openrdf.query.QueryLanguage; -import org.openrdf.query.TupleQuery; import org.openrdf.query.algebra.Join; import org.openrdf.query.algebra.Projection; import org.openrdf.query.algebra.ProjectionElem; @@ -52,6 +52,7 @@ import org.openrdf.repository.RepositoryConnection; import org.openrdf.rio.RDFFormat; import org.openrdf.sail.SailException; + import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.store.DataLoader; @@ -77,11 +78,6 @@ } /** - * The namespace used when the LUBM data set was generated. - */ - final String ub = "http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#"; - - /** * Load the data set (LUBM with 1 university). * * @throws IOException @@ -121,9 +117,10 @@ } - log.info("Loading " + n + " files from " + dir); + if(log.isInfoEnabled()) + log.info("Loading " + n + " files from " + dir); - DataLoader dataLoader = sail.database.getDataLoader(); + final DataLoader dataLoader = sail.database.getDataLoader(); dataLoader.loadData(resource, baseURL, rdfFormat); @@ -161,14 +158,21 @@ final BigdataSail sail = getSail(); - sail.initialize(); - try { + sail.initialize(); + loadData(sail); final BigdataSailConnection conn = sail.getConnection(); + try { + + /** + * The namespace used when the LUBM data set was generated. 
+ */ + final String ub = "http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#"; + final URI graduateStudent = new URIImpl(ub+"GraduateStudent"); final URI takesCourse = new URIImpl(ub+"takesCourse"); @@ -275,16 +279,18 @@ while (itr.hasNext()) { - BindingSet solution = itr.next(); + final BindingSet solution = itr.next(); - System.out.println("solution["+i+"] : "+solution); - + if (log.isInfoEnabled()) + log.info("solution[" + i + "] : " + solution); + final Value actual = solution.getValue("X"); final boolean found = expected.remove(actual); - assertTrue("Not expecting X=" + actual, found); - + if (!found) + fail("Not expecting X=" + actual); + i++; } @@ -301,11 +307,14 @@ finally { - conn.close(); - cxn.close(); } + } finally { + + conn.close(); + + } } finally { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSearchQuery.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -186,7 +186,8 @@ */ { - StatementBuffer sb = new StatementBuffer(sail.database, 100/* capacity */); + final StatementBuffer<Statement> sb = new StatementBuffer<Statement>( + sail.database, 100/* capacity */); sb.add(new URIImpl("http://www.bigdata.com/A"), RDFS.LABEL, new LiteralImpl("Yellow Rose")); @@ -376,10 +377,12 @@ " }"; // the ontology (nothing is indexed for full text search). 
- final Graph test_restart_1 = new GraphImpl(); { + final Graph test_restart_1 = new GraphImpl(); + { - test_restart_1.add(new StatementImpl(ORGANIZATION, RDFS.SUBCLASSOF, ENTITY)); - + test_restart_1.add(new StatementImpl(ORGANIZATION, RDFS.SUBCLASSOF, + ENTITY)); + } // the entity data (the rdfs:label gets indexed for full text search) @@ -422,8 +425,8 @@ { // load ontology and optionally the entity data. final RepositoryConnection cxn = repo.getConnection(); - cxn.setAutoCommit(false); try { + cxn.setAutoCommit(false); log.info("loading ontology"); cxn.add(test_restart_1); if (!doYouWantMeToBreak) { @@ -443,8 +446,8 @@ if (doYouWantMeToBreak) { // load the entity data. final RepositoryConnection cxn = repo.getConnection(); - cxn.setAutoCommit(false); try { + cxn.setAutoCommit(false); log.info("loading entity data"); cxn.add(test_restart_2); cxn.commit(); @@ -540,9 +543,9 @@ final BigdataSailRepository repo = new BigdataSailRepository(sail); final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo.getConnection(); - cxn.setAutoCommit(false); - try { + + cxn.setAutoCommit(false); final BNode a = new BNodeImpl("_:a"); final BNode b = new BNodeImpl("_:b"); @@ -702,12 +705,13 @@ final BigdataSailRepository repo = new BigdataSailRepository(sail); final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo.getConnection(); - cxn.setAutoCommit(false); try { - final ValueFactory vf = sail.getValueFactory(); + cxn.setAutoCommit(false); + final ValueFactory vf = sail.getValueFactory(); + final URI s1 = vf.createURI(BD.NAMESPACE+"s1"); final URI s2 = vf.createURI(BD.NAMESPACE+"s2"); final URI s3 = vf.createURI(BD.NAMESPACE+"s3"); @@ -1006,7 +1010,7 @@ result = tupleQuery.evaluate(); - Collection<BindingSet> answer = new LinkedList<BindingSet>(); + final Collection<BindingSet> answer = new LinkedList<BindingSet>(); final ITextIndexer search = sail.getDatabase().getLexiconRelation().getSearchEngine(); @@ -1226,7 +1230,7 @@ 
result = tupleQuery.evaluate(); - Collection<BindingSet> answer = new LinkedList<BindingSet>(); + final Collection<BindingSet> answer = new LinkedList<BindingSet>(); final ITextIndexer search = sail.getDatabase().getLexiconRelation().getSearchEngine(); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSids.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -95,13 +95,17 @@ public void testSids() throws Exception { final BigdataSail sail = getSail(); - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - final BigdataSailRepositoryConnection cxn = - (BigdataSailRepositoryConnection) repo.getConnection(); - cxn.setAutoCommit(false); try { + + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + + try { + + cxn.setAutoCommit(false); cxn.add(getClass().getResourceAsStream("sids.rdf"), "", RDFFormat.RDFXML); @@ -176,8 +180,11 @@ } + } finally { + cxn.close(); + } + } finally { - cxn.close(); sail.__tearDownUnitTest(); } @@ -186,15 +193,19 @@ public void testSids2() throws Exception { final BigdataSail sail = getSail(); - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - final BigdataSailRepositoryConnection cxn = - (BigdataSailRepositoryConnection) repo.getConnection(); - cxn.setAutoCommit(false); try { - final ValueFactory vf = sail.getValueFactory(); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + + 
try { + + cxn.setAutoCommit(false); + + final ValueFactory vf = sail.getValueFactory(); final URI host1 = vf.createURI("http://localhost/host1"); final URI host = vf.createURI("http://domainnamespace.com/host#Host"); @@ -314,9 +325,13 @@ } } + + } finally { + + cxn.close(); + } } finally { - cxn.close(); sail.__tearDownUnitTest(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -245,9 +245,9 @@ RepositoryConnection conn = null; try { int counter2 = 0; - conn = repo.getConnection(); - conn.setAutoCommit(false); - while (firstCause.get() == null&&counter<maxCounter) { + conn = repo.getConnection(); + conn.setAutoCommit(false); + while (firstCause.get() == null && counter < maxCounter) { if (writer) writer(conn); else Added: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTM.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTM.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTM.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -0,0 +1,66 @@ +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.contrib; + +import java.util.Properties; + +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.axioms.RdfsAxioms; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.rdf.vocab.RDFSVocabulary; + +/** + * Concrete instance of {@link TestRollbacks} which overrides the properties to + * enable truth maintenance support in the SAIL. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestRollbacksTM extends TestRollbacks { + + public TestRollbacksTM() { + super(); + } + + public TestRollbacksTM(String name) { + super(name); + } + + @Override + public Properties getProperties() { + + final Properties props = super.getProperties(); + + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, + RdfsAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, + RDFSVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "true"); + props.setProperty(BigdataSail.Options.JUSTIFY, "true"); + + return props; + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTM.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTx.java =================================================================== --- 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTx.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacksTx.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -1,3 +1,25 @@ +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.rdf.sail.contrib; import java.util.Properties; @@ -4,6 +26,13 @@ import com.bigdata.rdf.sail.BigdataSail; +/** + * Concrete instance of {@link TestRollbacks} which overrides the properties to + * enable full transaction support in the SAIL. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ public class TestRollbacksTx extends TestRollbacks { public TestRollbacksTx() { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2011-06-02 10:46:36 UTC (rev 4599) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2011-06-02 12:52:57 UTC (rev 4600) @@ -168,14 +168,32 @@ : ((BigdataSailRepository) testRepository).getDatabase() .getIndexManager(); + /* + * Note: The code in the block below was taken verbatim from + * super.testDown() in order to explore a tear down issue in testOpen(). + */ super.tearDown(); +// { +// +// testCon2.close(); +// testCon2 = null; +// +// testCon.close(); +// testCon = null; +// +// testRepository.shutDown(); +// testRepository = null; +// +// vf = null; +// +// } if (backend != null) backend.destroy(); } - /** + /** * This test has been overridden because Sesame assumes "read-committed" * transaction semantics while bidata uses snapshot isolation for its * transactions. @@ -249,5 +267,18 @@ assertFalse(testCon2.hasStatement(bob, name, nameBob, false)); // Yes. This is snapshot isolation semantics. } - + +// /** +// * Copied into the local test suite unchanged in order to debug with this +// * test. +// */ +// @Override +// public void testOpen() throws Exception { +// assertTrue(testCon.isOpen()); +// assertTrue(testCon2.isOpen()); +// testCon.close(); +// assertFalse(testCon.isOpen()); +// assertTrue(testCon2.isOpen()); +// } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-02 18:07:09
|
Revision: 4607 http://bigdata.svn.sourceforge.net/bigdata/?rev=4607&view=rev Author: thompsonbry Date: 2011-06-02 18:07:02 +0000 (Thu, 02 Jun 2011) Log Message: ----------- Fixed cluster install for the QUADS_QUERY_BRANCH and verified that we can run SPARQL against the federation. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ILocalTransactionManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/BigdataZooDefs.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeEnum.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeMonitorTask.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/DavidsTestBOps.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataFederationSparqlTest.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-06-02 18:07:02 UTC (rev 4607) @@ 
-2228,11 +2228,11 @@ } - /** - * Return the object providing the {@link AbstractLocalTransactionManager} - * for this journal. - */ - abstract public AbstractLocalTransactionManager getLocalTransactionManager(); +// /** +// * Return the object providing the {@link AbstractLocalTransactionManager} +// * for this journal. +// */ +// abstract public AbstractLocalTransactionManager getLocalTransactionManager(); public long commit() { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -25,19 +25,9 @@ /** * Logger. */ - protected static final Logger log = Logger - .getLogger(AbstractLocalTransactionManager.class); + private static final Logger log = Logger + .getLogger(AbstractLocalTransactionManager.class); - /** - * True iff the {@link #log} level is INFO or less. - */ - final static protected boolean INFO = log.isInfoEnabled(); - - /** - * True iff the {@link #log} level is DEBUG or less. 
- */ - final static protected boolean DEBUG = log.isDebugEnabled(); - public AbstractLocalTransactionManager() { } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -2426,6 +2426,12 @@ } + public ILocalTransactionManager getLocalTransactionManager() { + + return delegate.getLocalTransactionManager(); + + } + public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); @@ -2845,6 +2851,12 @@ } + public ILocalTransactionManager getLocalTransactionManager() { + + return delegate.getLocalTransactionManager(); + + } + public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IJournal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IJournal.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IJournal.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -25,7 +25,6 @@ import java.util.Properties; -import com.bigdata.btree.keys.IKeyBuilderFactory; import com.bigdata.rawstore.IMRMW; /** @@ -55,5 +54,11 @@ * Immediate shutdown. */ public void shutdownNow(); - + + /** + * Return the object providing the local transaction manager for this + * journal. 
+ */ + public ILocalTransactionManager getLocalTransactionManager(); + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ILocalTransactionManager.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ILocalTransactionManager.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/ILocalTransactionManager.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -36,8 +36,6 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ - * - * @todo we don't really need an interface for this as there is only one impl. */ public interface ILocalTransactionManager extends /* ITransactionManager, */IServiceShutdown { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -43,8 +43,9 @@ import com.bigdata.sparse.SparseRowStore; public class JournalDelegate implements IJournal { - protected final AbstractJournal delegate; + private final AbstractJournal delegate; + public JournalDelegate(final AbstractJournal source) { this.delegate = source; } @@ -221,7 +222,11 @@ return delegate.getResourceLocator(); } - public IResourceLockService getResourceLockService() { + public ILocalTransactionManager getLocalTransactionManager() { + return delegate.getLocalTransactionManager(); + } + + public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/BigdataZooDefs.java =================================================================== --- 
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/BigdataZooDefs.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/BigdataZooDefs.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -274,4 +274,15 @@ */ String MASTER_ELECTION = "masterElection"; + /** + * The name of the znode that is a child of {@link #LOGICAL_SERVICE_PREFIX} + * serving as the root of the HA quorum for that logical service. For + * example: + * + * <pre> + * zpath=/benchmark/config/com.bigdata.service.jini.TransactionServer/logicalService0000000000/quorum + * </pre> + */ + String QUORUM = "quorum"; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeEnum.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeEnum.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeEnum.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -42,7 +42,13 @@ /** * An ephemeral znode representing physical service instance. */ - PhysicalService; + PhysicalService, + + /** + * A persistent znode representing the quorum for a highly available logical + * service. + */ + Quorum; private ServiceConfigurationZNodeEnum(){} @@ -114,7 +120,7 @@ if (parent.endsWith(BigdataZooDefs.MASTER_ELECTION)) { /* - * The child is an EPHEMERAL znode representing a phsical + * The child is an EPHEMERAL znode representing a physical * service instance. */ @@ -144,6 +150,17 @@ } + if (child.endsWith(BigdataZooDefs.QUORUM)) { + + /* + * The child is an PERSISTENT znode representing the quorum for + * the logical service. 
+ */ + + return Quorum; + + } + throw new RuntimeException("serviceConfigZPath=" + serviceConfigZPath + ", zpath=" + zpath); Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeMonitorTask.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeMonitorTask.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/ServiceConfigurationZNodeMonitorTask.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -612,7 +612,11 @@ case MasterElectionLock: return NONE; + + case Quorum: + return NONE; + default: throw new AssertionError(zpath); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/DavidsTestBOps.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/DavidsTestBOps.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/DavidsTestBOps.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -52,12 +52,14 @@ import com.bigdata.rdf.vocab.NoVocabulary; /** + * A test suite developed for some bits and pieces of SPARQL 1.0 coverage. 
+ * * @author <a href="mailto:mrp...@us...">Mike Personick</a> * @version $Id$ */ public class DavidsTestBOps extends ProxyBigdataSailTestCase { - protected static final Logger log = Logger.getLogger(DavidsTestBOps.class); + private static final Logger log = Logger.getLogger(DavidsTestBOps.class); @Override public Properties getProperties() { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -120,6 +120,8 @@ suite.addTestSuite(TestDescribe.class); + suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); + // The Sesame TCK, including the SPARQL test suite. { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -115,6 +115,8 @@ suite.addTestSuite(com.bigdata.rdf.sail.TestRollbacksTx.class); suite.addTestSuite(com.bigdata.rdf.sail.TestMillisecondPrecisionForInlineDateTimes.class); + + suite.addTestSuite(com.bigdata.rdf.sail.DavidsTestBOps.class); // The Sesame TCK, including the SPARQL test suite. 
{ Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataFederationSparqlTest.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataFederationSparqlTest.java 2011-06-02 16:59:34 UTC (rev 4606) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataFederationSparqlTest.java 2011-06-02 18:07:02 UTC (rev 4607) @@ -60,7 +60,7 @@ * location, then it is convenient (but not necessary) to also specify its path. For * example: * <pre> - * -Dbigdata.configuration=/nas/bigdata/benchmark/config/bigdataStandalone.config + * -Dbigdata.configuration=/nas/bigdata/benchmark/config/bigdataCluster.config * -Dcom.bigdata.counters.linux.sysstat.path=/usr/local/bin * </pre> * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-03 11:35:24
|
Revision: 4613 http://bigdata.svn.sourceforge.net/bigdata/?rev=4613&view=rev Author: thompsonbry Date: 2011-06-03 11:35:18 +0000 (Fri, 03 Jun 2011) Log Message: ----------- Bug fix for [1]. This appears to be a bug in Constant#equals(Object o). The code was written to allow comparison with IConstantOrVariable. However, IVariable#get() always throws an UnsupportedOperationException? since it is not possible to obtain the asBound value of a variable without reference to a binding set. I have modified Constant#equals() to return false unless the passed Object is another IConstant. I also added a fast code path for tests against self. This is sufficient to have the unit test pass. I am now running through the test suites for the SAIL to verify that this change has not broken anything else. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Constant.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket276.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Constant.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Constant.java 2011-06-03 11:01:17 UTC (rev 4612) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Constant.java 2011-06-03 11:35:18 UTC (rev 4613) @@ -137,14 +137,25 @@ final public boolean equals(final Object o) { - if(!(o instanceof IVariableOrConstant<?>)) { - - // incomparable types. + if (this == o) + return true; + + if(!(o instanceof IConstant<?>)) { + + /* + * Incomparable types. + * + * Note: This used to permit IVariableOrConstant, but it is not + * possible to invoke get() on an IVariable without a bindingSet + * against which to resolve its asBound value. 
+ * + * See https://sourceforge.net/apps/trac/bigdata/ticket/276 + */ return false; } - final Object otherValue = ((IVariableOrConstant<?>) o).get(); + final Object otherValue = ((IConstant<?>) o).get(); // handles reference equality, including when both are null. if (value == otherValue) Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket276.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket276.java 2011-06-03 11:01:17 UTC (rev 4612) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket276.java 2011-06-03 11:35:18 UTC (rev 4613) @@ -19,12 +19,13 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ +*/ package com.bigdata.rdf.sail; import java.io.IOException; import java.util.Properties; +import java.util.Set; import org.openrdf.OpenRDFException; import org.openrdf.model.Statement; @@ -105,7 +106,7 @@ props.setProperty(BigdataSail.Options.ALLOW_SESAME_QUERY_EVALUATION, "false"); props.setProperty( - com.bigdata.rdf.store.AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, + BigdataSail.Options.STATEMENT_IDENTIFIERS, "false"); return props; @@ -133,13 +134,13 @@ RDFHandlerException { try { repo.initialize(); - RepositoryConnection conn = repo.getConnection(); + final RepositoryConnection conn = repo.getConnection(); try { - ValueFactory vf = conn.getValueFactory(); + final ValueFactory vf = conn.getValueFactory(); addData(conn); - final String query = "SELECT ?x { ?x ?a ?t . ?x ?lookup ?l }"; - TupleQuery q = conn.prepareTupleQuery(QueryLanguage.SPARQL, + final String query = "SELECT ?x { ?x ?a ?t . 
?x ?lookup ?l }"; + final TupleQuery q = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); q.setBinding( "a", @@ -147,9 +148,13 @@ q.setBinding("t", vf.createURI("os:class/Location")); q.setBinding("lookup", vf.createURI("os:prop/lookupName")); q.setBinding("l", vf.createLiteral("amsterdam")); - TupleQueryResult tqr = q.evaluate(); - while (tqr.hasNext()) - System.out.println(tqr.next().getBindingNames()); + final TupleQueryResult tqr = q.evaluate(); + while (tqr.hasNext()) { + final Set<String> bindingNames = tqr.next() + .getBindingNames(); + if (log.isInfoEnabled()) + log.info("bindingNames=" + bindingNames); + } tqr.close(); } finally { conn.close(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2011-06-06 21:27:24
|
Revision: 4635 http://bigdata.svn.sourceforge.net/bigdata/?rev=4635&view=rev Author: mrpersonick Date: 2011-06-06 21:27:16 +0000 (Mon, 06 Jun 2011) Log Message: ----------- working through test failures related to lex joins Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataURIImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -65,6 +65,7 @@ import com.bigdata.relation.IRelation; import com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuffer; import com.bigdata.relation.accesspath.AccessPath; +import com.bigdata.relation.accesspath.ArrayAccessPath; 
import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.relation.accesspath.BufferClosedException; import com.bigdata.relation.accesspath.IAccessPath; @@ -2016,6 +2017,14 @@ */ public int compareTo(final AccessPathTask o) { + /* + * Just go ahead and run the ArrayAccessPaths first. + */ + if (accessPath instanceof ArrayAccessPath) + return -1; + if (o.accessPath instanceof ArrayAccessPath) + return 1; + return BytesUtil.compareBytes(getFromKey(), o.getFromKey()); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -29,14 +29,21 @@ import java.math.BigDecimal; import java.math.BigInteger; +import java.math.RoundingMode; import java.util.ArrayList; import java.util.UUID; import org.apache.log4j.Logger; +import org.openrdf.model.Literal; +import org.openrdf.model.URI; +import org.openrdf.model.datatypes.XMLDatatypeUtil; +import org.openrdf.model.vocabulary.XMLSchema; +import org.openrdf.query.algebra.evaluation.ValueExprEvaluationException; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KeyBuilder; import com.bigdata.rawstore.Bytes; +import com.bigdata.rdf.error.SparqlTypeErrorException; import com.bigdata.rdf.internal.constraints.MathBOp.MathOp; import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.model.BigdataLiteral; @@ -169,6 +176,74 @@ } + public static IV literalMath(final Literal l1, final Literal l2, + final MathOp op) + { + final URI dt1 = l1.getDatatype(); + final URI dt2 = l2.getDatatype(); + + // Only numeric value can be used in math expressions + if (dt1 == null || !XMLDatatypeUtil.isNumericDatatype(dt1)) { + throw new 
IllegalArgumentException("Not a number: " + l1); + } + if (dt2 == null || !XMLDatatypeUtil.isNumericDatatype(dt2)) { + throw new IllegalArgumentException("Not a number: " + l2); + } + + // Determine most specific datatype that the arguments have in common, + // choosing from xsd:integer, xsd:decimal, xsd:float and xsd:double as + // per the SPARQL/XPATH spec + URI commonDatatype; + + if (dt1.equals(XMLSchema.DOUBLE) || dt2.equals(XMLSchema.DOUBLE)) { + commonDatatype = XMLSchema.DOUBLE; + } else if (dt1.equals(XMLSchema.FLOAT) || dt2.equals(XMLSchema.FLOAT)) { + commonDatatype = XMLSchema.FLOAT; + } else if (dt1.equals(XMLSchema.DECIMAL) || dt2.equals(XMLSchema.DECIMAL)) { + commonDatatype = XMLSchema.DECIMAL; + } else if (op == MathOp.DIVIDE) { + // Result of integer divide is decimal and requires the arguments to + // be handled as such, see for details: + // http://www.w3.org/TR/xpath-functions/#func-numeric-divide + commonDatatype = XMLSchema.DECIMAL; + } else { + commonDatatype = XMLSchema.INTEGER; + } + + // Note: Java already handles cases like divide-by-zero appropriately + // for floats and doubles, see: + // http://www.particle.kth.se/~lindsey/JavaCourse/Book/Part1/Tech/ + // Chapter02/floatingPt2.html + + try { + if (commonDatatype.equals(XMLSchema.DOUBLE)) { + double left = l1.doubleValue(); + double right = l2.doubleValue(); + return IVUtility.numericalMath(left, right, op); + } + else if (commonDatatype.equals(XMLSchema.FLOAT)) { + float left = l1.floatValue(); + float right = l2.floatValue(); + return IVUtility.numericalMath(left, right, op); + } + else if (commonDatatype.equals(XMLSchema.DECIMAL)) { + BigDecimal left = l1.decimalValue(); + BigDecimal right = l2.decimalValue(); + return IVUtility.numericalMath(left, right, op); + } + else { // XMLSchema.INTEGER + BigInteger left = l1.integerValue(); + BigInteger right = l2.integerValue(); + return IVUtility.numericalMath(left, right, op); + } + } catch (NumberFormatException e) { + throw new 
SparqlTypeErrorException(); + } catch (ArithmeticException e) { + throw new SparqlTypeErrorException(); + } + + } + public static final IV numericalMath(final IV iv1, final IV iv2, final MathOp op) { @@ -202,34 +277,53 @@ final AbstractLiteralIV num1 = (AbstractLiteralIV) iv1; final AbstractLiteralIV num2 = (AbstractLiteralIV) iv2; - // if one's a BigDecimal we should use the BigDecimal comparator for both - if (dte1 == DTE.XSDDecimal || dte2 == DTE.XSDDecimal) { - return numericalMath(num1.decimalValue(), num2.decimalValue(), op); - } + // Determine most specific datatype that the arguments have in common, + // choosing from xsd:integer, xsd:decimal, xsd:float and xsd:double as + // per the SPARQL/XPATH spec - // same for BigInteger - if (dte1 == DTE.XSDInteger || dte2 == DTE.XSDInteger) { - return numericalMath(num1.integerValue(), num2.integerValue(), op); - } - - // fixed length numerics - if (dte1.isFloatingPointNumeric() || dte2.isFloatingPointNumeric()) { - // non-BigDecimal floating points - if (dte1 == DTE.XSDFloat || dte2 == DTE.XSDFloat) - return numericalMath(num1.floatValue(), num2.floatValue(), op); - else - return numericalMath(num1.doubleValue(), num2.doubleValue(), op); + if (dte1 == DTE.XSDDouble || dte2 == DTE.XSDDouble) { + return numericalMath(num1.doubleValue(), num2.doubleValue(), op); + } else if (dte1 == DTE.XSDFloat || dte2 == DTE.XSDFloat) { + return numericalMath(num1.floatValue(), num2.floatValue(), op); + } if (dte1 == DTE.XSDDecimal || dte2 == DTE.XSDDecimal) { + return numericalMath(num1.decimalValue(), num2.decimalValue(), op); + } if (op == MathOp.DIVIDE) { + // Result of integer divide is decimal and requires the arguments to + // be handled as such, see for details: + // http://www.w3.org/TR/xpath-functions/#func-numeric-divide + return numericalMath(num1.decimalValue(), num2.decimalValue(), op); } else { - // non-BigInteger integers - if (dte1 == DTE.XSDInt && dte2 == DTE.XSDInt) - return numericalMath(num1.intValue(), 
num2.intValue(), op); - else - return numericalMath(num1.longValue(), num2.longValue(), op); + return numericalMath(num1.integerValue(), num2.integerValue(), op); } + +// // if one's a BigDecimal we should use the BigDecimal comparator for both +// if (dte1 == DTE.XSDDecimal || dte2 == DTE.XSDDecimal) { +// return numericalMath(num1.decimalValue(), num2.decimalValue(), op); +// } +// +// // same for BigInteger +// if (dte1 == DTE.XSDInteger || dte2 == DTE.XSDInteger) { +// return numericalMath(num1.integerValue(), num2.integerValue(), op); +// } +// +// // fixed length numerics +// if (dte1.isFloatingPointNumeric() || dte2.isFloatingPointNumeric()) { +// // non-BigDecimal floating points +// if (dte1 == DTE.XSDFloat || dte2 == DTE.XSDFloat) +// return numericalMath(num1.floatValue(), num2.floatValue(), op); +// else +// return numericalMath(num1.doubleValue(), num2.doubleValue(), op); +// } else { +// // non-BigInteger integers +// if (dte1 == DTE.XSDInt && dte2 == DTE.XSDInt) +// return numericalMath(num1.intValue(), num2.intValue(), op); +// else +// return numericalMath(num1.longValue(), num2.longValue(), op); +// } } - private static final IV numericalMath(final BigDecimal left, + public static final IV numericalMath(final BigDecimal left, final BigDecimal right, final MathOp op) { switch(op) { @@ -240,7 +334,7 @@ case MULTIPLY: return new XSDDecimalIV(left.multiply(right)); case DIVIDE: - return new XSDDecimalIV(left.divide(right)); + return new XSDDecimalIV(left.divide(right, RoundingMode.HALF_UP)); case MIN: return new XSDDecimalIV(left.compareTo(right) < 0 ? 
left : right); case MAX: @@ -251,7 +345,7 @@ } - private static final IV numericalMath(final BigInteger left, + public static final IV numericalMath(final BigInteger left, final BigInteger right, final MathOp op) { switch(op) { @@ -273,7 +367,7 @@ } - private static final IV numericalMath(final float left, + public static final IV numericalMath(final float left, final float right, final MathOp op) { switch(op) { @@ -295,7 +389,7 @@ } - private static final IV numericalMath(final double left, + public static final IV numericalMath(final double left, final double right, final MathOp op) { switch(op) { @@ -317,50 +411,50 @@ } - private static final IV numericalMath(final int left, - final int right, final MathOp op) { - - switch(op) { - case PLUS: - return new XSDIntIV(left+right); - case MINUS: - return new XSDIntIV(left-right); - case MULTIPLY: - return new XSDIntIV(left*right); - case DIVIDE: - return new XSDIntIV(left/right); - case MIN: - return new XSDIntIV(Math.min(left,right)); - case MAX: - return new XSDIntIV(Math.max(left,right)); - default: - throw new UnsupportedOperationException(); - } - - } +// private static final IV numericalMath(final int left, +// final int right, final MathOp op) { +// +// switch(op) { +// case PLUS: +// return new XSDIntIV(left+right); +// case MINUS: +// return new XSDIntIV(left-right); +// case MULTIPLY: +// return new XSDIntIV(left*right); +// case DIVIDE: +// return new XSDIntIV(left/right); +// case MIN: +// return new XSDIntIV(Math.min(left,right)); +// case MAX: +// return new XSDIntIV(Math.max(left,right)); +// default: +// throw new UnsupportedOperationException(); +// } +// +// } +// +// private static final IV numericalMath(final long left, +// final long right, final MathOp op) { +// +// switch(op) { +// case PLUS: +// return new XSDLongIV(left+right); +// case MINUS: +// return new XSDLongIV(left-right); +// case MULTIPLY: +// return new XSDLongIV(left*right); +// case DIVIDE: +// return new XSDLongIV(left/right); 
+// case MIN: +// return new XSDLongIV(Math.min(left,right)); +// case MAX: +// return new XSDLongIV(Math.max(left,right)); +// default: +// throw new UnsupportedOperationException(); +// } +// +// } - private static final IV numericalMath(final long left, - final long right, final MathOp op) { - - switch(op) { - case PLUS: - return new XSDLongIV(left+right); - case MINUS: - return new XSDLongIV(left-right); - case MULTIPLY: - return new XSDLongIV(left*right); - case DIVIDE: - return new XSDLongIV(left/right); - case MIN: - return new XSDLongIV(Math.min(left,right)); - case MAX: - return new XSDLongIV(Math.max(left,right)); - default: - throw new UnsupportedOperationException(); - } - - } - /** * Used to test whether a given value constant can be used in an inline * filter or not. If so, we can use one of the inline constraints Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -41,6 +41,7 @@ import com.bigdata.rdf.error.SparqlTypeErrorException; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.IVUtility; +import com.bigdata.rdf.internal.TermId; import com.bigdata.rdf.model.BigdataValue; /** @@ -133,7 +134,10 @@ // probably would never hit this because of SameTermOp if (op == CompareOp.EQ && left.isTermId() && right.isTermId()) { - if (left.getTermId() == right.getTermId()) + final long tid1 = left.getTermId(); + final long tid2 = right.getTermId(); + + if (tid1 == tid2 && tid1 != TermId.NULL && tid2 != TermId.NULL) return true; } Modified: 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -97,7 +97,13 @@ } public IV get(final IBindingSet bs) { - + + final String namespace = (String) + getRequiredProperty(Annotations.NAMESPACE); + + final BigdataValueFactory vf = + BigdataValueFactoryImpl.getInstance(namespace); + final IV iv = get(0).get(bs); if (log.isDebugEnabled()) { @@ -108,6 +114,26 @@ if (iv == null) throw new SparqlTypeErrorException(); + if (iv.isNumeric()) { + + final BigdataURI datatype = vf.createURI(iv.getDTE().getDatatype()); + + IV datatypeIV = datatype.getIV(); + + if (datatypeIV == null) { + + datatypeIV = new TermId(VTE.URI, TermId.NULL); + datatype.setIV(datatypeIV); + + } + + // cache the value on the IV + datatypeIV.setValue(datatype); + + return datatypeIV; + + } + final BigdataValue val = iv.getValue(); if (val == null) @@ -127,12 +153,6 @@ } else if (literal.getLanguage() == null) { // simple literal - final String namespace = (String) - getRequiredProperty(Annotations.NAMESPACE); - - final BigdataValueFactory vf = - BigdataValueFactoryImpl.getInstance(namespace); - datatype = vf.asValue(XSD.STRING); } else { @@ -145,7 +165,7 @@ if (datatypeIV == null) { - datatypeIV = new TermId(VTE.valueOf(val), TermId.NULL); + datatypeIV = new TermId(VTE.URI, TermId.NULL); datatype.setIV(datatypeIV); } Added: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java (rev 0) +++ 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -0,0 +1,72 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.rdf.internal.constraints; + +import java.util.Map; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; + +/** + * Always evaluates to false. + */ +public class FalseBOp extends XSDBooleanIVValueExpression { + + /** + * + */ + private static final long serialVersionUID = 1531344906063447800L; + + public static final FalseBOp INSTANCE = new FalseBOp(); + + private FalseBOp() { + + this(NOARGS, NOANNS); + + } + + /** + * Required shallow copy constructor. + */ + public FalseBOp(final BOp[] args, final Map<String, Object> anns) { + + super(args, anns); + + } + + /** + * Required deep copy constructor. 
+ */ + public FalseBOp(final FalseBOp op) { + super(op); + } + + public boolean accept(final IBindingSet bs) { + + return false; + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -21,30 +21,50 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/** +Note: Portions of this file are copyright by Aduna. + +Copyright Aduna (http://www.aduna-software.com/) (c) 1997-2007. + +Licensed under the Aduna BSD-style license. +*/ + package com.bigdata.rdf.internal.constraints; +import java.util.LinkedHashSet; import java.util.Map; +import java.util.Set; +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; + import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IValueExpression; +import com.bigdata.bop.IVariable; import com.bigdata.bop.ImmutableBOp; import com.bigdata.bop.NV; import com.bigdata.rdf.error.SparqlTypeErrorException; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.IVUtility; +import com.bigdata.rdf.model.BigdataValue; /** * A math expression involving a left and right IValueExpression operand. The * operation to be applied to the operands is specified by the * {@link Annotations#OP} annotation. 
*/ -final public class MathBOp extends IVValueExpression { +final public class MathBOp extends IVValueExpression + implements INeedsMaterialization { /** * */ private static final long serialVersionUID = 9136864442064392445L; + + private static final transient Logger log = Logger.getLogger(MathBOp.class); + public interface Annotations extends ImmutableBOp.Annotations { @@ -141,7 +161,40 @@ if (right == null) throw new SparqlTypeErrorException.UnboundVarException(); - return IVUtility.numericalMath(left, right, op()); + try { + + if (log.isDebugEnabled()) { + log.debug(toString(left.toString(), right.toString())); + } + + if (left.isInline() && right.isInline()) { + + return IVUtility.numericalMath(left, right, op()); + + } else { + + final BigdataValue val1 = left.getValue(); + + final BigdataValue val2 = right.getValue(); + + if (!(val1 instanceof Literal) || !(val2 instanceof Literal)) { + throw new SparqlTypeErrorException(); + } + + return IVUtility.literalMath((Literal) val1, (Literal) val2, + op()); + + } + + } catch (IllegalArgumentException ex) { + + if (log.isDebugEnabled()) { + log.debug("illegal argument, filtering solution"); + } + + throw new SparqlTypeErrorException(); + + } } @@ -165,6 +218,15 @@ return sb.toString(); } + + private String toString(final String left, final String right) { + + final StringBuilder sb = new StringBuilder(); + sb.append(op()); + sb.append("(").append(left).append(", ").append(right).append(")"); + return sb.toString(); + + } final public boolean equals(final MathBOp m) { @@ -210,5 +272,27 @@ return h; } + + private volatile transient Set<IVariable<IV>> terms; + + public Set<IVariable<IV>> getTermsToMaterialize() { + + if (terms == null) { + + terms = new LinkedHashSet<IVariable<IV>>(); + + for (BOp bop : args()) { + + if (bop instanceof IVariable) + terms.add((IVariable<IV>) bop); + + } + + } + + return terms; + + } + } Added: 
branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -0,0 +1,72 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.rdf.internal.constraints; + +import java.util.Map; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; + +/** + * Always evaluates to true. + */ +public class TrueBOp extends XSDBooleanIVValueExpression { + + /** + * + */ + private static final long serialVersionUID = -6166507977125961015L; + + public static final TrueBOp INSTANCE = new TrueBOp(); + + private TrueBOp() { + + this(NOARGS, NOANNS); + + } + + /** + * Required shallow copy constructor. + */ + public TrueBOp(final BOp[] args, final Map<String, Object> anns) { + + super(args, anns); + + } + + /** + * Required deep copy constructor. 
+ */ + public TrueBOp(final TrueBOp op) { + super(op); + } + + public boolean accept(final IBindingSet bs) { + + return true; + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -2455,6 +2455,16 @@ * {@link BigdataValue} for that term identifier in the lexicon. */ final public BigdataValue getTerm(final IV iv) { + + return getTerm(iv, true); + + } + + /** + * When readFromIndex=false, only handles inline, NULL, bnodes, SIDs, and + * the termCache - does not attempt to read from disk. + */ + final private BigdataValue getTerm(final IV iv, final boolean readFromIndex) { // if (false) { // alternative forces the standard code path. // final Collection<IV> ivs = new LinkedList<IV>(); @@ -2471,7 +2481,7 @@ // handle NULL, bnodes, statement identifiers, and the termCache. 
BigdataValue value = _getTermId(tid); - if (value != null) + if (value != null || !readFromIndex) return value; final IIndex ndx = getId2TermIndex(); @@ -2973,8 +2983,13 @@ final IV iv = term.get(); - final BigdataValue val = termCache.get(iv); + if (log.isDebugEnabled()) + log.debug("materializing: " + iv); + +// final BigdataValue val = termCache.get(iv); + final BigdataValue val = getTerm(iv, false); + if (val != null) { if (log.isDebugEnabled()) Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataURIImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataURIImpl.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataURIImpl.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -50,6 +50,8 @@ import org.openrdf.model.URI; import org.openrdf.model.util.URIUtil; +import com.bigdata.rdf.internal.TermId; + /** * A URI. Use {@link BigdataValueFactory} to create instances of this class. 
* @@ -173,6 +175,7 @@ if ((o instanceof BigdataValue) // && getIV() != null// + && getIV().getTermId() != TermId.NULL && ((BigdataValue) o).getIV() != null// && ((BigdataValue) o).getValueFactory() == getValueFactory()) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -105,6 +105,7 @@ import com.bigdata.rdf.internal.constraints.CompareBOp; import com.bigdata.rdf.internal.constraints.DatatypeBOp; import com.bigdata.rdf.internal.constraints.EBVBOp; +import com.bigdata.rdf.internal.constraints.FalseBOp; import com.bigdata.rdf.internal.constraints.FuncBOp; import com.bigdata.rdf.internal.constraints.IsBNodeBOp; import com.bigdata.rdf.internal.constraints.IsBoundBOp; @@ -113,6 +114,7 @@ import com.bigdata.rdf.internal.constraints.LangBOp; import com.bigdata.rdf.internal.constraints.LangMatchesBOp; import com.bigdata.rdf.internal.constraints.MathBOp; +import com.bigdata.rdf.internal.constraints.TrueBOp; import com.bigdata.rdf.internal.constraints.MathBOp.MathOp; import com.bigdata.rdf.internal.constraints.NotBOp; import com.bigdata.rdf.internal.constraints.OrBOp; @@ -1924,11 +1926,32 @@ } private IValueExpression<? extends IV> toVE(SameTerm sameTerm) { - final IValueExpression<? extends IV> iv1 = + final IValueExpression<? extends IV> left = toVE(sameTerm.getLeftArg()); - final IValueExpression<? extends IV> iv2 = + final IValueExpression<? 
extends IV> right = toVE(sameTerm.getRightArg()); - return new SameTermBOp(iv1, iv2); + + /* + * If a constant operand in the SameTerm op uses a value not found + * in the database, we must defer to the CompareBOp, which can perform + * value comparisons. SameTermBOp only works on IVs. + */ + + if (left instanceof Constant) { + final IV iv = ((Constant<? extends IV>) left).get(); + if (iv.isTermId() && iv.getTermId() == TermId.NULL) { + return new CompareBOp(left, right, CompareOp.EQ); + } + } + + if (right instanceof Constant) { + final IV iv = ((Constant<? extends IV>) right).get(); + if (iv.isTermId() && iv.getTermId() == TermId.NULL) { + return new CompareBOp(left, right, CompareOp.EQ); + } + } + + return new SameTermBOp(left, right); } private IValueExpression<? extends IV> toVE(final Compare compare) { @@ -1939,27 +1962,41 @@ /* * If the term is a Constant<URI> and the op is EQ or NE then we can - * do a sameTerm optimization. + * do a sameTerm optimization. The URI constant must be a real term + * in the database. */ final CompareOp op = compare.getOperator(); if (op == CompareOp.EQ || op == CompareOp.NE) { if (left instanceof Constant) { final IV iv = ((Constant<? extends IV>) left).get(); - if (iv.isURI()) { + if (iv.isURI() && iv.getTermId() != TermId.NULL) { return new SameTermBOp(left, right, op); } } if (right instanceof Constant) { final IV iv = ((Constant<? 
extends IV>) right).get(); - if (iv.isURI()) { + if (iv.isURI() && iv.getTermId() != TermId.NULL) { return new SameTermBOp(left, right, op); } } } + if (log.isDebugEnabled()) { + log.debug(left == right); + log.debug(left.equals(right)); + } + + if (left.equals(right)) { + if (compare.getOperator() == CompareOp.EQ) { + return TrueBOp.INSTANCE; + } else { + return FalseBOp.INSTANCE; + } + } + return new CompareBOp(left, right, compare.getOperator()); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2011-06-06 17:37:43 UTC (rev 4634) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2011-06-06 21:27:16 UTC (rev 4635) @@ -178,7 +178,54 @@ * run. */ static final Collection<String> testURIs = Arrays.asList(new String[] { - + // 8, 9, 14-19, 23-30 + +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-01", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-02", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-03", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-04", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-05", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-06", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-07", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-08", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-09", +// 
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-10", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-11", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-12", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-13", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-14", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-15", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-16", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-17", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-18", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-19", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-20", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-21", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-22", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-23", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-24", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-25", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-26", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-27", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-28", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-29", +// 
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/type-promotion/manifest#type-promotion-30", + +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/expr-builtin/manifest#sameTerm-eq", + +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-01", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-02", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-03", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-04", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-05", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-06", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-07", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-08", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-09", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-10", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-11", +// "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#open-eq-12", + /* * working through the new query engine failures: 0 errors, 11 failures */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-09 17:03:59
|
Revision: 4660 http://bigdata.svn.sourceforge.net/bigdata/?rev=4660&view=rev Author: thompsonbry Date: 2011-06-09 17:03:53 +0000 (Thu, 09 Jun 2011) Log Message: ----------- Improved logging in DirectBufferPool. Removed many uses of System.err in the sail unit tests. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEvaluationStrategyImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestXMLBuilder.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -167,9 +167,11 @@ * another finalized. */ final ByteBuffer buf; + final int nacquired; synchronized(this) { buf = this.buf; this.buf = null; + nacquired = DirectBufferPool.this.acquired; } if (buf == null) return; @@ -192,7 +194,10 @@ * using the same ByteBuffer, each of which believes that they * "own" the reference). 
*/ - log.error("Buffer release on finalize: AllocationStack", + leaked.increment(); + final long nleaked = leaked.get(); + log.error("Buffer release on finalize (nacquired=" + nacquired + + ",nleaked=" + nleaked + "): AllocationStack", allocationStack); } else { log.error("Buffer release on finalize."); @@ -231,6 +236,12 @@ * is released. */ private int acquired = 0; + + /** + * The #of buffers leaked out of {@link BufferState#finalize()} when + * {@link #DEBUG} is <code>true</code>. + */ + private final CAT leaked = new CAT(); /** * The maximum #of {@link ByteBuffer}s that will be allocated. Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -29,7 +29,7 @@ import java.util.Collection; import java.util.LinkedList; import java.util.Properties; -import org.apache.log4j.Level; + import org.apache.log4j.Logger; import org.openrdf.model.Literal; import org.openrdf.model.URI; @@ -44,10 +44,7 @@ import org.openrdf.query.TupleQuery; import org.openrdf.query.TupleQueryResult; import org.openrdf.query.impl.BindingImpl; -import org.openrdf.repository.sail.SailRepository; -import org.openrdf.repository.sail.SailRepositoryConnection; -import org.openrdf.sail.Sail; -import org.openrdf.sail.memory.MemoryStore; + import com.bigdata.rdf.axioms.NoAxioms; import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.store.BD; @@ -552,7 +549,8 @@ TupleQueryResult result = tupleQuery.evaluate(); while (result.hasNext()) { - System.err.println(result.next()); + final BindingSet tmp = result.next(); + if(log.isInfoEnabled())log.info(tmp.toString()); } // Collection<BindingSet> solution = new LinkedList<BindingSet>(); 
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -308,7 +308,8 @@ } - System.err.println("Removing: "+f); + if(log.isInfoEnabled()) + log.info("Removing: "+f); if (!f.delete()) throw new RuntimeException("Could not remove: " + f); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEvaluationStrategyImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEvaluationStrategyImpl.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEvaluationStrategyImpl.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -126,7 +126,7 @@ final TupleQuery tupleQuery = cxn.prepareTupleQuery( QueryLanguage.SPARQL, query); tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled())log.info(sw.toString()); } @@ -222,7 +222,7 @@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled())log.info(sw.toString()); } @@ -326,7 +326,7 @@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } @@ -426,7 +426,7 @@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } @@ -521,7 +521,7 @@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } @@ -618,7 +618,7 
@@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } @@ -701,10 +701,10 @@ cxn.commit(); - System.err.println("<mike> = " + sail.getDatabase().getIV(mike)); - System.err.println("<jane> = " + sail.getDatabase().getIV(jane)); - System.err.println("\"Mike\" = " + sail.getDatabase().getIV(new LiteralImpl("Mike"))); - System.err.println("\"Jane\" = " + sail.getDatabase().getIV(new LiteralImpl("Jane"))); + if(log.isInfoEnabled()) log.info("<mike> = " + sail.getDatabase().getIV(mike)); + if(log.isInfoEnabled()) log.info("<jane> = " + sail.getDatabase().getIV(jane)); + if(log.isInfoEnabled()) log.info("\"Mike\" = " + sail.getDatabase().getIV(new LiteralImpl("Mike"))); + if(log.isInfoEnabled()) log.info("\"Jane\" = " + sail.getDatabase().getIV(new LiteralImpl("Jane"))); final double minRelevance = 0d; final String query = @@ -728,7 +728,7 @@ tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } @@ -1050,7 +1050,7 @@ final TupleQuery tupleQuery = cxn.prepareTupleQuery( QueryLanguage.SPARQL, query); tupleQuery.evaluate(handler); - System.err.println(sw.toString()); + if(log.isInfoEnabled()) log.info(sw.toString()); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestNestedOptionals.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -629,10 +629,10 @@ final SOpTreeBuilder stb = new SOpTreeBuilder(); final SOpTree tree = stb.collectSOps(tupleExpr); - if (INFO) { - System.err.println(query); + if (log.isInfoEnabled()) { + log.info(query); for (SOp bop : tree) { - System.err.println(bop); + log.info(bop); } } Modified: 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -34,6 +34,7 @@ import junit.framework.TestSuite; import net.jini.config.ConfigurationException; +import org.apache.log4j.Logger; import org.openrdf.query.Dataset; import org.openrdf.query.parser.sparql.ManifestTest; import org.openrdf.query.parser.sparql.SPARQLQueryTest; @@ -67,6 +68,8 @@ */ public class BigdataEmbeddedFederationSparqlTest extends BigdataSparqlTest { + private static final Logger log = Logger.getLogger(BigdataEmbeddedFederationSparqlTest.class); + public BigdataEmbeddedFederationSparqlTest(String testURI, String name, String queryFileURL, String resultFileURL, Dataset dataSet, boolean laxCardinality) { @@ -273,7 +276,8 @@ } - System.err.println("Removing: "+f); + if(log.isInfoEnabled()) + log.info("Removing: "+f); if (!f.delete()) throw new RuntimeException("Could not remove: " + f); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -671,7 +671,8 @@ final String txt = getStreamContents(conn.getInputStream()); - System.out.println(txt); + if (log.isInfoEnabled()) + log.info(txt); } Modified: 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestXMLBuilder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestXMLBuilder.java 2011-06-09 16:50:47 UTC (rev 4659) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestXMLBuilder.java 2011-06-09 17:03:53 UTC (rev 4660) @@ -78,7 +78,8 @@ assertTrue(close == null); - System.out.println(xml.toString()); + if(log.isInfoEnabled()) + log.info(xml.toString()); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-10 00:16:09
|
Revision: 4682 http://bigdata.svn.sourceforge.net/bigdata/?rev=4682&view=rev Author: thompsonbry Date: 2011-06-10 00:16:02 +0000 (Fri, 10 Jun 2011) Log Message: ----------- Disabled the aspect of the thread-check logic at test tear down which was throwing an exception. It will just log @ ERROR immediately if there are any threads which were not properly terminated. I also moved the wait (for up to 2 seconds) for threads to terminate after the log @ ERROR. My thinking is that waiting might make the initial conditions for the next test cleaner, but it does not excuse the current test from failing to terminate its threads. Finally, I modified the message to include the test name (getName()) and the delegate class (getOurDelegate()). I will note that the information coming out of the thread-check logic is not yet very good. For example, you can see in this message that it is complaining about the main thread! ERROR: 8687 main com.bigdata.journal.ProxyTestCase.tearDown(ProxyTestCase.java:182): Threads left active after task: test=test_registerAndUse, delegate=com.bigdata.journal.TestWORMStrategy, startupCount=3, teardownCount=4, threads: Thread name=main Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java 2011-06-09 22:09:30 UTC (rev 4681) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java 2011-06-10 00:16:02 UTC (rev 4682) @@ -143,44 +143,62 @@ * an entire suite of tests.) 
*/ - int startupActiveThreads = 0; + private int startupActiveThreads = 0; + public void setUp() throws Exception { - startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); + + startupActiveThreads = Thread.currentThread().getThreadGroup().activeCount(); getOurDelegate().setUp(this); + } - static boolean s_checkThreads = true; + private static boolean s_checkThreads = true; + public void tearDown() throws Exception { + getOurDelegate().tearDown(this); if (s_checkThreads) { + final ThreadGroup grp = Thread.currentThread().getThreadGroup(); - int tearDownActiveThreads = grp.activeCount(); + final int tearDownActiveThreads = grp.activeCount(); if (startupActiveThreads != tearDownActiveThreads) { final Thread[] threads = new Thread[tearDownActiveThreads]; grp.enumerate(threads); final StringBuilder info = new StringBuilder(); + boolean first = true; for(Thread t: threads) { - info.append(t.getName() + "\n"); + if(!first) + info.append(','); + // Note: t.getName() can return [null]! + info.append("[" + t.getName() + "]"); + first = false; } - String failMessage = "Threads left active after task, startupCount: " - + startupActiveThreads - + ", teardownCount: " + tearDownActiveThreads + final String failMessage = "Threads left active after task" + +": test=" + getName()// + + ", delegate="+getOurDelegate().getClass().getName() + + ", startupCount=" + startupActiveThreads + + ", teardownCount=" + tearDownActiveThreads + + ", thisThread="+Thread.currentThread().getName() + ", threads: " + info; - // Wait upto 2 seconds for threads to die off + + if (grp.activeCount() != startupActiveThreads) + log.error(failMessage); + + /* + * Wait up to 2 seconds for threads to die off so the next test + * will run more cleanly. 
+ */ for (int i = 0; i < 20; i++) { - Thread.currentThread().sleep(100); + Thread.sleep(100); if (grp.activeCount() != startupActiveThreads) break; } - - if (grp.activeCount() != startupActiveThreads) - fail(failMessage); - else - log.warn(failMessage); + } + } super.tearDown(); Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java 2011-06-09 22:09:30 UTC (rev 4681) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/zookeeper/TestZLockImpl.java 2011-06-10 00:16:02 UTC (rev 4682) @@ -160,7 +160,8 @@ final int n = zookeeper.getChildren(zpath, false).size(); - log.info("nchildren=" + n); + if(log.isInfoEnabled()) + log.info("nchildren=" + n); if (n == 2) break; @@ -224,13 +225,13 @@ final Thread mainThread = Thread.currentThread(); - // a node that is guarenteed to be unique w/in the test namespace. + // a node that is guaranteed to be unique w/in the test namespace. final String zpath = "/test/" + getName() + UUID.randomUUID(); try { /* * verify no such node (should be unique and therefore not - * preexist). + * pre-exist). */ zookeeper.getChildren(zpath, false); fail("zpath exists: " + zpath); @@ -303,11 +304,18 @@ // break the lock. { + final String z = zpath + "/" + ((ZLockImpl) lock1).getLockRequestZNode(); - log.info("breaking lock: deleting " + z); + + if (log.isInfoEnabled()) + log.info("breaking lock: deleting " + z); + zookeeper.delete(z, -1/* version */); - log.info("broke lock: deleted " + z); + + if (log.isInfoEnabled()) + log.info("broke lock: deleted " + z); + } assertTrue(!lock1.isLockHeld()); @@ -375,7 +383,7 @@ public void test_sessionExpiredBeforeLockRequest() throws IOException, KeeperException, InterruptedException { - // a node that is guarenteed to be unique w/in the test namespace. 
+ // a node that is guaranteed to be unique w/in the test namespace. final String zpath = "/test/" + getName() + UUID.randomUUID(); expireSession(zookeeper); @@ -426,7 +434,7 @@ public void test_sessionExpiredWhileHoldingLock() throws IOException, KeeperException, InterruptedException { - // a node that is guarenteed to be unique w/in the test namespace. + // a node that is guaranteed to be unique w/in the test namespace. final String zpath = "/test/" + getName() + UUID.randomUUID(); // obtain a lock object. @@ -475,7 +483,7 @@ public void test_destroyLock() throws KeeperException, InterruptedException, ExecutionException { - // a node that is guarenteed to be unique w/in the test namespace. + // a node that is guaranteed to be unique w/in the test namespace. final String zpath = "/test/" + getName() + UUID.randomUUID(); final int ntasks = 4; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-15 21:58:52
|
Revision: 4712 http://bigdata.svn.sourceforge.net/bigdata/?rev=4712&view=rev Author: thompsonbry Date: 2011-06-15 21:58:45 +0000 (Wed, 15 Jun 2011) Log Message: ----------- Hacked in a rough XHTML table rendering of the query stats. There is now an URL query parameter "showRunningQueryStats" which may be used to enable this for the currently running queries. I need to debug and pretty up the HTML rendering and then I can focus on providing an "explain" option for a query based on the same code. javadoc edits in PivotTable. made the inner Node class public in XMLBuilder. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/query/PivotTable.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-15 19:35:12 UTC (rev 4711) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-15 21:58:45 UTC (rev 4712) @@ -28,6 +28,8 @@ package com.bigdata.bop.engine; +import java.io.IOException; +import java.io.Writer; import java.text.DateFormat; import java.util.Date; import java.util.Map; @@ -40,6 +42,7 @@ import com.bigdata.bop.IPredicate; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.bop.join.PipelineJoin.PipelineJoinStats; +import com.bigdata.counters.render.XHTMLRenderer; import com.bigdata.rawstore.Bytes; import com.bigdata.rdf.sail.QueryHints; import com.bigdata.rdf.sail.Rule2BOpUtility; @@ -417,4 +420,367 @@ } + public static void getTableHeaderXHTML(final IRunningQuery q, final Writer w) + throws 
IOException { + + // header row. + w.write("<tr\n>"); + /* + * Common columns for the overall query and for each pipeline operator. + */ + w.write("<th>queryId</th>"); + w.write("<th>tag</th>"); + w.write("<th>beginTime</th>"); + w.write("<th>doneTime</th>"); + w.write("<th>deadline</th>"); + w.write("<th>elapsed</th>"); + w.write("<th>serviceId</th>"); + w.write("<th>cause</th>"); + w.write("<th>bop</th>"); + /* + * Columns for each pipeline operator. + */ + w.write("<th>evalOrder</th>"); // [0..n-1] + w.write("<th>bopId</th>"); + w.write("<th>predId</th>"); + w.write("<th>evalContext</th>"); + w.write("<th>controller</th>"); + // metadata considered by the static optimizer. + w.write("<th>staticBestKeyOrder</th>"); // original key order assigned + // by static optimizer. + w.write("<th>nvars</th>"); // #of variables in the predicate for a join. + w.write("<th>fastRangeCount</th>"); // fast range count used by the + // static optimizer. + // dynamics (aggregated for totals as well). + w.write("<th>fanIO</th>"); + w.write("<th>sumMillis</th>"); // cumulative milliseconds for eval of + // this operator. + w.write("<th>opCount</th>"); // cumulative #of invocations of tasks for + // this operator. + w.write("<th>chunksIn</th>"); + w.write("<th>unitsIn</th>"); + w.write("<th>chunksOut</th>"); + w.write("<th>unitsOut</th>"); + w.write("<th>joinRatio</th>"); // expansion rate multiplier in the + // solution count. + w.write("<th>accessPathDups</th>"); + w.write("<th>accessPathCount</th>"); + w.write("<th>accessPathRangeCount</th>"); + w.write("<th>accessPathChunksIn</th>"); + w.write("<th>accessPathUnitsIn</th>"); + // dynamics based on elapsed wall clock time. + w.write("<th>solutions/ms</th>"); + w.write("<th>mutations/ms</th>"); + // + // cost model(s) + // + w.write("</tr\n>"); + + } + + /** + * Write the table rows. + * + * @param q + * The query. + * @param w + * Where to write the rows. 
+ * + * @throws IOException + */ + public static void getTableRowsXHTML(final IRunningQuery q, final Writer w) + throws IOException { + + final Integer[] order = BOpUtility.getEvaluationOrder(q.getQuery()); + + int orderIndex = 0; + + for (Integer bopId : order) { + + getTableRowXHTML(q, w, orderIndex, bopId, false/* summary */); + + orderIndex++; + + } + + } + + private static final String TD = "<td>"; + private static final String TDx = "</td\n>"; + + /** + * Return a tabular representation of the query {@link RunState}. + * + * @param q + * The {@link IRunningQuery}. + * @param evalOrder + * The evaluation order for the operator. + * @param bopId + * The identifier for the operator. + * @param summary + * <code>true</code> iff the summary for the query should be + * written. + * @return The row of the table. + */ + static private void getTableRowXHTML(final IRunningQuery q, final Writer w, + final int evalOrder, final Integer bopId, final boolean summary) + throws IOException { + + final DateFormat dateFormat = DateFormat.getDateTimeInstance( + DateFormat.FULL, DateFormat.FULL); + + // The elapsed time for the query (wall time in milliseconds). + final long elapsed = q.getElapsed(); + + // The serviceId on which the query is running : null unless scale-out. + final UUID serviceId = q.getQueryEngine().getServiceUUID(); + + // The thrown cause : null unless the query was terminated abnormally. 
+ final Throwable cause = q.getCause(); + + w.write("<tr\n>"); + w.write(TD + cdata(q.getQueryId().toString()) + TDx); + w.write(TD + + cdata(q.getQuery().getProperty(QueryHints.TAG, + QueryHints.DEFAULT_TAG)) + TDx); + w.write(TD + dateFormat.format(new Date(q.getStartTime())) + TDx); + w.write(TD + cdata(dateFormat.format(new Date(q.getDoneTime()))) + TDx); + w.write(TD); + if (q.getDeadline() != Long.MAX_VALUE) + w.write(cdata(dateFormat.format(new Date(q.getDeadline())))); + w.write(TDx); + w.write(TD + cdata(Long.toString(elapsed)) + TDx); + w.write(TD + (serviceId == null ? NA : serviceId.toString()) + TDx); + w.write(TD); + if (cause != null) + w.write(cause.getLocalizedMessage()); + w.write(TDx); + + final Map<Integer, BOp> bopIndex = q.getBOpIndex(); + final Map<Integer, BOpStats> statsMap = q.getStats(); + final BOp bop = bopIndex.get(bopId); + + // the operator. + w.write(TD); + if (summary) { + /* + * The entire query (recursively). + */ + w.write(cdata(BOpUtility.toString(q.getQuery())));//.replace('\n', ' '))); + w.write(TDx); + w.write(TD); + w.write("total"); // summary line. + } else { + // Otherwise show just this bop. + w.write(cdata(bopIndex.get(bopId).toString())); + w.write(TDx); + w.write(TD); + w.write(evalOrder); // eval order for this bop. + } + w.write(TDx); + + w.write(TD); + w.write(Integer.toString(bopId)); + w.write(TDx); + { + /* + * Show the predicate identifier if this is a Join operator. + * + * @todo handle other kinds of join operators when added using a + * shared interface. 
+ */ + final IPredicate<?> pred = (IPredicate<?>) bop + .getProperty(PipelineJoin.Annotations.PREDICATE); + w.write(TD); + if (pred != null) { + w.write(Integer.toString(pred.getId())); + } + w.write(TDx); + } + w.write(TD); + w.write(cdata(bop.getEvaluationContext().toString())); + w.write(TDx); + w.write(TD); + w.write(cdata(bop.getProperty(BOp.Annotations.CONTROLLER, + BOp.Annotations.DEFAULT_CONTROLLER).toString())); + w.write(TDx); + + /* + * Static optimizer metadata. + * + * FIXME Should report [nvars] be the expected asBound #of variables + * given the assigned evaluation order and the expectation of propagated + * bindings (optionals may leave some unbound). + */ + { + + @SuppressWarnings("unchecked") + final IPredicate pred = (IPredicate<?>) bop + .getProperty(PipelineJoin.Annotations.PREDICATE); + + if (pred != null) { + + final IKeyOrder<?> keyOrder = (IKeyOrder<?>) pred + .getProperty(Rule2BOpUtility.Annotations.ORIGINAL_INDEX); + + final Long rangeCount = (Long) pred + .getProperty(Rule2BOpUtility.Annotations.ESTIMATED_CARDINALITY); + + // keyorder + w.write(TD); + if (keyOrder != null) + w.write(keyOrder.toString()); + w.write(TDx); + + // nvars + w.write(TD); + if (keyOrder != null) + w.write(pred.getVariableCount(keyOrder)); + w.write(TDx); + + // rangeCount + w.write(TD); + if (rangeCount != null) + w.write(Long.toString(rangeCount)); + w.write(TDx); + + } else { + // keyorder + w.write(TD); + w.write(TDx); + // nvars + w.write(TD); + w.write(TDx); + // rangeCount + w.write(TD); + w.write(TDx); + } + } + + /* + * Dynamics. + */ + + int fanIO = 0; // @todo aggregate from RunState. + + final PipelineJoinStats stats = new PipelineJoinStats(); + if(summary) { + // Aggregate the statistics for all pipeline operators. + for (BOpStats t : statsMap.values()) { + stats.add(t); + } + } else { + // Just this operator. 
+ final BOpStats tmp = statsMap.get(bopId); + if (tmp != null) + stats.add(tmp); + } + final long unitsIn = stats.unitsIn.get(); + final long unitsOut = stats.unitsOut.get(); + w.write(TDx); + w.write(TD); + w.write(Integer.toString(fanIO)); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.elapsed.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.opCount.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.chunksIn.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.unitsIn.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.chunksOut.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.unitsOut.get())); + w.write(TDx); + w.write(TD); + w.write(unitsIn == 0 ? NA : Double.toString(unitsOut / (double) unitsIn)); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.accessPathDups.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.accessPathCount.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.accessPathRangeCount.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.accessPathChunksIn.get())); + w.write(TDx); + w.write(TD); + w.write(Long.toString(stats.accessPathUnitsIn.get())); + w.write(TDx); + + /* + * Use the total elapsed time for the query (wall time). + */ + // solutions/ms + w.write(TD); + w.write(cdata(elapsed == 0 ? "0" : Long.toString(stats.unitsOut.get() + / elapsed))); + w.write(TDx); + // mutations/ms : @todo mutations/ms. + w.write(TD); + w.write(TDx); +// w.write(elapsed==0?0:stats.unitsOut.get()/elapsed); + + w.write("</tr\n>"); + + } + + /** + * Format the data as an (X)HTML table. The table will include a header + * which declares the columns, a detail row for each operator, and a summary + * row for the query as a whole. + * + * @param q + * The query. + * @param w + * Where to write the table. 
+ * + * @throws IOException + */ + public static void getXHTMLTable(final IRunningQuery q, final Writer w) + throws IOException { + + // the table start tag. + { + /* + * Summary for the table. + */ + final String summary = "Query details"; + + /* + * Format the entire table now that we have all the data on hand. + */ + + w.write("<table border=\"1\" summary=\"" + attrib(summary) + + "\"\n>"); + + } + + getTableHeaderXHTML(q, w); + + getTableRowsXHTML(q, w); + + } + + private static String cdata(String s) { + + return XHTMLRenderer.cdata(s); + + } + + private static String attrib(String s) { + + return XHTMLRenderer.attrib(s); + + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/query/PivotTable.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/query/PivotTable.java 2011-06-15 19:35:12 UTC (rev 4711) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/counters/query/PivotTable.java 2011-06-15 21:58:45 UTC (rev 4712) @@ -21,15 +21,15 @@ */ public class PivotTable { - protected static final Logger log = Logger.getLogger(PivotTable.class); + private static final Logger log = Logger.getLogger(PivotTable.class); /** - * The HistoryTable (converts counter heirarchy into regular table). + * The HistoryTable (converts counter hierarchy into regular table). */ public final HistoryTable src; /** - * The selected counters (redundent reference to {@link HistoryTable#a}. + * The selected counters (redundant reference to {@link HistoryTable#a}. 
*/ public final ICounter[] a; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-15 19:35:12 UTC (rev 4711) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-15 21:58:45 UTC (rev 4712) @@ -1,6 +1,7 @@ package com.bigdata.rdf.sail.webapp; import java.io.IOException; +import java.io.StringWriter; import java.util.Comparator; import java.util.Iterator; import java.util.List; @@ -14,7 +15,9 @@ import com.bigdata.bop.BOpUtility; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.engine.QueryLog; import com.bigdata.bop.fed.QueryEngineFactory; +import com.bigdata.rawstore.Bytes; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.util.HTMLUtility; @@ -70,6 +73,9 @@ final boolean showRunningQueries = req .getParameter("showRunningQueries") != null; + final boolean showRunningQueryStats = req + .getParameter("showRunningQueryStats") != null; + // Information about the KB (stats, properties). final boolean showKBInfo = req.getParameter("showKBInfo") != null; @@ -205,6 +211,10 @@ final Iterator<IRunningQuery> itr = ages.values().iterator(); + final StringWriter w = showRunningQueryStats ? new StringWriter( + Bytes.kilobyte32 * 8) + : null; + while (itr.hasNext()) { final IRunningQuery query = itr.next(); @@ -226,8 +236,24 @@ HTMLUtility.escapeForXHTML(BOpUtility .toString(query.getQuery()))); - } + if (showRunningQueryStats) { + + // Format as a table. + QueryLog.getXHTMLTable(query, w); + // Extract as String + final String s = w.getBuffer().toString(); + + // Add into the HTML document. + current.text(s); + + // Clear the buffer. 
+ w.getBuffer().setLength(0); + + } + + } // next IRunningQuery. + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-15 19:35:12 UTC (rev 4711) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-15 21:58:45 UTC (rev 4712) @@ -117,7 +117,7 @@ } } - class Node { + public class Node { boolean m_open = true; String m_tag; Node m_parent; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-16 11:58:31
|
Revision: 4713 http://bigdata.svn.sourceforge.net/bigdata/?rev=4713&view=rev Author: thompsonbry Date: 2011-06-16 11:58:24 +0000 (Thu, 16 Jun 2011) Log Message: ----------- Progress on [1] (Add an "EXPLAIN" feature to the NanoSparqlServer). [1] https://sourceforge.net/apps/trac/bigdata/ticket/331 Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-15 21:58:45 UTC (rev 4712) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-16 11:58:24 UTC (rev 4713) @@ -308,7 +308,16 @@ final IPredicate<?> pred = (IPredicate<?>) bop .getProperty(PipelineJoin.Annotations.PREDICATE); if (pred != null) { - sb.append(Integer.toString(pred.getId())); + try { + final int predId = pred.getId(); + sb.append(Integer.toString(predId)); + } catch (IllegalStateException ex) { + /* + * All predicates SHOULD have a bopId, but this catches the + * error if one does not. + */ + sb.append(NA); + } } } sb.append('\t'); @@ -469,8 +478,8 @@ w.write("<th>accessPathChunksIn</th>"); w.write("<th>accessPathUnitsIn</th>"); // dynamics based on elapsed wall clock time. 
- w.write("<th>solutions/ms</th>"); - w.write("<th>mutations/ms</th>"); + w.write("<th>");w.write(cdata("solutions/ms"));w.write("</th>"); + w.write("<th>");w.write(cdata("mutations/ms"));w.write("</th>"); // // cost model(s) // @@ -550,7 +559,7 @@ w.write(cdata(dateFormat.format(new Date(q.getDeadline())))); w.write(TDx); w.write(TD + cdata(Long.toString(elapsed)) + TDx); - w.write(TD + (serviceId == null ? NA : serviceId.toString()) + TDx); + w.write(TD); w.write(cdata(serviceId == null ? NA : serviceId.toString()));w.write(TDx); w.write(TD); if (cause != null) w.write(cause.getLocalizedMessage()); @@ -561,23 +570,41 @@ final BOp bop = bopIndex.get(bopId); // the operator. - w.write(TD); if (summary) { /* * The entire query (recursively). */ - w.write(cdata(BOpUtility.toString(q.getQuery())));//.replace('\n', ' '))); + final String queryStr = BOpUtility.toString(q.getQuery()); + w.write(TD); + w.write("<a href=\"#\" title=\""); + w.write(attrib(queryStr));// the entire query as a tooltip. + w.write("\"\n>"); + // A slice of the query inline on the page. + w.write(cdata(queryStr.substring(0/* begin */, Math.min(64, queryStr + .length())))); + w.write("..."); + w.write("</a>"); w.write(TDx); w.write(TD); w.write("total"); // summary line. + w.write(TDx); } else { // Otherwise show just this bop. - w.write(cdata(bopIndex.get(bopId).toString())); + final String queryStr = bopIndex.get(bopId).toString(); + w.write(TD); + w.write("<a href=\"#\" title=\""); + w.write(attrib(queryStr));// the entire query as a tooltip. + w.write("\"\n>"); + // A slice of the query inline on the page. + w.write(cdata(queryStr.substring(0/* begin */, Math.min(64, queryStr + .length())))); + w.write("..."); + w.write("</a>"); w.write(TDx); w.write(TD); - w.write(evalOrder); // eval order for this bop. + w.write(Integer.toString(evalOrder)); // eval order for this bop. 
+ w.write(TDx); } - w.write(TDx); w.write(TD); w.write(Integer.toString(bopId)); @@ -593,7 +620,16 @@ .getProperty(PipelineJoin.Annotations.PREDICATE); w.write(TD); if (pred != null) { - w.write(Integer.toString(pred.getId())); + try { + final int predId = pred.getId(); + w.write(Integer.toString(predId)); + } catch (IllegalStateException ex) { + /* + * All predicates SHOULD have a bopId, but this catches the + * error if one does not. + */ + w.write(cdata(NA)); + } } w.write(TDx); } @@ -635,7 +671,7 @@ // nvars w.write(TD); if (keyOrder != null) - w.write(pred.getVariableCount(keyOrder)); + w.write(Integer.toString(pred.getVariableCount(keyOrder))); w.write(TDx); // rangeCount @@ -677,7 +713,6 @@ } final long unitsIn = stats.unitsIn.get(); final long unitsOut = stats.unitsOut.get(); - w.write(TDx); w.write(TD); w.write(Integer.toString(fanIO)); w.write(TDx); @@ -700,7 +735,7 @@ w.write(Long.toString(stats.unitsOut.get())); w.write(TDx); w.write(TD); - w.write(unitsIn == 0 ? NA : Double.toString(unitsOut / (double) unitsIn)); + w.write(cdata(unitsIn == 0 ? NA : Double.toString(unitsOut / (double) unitsIn))); w.write(TDx); w.write(TD); w.write(Long.toString(stats.accessPathDups.get())); @@ -734,22 +769,38 @@ w.write("</tr\n>"); } - + /** - * Format the data as an (X)HTML table. The table will include a header - * which declares the columns, a detail row for each operator, and a summary - * row for the query as a whole. - * + * Write a summary row for the query. The table element, header, and footer + * must be written separately. * @param q - * The query. * @param w - * Where to write the table. 
- * + * @param sb * @throws IOException */ - public static void getXHTMLTable(final IRunningQuery q, final Writer w) - throws IOException { + static public void getSummaryRowXHTML(final IRunningQuery q, + final Writer w, final StringBuilder sb) throws IOException { + getTableRowXHTML(q, w, -1/* orderIndex */, q.getQuery().getId(), true/* summary */); + + } + + /** + * Format the data as an (X)HTML table. The table will include a header + * which declares the columns, a detail row for each operator (optional), + * and a summary row for the query as a whole. + * + * @param q + * The query. + * @param w + * Where to write the table. + * @param summaryOnly + * When <code>true</code> only the summary row will be written. + * @throws IOException + */ + public static void getTableXHTML(final IRunningQuery q, final Writer w, + final boolean summaryOnly) throws IOException { + // the table start tag. { /* @@ -768,8 +819,18 @@ getTableHeaderXHTML(q, w); - getTableRowsXHTML(q, w); - + if(summaryOnly) { + + getSummaryRowXHTML(q, w, sb); + + } else { + + getTableRowsXHTML(q, w); + + } + + w.write("</table\n>"); + } private static String cdata(String s) { @@ -783,4 +844,5 @@ return XHTMLRenderer.attrib(s); } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java 2011-06-15 21:58:45 UTC (rev 4712) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java 2011-06-16 11:58:24 UTC (rev 4713) @@ -107,6 +107,10 @@ sb.append("'"); break; + case '/': + sb.append("/"); + break; + case '&': sb.append("&"); break; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-15 21:58:45 UTC (rev 4712) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-16 11:58:24 UTC (rev 4713) @@ -21,6 +21,7 @@ import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.util.HTMLUtility; +import com.bigdata.util.InnerCause; /** * A status page for the service. @@ -73,9 +74,12 @@ final boolean showRunningQueries = req .getParameter("showRunningQueries") != null; - final boolean showRunningQueryStats = req - .getParameter("showRunningQueryStats") != null; + final boolean showRunningQueryStats = req + .getParameter("showRunningQueryStats") != null; + final boolean showRunningQueryDetailStats = req + .getParameter("showRunningQueryDetailStats") != null; + // Information about the KB (stats, properties). final boolean showKBInfo = req.getParameter("showKBInfo") != null; @@ -84,7 +88,15 @@ final HTMLBuilder doc = new HTMLBuilder(); - XMLBuilder.Node current = doc.root("html").node("body"); + XMLBuilder.Node current = doc.root("html"); + { + current = current.node("head"); + current.node("meta").attr("http-equiv", "Content-Type").attr( + "content", "text/html;charset=utf-8").close(); + current.node("title").text("bigdata®").close(); + current = current.close();// close the head. 
+ } + current = current.node("body"); current.node("p", "Accepted query count=" + getBigdataRDFContext().getQueryIdFactory().get()); @@ -177,7 +189,8 @@ } - if (showRunningQueries) { + if (showRunningQueries || showRunningQueryStats + || showRunningQueryDetailStats) { /* * Show the queries which are currently executing (actually running @@ -195,25 +208,41 @@ for (UUID queryId : queryIds) { - final IRunningQuery query = queryEngine - .getRunningQuery(queryId); + final IRunningQuery query; + try { + query = queryEngine.getRunningQuery(queryId); - if (query == null) { - // Already terminated. - continue; - } + if (query == null) { + + // Already terminated. + continue; + + } + + } catch (RuntimeException e) { + + if (InnerCause.isInnerCause(e, InterruptedException.class)) { + + // Already terminated. + continue; + + } + + throw new RuntimeException(e); + + } - ages.put(query.getElapsed(), query); + ages.put(query.getElapsed(), query); - } + } { final Iterator<IRunningQuery> itr = ages.values().iterator(); - final StringWriter w = showRunningQueryStats ? new StringWriter( - Bytes.kilobyte32 * 8) - : null; + final StringWriter w = showRunningQueryStats + || showRunningQueryDetailStats ? new StringWriter( + Bytes.kilobyte32 * 8) : null; while (itr.hasNext()) { @@ -224,23 +253,23 @@ continue; } - /* - * @todo The runstate and stats could be formatted into an - * HTML table ala QueryLog or RunState. 
- */ - current = current.node("p", - "age=" + query.getElapsed() + "ms").node("p", - "queryId=" + query.getQueryId()).node("p", - HTMLUtility.escapeForXHTML(query.toString())).node( - "p", - HTMLUtility.escapeForXHTML(BOpUtility - .toString(query.getQuery()))); + if (showRunningQueries) { + current = current.node("p", + "age=" + query.getElapsed() + "ms").node("p", + "queryId=" + query.getQueryId()).node("p", + HTMLUtility.escapeForXHTML(query.toString())) + .node( + "p", + HTMLUtility.escapeForXHTML(BOpUtility + .toString(query.getQuery()))); + } - if (showRunningQueryStats) { - - // Format as a table. - QueryLog.getXHTMLTable(query, w); + if (showRunningQueryStats || showRunningQueryDetailStats) { + // Format as a table. + QueryLog.getTableXHTML(query, w, + !showRunningQueryDetailStats); + // Extract as String final String s = w.getBuffer().toString(); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-15 21:58:45 UTC (rev 4712) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-16 11:58:24 UTC (rev 4713) @@ -44,6 +44,7 @@ */ public class XMLBuilder { + private final boolean xml; private final Writer m_writer; // private boolean m_pp = false; @@ -57,16 +58,7 @@ } public XMLBuilder(boolean xml, OutputStream outstr) throws IOException { - - if (outstr == null) { - m_writer = new StringWriter(); - } else { - m_writer = new OutputStreamWriter(outstr); - } - - if (xml) { - m_writer.write("<?xml version=\"1.0\"?>"); - } + this(xml,null/*encoding*/,outstr); } public XMLBuilder(boolean xml, String encoding) throws IOException { @@ -77,6 +69,8 @@ public XMLBuilder(boolean xml, String encoding, OutputStream outstr) throws IOException { + this.xml = xml; + if (outstr == null) { m_writer = new 
StringWriter(); } else { @@ -84,7 +78,16 @@ } if (xml) { - m_writer.write("<?xml version=\"1.0\" encoding=\"" + encoding + "\"?>"); + if(encoding!=null) { + m_writer.write("<?xml version=\"1.0\" encoding=\"" + encoding + "\"?>"); + } else { + m_writer.write("<?xml version=\"1.0\"?>"); + } + } else { + // TODO Note the optional encoding for use in a meta tag. + m_writer.write("<!DOCTYPE HTML PUBLIC"); + m_writer.write(" \"-//W3C//DTD HTML 4.01 Transitional//EN\""); + m_writer.write(" \"http://www.w3.org/TR/html4/loose.dtd\">"); } } @@ -167,11 +170,35 @@ return tmp.close(); } + /** + * Close the open element. + * @return The parent element. + * @throws IOException + */ public Node close() throws IOException { + return close(!xml); + } + + /** + * Close the open element. + * + * @param simpleEnd + * When <code>true</code> an open tag without a body will be + * closed by a single > symbol rather than the XML style + * &47;>. + * + * @return The parent element. + * @throws IOException + */ + public Node close(final boolean simpleEnd) throws IOException { assert(m_open); if (emptyBody()) { - m_writer.write("/>"); + if(simpleEnd) { + m_writer.write(">"); + } else { + m_writer.write("/>"); + } } else { m_writer.write("</" + m_tag + ">"); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-16 21:44:00
|
Revision: 4717 http://bigdata.svn.sourceforge.net/bigdata/?rev=4717&view=rev Author: thompsonbry Date: 2011-06-16 21:43:52 +0000 (Thu, 16 Jun 2011) Log Message: ----------- Added an "?explain" Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/NullOutputStream.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -1258,6 +1258,11 @@ * * @return The {@link AbstractRunningQuery} -or- <code>null</code> if there * is no query associated with that query identifier. + * + * @throws InterruptedException + * if the query halted normally. + * @throws RuntimeException + * if the query halted with an error. 
*/ public /*protected*/ AbstractRunningQuery getRunningQuery(final UUID queryId) { @@ -1452,5 +1457,89 @@ return runningQueries.keySet().toArray(new UUID[0]); } - + +// synchronized public void addListener(final IQueryEngineListener listener) { +// +// if (m_listeners == null) { +// +// m_listeners = new Vector<IQueryEngineListener>(); +// +// m_listeners.add(listener); +// +// } else { +// +// if (m_listeners.contains(listener)) { +// +// throw new IllegalStateException("Already registered: listener=" +// + listener); +// +// } +// +// m_listeners.add(listener); +// +// } +// +// } +// +// synchronized public void removeListener(IQueryEngineListener listener) { +// +// if( m_listeners == null ) { +// +// throw new IllegalStateException +// ( "Not registered: listener="+listener +// ); +// +// } +// +// if( ! m_listeners.remove( listener ) ) { +// +// throw new IllegalStateException +// ( "Not registered: listener="+listener +// ); +// +// } +// +// if(m_listeners.isEmpty()) { +// +// /* +// * Note: We can test whether or not listeners need to be notified +// * simply by testing m_listeners != null. +// */ +// +// m_listeners = null; +// +// } +// +// } +// +// // TODO Must deliver events in another thread! +// // TODO Must drop and drop any errors. +// // TODO Optimize with CopyOnWriteArray +// // Note: Security hole if we allow notification for queries w/o queryId. 
+// protected void fireQueryEndedEvent(final IRunningQuery query) { +// +// if (m_listeners == null) +// return; +// +// final IQueryEngineListener[] listeners = (IQueryEngineListener[]) m_listeners +// .toArray(new IQueryEngineListener[] {}); +// +// for (int i = 0; i < listeners.length; i++) { +// +// final IQueryEngineListener l = listeners[i]; +// +// l.queryEnded(query); +// +// } +// +// } +// +// private Vector<IQueryEngineListener> m_listeners; +// +// public interface IQueryEngineListener { +// +// void queryEnded(final IRunningQuery q); +// +// } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -58,6 +58,8 @@ public class QueryLog { private static final String NA = "N/A"; + private static final String TD = "<td>"; + private static final String TDx = "</td\n>"; protected static final transient Logger log = Logger .getLogger(QueryLog.class); @@ -429,9 +431,59 @@ } - public static void getTableHeaderXHTML(final IRunningQuery q, final Writer w) - throws IOException { + /** + * Format the data as an (X)HTML table. The table will include a header + * which declares the columns, a detail row for each operator (optional), + * and a summary row for the query as a whole. + * + * @param queryStr + * The original text of the query (e.g., a SPARQL query) + * (optional). + * @param q + * The {@link IRunningQuery}. + * @param w + * Where to write the table. + * @param summaryOnly + * When <code>true</code> only the summary row will be written. + * @param maxBopLength + * The maximum length to display from {@link BOp#toString()} and + * ZERO (0) to display everything. 
Data longer than this value + * will be accessible from a flyover, but not directly visible + * in the page. + * @throws IOException + */ + public static void getTableXHTML(final String queryStr, + final IRunningQuery q, final Writer w, final boolean summaryOnly, + final int maxBopLength) + throws IOException { + // the table start tag. + w.write("<table border=\"1\" summary=\"" + attrib("Query Statistics") + + "\"\n>"); + + getTableHeaderXHTML(q, w); + + if(summaryOnly) { + + getSummaryRowXHTML(queryStr, q, w, maxBopLength); + + } else { + + // Summary first. + getSummaryRowXHTML(queryStr, q, w, maxBopLength); + + // Then the detail rows. + getTableRowsXHTML(queryStr, q, w, maxBopLength); + + } + + w.write("</table\n>"); + + } + + public static void getTableHeaderXHTML(final IRunningQuery q, final Writer w) + throws IOException { + // header row. w.write("<tr\n>"); /* @@ -445,6 +497,7 @@ w.write("<th>elapsed</th>"); w.write("<th>serviceId</th>"); w.write("<th>cause</th>"); + w.write("<th>query</th>"); w.write("<th>bop</th>"); /* * Columns for each pipeline operator. @@ -487,18 +540,26 @@ } - /** - * Write the table rows. - * - * @param q - * The query. - * @param w - * Where to write the rows. - * - * @throws IOException - */ - public static void getTableRowsXHTML(final IRunningQuery q, final Writer w) - throws IOException { + /** + * Write the table rows. + * + * @param queryStr + * The query text (optional). + * @param q + * The {@link IRunningQuery}. + * @param w + * Where to write the rows. + * @param maxBopLength + * The maximum length to display from {@link BOp#toString()} and + * ZERO (0) to display everything. Data longer than this value + * will be accessible from a flyover, but not directly visible in + * the page. 
+ * + * @throws IOException + */ + public static void getTableRowsXHTML(final String queryStr, + final IRunningQuery q, final Writer w, final int maxBopLength) + throws IOException { final Integer[] order = BOpUtility.getEvaluationOrder(q.getQuery()); @@ -506,34 +567,41 @@ for (Integer bopId : order) { - getTableRowXHTML(q, w, orderIndex, bopId, false/* summary */); - + getTableRowXHTML(queryStr, q, w, orderIndex, bopId, + false/* summary */, maxBopLength); + orderIndex++; } } - private static final String TD = "<td>"; - private static final String TDx = "</td\n>"; - - /** - * Return a tabular representation of the query {@link RunState}. - * - * @param q - * The {@link IRunningQuery}. - * @param evalOrder - * The evaluation order for the operator. - * @param bopId - * The identifier for the operator. - * @param summary - * <code>true</code> iff the summary for the query should be - * written. - * @return The row of the table. - */ - static private void getTableRowXHTML(final IRunningQuery q, final Writer w, - final int evalOrder, final Integer bopId, final boolean summary) - throws IOException { + /** + * Return a tabular representation of the query {@link RunState}. + * + * @param queryStr + * The query text (optional). + * @param q + * The {@link IRunningQuery}. + * @param evalOrder + * The evaluation order for the operator. + * @param bopId + * The identifier for the operator. + * @param summary + * <code>true</code> iff the summary for the query should be + * written. + * @param maxBopLength + * The maximum length to display from {@link BOp#toString()} and + * ZERO (0) to display everything. Data longer than this value + * will be accessible from a flyover, but not directly visible + * in the page. + * + * @return The row of the table. 
+ */ + static private void getTableRowXHTML(final String queryStr, + final IRunningQuery q, final Writer w, final int evalOrder, + final Integer bopId, final boolean summary, final int maxBopLength) + throws IOException { final DateFormat dateFormat = DateFormat.getDateTimeInstance( DateFormat.FULL, DateFormat.FULL); @@ -564,24 +632,29 @@ if (cause != null) w.write(cause.getLocalizedMessage()); w.write(TDx); - + final Map<Integer, BOp> bopIndex = q.getBOpIndex(); final Map<Integer, BOpStats> statsMap = q.getStats(); final BOp bop = bopIndex.get(bopId); // the operator. if (summary) { + w.write(TD); + w.write(queryStr == null ? cdata(NA) : prettyPrintSparql(queryStr)); + w.write(TDx); /* * The entire query (recursively). */ - final String queryStr = BOpUtility.toString(q.getQuery()); + final String bopStr = BOpUtility.toString(q.getQuery()); w.write(TD); w.write("<a href=\"#\" title=\""); - w.write(attrib(queryStr));// the entire query as a tooltip. + w.write(attrib(bopStr));// the entire query as a tooltip. w.write("\"\n>"); - // A slice of the query inline on the page. - w.write(cdata(queryStr.substring(0/* begin */, Math.min(64, queryStr - .length())))); + // A slice of the query inline on the page or everything if + // maxBopLength<=0. + w.write(cdata(bopStr.substring(0/* begin */, + maxBopLength <= 0 ? bopStr.length() : Math.min( + maxBopLength, bopStr.length())))); w.write("..."); w.write("</a>"); w.write(TDx); @@ -589,14 +662,17 @@ w.write("total"); // summary line. w.write(TDx); } else { + w.write(TD); + w.write("...");// elide the original query string on a detail row. + w.write(TDx); // Otherwise show just this bop. - final String queryStr = bopIndex.get(bopId).toString(); + final String bopStr = bopIndex.get(bopId).toString(); w.write(TD); w.write("<a href=\"#\" title=\""); - w.write(attrib(queryStr));// the entire query as a tooltip. + w.write(attrib(bopStr));// the entire query as a tooltip. w.write("\"\n>"); // A slice of the query inline on the page. 
- w.write(cdata(queryStr.substring(0/* begin */, Math.min(64, queryStr + w.write(cdata(bopStr.substring(0/* begin */, Math.min(64, bopStr .length())))); w.write("..."); w.write("</a>"); @@ -773,66 +849,25 @@ /** * Write a summary row for the query. The table element, header, and footer * must be written separately. - * @param q - * @param w - * @param sb + * @param queryStr The original query text (optional). + * @param q The {@link IRunningQuery}. + * @param w Where to write the data. + * @param maxBopLength + * The maximum length to display from {@link BOp#toString()} and + * ZERO (0) to display everything. Data longer than this value + * will be accessible from a flyover, but not directly visible + * in the page. * @throws IOException */ - static public void getSummaryRowXHTML(final IRunningQuery q, - final Writer w, final StringBuilder sb) throws IOException { + static public void getSummaryRowXHTML(final String queryStr, + final IRunningQuery q, final Writer w, final int maxBopLength) + throws IOException { - getTableRowXHTML(q, w, -1/* orderIndex */, q.getQuery().getId(), true/* summary */); + getTableRowXHTML(queryStr, q, w, -1/* orderIndex */, q.getQuery() + .getId(), true/* summary */, maxBopLength); } - /** - * Format the data as an (X)HTML table. The table will include a header - * which declares the columns, a detail row for each operator (optional), - * and a summary row for the query as a whole. - * - * @param q - * The query. - * @param w - * Where to write the table. - * @param summaryOnly - * When <code>true</code> only the summary row will be written. - * @throws IOException - */ - public static void getTableXHTML(final IRunningQuery q, final Writer w, - final boolean summaryOnly) throws IOException { - - // the table start tag. - { - /* - * Summary for the table. - */ - final String summary = "Query details"; - - /* - * Format the entire table now that we have all the data on hand. 
- */ - - w.write("<table border=\"1\" summary=\"" + attrib(summary) - + "\"\n>"); - - } - - getTableHeaderXHTML(q, w); - - if(summaryOnly) { - - getSummaryRowXHTML(q, w, sb); - - } else { - - getTableRowsXHTML(q, w); - - } - - w.write("</table\n>"); - - } - private static String cdata(String s) { return XHTMLRenderer.cdata(s); @@ -845,4 +880,31 @@ } + private static String prettyPrintSparql(String s) { + +// return cdata(s); +// +// } + + s = s.replace("\n", " "); + + s = s.replace("PREFIX", "\nPREFIX"); + s = s.replace("select", "\nselect"); + s = s.replace("where", "\nwhere"); + s = s.replace("{","{\n"); + s = s.replace("}","\n}"); + s = s.replace(" ."," .\n"); // TODO Must not match within quotes (literals) or <> (URIs). +// s = s.replace("||","||\n"); +// s = s.replace("&&","&&\n"); + + s = cdata(s); + + s = s.replace("\n", "<br>"); + +// return "<pre>"+s+"</pre>"; + + return s; + + } + } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/NullOutputStream.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/NullOutputStream.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/NullOutputStream.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -0,0 +1,68 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 26, 2011 + */ + +package com.bigdata.io; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * An {@link OutputStream} which discards anything written on it. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: NullOutputStream.java 4582 2011-05-31 19:12:53Z thompsonbry $ + */ +public class NullOutputStream extends OutputStream { + + private boolean open = true; + + public NullOutputStream() { + } + + @Override + final public void write(int b) throws IOException { + if (!open) + throw new IOException(); + } + + @Override + final public void write(byte[] b) throws IOException { + if (!open) + throw new IOException(); + } + + @Override + final public void write(byte[] b, int len, int off) throws IOException { + if (!open) + throw new IOException(); + } + + final public void close() { + open = false; + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/HTMLUtility.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -78,7 +78,7 @@ * </p> */ - public static String escapeForXHTML(String s) { + public static String escapeForXHTML(final String s) { if( s == null ) { @@ -86,16 +86,16 @@ } - int len = s.length(); + final int len = s.length(); if (len == 0) return s; - StringBuffer sb = new StringBuffer(len + 20); + final StringBuffer sb = new StringBuffer(len + 20); for (int i = 0; i < len; i++) { - char ch = s.charAt(i); + final char ch = s.charAt(i); switch (ch) { Modified: 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -246,7 +246,8 @@ // Fully formed and encoded URL @todo use */* for ASK. final String urlString = opts.serviceURL + "?query=" - + URLEncoder.encode(opts.queryStr, "UTF-8") + + URLEncoder.encode(opts.queryStr, "UTF-8")// + + (opts.explain?"&explain=":"")// + (opts.defaultGraphUri == null ? "" : ("&default-graph-uri=" + URLEncoder.encode( opts.defaultGraphUri, "UTF-8"))); @@ -324,7 +325,7 @@ log.debug("Status Line: " + conn.getResponseMessage()); } - if (opts.showResults) { + if(opts.explain || opts.showResults) { // Write the response body onto stdout. showResults(conn); @@ -756,6 +757,8 @@ public String baseURI; /** The default graph URI (optional). */ public String defaultGraphUri = null; + /** When true, request an explanation for the query. */ + public boolean explain = false; /** The connection timeout (ms). 
*/ public int timeout = DEFAULT_TIMEOUT; /** @@ -1291,6 +1294,10 @@ opts.showQuery = true; + } else if (arg.equals("-explain")) { + + opts.explain = true; + } else if (arg.equals("-showParseTree")) { opts.showParseTree = true; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -43,9 +43,11 @@ import com.bigdata.bop.BufferAnnotations; import com.bigdata.bop.IPredicate; +import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.btree.IndexMetadata; +import com.bigdata.io.NullOutputStream; import com.bigdata.journal.IBufferStrategy; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITx; @@ -55,9 +57,11 @@ import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSailBooleanQuery; import com.bigdata.rdf.sail.BigdataSailGraphQuery; +import com.bigdata.rdf.sail.BigdataSailQuery; import com.bigdata.rdf.sail.BigdataSailRepository; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.bigdata.rdf.sail.BigdataSailTupleQuery; +import com.bigdata.rdf.sail.QueryHints; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.relation.AbstractResource; import com.bigdata.relation.RelationSchema; @@ -78,8 +82,14 @@ static private final transient Logger log = Logger .getLogger(BigdataRDFContext.class); + /** + * URL Query parameter used to request the explanation of a query rather + * than its results. 
+ */ + protected static final String EXPLAIN = "explain"; + private final SparqlEndpointConfig m_config; - private final QueryParser m_engine; + private final QueryParser m_queryParser; /** * A thread pool for running accepted queries against the @@ -128,7 +138,7 @@ m_config = config; // used to parse queries. - m_engine = new SPARQLParserFactory().getParser(); + m_queryParser = new SPARQLParserFactory().getParser(); if (config.queryThreadPoolSize == 0) { @@ -282,17 +292,33 @@ */ protected final String baseURI; +// /** +// * Set to the timestamp as reported by {@link System#nanoTime()} when +// * the query begins to execute. +// */ +// final AtomicLong beginTime = new AtomicLong(); + /** * The queryId as assigned by the SPARQL end point (rather than the * {@link QueryEngine}). */ protected final Long queryId; - - /** - * The queryId used by the {@link QueryEngine}. - */ - protected final UUID queryId2; + /** + * The queryId used by the {@link QueryEngine}. If the application has + * not specified this using {@link QueryHints#QUERYID} then this is + * assigned and set on the query using {@link QueryHints#QUERYID}. This + * decision can not be made until we parse the query so the behavior is + * handled by the subclasses. + */ + volatile protected UUID queryId2; + + /** + * When true, provide an "explanation" for the query (query plan, query + * evaluation statistics) rather than the results of the query. + */ + final boolean explain; + /** * * @param namespace @@ -354,9 +380,10 @@ this.charset = charset; this.fileExt = fileExt; this.req = req; + this.explain = req.getParameter(EXPLAIN) != null; this.os = os; this.queryId = Long.valueOf(m_queryIdFactory.incrementAndGet()); - this.queryId2 = UUID.randomUUID(); +// this.queryId2 = UUID.randomUUID(); /* * Setup the baseURI for this request. It will be set to the @@ -408,6 +435,39 @@ } + /** + * Sets {@link #queryId2} to the {@link UUID} which will be associated + * with the {@link IRunningQuery}. 
If {@link QueryHints#QUERYID} has + * already been used by the application to specify the {@link UUID} then + * that {@link UUID} is noted. Otherwise, a random {@link UUID} is + * generated and assigned to the query by binding it on the query hints. + * <p> + * Note: This is also responsible for noticing the time at which the + * query begins to execute and storing the {@link RunningQuery} in the + * {@link #m_queries} map. + * + * @param query + * The query. + */ + protected void setQueryId(final BigdataSailQuery query) { + assert queryId2 == null; // precondition. + // Note the begin time for the query. + final long begin = System.nanoTime(); + // Figure out the effective UUID under which the query will run. + final String queryIdStr = query.getQueryHints().getProperty( + QueryHints.QUERYID); + if (queryIdStr == null) { + queryId2 = UUID.randomUUID(); + query.getQueryHints().setProperty(QueryHints.QUERYID, + queryId2.toString()); + } else { + queryId2 = UUID.fromString(queryIdStr); + } + // Stuff it in the map of running queries. + m_queries.put(queryId, new RunningQuery(queryId.longValue(), + queryId2, queryStr, begin)); + } + /** * Execute the query. * @@ -422,28 +482,40 @@ OutputStream os) throws Exception; final public Void call() throws Exception { - final long begin = System.nanoTime(); BigdataSailRepositoryConnection cxn = null; try { cxn = getQueryConnection(namespace, timestamp); - m_queries.put(queryId, new RunningQuery(queryId.longValue(), - queryId2, queryStr, begin)); if(log.isTraceEnabled()) log.trace("Query running..."); // try { - doQuery(cxn, os); + if(explain) { + /* + * The data goes to a bit bucket and we send an + * "explanation" of the query evaluation back to the caller. + * + * Note: The trick is how to get hold of the IRunningQuery + * object. It is created deep within the Sail when we + * finally submit a query plan to the query engine. 
We have + * the queryId (on queryId2), so we can look up the + * IRunningQuery in [m_queries] while it is running, but + * once it is terminated the IRunningQuery will have been + * cleared from the internal map maintained by the + * QueryEngine, at which point we can not longer find it. + */ + doQuery(cxn, new NullOutputStream()); + } else { + doQuery(cxn, os); + os.flush(); + os.close(); + } + if(log.isTraceEnabled()) + log.trace("Query done."); // } catch(Throwable t) { // /* // * Log the query and the exception together. // */ // log.error(t.getLocalizedMessage() + ":\n" + queryStr, t); // } - if(log.isTraceEnabled()) - log.trace("Query done - flushing results."); - os.flush(); - os.close(); - if(log.isTraceEnabled()) - log.trace("Query done - output stream closed."); return null; // } catch (Throwable t) { // // launder and rethrow the exception. @@ -493,6 +565,9 @@ final BigdataSailBooleanQuery query = cxn.prepareBooleanQuery( QueryLanguage.SPARQL, queryStr, baseURI); + // Figure out the UUID under which the query will execute. + setQueryId(query); + // Override query if data set protocol parameters were used. overrideDataset(query); @@ -534,6 +609,9 @@ final BigdataSailTupleQuery query = cxn.prepareTupleQuery( QueryLanguage.SPARQL, queryStr, baseURI); + // Figure out the UUID under which the query will execute. + setQueryId(query); + // Override query if data set protocol parameters were used. overrideDataset(query); @@ -574,6 +652,9 @@ final BigdataSailGraphQuery query = cxn.prepareGraphQuery( QueryLanguage.SPARQL, queryStr, baseURI); + // Figure out the UUID under which the query will execute. + setQueryId(query); + // Override query if data set protocol parameters were used. overrideDataset(query); @@ -647,13 +728,19 @@ * Therefore, we are in the position of having to parse the query here * and then again when it is executed.] 
*/ - final ParsedQuery q = m_engine.parseQuery(queryStr, null/*baseURI*/); + final ParsedQuery q = m_queryParser.parseQuery(queryStr, null/*baseURI*/); if(log.isDebugEnabled()) log.debug(q.toString()); final QueryType queryType = QueryType.fromQuery(queryStr); + /* + * When true, provide an "explanation" for the query (query plan, query + * evaluation statistics) rather than the results of the query. + */ + final boolean explain = req.getParameter(EXPLAIN) != null; + /* * CONNEG for the MIME type. * @@ -667,7 +754,8 @@ * has some stuff related to generating Accept headers in their * RDFFormat which could bear some more looking into in this regard.) */ - final String acceptStr = req.getHeader("Accept"); + final String acceptStr = explain ? "text/html" : req + .getHeader("Accept"); switch (queryType) { case ASK: { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -1,15 +1,27 @@ package com.bigdata.rdf.sail.webapp; import java.io.IOException; +import java.io.OutputStream; +import java.io.StringWriter; +import java.util.UUID; import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.log4j.Logger; +import com.bigdata.bop.engine.IRunningQuery; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.engine.QueryLog; +import com.bigdata.bop.fed.QueryEngineFactory; +import com.bigdata.journal.IIndexManager; import com.bigdata.journal.TimestampUtility; +import com.bigdata.rawstore.Bytes; import 
com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; +import com.bigdata.util.InnerCause; /** * SPARQL query handler for GET or POST verbs. @@ -80,6 +92,7 @@ final long timestamp = getTimestamp(req); + // The SPARQL query. final String queryStr = req.getParameter("query"); if(queryStr == null) { @@ -101,10 +114,15 @@ */ try { - final AbstractQueryTask queryTask = getBigdataRDFContext() - .getQueryTask(namespace, timestamp, queryStr, req, - resp.getOutputStream()); + final OutputStream os = resp.getOutputStream(); + + final BigdataRDFContext context = getBigdataRDFContext(); + + final boolean explain = req.getParameter(BigdataRDFContext.EXPLAIN) != null; + final AbstractQueryTask queryTask = context.getQueryTask(namespace, + timestamp, queryStr, req, os); + final FutureTask<Void> ft = new FutureTask<Void>(queryTask); if (log.isTraceEnabled()) @@ -170,12 +188,17 @@ } - // Begin executing the query (asynchronous) - getBigdataRDFContext().queryService.execute(ft); - - // wait for the Future. - ft.get(); + // Begin executing the query (asynchronous) + getBigdataRDFContext().queryService.execute(ft); + if (explain) { + // Send an explanation instead of the query results. + explainQuery(queryStr, queryTask, ft, os); + } else { + // Wait for the Future. + ft.get(); + } + } catch (Throwable e) { try { throw BigdataRDFServlet.launderThrowable(e, resp, queryStr); @@ -186,5 +209,124 @@ } + /** + * Sends an explanation for the query rather than the query results. The + * query is still run, but the query statistics are reported instead of the + * query results. + * + * @param queryStr + * @param queryTask + * @param ft + * @param os + * @throws Exception + */ + private void explainQuery(final String queryStr, + final AbstractQueryTask queryTask, final FutureTask<Void> ft, + final OutputStream os) throws Exception { + + /* + * Spin until either we have the IRunningQuery or the Future of the + * query is done (in which case we won't get it). 
+ */ + if(log.isDebugEnabled()) + log.debug("Will build explanation"); + UUID queryId2 = null; + IRunningQuery q = null; + while (!ft.isDone() && queryId2 == null) { + try { + // Wait a bit for queryId2 to be assigned. + ft.get(1/* timeout */, TimeUnit.MILLISECONDS); + } catch(TimeoutException ex) { + // Ignore. + } + if (queryTask.queryId2 != null) { + queryId2 = queryTask.queryId2; + break; + } + } + if (queryId2 != null) { + if(log.isDebugEnabled()) + log.debug("Resolving IRunningQuery: queryId2=" + queryId2); + final IIndexManager indexManager = getBigdataRDFContext() + .getIndexManager(); + final QueryEngine queryEngine = QueryEngineFactory + .getQueryController(indexManager); + while (!ft.isDone() && q == null) { + try { + // Wait a bit for the IRunningQuery to *start*. + ft.get(1/* timeout */, TimeUnit.MILLISECONDS); + } catch(TimeoutException ex) { + // Ignore. + } + // Resolve the IRunningQuery. + try { + q = queryEngine.getRunningQuery(queryId2); + } catch (RuntimeException ex) { + if (InnerCause.isInnerCause(ex, InterruptedException.class)) { + // Ignore. Query terminated normally, but we don't have + // it. + } else { + // Ignore. Query has error, but we will get err from + // Future. + } + } + } + if (q != null) + if(log.isDebugEnabled()) + log.debug("Resolved IRunningQuery: query=" + q); + } + + // wait for the Future (will toss any exceptions). + ft.get(); + + /* + * Build the explanation. + */ + final HTMLBuilder doc = new HTMLBuilder(); + { + + XMLBuilder.Node current = doc.root("html"); + { + current = current.node("head"); + current.node("meta").attr("http-equiv", "Content-Type") + .attr("content", "text/html;charset=utf-8").close(); + current.node("title").text("bigdata®").close(); + current = current.close();// close the head. + } + current = current.node("body"); + + if (q != null) { + // Format query statistics as a table. 
+ final StringWriter w = new StringWriter( + 8 * Bytes.kilobyte32); + QueryLog.getTableXHTML(queryStr, q, w, + true/* showQueryDetails */, 64/* maxBopLength */); + + // Add into the HTML document. + current.text(w.toString()); + } else { + current.node("p", + "Query ran too quickly to collect statistics."); + } + doc.closeAll(current); + } + + /* + * Send the response. + * + * TODO It would be better to stream this rather than buffer it in + * RAM. That also opens up the opportunity for real-time updates for + * long-running (analytic) queries, incremental information from the + * runtime query optimizer, etc. + */ + if(log.isDebugEnabled()) + log.debug("Sending explanation."); + os.write(doc.toString().getBytes("UTF-8")); + os.flush(); + os.close(); + if(log.isDebugEnabled()) + log.debug("Sent explanation."); + + } + } - Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -4,6 +4,7 @@ import java.io.StringWriter; import java.util.Comparator; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -12,7 +13,6 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import com.bigdata.bop.BOpUtility; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.QueryLog; @@ -39,47 +39,56 @@ // static private final transient Logger log = Logger // .getLogger(StatusServlet.class); - /** - * <p> - * A status page. 
Options include: - * <dl> - * <dt>showQueries</dt> - * <dd>List SPARQL queries accepted by the SPARQL end point. The queries are - * listed in order of decreasing elapsed time.</dd> - * <dt>showRunningQueries</dt> - * <dd>List SPARQL queries accepted by the SPARQL end point which are - * currently executing on the {@link QueryEngine}. The queries are listed in - * order of decreasing elapsed time.</dd> - * <dt>showKBInfo</dt> - * <dd>Show some information about the {@link AbstractTripleStore} instance - * being served by this SPARQL end point.</dd> - * <dt>showNamespaces</dt> - * <dd>List the namespaces for the registered {@link AbstractTripleStore}s.</dd> - * </dl> - * </p> - * - * @todo This status page combines information about the addressed KB and - * the backing store. Those items should be split out onto different - * status requests. One should be at a URI for the database. The other - * should be at the URI of the SPARQL end point. - */ + /** + * <p> + * A status page. Options include: + * <dl> + * <dt>showQueries</dt> + * <dd>List SPARQL queries accepted by the SPARQL end point which are + * currently executing on the {@link QueryEngine}. The queries are listed in + * order of decreasing elapsed time. You can also specify + * <code>showQueries=details</code> to get a detailed breakdown of the query + * execution.</dd> + * <dt>showKBInfo</dt> + * <dd>Show some information about the {@link AbstractTripleStore} instance + * being served by this SPARQL end point.</dd> + * <dt>showNamespaces</dt> + * <dd>List the namespaces for the registered {@link AbstractTripleStore}s.</dd> + * </dl> + * </p> + * + * @todo This status page combines information about the addressed KB and + * the backing store. Those items should be split out onto different + * status requests. One should be at a URI for the database. The other + * should be at the URI of the SPARQL end point. 
+ */ @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - // SPARQL queries accepted by the SPARQL end point. + // IRunningQuery objects currently running on the query controller. final boolean showQueries = req.getParameter("showQueries") != null; - // IRunningQuery objects currently running on the query controller. - final boolean showRunningQueries = req - .getParameter("showRunningQueries") != null; + boolean showQueryDetails = false; + if (showQueries) { + for (String tmp : req.getParameterValues("showQueries")) { + if (tmp.equals("details")) + showQueryDetails = true; + } + } - final boolean showRunningQueryStats = req - .getParameter("showRunningQueryStats") != null; + /* + * The maximum inline length of BOp#toString() visible on the page. The + * entire thing is accessible via the title attribute (a flyover). Use + * ZERO (0) to see everything. + */ + int maxBopLength = 64; + if (req.getParameter("maxBopLength") != null) { + maxBopLength = Integer.valueOf(req.getParameter("maxBopLength")); + if (maxBopLength < 0) + maxBopLength = 0; + } - final boolean showRunningQueryDetailStats = req - .getParameter("showRunningQueryDetailStats") != null; - // Information about the KB (stats, properties). final boolean showKBInfo = req.getParameter("showKBInfo") != null; @@ -136,157 +145,136 @@ .getCounters().toString())); } + + if (!showQueries) { + // Nothing more to do. + return; + } - if (showQueries) { + // Marker timestamp used to report the age of queries. + final long now = System.nanoTime(); - /* - * Show the queries which are currently executing (accepted by the - * NanoSparqlServer). - */ + /* + * Map providing a cross walk from the QueryEngine's + * IRunningQuery.getQueryId() to NanoSparqlServer's + * RunningQuery.queryId. 
+ */ + final Map<UUID,RunningQuery> crosswalkMap = new LinkedHashMap<UUID, RunningQuery>(); - final long now = System.nanoTime(); + /* + * Map providing the accepted RunningQuery objects in descending order + * by their elapsed run time. + */ + final TreeMap<Long, RunningQuery> acceptedQueryAge = newQueryMap(); - final TreeMap<Long, RunningQuery> ages = newQueryMap(); + { - { + final Iterator<RunningQuery> itr = getBigdataRDFContext() + .getQueries().values().iterator(); - final Iterator<RunningQuery> itr = getBigdataRDFContext() - .getQueries().values().iterator(); + while (itr.hasNext()) { - while (itr.hasNext()) { + final RunningQuery query = itr.next(); - final RunningQuery query = itr.next(); + crosswalkMap.put(query.queryId2, query); - final long age = now - query.begin; + final long age = now - query.begin; - ages.put(age, query); + acceptedQueryAge.put(age, query); - } + } - } + } - { + /* + * Show the queries which are currently executing (actually running on + * the QueryEngine). + */ - final Iterator<RunningQuery> itr = ages.values().iterator(); + final QueryEngine queryEngine = (QueryEngine) QueryEngineFactory + .getQueryController(getIndexManager()); - while (itr.hasNext()) { + final UUID[] queryIds = queryEngine.getRunningQueries(); - final RunningQuery query = (RunningQuery) itr.next(); + // final long now = System.nanoTime(); - final long age = now - query.begin; + /* + * Map providing the QueryEngine's IRunningQuery objects in order by + * descending elapsed evaluation time. 
+ */ + final TreeMap<Long, IRunningQuery> runningQueryAge = newQueryMap(); - current = current.node( - "p", - "age=" - + java.util.concurrent.TimeUnit.NANOSECONDS - .toMillis(age) + "ms, queryId=" - + query.queryId + "\n").node("p", - HTMLUtility.escapeForXHTML(query.query) + "\n"); + for (UUID queryId : queryIds) { - } + final IRunningQuery query; + try { - } + query = queryEngine.getRunningQuery(queryId); - } + if (query == null) { - if (showRunningQueries || showRunningQueryStats - || showRunningQueryDetailStats) { + // Already terminated. + continue; - /* - * Show the queries which are currently executing (actually running - * on the QueryEngine). - */ + } - final QueryEngine queryEngine = (QueryEngine) QueryEngineFactory - .getQueryController(getIndexManager()); + } catch (RuntimeException e) { - final UUID[] queryIds = queryEngine.getRunningQueries(); + if (InnerCause.isInnerCause(e, InterruptedException.class)) { - // final long now = System.nanoTime(); + // Already terminated. + continue; - final TreeMap<Long, IRunningQuery> ages = newQueryMap(); - - for (UUID queryId : queryIds) { - - final IRunningQuery query; - try { - query = queryEngine.getRunningQuery(queryId); - - if (query == null) { - - // Already terminated. - continue; - - } - - } catch (RuntimeException e) { - - if (InnerCause.isInnerCause(e, InterruptedException.class)) { - - // Already terminated. - continue; - - } - - throw new RuntimeException(e); - } - ages.put(query.getElapsed(), query); + throw new RuntimeException(e); } - { + runningQueryAge.put(query.getElapsed(), query); - final Iterator<IRunningQuery> itr = ages.values().iterator(); + } - final StringWriter w = showRunningQueryStats - || showRunningQueryDetailStats ? 
new StringWriter( - Bytes.kilobyte32 * 8) : null; - - while (itr.hasNext()) { + { - final IRunningQuery query = itr.next(); + final Iterator<IRunningQuery> itr = runningQueryAge.values() + .iterator(); - if (query.isDone() && query.getCause() != null) { - // Already terminated (normal completion). - continue; - } + final StringWriter w = new StringWriter(Bytes.kilobyte32 * 8); - if (showRunningQueries) { - current = current.node("p", - "age=" + query.getElapsed() + "ms").node("p", - "queryId=" + query.getQueryId()).node("p", - HTMLUtility.escapeForXHTML(query.toString())) - .node( - "p", - HTMLUtility.escapeForXHTML(BOpUtility - .toString(query.getQuery()))); - } + while (itr.hasNext()) { - if (showRunningQueryStats || showRunningQueryDetailStats) { + final IRunningQuery query = itr.next(); - // Format as a table. - QueryLog.getTableXHTML(query, w, - !showRunningQueryDetailStats); + if (query.isDone() && query.getCause() != null) { + // Already terminated (normal completion). + continue; + } - // Extract as String - final String s = w.getBuffer().toString(); + // Lookup the NanoSparqlServer's RunningQuery object. + final RunningQuery acceptedQuery = crosswalkMap.get(query + .getQueryId()); - // Add into the HTML document. - current.text(s); + final String queryStr = acceptedQuery == null ? "N/A" + : acceptedQuery.query; - // Clear the buffer. - w.getBuffer().setLength(0); + // Format as a table. + QueryLog.getTableXHTML(queryStr, query, w, !showQueryDetails, + maxBopLength); - } - - } // next IRunningQuery. + // Extract as String + final String s = w.getBuffer().toString(); - } + // Add into the HTML document. + current.text(s); - } + // Clear the buffer. + w.getBuffer().setLength(0); + } // next IRunningQuery. 
+ + } + doc.closeAll(current); buildResponse(resp, HTTP_OK, MIME_TEXT_HTML, doc.toString()); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-16 16:13:05 UTC (rev 4716) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/XMLBuilder.java 2011-06-16 21:43:52 UTC (rev 4717) @@ -169,9 +169,10 @@ return tmp.close(); } - + /** * Close the open element. + * * @return The parent element. * @throws IOException */ @@ -185,7 +186,7 @@ * @param simpleEnd * When <code>true</code> an open tag without a body will be * closed by a single > symbol rather than the XML style - * &47;>. + * />. * * @return The parent element. * @throws IOException @@ -200,7 +201,7 @@ m_writer.write("/>"); } } else { - m_writer.write("</" + m_tag + ">"); + m_writer.write("</" + m_tag + "\n>"); } m_open = false; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-17 12:30:59
|
Revision: 4719 http://bigdata.svn.sourceforge.net/bigdata/?rev=4719&view=rev Author: thompsonbry Date: 2011-06-17 12:30:51 +0000 (Fri, 17 Jun 2011) Log Message: ----------- javadoc on WriteCache (removed mentions of the defunct latch). test harness modifications to TestMROWTransactions. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-17 11:45:06 UTC (rev 4718) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-17 12:30:51 UTC (rev 4719) @@ -129,11 +129,11 @@ * prevents {@link #acquire()} during critical sections such as * {@link #flush(boolean, long, TimeUnit)}, {@link #reset()}, and * {@link #close()}. - * <p> - * Note: To avoid lock ordering problems, acquire the read lock before you - * increment the latch and acquire the write lock before you await the - * latch. */ +// * <p> +// * Note: To avoid lock ordering problems, acquire the read lock before you +// * increment the latch and acquire the write lock before you await the +// * latch. final private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); /** @@ -1287,11 +1287,11 @@ * reuse it to receive more writes. * <p> * Note: Keep private unless strong need for override since you can not call - * this method without holding the write lock and having the {@link #latch} - * at zero. + * this method without holding the write lock * * @param tmp */ + // ... and having the {@link #latch} at zero. 
private void _resetState(final ByteBuffer tmp) { if (tmp == null) @@ -1805,7 +1805,7 @@ * with a full buffer where there is not room for the dummy "remove" prefix. * Whilst we could of course ensure that a buffer with less than the space * required for prefixWrites should be moved immediately to the dirtlyList, - * there would still exist the possibillity that the clear could be + * there would still exist the possibility that the clear could be * requested on a buffer already on the dirtyList. It looks like this should * not matter, since each buffer update can be considered as an atomic * update even if the set of writes are individually not atomic (the updates @@ -1826,7 +1826,7 @@ * @throws InterruptedException * @throws IllegalStateException */ - public void clearAddrMap(final long addr) throws IllegalStateException, InterruptedException { + /*public*/ void clearAddrMap(final long addr) throws IllegalStateException, InterruptedException { final RecordMetadata entry = recordMap.remove(addr); if (prefixWrites) { // final int pos = entry.bufferOffset - 12; @@ -1876,7 +1876,7 @@ } protected void registerWriteStatus(long offset, int length, char action) { - // NOP to be overidden for debug if required + // NOP to be overridden for debug if required } boolean m_written = false; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2011-06-17 11:45:06 UTC (rev 4718) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2011-06-17 12:30:51 UTC (rev 4719) @@ -6,7 +6,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import 
org.openrdf.model.BNode; @@ -18,8 +17,6 @@ import org.openrdf.model.impl.ContextStatementImpl; import org.openrdf.model.impl.StatementImpl; import org.openrdf.model.impl.URIImpl; -import org.openrdf.repository.RepositoryConnection; -import org.openrdf.repository.RepositoryResult; import com.bigdata.counters.CAT; import com.bigdata.journal.BufferMode; @@ -31,6 +28,7 @@ import com.bigdata.rdf.store.BD; import com.bigdata.rdf.store.BigdataStatementIterator; import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.DaemonThreadFactory; /** @@ -77,6 +75,14 @@ } + protected void setUp() throws Exception { + super.setUp(); + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + private URI uri(String s) { return new URIImpl(BD.NAMESPACE + s); } @@ -93,110 +99,110 @@ return new ContextStatementImpl(s, p, o, c); } - public void test_multiple_transaction() throws Exception { +// public void test_multiple_transaction() throws Exception { +// +// final int nthreads = 10; // +// final int nuris = 2000; // +// final int npreds = 50; // +// final Random r = new Random(); +// +// ExecutorService writers = Executors.newSingleThreadExecutor(DaemonThreadFactory.defaultThreadFactory()); +// ExecutorService readers = Executors.newFixedThreadPool(nthreads, DaemonThreadFactory.defaultThreadFactory()); +// +// final BigdataSail sail = getSail(); +// final URI[] subs = new URI[nuris]; +// for (int i = 0; i < nuris; i++) { +// subs[i] = uri("uri:" + i); +// } +// final URI[] preds = new URI[npreds]; +// for (int i = 0; i < npreds; i++) { +// preds[i] = uri("pred:" + i); +// } +// final AtomicInteger writes = new AtomicInteger(); +// final AtomicInteger reads = new AtomicInteger(); +// try { +// sail.initialize(); +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// +// // Writer task adds nwrites statements then commits +// class Writer implements Callable<Long> { +// final int nwrites; 
+// +// Writer(final int nwrites) { +// this.nwrites = nwrites; +// } +// +// public Long call() throws Exception { +// final RepositoryConnection tx1 = repo.getReadWriteConnection(); +// try { +// tx1.setAutoCommit(false); +// +// for (int i = 0; i < nwrites; i++) { +// tx1.add(stmt(subs[r.nextInt(500)], preds[r.nextInt(20)], subs[r.nextInt(500)])); +// writes.incrementAndGet(); +// } +// tx1.commit(); +// +// } finally { +// tx1.close(); +// } +// +// return null; +// } +// +// } +// +// // ReaderTask makes nreads and closes +// class Reader implements Callable<Long> { +// final int nreads; +// +// Reader(final int nwrites) { +// this.nreads = nwrites; +// } +// +// public Long call() throws Exception { +// final RepositoryConnection tx1 = repo.getReadOnlyConnection(); +// try { +// +// for (int i = 0; i < nreads; i++) { +// RepositoryResult<Statement> stats = tx1.getStatements(subs[r.nextInt(500)], null, null, true); +// while (stats.hasNext()) { +// stats.next(); +// reads.incrementAndGet(); +// } +// } +// +// } finally { +// tx1.close(); +// } +// +// return null; +// } +// +// } +// +// // let's schedule a few writers and readers +// for (int i = 0; i < 500; i++) { +// writers.submit(new Writer(500)); +// for (int rdrs = 0; rdrs < 20; rdrs++) { +// readers.submit(new Reader(50)); +// } +// } +// +// Thread.sleep(60 * 1000); +// writers.shutdownNow(); +// readers.shutdownNow(); +// writers.awaitTermination(5, TimeUnit.SECONDS); +// readers.awaitTermination(5, TimeUnit.SECONDS); +// System.out.println("Statements written: " + writes.get() + ", read: " + reads.get()); +// } finally { +// +// sail.__tearDownUnitTest(); +// +// } +// +// } - final int nthreads = 10; // - final int nuris = 2000; // - final int npreds = 50; // - final Random r = new Random(); - - ExecutorService writers = Executors.newSingleThreadExecutor(DaemonThreadFactory.defaultThreadFactory()); - ExecutorService readers = Executors.newFixedThreadPool(nthreads, 
DaemonThreadFactory.defaultThreadFactory()); - - final BigdataSail sail = getSail(); - final URI[] subs = new URI[nuris]; - for (int i = 0; i < nuris; i++) { - subs[i] = uri("uri:" + i); - } - final URI[] preds = new URI[npreds]; - for (int i = 0; i < npreds; i++) { - preds[i] = uri("pred:" + i); - } - final AtomicInteger writes = new AtomicInteger(); - final AtomicInteger reads = new AtomicInteger(); - try { - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - - // Writer task adds nwrites statements then commits - class Writer implements Callable<Long> { - final int nwrites; - - Writer(final int nwrites) { - this.nwrites = nwrites; - } - - public Long call() throws Exception { - final RepositoryConnection tx1 = repo.getReadWriteConnection(); - try { - tx1.setAutoCommit(false); - - for (int i = 0; i < nwrites; i++) { - tx1.add(stmt(subs[r.nextInt(500)], preds[r.nextInt(20)], subs[r.nextInt(500)])); - writes.incrementAndGet(); - } - tx1.commit(); - - } finally { - tx1.close(); - } - - return null; - } - - } - - // ReaderTask makes nreads and closes - class Reader implements Callable<Long> { - final int nreads; - - Reader(final int nwrites) { - this.nreads = nwrites; - } - - public Long call() throws Exception { - final RepositoryConnection tx1 = repo.getReadOnlyConnection(); - try { - - for (int i = 0; i < nreads; i++) { - RepositoryResult<Statement> stats = tx1.getStatements(subs[r.nextInt(500)], null, null, true); - while (stats.hasNext()) { - stats.next(); - reads.incrementAndGet(); - } - } - - } finally { - tx1.close(); - } - - return null; - } - - } - - // let's schedule a few writers and readers - for (int i = 0; i < 500; i++) { - writers.submit(new Writer(500)); - for (int rdrs = 0; rdrs < 20; rdrs++) { - readers.submit(new Reader(50)); - } - } - - Thread.sleep(60 * 1000); - writers.shutdownNow(); - readers.shutdownNow(); - writers.awaitTermination(5, TimeUnit.SECONDS); - readers.awaitTermination(5, TimeUnit.SECONDS); 
- System.out.println("Statements written: " + writes.get() + ", read: " + reads.get()); - } finally { - - sail.__tearDownUnitTest(); - - } - - } - // similar to test_multiple_transactions but uses direct AbsractTripleStore // manipulations rather than RepositoryConnections public void test_multiple_csem_transaction() throws Exception { @@ -208,7 +214,7 @@ * session protection. If the protocol works correctly we should never * release session protection if any transaction has been initialized. * - * The mesage of "invalid address" would be generated if an allocation + * The message of "invalid address" would be generated if an allocation * has been freed and is no longer protected from recycling when an * attempt is made to read from it. */ @@ -218,29 +224,26 @@ final int npreds = 50; // final Random r = new Random(); - ExecutorService writers = Executors.newSingleThreadExecutor(DaemonThreadFactory.defaultThreadFactory()); - ExecutorService readers = Executors.newFixedThreadPool(nthreads, DaemonThreadFactory.defaultThreadFactory()); - - - final BigdataSail sail = getSail(); - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - final AbstractTripleStore origStore = repo.getDatabase(); - - final URI[] subs = new URI[nuris]; - for (int i = 0; i < nuris; i++) { - subs[i] = uri("uri:" + i); - } - final URI[] preds = new URI[npreds]; - for (int i = 0; i < npreds; i++) { - preds[i] = uri("pred:" + i); - } final CAT writes = new CAT(); final CAT reads = new CAT(); - final AtomicReference<Exception> failex = new AtomicReference<Exception>(null); + final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null); + final BigdataSail sail = getSail(); try { - // Writer task adds nwrites statements then commits + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final AbstractTripleStore origStore = repo.getDatabase(); + + final URI[] subs = new URI[nuris]; + for (int i = 0; i < nuris; 
i++) { + subs[i] = uri("uri:" + i); + } + final URI[] preds = new URI[npreds]; + for (int i = 0; i < npreds; i++) { + preds[i] = uri("pred:" + i); + } + + // Writer task adds nwrites statements then commits class Writer implements Callable<Long> { final int nwrites; @@ -251,7 +254,7 @@ public Long call() throws Exception { try { final boolean isQuads = origStore.isQuads(); - // Thread.sleep(r.nextInt(2000) + 500); + Thread.sleep(r.nextInt(2000) + 500); try { for (int i = 0; i < nwrites; i++) { @@ -273,11 +276,19 @@ log.info("Commit"); } } - } catch (IllegalStateException ise) { - failex.compareAndSet(null, ise); - log.error(ise, ise); - } catch (Throwable t) { - log.error(t, t); + } catch (Throwable ise) { + if (!InnerCause.isInnerCause(ise, + InterruptedException.class)) { + if (failex + .compareAndSet(null/* expected */, ise/* newValue */)) { + log.error("firstCause:" + ise, ise); + } else { + if (log.isInfoEnabled()) + log.info("Other error: " + ise, ise); + } + } else { + // Ignore. 
+ } } return null; } @@ -298,12 +309,12 @@ .getIndexManager()).newTx(ITx.READ_COMMITTED); try { - AbstractTripleStore readstore = (AbstractTripleStore) origStore + final AbstractTripleStore readstore = (AbstractTripleStore) origStore .getIndexManager().getResourceLocator() .locate(origStore.getNamespace(), txId); for (int i = 0; i < nreads; i++) { - BigdataStatementIterator stats = readstore + final BigdataStatementIterator stats = readstore .getStatements(subs[r.nextInt(nuris)], null, null); while (stats.hasNext()) { @@ -320,33 +331,50 @@ } catch (Throwable t) { log.error(t, t); } - return null; - } + return null; + } - } + } - // let's schedule a few writers and readers (more than needed) - for (int i = 0; i < 2000; i++) { - writers.submit(new Writer(500/*nwrite*/)); - for (int rdrs = 0; rdrs < 60; rdrs++) { - readers.submit(new Reader(20/*nread*/)); - } - } - - // let the writers run riot for a time - Thread.sleep(30 * 1000); - writers.shutdownNow(); - readers.shutdownNow(); - writers.awaitTermination(5, TimeUnit.SECONDS); - readers.awaitTermination(5, TimeUnit.SECONDS); - { - Exception ex = failex.get(); - if (ex != null) { - log.error(failex.get()); - fail("Test failed", ex); - } - } - System.out.println("Statements written: " + writes.get() + ", read: " + reads.get()); + ExecutorService writers = null; + ExecutorService readers = null; + try { + + writers = Executors.newSingleThreadExecutor(DaemonThreadFactory + .defaultThreadFactory()); + + readers = Executors.newFixedThreadPool(nthreads, + DaemonThreadFactory.defaultThreadFactory()); + + // let's schedule a few writers and readers (more than needed) + for (int i = 0; i < 3000; i++) { + writers.submit(new Writer(500/* nwrite */)); + for (int rdrs = 0; rdrs < 60; rdrs++) { + readers.submit(new Reader(20/* nread */)); + } + } + + // let the writers run riot for a time + Thread.sleep(60 * 1000); + writers.shutdownNow(); + readers.shutdownNow(); + writers.awaitTermination(5, TimeUnit.SECONDS); + 
readers.awaitTermination(5, TimeUnit.SECONDS); + { + final Throwable ex = failex.get(); + if (ex != null) { + fail("Test failed: firstCause=" + ex, ex); + } + } + if (log.isInfoEnabled()) + log.info("Statements written: " + writes.get() + ", read: " + + reads.get()); + } finally { + if (writers != null) + writers.shutdownNow(); + if (readers != null) + readers.shutdownNow(); + } } finally { sail.__tearDownUnitTest(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-18 21:18:02
|
Revision: 4737 http://bigdata.svn.sourceforge.net/bigdata/?rev=4737&view=rev Author: thompsonbry Date: 2011-06-18 21:17:55 +0000 (Sat, 18 Jun 2011) Log Message: ----------- Changes per MikeP's request to the NanoSparqlServer page to include the original query, the sesame operator tree, and the full bop plan for each running query. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-18 11:59:26 UTC (rev 4736) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2011-06-18 21:17:55 UTC (rev 4737) @@ -647,16 +647,20 @@ */ final String bopStr = BOpUtility.toString(q.getQuery()); w.write(TD); - w.write("<a href=\"#\" title=\""); - w.write(attrib(bopStr));// the entire query as a tooltip. - w.write("\"\n>"); + if (maxBopLength != 0) { + w.write("<a href=\"#\" title=\""); + w.write(attrib(bopStr));// the entire query as a tooltip. + w.write("\"\n>"); + } // A slice of the query inline on the page or everything if // maxBopLength<=0. w.write(cdata(bopStr.substring(0/* begin */, maxBopLength <= 0 ? bopStr.length() : Math.min( maxBopLength, bopStr.length())))); - w.write("..."); - w.write("</a>"); + if (maxBopLength != 0) { + w.write("..."); + w.write("</a>"); + } w.write(TDx); w.write(TD); w.write("total"); // summary line. @@ -668,14 +672,19 @@ // Otherwise show just this bop. 
final String bopStr = bopIndex.get(bopId).toString(); w.write(TD); - w.write("<a href=\"#\" title=\""); - w.write(attrib(bopStr));// the entire query as a tooltip. - w.write("\"\n>"); + if(maxBopLength!=0) { + w.write("<a href=\"#\" title=\""); + w.write(attrib(bopStr));// the entire query as a tooltip. + w.write("\"\n>"); + } // A slice of the query inline on the page. - w.write(cdata(bopStr.substring(0/* begin */, Math.min(64, bopStr - .length())))); - w.write("..."); - w.write("</a>"); + w.write(cdata(bopStr.substring(0/* begin */, + maxBopLength <= 0 ? bopStr.length() : Math.min( + maxBopLength, bopStr.length())))); + if (maxBopLength != 0) { + w.write("..."); + w.write("</a>"); + } w.write(TDx); w.write(TD); w.write(Integer.toString(evalOrder)); // eval order for this bop. Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-06-18 11:59:26 UTC (rev 4736) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-06-18 21:17:55 UTC (rev 4737) @@ -473,7 +473,7 @@ // Stuff it in the map of running queries. m_queries.put(queryId, new RunningQuery(queryId.longValue(), - queryId2, queryStr, begin)); + queryId2, queryStr, begin, this)); } @@ -835,22 +835,30 @@ */ final UUID queryId2; - /** The query. */ - final String query; + /** + * The task executing the query. + */ + final AbstractQueryTask queryTask; +// /** The query. */ +// final String query; + /** The timestamp when the query was accepted (ns). 
*/ final long begin; public RunningQuery(final long queryId, final UUID queryId2, - final String query, final long begin) { + final String query, final long begin, + final AbstractQueryTask queryTask) { this.queryId = queryId; this.queryId2 = queryId2; - this.query = query; +// this.query = query; this.begin = begin; + + this.queryTask = queryTask; } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-06-18 11:59:26 UTC (rev 4736) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-06-18 21:17:55 UTC (rev 4737) @@ -355,7 +355,7 @@ * into the HTML document we are building for the client. */ QueryLog.getTableXHTML(queryStr, q, w, - true/* showQueryDetails */, 64/* maxBopLength */); + true/* showQueryDetails */, 0/* maxBopLength */); // // Add into the HTML document. // statsNode.text(w.toString()); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-18 11:59:26 UTC (rev 4736) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-06-18 21:17:55 UTC (rev 4737) @@ -18,6 +18,7 @@ import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.QueryLog; import com.bigdata.bop.fed.QueryEngineFactory; +import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.util.HTMLUtility; @@ -82,7 +83,7 @@ * entire thing is accessible via the title attribute (a flyover). 
Use * ZERO (0) to see everything. */ - int maxBopLength = 64; + int maxBopLength = 0; if (req.getParameter("maxBopLength") != null) { maxBopLength = Integer.valueOf(req.getParameter("maxBopLength")); if (maxBopLength < 0) @@ -163,13 +164,13 @@ * IRunningQuery.getQueryId() to NanoSparqlServer's * RunningQuery.queryId. */ - final Map<UUID,RunningQuery> crosswalkMap = new LinkedHashMap<UUID, RunningQuery>(); + final Map<UUID/* IRunningQuery.queryId */, RunningQuery> crosswalkMap = new LinkedHashMap<UUID, RunningQuery>(); /* * Map providing the accepted RunningQuery objects in descending order * by their elapsed run time. */ - final TreeMap<Long, RunningQuery> acceptedQueryAge = newQueryMap(); + final TreeMap<Long/*elapsed*/, RunningQuery> acceptedQueryAge = newQueryMap(); { @@ -259,12 +260,33 @@ final RunningQuery acceptedQuery = crosswalkMap.get(query .getQueryId()); - final String queryStr = acceptedQuery == null ? "N/A" - : acceptedQuery.query; + final String queryStr; + if (acceptedQuery != null) { + + final AbstractQueryTask queryTask = acceptedQuery.queryTask; + + queryStr = acceptedQuery.queryTask.queryStr; + + current.node("h2", "SPARQL").node("p", + HTMLUtility.escapeForXHTML(queryTask.queryStr)); + + current.node("h2", "Parsed Query").node( + "pre", + HTMLUtility.escapeForXHTML(queryTask.sailQuery + .toString())); + + } else { + + queryStr = "N/A"; + + } + + current.node("h2", "Query Evaluation Statistics").node("p"); + // Format as a table, writing onto the response. - QueryLog.getTableXHTML(queryStr, query, w, !showQueryDetails, - maxBopLength); + QueryLog.getTableXHTML(queryStr, query, w, + !showQueryDetails, maxBopLength); // // Extract as String // final String s = w.getBuffer().toString(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-22 19:54:27
|
Revision: 4772 http://bigdata.svn.sourceforge.net/bigdata/?rev=4772&view=rev Author: thompsonbry Date: 2011-06-22 19:54:20 +0000 (Wed, 22 Jun 2011) Log Message: ----------- Replaced the use of SliceOp, which forces all solutions to be serialized, with EndOp. SliceOp SHOULD be used where bigdata imposes a native restriction on the OFFSET and LIMIT of the solutions delivered to the application. EndOp is preferred when solutions must be materialized on the query controller in scale-out if the OFFSET and LIMIT are not constrained. See https://sourceforge.net/apps/trac/bigdata/ticket/227 Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/EndOp.java Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/EndOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/EndOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/EndOp.java 2011-06-22 19:54:20 UTC (rev 4772) @@ -0,0 +1,121 @@ +package com.bigdata.bop.bset; + +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.FutureTask; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContext; +import com.bigdata.bop.BOpEvaluationContext; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.solutions.SliceOp; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.IBlockingBuffer; + +/** + * A operator which may be used at the end 
of query pipelines when there is a + * requirement marshal solutions back to the query controller by no requirement + * to {@link SliceOp slice} solutions. The primary use case for {@link EndOp} is + * when it is evaluated on the query controller so the results will be streamed + * back to the query controller in scale-out. You MUST specify + * {@link BOp.Annotations#EVALUATION_CONTEXT} as + * {@link BOpEvaluationContext#CONTROLLER} when it is to be used for this + * purpose. + * + * FIXME This is hacked to extend {@link SliceOp} instead as that appears to be + * necessary due to a persistent bug. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/227 + */ +public class EndOp extends PipelineOp {//SliceOp {//CopyOp { + +// private static final Logger log = Logger.getLogger(EndOp.class); + + /** + * + */ + private static final long serialVersionUID = 1L; + + public EndOp(EndOp op) { + super(op); + } + + public EndOp(BOp[] args, Map<String, Object> annotations) { + +// super(args, ensureSharedState(annotations)); + super(args, annotations); + + switch (getEvaluationContext()) { + case CONTROLLER: + break; + default: + throw new UnsupportedOperationException( + Annotations.EVALUATION_CONTEXT + "=" + + getEvaluationContext()); + } + + } + +// static private Map<String, Object> ensureSharedState( +// Map<String, Object> annotations) { +// +// annotations.put(PipelineOp.Annotations.SHARED_STATE, true); +// +// return annotations; +// +// } + + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { + + return new FutureTask<Void>(new OpTask(this, context)); + + } + + /** + * Copy the source to the sink or the alternative sink depending on the + * condition. 
+ */ + static private class OpTask implements Callable<Void> { + + private final PipelineOp op; + + private final BOpContext<IBindingSet> context; + + OpTask(final PipelineOp op, final BOpContext<IBindingSet> context) { + + this.op = op; + + this.context = context; + + } + + public Void call() throws Exception { + + final IAsynchronousIterator<IBindingSet[]> source = context + .getSource(); + + final IBlockingBuffer<IBindingSet[]> sink = context.getSink(); + +// boolean didRun = false; + + while (source.hasNext()) { + + final IBindingSet[] chunk = source.next(); + + sink.add(chunk); + +// didRun = true; + + } + +// if(didRun) +// sink.flush(); + + return null; + + } + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java 2011-06-22 18:06:38 UTC (rev 4771) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java 2011-06-22 19:54:20 UTC (rev 4772) @@ -3,10 +3,19 @@ import java.util.Map; import com.bigdata.bop.BOp; +import com.bigdata.bop.PipelineOp; /** * A version of {@link CopyOp} which is always evaluated on the query * controller. + * <p> + * Note: {@link CopyOp} and {@link StartOp} are the same. {@link StartOp} exists + * solely to reflect its functional role at the end of the query pipeline. + * <p> + * Note: {@link StartOp} is generally NOT required in a query plan. It is more + * of a historical artifact than something that we actually need to have in the + * query pipeline. It is perfectly possible to have the query pipeline begin + * with any of the {@link PipelineOp pipeline operators}. 
*/ public class StartOp extends CopyOp { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2011-06-22 18:06:38 UTC (rev 4771) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2011-06-22 19:54:20 UTC (rev 4772) @@ -21,10 +21,10 @@ import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.bset.EndOp; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.bop.joinGraph.rto.JoinGraph; import com.bigdata.bop.solutions.DistinctBindingSetOp; -import com.bigdata.bop.solutions.SliceOp; /** * Class accepts a join group and partitions it into a join graph and a tail @@ -1135,14 +1135,16 @@ * necessary? (It is causing runtime errors when not wrapped). * Is this a bopId collision which is not being detected? * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/227 + * * [This should perhaps be moved into the caller.] 
*/ - lastOp = new SliceOp(new BOp[] { lastOp }, NV + lastOp = new EndOp(new BOp[] { lastOp }, NV .asMap(new NV[] { new NV(JoinGraph.Annotations.BOP_ID, idFactory.nextId()), // new NV(JoinGraph.Annotations.EVALUATION_CONTEXT, - BOpEvaluationContext.CONTROLLER),// - new NV(PipelineOp.Annotations.SHARED_STATE,true),// + BOpEvaluationContext.CONTROLLER)// +// new NV(PipelineOp.Annotations.SHARED_STATE,true),// }) // ); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-06-22 18:06:38 UTC (rev 4771) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-06-22 19:54:20 UTC (rev 4772) @@ -65,6 +65,7 @@ import com.bigdata.bop.ap.filter.DistinctFilter; import com.bigdata.bop.bindingSet.HashBindingSet; import com.bigdata.bop.bset.ConditionalRoutingOp; +import com.bigdata.bop.bset.EndOp; import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.controller.AbstractSubqueryOp; import com.bigdata.bop.controller.Steps; @@ -87,12 +88,12 @@ import com.bigdata.rdf.internal.TermId; import com.bigdata.rdf.internal.VTE; import com.bigdata.rdf.internal.constraints.INeedsMaterialization; -import com.bigdata.rdf.internal.constraints.INeedsMaterialization.Requirement; import com.bigdata.rdf.internal.constraints.IsInlineBOp; import com.bigdata.rdf.internal.constraints.IsMaterializedBOp; import com.bigdata.rdf.internal.constraints.NeedsMaterializationBOp; import com.bigdata.rdf.internal.constraints.SPARQLConstraint; import com.bigdata.rdf.internal.constraints.TryBeforeMaterializationConstraint; +import com.bigdata.rdf.internal.constraints.INeedsMaterialization.Requirement; import com.bigdata.rdf.lexicon.LexPredicate; import com.bigdata.rdf.spo.DefaultGraphSolutionExpander; import com.bigdata.rdf.spo.ISPO; @@ -266,12 +267,12 
@@ * controller so the results will be streamed back to the query * controller in scale-out. */ - tmp = new SliceOp(new BOp[] { tmp }, NV.asMap(// + tmp = new EndOp(new BOp[] { tmp }, NV.asMap(// new NV(BOp.Annotations.BOP_ID, idFactory .incrementAndGet()), // new NV(BOp.Annotations.EVALUATION_CONTEXT, - BOpEvaluationContext.CONTROLLER),// - new NV(PipelineOp.Annotations.SHARED_STATE,true)// + BOpEvaluationContext.CONTROLLER)// +// new NV(PipelineOp.Annotations.SHARED_STATE,true)// )); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java 2011-06-22 18:06:38 UTC (rev 4771) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java 2011-06-22 19:54:20 UTC (rev 4772) @@ -54,6 +54,8 @@ import com.bigdata.bop.PipelineOp; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.bset.ConditionalRoutingOp; +import com.bigdata.bop.bset.CopyOp; +import com.bigdata.bop.bset.EndOp; import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.controller.SubqueryHashJoinOp; import com.bigdata.bop.controller.SubqueryOp; @@ -390,12 +392,12 @@ * with SliceOp which interactions with SubqueryOp to allow * incorrect termination under some circumstances. 
*/ - left = new SliceOp(new BOp[] { left }, NV.asMap(// + left = new EndOp(new BOp[] { left }, NV.asMap(// new NV(BOp.Annotations.BOP_ID, idFactory .incrementAndGet()), // new NV(BOp.Annotations.EVALUATION_CONTEXT, - BOpEvaluationContext.CONTROLLER),// - new NV(PipelineOp.Annotations.SHARED_STATE, true)// + BOpEvaluationContext.CONTROLLER)// +// new NV(PipelineOp.Annotations.SHARED_STATE, true)// )); } @@ -781,11 +783,11 @@ } - final PipelineOp slice = new SliceOp(new BOp[] { left }, NV.asMap(// + final PipelineOp slice = new EndOp(new BOp[] { left }, NV.asMap(// new NV(BOp.Annotations.BOP_ID, idFactory.incrementAndGet()), // new NV(BOp.Annotations.EVALUATION_CONTEXT, - BOpEvaluationContext.CONTROLLER),// - new NV(PipelineOp.Annotations.SHARED_STATE, true)// + BOpEvaluationContext.CONTROLLER)// +// new NV(PipelineOp.Annotations.SHARED_STATE, true)// )); return slice; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2011-06-24 14:41:21
|
Revision: 4789 http://bigdata.svn.sourceforge.net/bigdata/?rev=4789&view=rev Author: martyncutcher Date: 2011-06-24 14:41:14 +0000 (Fri, 24 Jun 2011) Log Message: ----------- Fixes problem with session protection where releaseSessions failed to clear writes from the writeCache that had been freed but written prior to the last commit. It also fixes a problem where the releaseSession failed to maintain an accurate count of current free bits that could result in the Allocator on the free list not having any free space. The test has been refined to fail fast. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2011-06-24 12:31:19 UTC (rev 4788) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2011-06-24 14:41:14 UTC (rev 4789) @@ -363,8 +363,11 @@ if (m_addr != 0) { // check active! for (int i = 0; i < m_live.length; i++) { int chkbits = m_transients[i]; + // check all addresses set in m_transients NOT set in m_live + chkbits &= ~m_live[i]; + + // reset transients to live OR commit m_transients[i] = m_live[i] | m_commit[i]; - chkbits &= ~m_transients[i]; final int startBit = i * 32; @@ -384,4 +387,92 @@ return sb.toString(); } + /** + * @return number of bits that will be cleared in a session release + */ + int sessionBits() { + int freebits = 0; + + if (m_addr != 0) { // check active! 
+ for (int i = 0; i < m_live.length; i++) { + int chkbits = m_transients[i]; + if (chkbits != 0) { + // chkbits &= ~(m_live[i] | m_commit[i]); + chkbits &= ~m_live[i]; + + if (chkbits != 0) { + // there are writes to clear + for (int b = 0; b < 32; b++) { + if ((chkbits & (1 << b)) != 0) { + freebits++; + } + } + } + } + } + } + + return freebits; + } + + /** + * @return number of bits immediately available for allocation + */ + int freeBits() { + int freebits = 0; + + if (m_addr != 0) { // check active! + for (int i = 0; i < m_live.length; i++) { + int chkbits = ~m_transients[i]; + + if (chkbits != 0) { + if (chkbits == 0xFFFFFFFF) { + freebits += 32; + } else { + for (int b = 0; b < 32; b++) { + if ((chkbits & (1 << b)) != 0) { + freebits++; + } + } + } + } + } + } else { + freebits += m_live.length * 32; + } + + return freebits; + } + + /** + * transients frees as defined by those bits set in transients but NOT set + * in live + * @return number of transient frees + */ + int transientBits() { + int freebits = 0; + + if (m_addr != 0) { // check active! 
+ for (int i = 0; i < m_live.length; i++) { + int chkbits = m_transients[i] & ~m_live[i]; + + if (chkbits != 0) { + if (chkbits == 0xFFFFFFFF) { + freebits += 32; + } else { + for (int b = 0; b < 32; b++) { + if ((chkbits & (1 << b)) != 0) { + freebits++; + } + } + } + } + } + } else { + freebits += m_live.length * 32; + } + + return freebits; + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-06-24 12:31:19 UTC (rev 4788) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-06-24 14:41:14 UTC (rev 4789) @@ -25,6 +25,7 @@ package com.bigdata.rwstore; import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.io.*; @@ -252,6 +253,17 @@ } if (!protectTransients) { + /** + * This assert will trip if any address was freed under + * session protection and therefore remained accessible + * until released. 
+ * The value returned by releaseSession should be zero + * since all "frees" should already have removed any + * writes to the writeCacheService + */ + assert m_sessionFrees.intValue() == 0; + // assert block.releaseSession(m_store.m_writeCache) == 0; + block.m_transients = block.m_live.clone(); } @@ -544,6 +556,9 @@ } private boolean m_freeWaiting = true; + + // track number of frees to be cleared on session releases + private AtomicInteger m_sessionFrees = new AtomicInteger(0); public boolean free(final int addr, final int size) { return free(addr, size, false); @@ -566,9 +581,9 @@ * without first clearing addresses them from the writeCacheService */ final boolean tmp = m_sessionActive; - m_sessionActive = m_store.isSessionProtected(); + m_sessionActive = tmp || m_store.isSessionProtected(); if (tmp && !m_sessionActive) throw new AssertionError(); - + try { if (((AllocBlock) m_allocBlocks.get(block)) .freeBit(offset % nbits, m_sessionActive && !overideSession)) { // bit adjust @@ -580,8 +595,23 @@ checkFreeList(); } else { m_freeTransients++; + + if (m_sessionActive) { + boolean assertsEnabled = false; + assert assertsEnabled = true; + if (assertsEnabled){ + final int sessionFrees = m_sessionFrees.incrementAndGet(); + int sessionBits = 0; + for (AllocBlock ab : m_allocBlocks) { + sessionBits += ab.sessionBits(); + } + assert sessionFrees <= sessionBits : "sessionFrees: " + sessionFrees + " > sessionBits: " + sessionBits; + } + } + } + if (m_statsBucket != null) { m_statsBucket.delete(size); } @@ -698,16 +728,14 @@ return value; } else { - if (log.isDebugEnabled()) { - StringBuilder sb = new StringBuilder(); - sb.append("FixedAllocator returning null address, with freeBits: " + m_freeBits + "\n"); - - for (AllocBlock ab: m_allocBlocks) { - sb.append(ab.show() + "\n"); - } - - log.debug(sb); - } + StringBuilder sb = new StringBuilder(); + sb.append("FixedAllocator returning null address, with freeBits: " + m_freeBits + "\n"); + + for (AllocBlock ab: m_allocBlocks) 
{ + sb.append(ab.show() + "\n"); + } + + log.error(sb); return 0; } @@ -873,20 +901,61 @@ } if (this.m_sessionActive) { - if (log.isTraceEnabled()) - log.trace("Allocator: #" + m_index + " releasing session protection"); - - int releasedAllocations = 0; - for (AllocBlock ab : m_allocBlocks) { - releasedAllocations += ab.releaseSession(cache); - } - - m_freeBits += releasedAllocations; - m_freeTransients -= releasedAllocations; - - checkFreeList(); - - m_sessionActive = m_store.isSessionProtected(); + final int start = m_sessionFrees.intValue(); + // try { + if (log.isTraceEnabled()) + log.trace("Allocator: #" + m_index + " releasing session protection"); + + + int releasedAllocations = 0; + for (AllocBlock ab : m_allocBlocks) { + releasedAllocations += ab.releaseSession(cache); + } + + assert !m_store.isSessionProtected() : "releaseSession called with isSessionProtected: true"; + + m_sessionActive = false; // should only need indicate that it contains no cached writes + + + m_freeBits = freebits(); + final int freebits = freebits(); + if (m_freeBits > freebits) + log.error("m_freeBits too high: " + m_freeBits + " > (calc): " + freebits); + + m_freeTransients = transientbits(); + + checkFreeList(); + + // assert m_sessionFrees == releasedAllocations : "Allocator: " + hashCode() + " m_sessionFrees: " + m_sessionFrees + " != released: " + releasedAllocations; + if (start > releasedAllocations) { + log.error("BAD! Allocator: " + hashCode() + ", size: " + m_size + " m_sessionFrees: " + m_sessionFrees.intValue() + " > released: " + releasedAllocations); + } else { + // log.error("GOOD! 
Allocator: " + hashCode() + ", size: " + m_size + " m_sessionFrees: " + m_sessionFrees.intValue() + " <= released: " + releasedAllocations); + } + // } finally { + final int end = m_sessionFrees.getAndSet(0); + assert start == end : "SessionFrees concurrent modification: " + start + " != " + end; + // } + } else { + assert m_sessionFrees.intValue() == 0 : "Session Inactive with sessionFrees: " + m_sessionFrees.intValue(); } } + + private int freebits() { + int freeBits = 0; + for (AllocBlock ab : m_allocBlocks) { + freeBits += ab.freeBits(); + } + + return freeBits; + } + + private int transientbits() { + int freeBits = 0; + for (AllocBlock ab : m_allocBlocks) { + freeBits += ab.transientBits(); + } + + return freeBits; + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-06-24 12:31:19 UTC (rev 4788) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-06-24 14:41:14 UTC (rev 4789) @@ -394,7 +394,7 @@ private final Quorum<?,?> m_quorum; - private final RWWriteCacheService m_writeCache; + final RWWriteCacheService m_writeCache; /** * The actual allocation sizes as read from the store. @@ -1602,8 +1602,6 @@ * transaction protection and isolated AllocationContexts. 
*/ if (this.isSessionProtected()) { - final boolean overrideSession = context != null && alloc.canImmediatelyFree(addr, sze, context); - if (context != null) { if (alloc.canImmediatelyFree(addr, sze, context)) { immediateFree(addr, sze, true); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2011-06-24 12:31:19 UTC (rev 4788) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2011-06-24 14:41:14 UTC (rev 4789) @@ -218,7 +218,7 @@ * has been freed and is no longer protected from recycling when an * attempt is made to read from it. */ - final int nthreads = 10; // up count to increase chance startup condition + final int nthreads = 5; // up count to increase chance startup condition // decrement to increase chance of idle (no sessions) final int nuris = 2000; // number of unique subject/objects final int npreds = 50; // @@ -328,8 +328,19 @@ } finally { ((Journal) origStore.getIndexManager()).abort(txId); } - } catch (Throwable t) { - log.error(t, t); + } catch (Throwable ise) { + if (!InnerCause.isInnerCause(ise, + InterruptedException.class)) { + if (failex + .compareAndSet(null/* expected */, ise/* newValue */)) { + log.error("firstCause:" + ise, ise); + } else { + if (log.isInfoEnabled()) + log.info("Other error: " + ise, ise); + } + } else { + // Ignore. 
+ } } return null; } @@ -347,15 +358,20 @@ DaemonThreadFactory.defaultThreadFactory()); // let's schedule a few writers and readers (more than needed) - for (int i = 0; i < 3000; i++) { + // writers.submit(new Writer(5000000/* nwrite */)); + for (int i = 0; i < 5000; i++) { writers.submit(new Writer(500/* nwrite */)); - for (int rdrs = 0; rdrs < 60; rdrs++) { - readers.submit(new Reader(20/* nread */)); + for (int rdrs = 0; rdrs < 20; rdrs++) { + readers.submit(new Reader(60/* nread */)); } } - // let the writers run riot for a time - Thread.sleep(60 * 1000); + // let the writers run riot for a time, checking for failure + for (int i = 0; i < 60; i++) { + Thread.sleep(1000); + if (failex.get() != null) + break; + } writers.shutdownNow(); readers.shutdownNow(); writers.awaitTermination(5, TimeUnit.SECONDS); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-24 15:10:48
|
Revision: 4790 http://bigdata.svn.sourceforge.net/bigdata/?rev=4790&view=rev Author: thompsonbry Date: 2011-06-24 15:10:39 +0000 (Fri, 24 Jun 2011) Log Message: ----------- The 3rd party operation integration feature is done. See TestSetBindingSets for an example of how to make use of this feature. I've made the Property for the QueryHints null if there are no query hints and modified the various classes which look things up in that Properties object. I've backed out the changes which expose this facility in the BigdataSailQuery interface. For now, the mechanism is available to people who directly invoke BigdataSailConnection#evaluate(...). I prefer this approach with its minimum propagation of the API change until we get some feedback on the utility of this integration. See https://sourceforge.net/apps/trac/bigdata/ticket/267 Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataSolutionResolverator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailBooleanQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailGraphQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java 
branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataSPARQLParser.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSetBinding.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataOpenRDFBindingSetsResolverator.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestSetBindingSets.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/ThickAsynchronousIterator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -82,16 +82,16 @@ } - private final void assertOpen() { - - if (!open) - throw new IllegalStateException(); - - } +// private final void assertOpen() { +// +// if (!open) +// throw new IllegalStateException(); +// +// } public boolean hasNext() { - return lastIndex + 1 < a.length; + return open && lastIndex + 1 < a.length; } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -0,0 +1,152 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Oct 28, 2008 + */ + +package com.bigdata.relation.accesspath; + +import java.util.NoSuchElementException; +import java.util.concurrent.TimeUnit; + +import com.bigdata.striterator.IChunkedIterator; +import com.bigdata.striterator.ICloseableIterator; + +/** + * An {@link IAsynchronousIterator} that wraps an {@link IChunkedIterator} or a + * {@link ICloseableIterator}. + * + * @param E + * The generic type of the visited chunks. + * @param F + * The generic type of the source elements. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: ThickAsynchronousIterator.java 2265 2009-10-26 12:51:06Z + * thompsonbry $ + */ +public class WrappedAsynchronousIterator<E,F> implements IAsynchronousIterator<E> { + + private transient boolean open = true; + + private final IChunkedIterator<F> src; + + /** + * + * @param src + * The source. + * + * @throws IllegalArgumentException + * if <i>src</i> is <code>null</code>. + */ + public WrappedAsynchronousIterator(final IChunkedIterator<F> src) { + + if (src == null) + throw new IllegalArgumentException(); + + this.src = src; + + } + +// private final void assertOpen() { +// +// if (!open) +// throw new IllegalStateException(); +// +// } + + public boolean hasNext() { + + return open && src.hasNext(); + + } + + @SuppressWarnings("unchecked") + public E next() { + + if (!hasNext()) + throw new NoSuchElementException(); + + return (E) src.nextChunk(); + + } + + public void remove() { + + src.remove(); + + } + + /* + * ICloseableIterator. + */ + + public void close() { + + if (open) { + + open = false; + +// if (src instanceof ICloseableIterator<?>) { + + ((ICloseableIterator<?>) src).close(); + +// } + + } + + } + + /* + * IAsynchronousIterator. + */ + + public boolean isExhausted() { + + return !hasNext(); + + } + + /** + * Delegates to {@link #hasNext()} since all data are local and timeouts can + * not occur. + */ + public boolean hasNext(long timeout, TimeUnit unit) { + + return hasNext(); + + } + + /** + * Delegates to {@link #next()} since all data are local and timeouts can + * not occur. 
+ */ + public E next(long timeout, TimeUnit unit) { + + return next(); + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/WrappedAsynchronousIterator.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -50,12 +50,12 @@ */ abstract public class AbstractChunkedResolverator<E,F,S> implements ICloseableIterator<F> { - final protected static Logger log = Logger.getLogger(AbstractChunkedResolverator.class); + final private static Logger log = Logger.getLogger(AbstractChunkedResolverator.class); - /** - * True iff the {@link #log} level is DEBUG or less. - */ - final protected static boolean DEBUG = log.isDebugEnabled(); +// /** +// * True iff the {@link #log} level is DEBUG or less. +// */ +// final protected static boolean DEBUG = log.isDebugEnabled(); /** * The source iterator. @@ -202,7 +202,7 @@ try { - if (DEBUG) + if (log.isDebugEnabled()) log.debug("Start"); final long begin = System.currentTimeMillis(); @@ -224,7 +224,7 @@ nchunks++; nelements += chunk.length; - if (DEBUG) + if (log.isDebugEnabled()) log.debug("nchunks="+nchunks+", chunkSize="+chunk.length); } @@ -307,7 +307,7 @@ // elapsed += (now - begin); - if (DEBUG) + if (log.isDebugEnabled()) log.debug("nextChunk ready"); } @@ -315,7 +315,7 @@ // the next resolved element. 
final F f = chunk[++lastIndex]; - if (DEBUG) + if (log.isDebugEnabled()) log.debug("lastIndex=" + lastIndex + ", chunk.length=" + chunk.length + ", stmt=" + f); Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -1,6 +1,5 @@ package com.bigdata.rdf.store; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; @@ -8,6 +7,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; import org.openrdf.model.Value; import com.bigdata.bop.Constant; @@ -31,7 +31,9 @@ public class BigdataBindingSetResolverator extends AbstractChunkedResolverator<IBindingSet, IBindingSet, AbstractTripleStore> { - + + final private static Logger log = Logger.getLogger(BigdataBindingSetResolverator.class); + private final IVariable[] required; /** Added: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataOpenRDFBindingSetsResolverator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataOpenRDFBindingSetsResolverator.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataOpenRDFBindingSetsResolverator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -0,0 +1,235 @@ +package com.bigdata.rdf.store; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + 
+import org.apache.log4j.Logger; +import org.openrdf.model.Value; +import org.openrdf.query.Binding; +import org.openrdf.query.BindingSet; + +import com.bigdata.bop.Constant; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.Var; +import com.bigdata.bop.bindingSet.ListBindingSet; +import com.bigdata.rdf.internal.DummyIV; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.model.BigdataValue; +import com.bigdata.rdf.model.BigdataValueFactory; +import com.bigdata.relation.accesspath.BlockingBuffer; +import com.bigdata.relation.rule.eval.ISolution; +import com.bigdata.striterator.AbstractChunkedResolverator; +import com.bigdata.striterator.IChunkedOrderedIterator; + +/** + * Efficiently resolve openrdf {@link BindingSet}s to bigdata {@link IBindingSets}s. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class BigdataOpenRDFBindingSetsResolverator + extends + AbstractChunkedResolverator<BindingSet, IBindingSet, AbstractTripleStore> { + + final private static Logger log = Logger + .getLogger(BigdataOpenRDFBindingSetsResolverator.class); + + /** + * + * @param db + * Used to resolve RDF {@link Value}s to {@link IV}s. + * @param src + * The source iterator (will be closed when this iterator is + * closed). + * + * FIXME must accept reverse bnodes map (from term identifier to + * blank nodes) for resolution of blank nodes within a Sesame + * connection context. [Is this comment relevant for this class?] + */ + public BigdataOpenRDFBindingSetsResolverator(final AbstractTripleStore db, + final IChunkedOrderedIterator<BindingSet> src) { + + super(db, src, new BlockingBuffer<IBindingSet[]>( + db.getChunkOfChunksCapacity(), + db.getChunkCapacity(), + db.getChunkTimeout(), + TimeUnit.MILLISECONDS)); + + } + + /** + * Strengthens the return type. 
+ */ + public BigdataOpenRDFBindingSetsResolverator start( + final ExecutorService service) { + + return (BigdataOpenRDFBindingSetsResolverator) super.start(service); + + } + + /** + * Resolve a chunk of {@link BindingSet}s into a chunk of + * {@link IBindingSet}s in which RDF {@link Value}s have been resolved to + * {@link IV}s. + */ + protected IBindingSet[] resolveChunk(final BindingSet[] chunk) { + + if (log.isInfoEnabled()) + log.info("Fetched chunk: size=" + chunk.length); + + /* + * Create a collection of the distinct term identifiers used in this + * chunk. + * + * Note: The [initialCapacity] is only an estimate. There are normally + * multiple values in each binding set. However, it is also common for + * the same Value to appear across different solutions in a chunk. + */ + + final int initialCapacity = chunk.length; + + final Collection<Value> valueSet = new LinkedHashSet<Value>(initialCapacity); + + for (BindingSet bindingSet : chunk) { + + for(Binding binding : bindingSet) { + + final Value value = binding.getValue(); + + if(value!=null) { + + valueSet.add(value); + + } + + } + + } + + if (log.isInfoEnabled()) + log.info("Resolving " + valueSet.size() + " term identifiers"); + + final LexiconRelation r = state.getLexiconRelation(); + + final BigdataValueFactory vf = r.getValueFactory(); + + final int nvalues = valueSet.size(); + + /* + * Convert to a BigdataValue[], building up a Map used to translate from + * Value to BigdataValue as we go. + */ + final BigdataValue[] values = new BigdataValue[nvalues]; + final Map<Value,BigdataValue> map = new LinkedHashMap<Value, BigdataValue>(nvalues); + { + + int i = 0; + + for (Value value : valueSet) { + + final BigdataValue val = vf.asValue(value); + + map.put(value, val); + + values[i++] = val; + + } + + } + + // Batch resolve against the database. 
+ r.addTerms(values, nvalues, true/*readOnly*/); + + // Assemble a chunk of resolved elements + { + + final IBindingSet[] chunk2 = new IBindingSet[chunk.length]; + int i = 0; + for (BindingSet e : chunk) { + + final IBindingSet f = getBindingSet(e, map); + + chunk2[i++] = f; + + } + + // return the chunk of resolved elements. + return chunk2; + + } + + } + + /** + * Resolve the RDF {@link Value}s in the {@link BindingSet} using the map + * populated when we fetched the current chunk and return the + * {@link IBindingSet} for that solution in which term identifiers have been + * resolved to their corresponding {@link BigdataValue}s. + * + * @param solution + * A solution whose {@link Long}s will be interpreted as term + * identifiers and resolved to the corresponding + * {@link BigdataValue}s. + * + * @return The corresponding {@link IBindingSet} in which the term + * identifiers have been resolved to {@link BigdataValue}s. + * + * @throws IllegalStateException + * if the {@link IBindingSet} was not materialized with the + * {@link ISolution}. + */ + @SuppressWarnings("unchecked") + private IBindingSet getBindingSet(final BindingSet bindingSet, + final Map<Value,BigdataValue> map) { + + if (bindingSet == null) + throw new IllegalArgumentException(); + + if (map == null) + throw new IllegalArgumentException(); + + final IBindingSet out = new ListBindingSet(); + + for(Binding binding : bindingSet) { + + final String name = binding.getName(); + + final Value value = binding.getValue(); + + final BigdataValue outVal = map.get(value); + + /* + * TODO An alternative to using a DummyIV would be to drop the + * BindingSet if there are any Values in it which are not known to + * the database. 
+ */ + if (outVal != null) { + + final Constant<?> c; + + if (outVal.getIV() == null) { + + c = new Constant(DummyIV.INSTANCE); + + } else { + + c = new Constant(outVal.getIV()); + + } + + out.set(Var.var(name), c); + + } + + } + + return out; + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataOpenRDFBindingSetsResolverator.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataSolutionResolverator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataSolutionResolverator.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataSolutionResolverator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -7,6 +7,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; import org.openrdf.model.Value; import com.bigdata.bop.Constant; @@ -31,6 +32,8 @@ extends AbstractChunkedResolverator<ISolution, IBindingSet, AbstractTripleStore> { + final private static Logger log = Logger.getLogger(BigdataSolutionResolverator.class); + /** * * @param db Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataStatementIteratorImpl.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -6,6 +6,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; import 
org.openrdf.model.Value; import com.bigdata.rdf.internal.IV; @@ -34,6 +35,8 @@ AbstractChunkedResolverator<ISPO, BigdataStatement, AbstractTripleStore> implements BigdataStatementIterator { + final private static Logger log = Logger.getLogger(BigdataStatementIteratorImpl.class); + /** * An optional map of known blank node term identifiers and the * corresponding {@link BigdataBNodeImpl} objects. This map may be used to @@ -100,7 +103,7 @@ @Override protected BigdataStatement[] resolveChunk(final ISPO[] chunk) { - if (DEBUG) + if (log.isDebugEnabled()) log.debug("chunkSize=" + chunk.length); /* @@ -152,7 +155,7 @@ } - if (DEBUG) + if (log.isDebugEnabled()) log.debug("Resolving " + ivs.size() + " term identifiers"); /* Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl3.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -142,11 +142,14 @@ import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.BD; import com.bigdata.rdf.store.BigdataBindingSetResolverator; +import com.bigdata.rdf.store.BigdataOpenRDFBindingSetsResolverator; +import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.ElementFilter; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBuffer; import com.bigdata.relation.accesspath.IElementFilter; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; +import com.bigdata.relation.accesspath.WrappedAsynchronousIterator; import com.bigdata.relation.rule.IAccessPathExpander; import com.bigdata.relation.rule.IProgram; import com.bigdata.relation.rule.IQueryOptions; @@ 
-159,6 +162,7 @@ import com.bigdata.striterator.ChunkedWrappedIterator; import com.bigdata.striterator.Dechunkerator; import com.bigdata.striterator.DistinctFilter; +import com.bigdata.striterator.IChunkedIterator; import com.bigdata.striterator.IChunkedOrderedIterator; import com.bigdata.striterator.ICloseableIterator; @@ -292,19 +296,31 @@ /** * Logger. */ - protected static final Logger log = + private static final Logger log = Logger.getLogger(BigdataEvaluationStrategyImpl3.class); protected final BigdataTripleSource tripleSource; protected final Dataset dataset; + final private CloseableIteration<BindingSet, QueryEvaluationException> bindingSets; + private final AbstractTripleStore database; /** + * + * @param tripleSource + * @param dataset + * @param bindingSets + * An optional source for zero or more binding sets which will be + * fed into the start of the native query evaluation. + * @param nativeJoins + * @param allowSesameQueryEvaluation */ public BigdataEvaluationStrategyImpl3( - final BigdataTripleSource tripleSource, final Dataset dataset, + final BigdataTripleSource tripleSource, + final Dataset dataset, + final CloseableIteration<BindingSet, QueryEvaluationException> bindingSets, final boolean nativeJoins, final boolean allowSesameQueryEvaluation) { @@ -312,6 +328,7 @@ this.tripleSource = tripleSource; this.dataset = dataset; + this.bindingSets = bindingSets; this.database = tripleSource.getDatabase(); this.nativeJoins = nativeJoins; this.allowSesameQueryEvaluation = allowSesameQueryEvaluation; @@ -346,7 +363,7 @@ CloseableIteration<BindingSet, QueryEvaluationException> result; result = this.evaluate(projection.getArg(), bindings); - QueryBindingSet empty = new QueryBindingSet(); + final QueryBindingSet empty = new QueryBindingSet(); result = new ProjectionIterator(projection, result, empty); return result; } @@ -1028,10 +1045,45 @@ final UUID queryId = queryIdStr == null ? 
UUID.randomUUID() : UUID .fromString(queryIdStr); - // Wrap the input binding sets (or an empty binding set if there is no - // input). - final IAsynchronousIterator<IBindingSet[]> source = newBindingSetIterator(bs != null ? toBindingSet(bs) - : new ListBindingSet()); + /* + * Setup the input binding sets which will be fed into the query + * pipeline. + */ + final IAsynchronousIterator<IBindingSet[]> source; + if (bs != null && bindingSets != null && bs.size() > 0) + throw new QueryEvaluationException( + "BindingSet and BindingSets are mutually exclusive."); + if (bindingSets != null) { + /* + * A stream of input binding sets will be fed into the query + * pipeline (zero or more). + */ + // align openrdf CloseableIteration with Bigdata IClosableIterator. + final IChunkedOrderedIterator<BindingSet> src = new ChunkedWrappedIterator<BindingSet>( + new Sesame2BigdataIterator<BindingSet, QueryEvaluationException>( + bindingSets)); + // efficient resolution of Value[]s to IV[]s for binding set chunks. + final ICloseableIterator<IBindingSet> src2 = new BigdataOpenRDFBindingSetsResolverator( + database, src).start(database.getExecutorService()); + // chunk up the binding sets. + final IChunkedOrderedIterator<IBindingSet> src3 = new ChunkedWrappedIterator<IBindingSet>( + src2); + // wrap as an asynchronous iterator. + source = new WrappedAsynchronousIterator<IBindingSet[], IBindingSet>( + src3); + } else if (bs != null) { + /* + * A single input binding set will be fed into the query pipeline + * using the supplied bindings. + */ + source = newBindingSetIterator(toBindingSet(bs)); + } else { + /* + * A single empty input binding set will be fed into the query + * pipeline. 
+ */ + source = newBindingSetIterator(new ListBindingSet()); + } IRunningQuery runningQuery = null; try { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -3186,20 +3186,27 @@ * See {@link #evaluate(TupleExpr, Dataset, BindingSet, boolean, Properties)}. */ public CloseableIteration<? extends BindingSet, QueryEvaluationException> evaluate( - TupleExpr tupleExpr, Dataset dataset, - final BindingSet bindings, final boolean includeInferred) - throws SailException { - return evaluate(tupleExpr, dataset, bindings, includeInferred, new Properties()); + final TupleExpr tupleExpr, // + final Dataset dataset,// + final BindingSet bindings,// + final boolean includeInferred// + ) throws SailException { + + return evaluate(tupleExpr, dataset, bindings, + null/* bindingSets */, includeInferred, new Properties()); + } /** * Return the optimized operator tree. Useful for debugging. 
*/ - public synchronized TupleExpr optimize( - TupleExpr tupleExpr, Dataset dataset, - BindingSet bindings, final boolean includeInferred, - final Properties queryHints) - throws SailException { + /*public*/ synchronized TupleExpr optimize( + TupleExpr tupleExpr,// + Dataset dataset,// + BindingSet bindings,// + final boolean includeInferred,// + final Properties queryHints// + ) throws SailException { if (log.isInfoEnabled()) log.info("Optimizing query: " + tupleExpr + ", dataSet=" @@ -3228,11 +3235,9 @@ final BigdataTripleSource tripleSource = new BigdataTripleSource(this, includeInferred); - final BigdataEvaluationStrategy strategy = - new BigdataEvaluationStrategyImpl3( - tripleSource, dataset, nativeJoins, - allowSesameQueryEvaluation - ); + final BigdataEvaluationStrategy strategy = new BigdataEvaluationStrategyImpl3( + tripleSource, dataset, null/* bindingSets */, nativeJoins, + allowSesameQueryEvaluation); final QueryOptimizerList optimizerList = new QueryOptimizerList(); optimizerList.add(new BindingAssigner()); @@ -3263,30 +3268,55 @@ } /** - * Note: The <i>includeInferred</i> argument is applied in two ways. - * First, inferences are stripped out of the {@link AccessPath}. - * Second, query time expansion of - * <code>foo rdf:type rdfs:Resource</code>, owl:sameAs, etc. - * <p> - * Note: Query time expansion can be disabled independently using - * {@link Options#QUERY_TIME_EXPANDER}, but not on a per-query basis. - * <p> - * QueryHints are a set of properties that are parsed from a SPARQL - * query. See {@link QueryHints#PREFIX} for more information. * - * @todo The [bindings] are supposed to be inputs to the query - * evaluation, but I am still not quite clear what the role of the - * "input" binding set is here. Based on their own code, e.g., - * MemoryStore, and the Sesame TCK, it is clear that evaluation - * needs to proceed against an empty binding set once it gets - * started. 
+ * @param bindings + * The initial binding set which will be feed into the native + * query evaluation. + * + * @param bindingSets + * An optional source for zero or more binding sets which + * will be fed into the start of the native query evaluation. + * When non-<code>null</code> <i>bindings</i> MUST be empty. + * + * @param includeInferred + * The <i>includeInferred</i> argument is applied in two + * ways. First, inferences are stripped out of the + * {@link AccessPath}. Second, query time expansion of + * <code>foo rdf:type rdfs:Resource</code>, owl:sameAs, etc. + * <p> + * Note: Query time expansion can be disabled independently + * using {@link Options#QUERY_TIME_EXPANDER}, but not on a + * per-query basis. + * + * @param queryHints + * A set of properties that are parsed from a SPARQL query. + * See {@link QueryHints#PREFIX} for more information. + * + * @throws SailException + * if <i>bindingSets</i> is non-<code>null</code> and + * <i>bindingSet</i> is non-empty. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/267 */ public synchronized CloseableIteration<? extends BindingSet, QueryEvaluationException> evaluate( - TupleExpr tupleExpr, Dataset dataset, - BindingSet bindings, final boolean includeInferred, - final Properties queryHints) - throws SailException { + TupleExpr tupleExpr,// + Dataset dataset,// + BindingSet bindings,// + final CloseableIteration<BindingSet, QueryEvaluationException> bindingSets, + final boolean includeInferred,// + final Properties queryHints// + ) throws SailException { + if (tupleExpr == null) + throw new SailException(); + + if (bindings == null) // required by optimize() and probably others. 
+ throw new SailException(); + + if (bindings != null && bindingSets != null && bindings.size() > 0) + throw new SailException( + "The bindings and bindingSets options are mutually exclusive."); + if (log.isInfoEnabled()) log.info("Evaluating query: " + tupleExpr + ", dataSet=" + dataset + ", includeInferred=" + includeInferred); @@ -3318,8 +3348,11 @@ final BigdataEvaluationStrategy strategy = new BigdataEvaluationStrategyImpl3( - tripleSource, dataset, nativeJoins, - allowSesameQueryEvaluation + tripleSource, // + dataset, // + bindingSets,// + nativeJoins,// + allowSesameQueryEvaluation// ); final QueryOptimizerList optimizerList = new QueryOptimizerList(); @@ -3349,7 +3382,6 @@ // caller's bindingSet. final CloseableIteration<BindingSet, QueryEvaluationException> itr = strategy .evaluate(tupleExpr, - // org.openrdf.query.impl.EmptyBindingSet.getInstance(), bindings, queryHints); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailBooleanQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailBooleanQuery.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailBooleanQuery.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -40,27 +40,39 @@ } /** - * Overriden to use query hints from SPARQL queries. Query hints are - * embedded in query strings as namespaces. - * See {@link QueryHints#PREFIX} for more information. + * {@inheritDoc} + * <p> + * Overridden to use query hints from SPARQL queries. Query hints are + * embedded in query strings as namespaces. 
+ * + * @see QueryHints */ @Override public boolean evaluate() throws QueryEvaluationException { - ParsedBooleanQuery parsedBooleanQuery = getParsedQuery(); - TupleExpr tupleExpr = parsedBooleanQuery.getTupleExpr(); + + final ParsedBooleanQuery parsedBooleanQuery = getParsedQuery(); + + final TupleExpr tupleExpr = parsedBooleanQuery.getTupleExpr(); + Dataset dataset = getDataset(); + if (dataset == null) { + // No external dataset specified, use query's own dataset (if any) dataset = parsedBooleanQuery.getDataset(); + } try { - BigdataSailConnection sailCon = + + final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection().getSailConnection(); CloseableIteration<? extends BindingSet, QueryEvaluationException> bindingsIter; - bindingsIter = sailCon.evaluate(tupleExpr, dataset, getBindings(), getIncludeInferred(), queryHints); + bindingsIter = sailCon.evaluate(tupleExpr, dataset, getBindings(), + null/* bindingSets */, getIncludeInferred(), queryHints); + bindingsIter = enforceMaxQueryTime(bindingsIter); try { @@ -76,17 +88,43 @@ } public TupleExpr getTupleExpr() throws QueryEvaluationException { + TupleExpr tupleExpr = getParsedQuery().getTupleExpr(); + try { - BigdataSailConnection sailCon = + + final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection().getSailConnection(); + tupleExpr = sailCon.optimize(tupleExpr, getActiveDataset(), getBindings(), getIncludeInferred(), queryHints); + return tupleExpr; - } - catch (SailException e) { + + } catch (SailException e) { + throw new QueryEvaluationException(e.getMessage(), e); + } + } +// synchronized public void setBindingSets( +// final CloseableIteration<BindingSet, QueryEvaluationException> bindings) { +// +// if (this.bindings != null) +// throw new IllegalStateException(); +// +// this.bindings = bindings; +// +// } +// +// synchronized public CloseableIteration<BindingSet, QueryEvaluationException> getBindingSets() { +// +// return bindings; +// +// } +// +// private 
CloseableIteration<BindingSet, QueryEvaluationException> bindings; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailGraphQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailGraphQuery.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailGraphQuery.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -230,11 +230,14 @@ public boolean isDescribe() { return describe; } - + /** - * Overriden to use query hints from SPARQL queries. Query hints are - * embedded in query strings as namespaces. - * See {@link QueryHints#PREFIX} for more information. + * {@inheritDoc} + * <p> + * Overridden to use query hints from SPARQL queries. Query hints are + * embedded in query strings as namespaces. + * + * @see QueryHints */ @Override public GraphQueryResult evaluate() throws QueryEvaluationException { @@ -242,13 +245,15 @@ try { final TupleExpr tupleExpr = getParsedQuery().getTupleExpr(); - final BigdataSailConnection sailCon = + + final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection().getSailConnection(); - CloseableIteration<? extends BindingSet, QueryEvaluationException> bindingsIter = - sailCon.evaluate( - tupleExpr, getActiveDataset(), getBindings(), - getIncludeInferred(), queryHints); + CloseableIteration<? 
extends BindingSet, QueryEvaluationException> bindingsIter = sailCon + .evaluate(tupleExpr, getActiveDataset(), getBindings(), + null/* bindingSets */, getIncludeInferred(), + queryHints); + // Filters out all partial and invalid matches bindingsIter = new FilterIteration<BindingSet, QueryEvaluationException>( @@ -266,10 +271,14 @@ if (!useNativeConstruct) { // Convert the BindingSet objects to actual RDF statements + final ValueFactory vf = getConnection().getRepository().getValueFactory(); + final CloseableIteration<Statement, QueryEvaluationException> stIter; - stIter = new ConvertingIteration<BindingSet, Statement, QueryEvaluationException>(bindingsIter) { + stIter = new ConvertingIteration<BindingSet, Statement, QueryEvaluationException>( + bindingsIter) { + @Override protected Statement convert(BindingSet bindingSet) { final Resource subject = (Resource)bindingSet.getValue("subject"); @@ -279,26 +288,34 @@ if (context == null) { return vf.createStatement(subject, predicate, object); - } - else { + } else { return vf.createStatement(subject, predicate, object, context); } } - + }; - return new GraphQueryResultImpl(getParsedQuery().getQueryNamespaces(), stIter); + return new GraphQueryResultImpl(getParsedQuery() + .getQueryNamespaces(), stIter); } else { - // native construct. + /* + * Native construct. + */ + // Convert the BindingSet objects to actual RDF statements - final ValueFactory vf = getConnection().getRepository().getValueFactory(); + final ValueFactory vf = getConnection().getRepository() + .getValueFactory(); + final CloseableIteration<? 
extends Statement, QueryEvaluationException> stIter; - stIter = new BigdataConstructIterator(sailCon.getTripleStore(), bindingsIter, vf); + + stIter = new BigdataConstructIterator(sailCon.getTripleStore(), + bindingsIter, vf); + return new GraphQueryResultImpl(getParsedQuery() .getQueryNamespaces(), stIter); - + } } catch (SailException e) { @@ -311,16 +328,43 @@ * Return the same optimized operator tree as what would be executed. */ public TupleExpr getTupleExpr() throws QueryEvaluationException { + TupleExpr tupleExpr = getParsedQuery().getTupleExpr(); + try { - BigdataSailConnection sailCon = + + final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection().getSailConnection(); + tupleExpr = sailCon.optimize(tupleExpr, getActiveDataset(), getBindings(), getIncludeInferred(), queryHints); + return tupleExpr; + } catch (SailException e) { + throw new QueryEvaluationException(e.getMessage(), e); + } + } +// synchronized public void setBindingSets( +// final CloseableIteration<BindingSet, QueryEvaluationException> bindings) { +// +// if (this.bindings != null) +// throw new IllegalStateException(); +// +// this.bindings = bindings; +// +// } +// +// synchronized public CloseableIteration<BindingSet, QueryEvaluationException> getBindingSets() { +// +// return bindings; +// +// } +// +// private CloseableIteration<BindingSet, QueryEvaluationException> bindings; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailQuery.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailQuery.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -23,10 +23,14 @@ package com.bigdata.rdf.sail; +import info.aduna.iteration.CloseableIteration; + import java.util.Properties; +import 
org.openrdf.query.BindingSet; import org.openrdf.query.QueryEvaluationException; import org.openrdf.query.algebra.TupleExpr; +import org.openrdf.query.impl.AbstractQuery; /** * Extension API for bigdata queries. @@ -39,7 +43,7 @@ /** * Returns a copy of the Sesame operator tree that will or would be - * evaluated by this query. + * evaluated by this query (debugging purposes only). */ TupleExpr getTupleExpr() throws QueryEvaluationException; @@ -48,6 +52,40 @@ * in query strings as namespaces. See {@link QueryHints#PREFIX} for more * information. */ - Properties getQueryHints(); - + Properties getQueryHints(); + +// /** +// * Integration point for 3rd party operators and/or data sources such as an +// * external GIS index. The supplied iteration will be used to feed initial +// * solutions into the native query evaluation performed by bigdata. +// * <p> +// * This provides a batch oriented version of the ability to set some initial +// * bindings on an {@link AbstractQuery} (you can provide many input +// * {@link BindingSet}s using this method, not just some variables on the +// * initial {@link BindingSet}). +// * <p> +// * This does not provide a "what-if" facility. Each input binding set +// * provided via this method must be consistent with the data and the query +// * for the bindings to flow through. Binding sets which are not consistent +// * with the data and the query will be pruned. +// * +// * @param bindings +// * The bindings to feed into the query evaluation. +// * +// * @throws IllegalStateException +// * if the bindings have already been set to a non- +// * <code>null</code> value. +// * +// * @see AbstractQuery#setBinding(String, org.openrdf.model.Value) +// * +// * @see https://sourceforge.net/apps/trac/bigdata/ticket/267 +// */ +// void setBindingSets( +// final CloseableIteration<BindingSet, QueryEvaluationException> bindings); +// +// /** +// * Return the iteration set by {@link #setBindingSets(CloseableIteration)}. 
+// */ +// CloseableIteration<BindingSet, QueryEvaluationException> getBindingSets(); + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -18,6 +18,7 @@ import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.changesets.IChangeLog; import com.bigdata.rdf.changesets.IChangeRecord; +import com.bigdata.rdf.model.BigdataValueFactory; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.sparql.BigdataSPARQLParser; import com.bigdata.rdf.store.AbstractTripleStore; @@ -57,10 +58,17 @@ @Override public BigdataSailConnection getSailConnection() { - return (BigdataSailConnection)super.getSailConnection(); - + return (BigdataSailConnection)super.getSailConnection(); + } + @Override + public BigdataValueFactory getValueFactory() { + + return (BigdataValueFactory) super.getValueFactory(); + + } + /** * {@inheritDoc} * <p> Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailTupleQuery.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -25,7 +25,12 @@ * See {@link QueryHints#PREFIX} for more information. 
*/ private final Properties queryHints; - + + /** + * Set lazily by {@link #getTupleExpr()} + */ + private volatile TupleExpr tupleExpr; + public Properties getQueryHints() { return queryHints; @@ -60,8 +65,9 @@ final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection() .getSailConnection(); - bindingsIter = sailCon.evaluate(tupleExpr, getActiveDataset(), - getBindings(), getIncludeInferred(), queryHints); + bindingsIter = sailCon.evaluate(tupleExpr, getActiveDataset(), + getBindings(), null/* bindingSets */, getIncludeInferred(), + queryHints); bindingsIter = enforceMaxQueryTime(bindingsIter); @@ -74,28 +80,51 @@ } - } + } - public TupleExpr getTupleExpr() throws QueryEvaluationException { + public TupleExpr getTupleExpr() throws QueryEvaluationException { - TupleExpr tupleExpr = getParsedQuery().getTupleExpr(); + if (tupleExpr == null) { + + TupleExpr tupleExpr = getParsedQuery().getTupleExpr(); - try { + try { - final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection() - .getSailConnection(); + final BigdataSailConnection sailCon = (BigdataSailConnection) getConnection() + .getSailConnection(); - tupleExpr = sailCon.optimize(tupleExpr, getActiveDataset(), - getBindings(), getIncludeInferred(), queryHints); + tupleExpr = sailCon.optimize(tupleExpr, getActiveDataset(), + getBindings(), getIncludeInferred(), queryHints); - return tupleExpr; + this.tupleExpr = tupleExpr; - } catch (SailException e) { + } catch (SailException e) { - throw new QueryEvaluationException(e.getMessage(), e); - - } + throw new QueryEvaluationException(e.getMessage(), e); + } + } + + return tupleExpr; + } +// synchronized public void setBindingSets( +// final CloseableIteration<BindingSet, QueryEvaluationException> bindings) { +// +// if (this.bindings != null) +// throw new IllegalStateException(); +// +// this.bindings = bindings; +// +// } +// +// synchronized public CloseableIteration<BindingSet, QueryEvaluationException> getBindingSets() { +// +// 
return bindings; +// +// } +// +// private CloseableIteration<BindingSet, QueryEvaluationException> bindings; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Rule2BOpUtility.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -306,6 +306,9 @@ public static PipelineOp applyQueryHints(PipelineOp op, final Properties queryHints) { + if (queryHints == null) + return op; + final Enumeration<?> pnames = queryHints.propertyNames(); while (pnames.hasMoreElements()) { @@ -409,9 +412,10 @@ */ final BOpContextBase context = new BOpContextBase(queryEngine); - final QueryOptimizerEnum optimizer = QueryOptimizerEnum - .valueOf(queryHints.getProperty(QueryHints.OPTIMIZER, - QueryOptimizerEnum.Static.toString())); + final QueryOptimizerEnum optimizer = queryHints == null ? QueryOptimizerEnum.Static + : QueryOptimizerEnum.valueOf(queryHints.getProperty( + QueryHints.OPTIMIZER, QueryOptimizerEnum.Static + .toString())); // The evaluation plan order. final int[] order; Added: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -0,0 +1,101 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 4, 2008 + */ + +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import com.bigdata.striterator.ICloseableIterator; + +/** + * Class aligns a Sesame 2 {@link CloseableIteration} with a bigdata + * {@link ICloseableIterator}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: Bigdata2SesameIteration.java 2265 2009-10-26 12:51:06Z + * thompsonbry $ + * @param <T> + * The generic type of the visited elements. + * @param <E> + * The generic type of the exceptions thrown by the Sesame 2 + * {@link CloseableIteration}. + */ +public class Sesame2BigdataIterator<T, E extends Exception> implements + ICloseableIterator<T> { + + private final CloseableIteration<? extends T,E> src; + + public Sesame2BigdataIterator(final CloseableIteration<? 
extends T,E> src) { + + if (src == null) + throw new IllegalArgumentException(); + + this.src = src; + + } + + public void close() { + + try { + src.close(); + } catch(Exception e) { + throw new RuntimeException(e); + } + + } + + public boolean hasNext() { + + try { + return src.hasNext(); + } catch(Exception e) { + throw new RuntimeException(e); + } + + } + + public T next() { + + try { + return src.next(); + } catch(Exception e) { + throw new RuntimeException(e); + } + + } + + public void remove() { + + try { + src.remove(); + } catch(Exception e) { + throw new RuntimeException(e); + } + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/Sesame2BigdataIterator.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sop/SOp2BOpUtility.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -76,7 +76,7 @@ public class SOp2BOpUtility { - protected static final Logger log = Logger.getLogger(SOp2BOpUtility.class); + private static final Logger log = Logger.getLogger(SOp2BOpUtility.class); public static PipelineOp convert(final SOpTree sopTree, final AtomicInteger idFactory, final AbstractTripleStore db, @@ -962,8 +962,9 @@ } protected static boolean useHashJoin(final Properties queryHints) { - final boolean hashJoin = Boolean.valueOf(queryHints.getProperty( - QueryHints.HASH_JOIN, QueryHints.DEFAULT_HASH_JOIN)); + final boolean hashJoin = queryHints == null ? 
false : Boolean + .valueOf(queryHints.getProperty(QueryHints.HASH_JOIN, + QueryHints.DEFAULT_HASH_JOIN)); if (log.isInfoEnabled()) { log.info(queryHints); log.info(queryHints.getProperty(QueryHints.HASH_JOIN)); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataSPARQLParser.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataSPARQLParser.java 2011-06-24 14:41:14 UTC (rev 4789) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/sparql/BigdataSPARQLParser.java 2011-06-24 15:10:39 UTC (rev 4790) @@ -1,4 +1,28 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ /* + * Portions of this code are: + * * Copyright Aduna (http://www.aduna-software.com/) (c) 1997-2007. * * Licensed under the Aduna BSD-style license. 
@@ -121,52 +145,46 @@ static private Properties getQueryHints(final ASTQueryContainer qc) throws MalformedQueryException { -// try { - final Properties queryHints = new Properties(); -// // currently only supporting SPARQL -// if (ql == QueryLanguage.SPARQL) { -// // the next four lines were taken directly from -// // org.openrdf.query.parser.sparql.SPARQLParser.parseQuery(String queryStr, String baseURI) -// final ASTQueryContainer qc = SyntaxTreeBuilder -// ... [truncated message content] |