This list is closed, nobody may subscribe to it.

Messages per month:

| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|------|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|
| 2010 |     |     |     |     |     |     | 139 | 94  | 232 | 143 | 138 | 55  |
| 2011 | 127 | 90  | 101 | 74  | 148 | 241 | 169 | 121 | 157 | 199 | 281 | 75  |
| 2012 | 107 | 122 | 184 | 73  | 14  | 49  | 26  | 103 | 133 | 61  | 51  | 55  |
| 2013 | 59  | 72  | 99  | 62  | 92  | 19  | 31  | 138 | 47  | 83  | 95  | 111 |
| 2014 | 125 | 60  | 119 | 136 | 270 | 83  | 88  | 30  | 47  | 27  | 23  |     |
| 2015 |     |     |     |     |     |     |     |     | 3   |     |     |     |
| 2016 |     |     | 4   | 1   |     |     |     |     |     |     |     |     |
From: <tob...@us...> - 2014-04-05 01:16:54
Revision: 8057  http://sourceforge.net/p/bigdata/code/8057
Author: tobycraig
Date: 2014-04-05 01:16:51 +0000 (Sat, 05 Apr 2014)

Log Message:
-----------
#879 - Fixed problem with URIs containing URI-encoded elements in explore tab

Modified Paths:
--------------
branches/RDR/bigdata-war/src/html/js/workbench.js

```diff
Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-05 00:35:08 UTC (rev 8056)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-05 01:16:51 UTC (rev 8057)
@@ -710,7 +710,7 @@
   }
   var settings = {
     type: 'POST',
-    data: 'query=' + encodeURI(query),
+    data: 'query=' + encodeURIComponent(query),
     dataType: 'json',
     accepts: {'json': 'application/sparql-results+json'},
     success: updateExploreStart,
```
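For background on the one-character fix above: `encodeURI` leaves URI-reserved characters such as `&`, `=`, and `?` unescaped, so a SPARQL query containing them corrupts the `query=` form parameter, while `encodeURIComponent` escapes them. A minimal JavaScript illustration (the query string is invented for the example):

```javascript
// encodeURI leaves '&', '=' and '?' intact; encodeURIComponent escapes them,
// which is what a value embedded in a form-encoded body needs.
var query = 'SELECT * { ?s ?p "a&b=c" }';

console.log('query=' + encodeURI(query));
// query=SELECT%20*%20%7B%20?s%20?p%20%22a&b=c%22%20%7D
// ('&' and '=' survive and break the POST parameter)

console.log('query=' + encodeURIComponent(query));
// query=SELECT%20*%20%7B%20%3Fs%20%3Fp%20%22a%26b%3Dc%22%20%7D
```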
From: <tob...@us...> - 2014-04-05 00:35:12
Revision: 8056 http://sourceforge.net/p/bigdata/code/8056 Author: tobycraig Date: 2014-04-05 00:35:08 +0000 (Sat, 05 Apr 2014) Log Message: ----------- Changed new workbench to default. Old one is available at /bigdata/html/old.html. Also made query tab default instead of load in new workbench. Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html Added Paths: ----------- branches/RDR/bigdata-war/src/html/old.html Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-04 19:56:42 UTC (rev 8055) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-05 00:35:08 UTC (rev 8056) @@ -1,144 +1,201 @@ -<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" -"http://www.w3.org/TR/html4/loose.dtd"> -<html> -<head profile="http://www.w3.org/2005/10/profile"> -<link rel="icon" - type="image/png" - href="/bigdata/html/favicon.ico" /> -<meta http-equiv="Content-Type" content="text/html;charset=utf-8" > -<title>bigdata® NanoSparqlServer</title> -<!-- $Id$ --> -<!-- junit test marker: index.html --> -</head> -<body> - -<h2>Welcome to bigdata®.</h2> -<p>Please consult the -<a href="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" - target="_blank" - > documentation</a> for information on using the NanoSparqlServer's REST Api. </br>See the - <a href="http://wiki.bigdata.com/wiki/index.php/Main_Page" - target="_blank" - >wiki</a> for help on query optimization, bigdata SPARQL extensions, etc. -</p> - -<p> -The following URLs should be active when deployed in the default configuration: -</p> -<dl> -<dt>http://hostname:port/bigdata</dt> -<dd>This page.</dd> -<dt>http://hostname:port/bigdata/sparql</dt> -<dd>The SPARQL REST API (<a href="/bigdata/sparql">Service Description + VoID Description</a>).</dd> -<dt>http://hostname:port/bigdata/namespace</dt> -<dd>VoID <a href="/bigdata/namespace">graph of available KBs</a> from this service.</dd> -<dt>http://hostname:port/bigdata/status</dt> -<dd>A <a href="/bigdata/status">status</a> page.</dd> -<dt>http://hostname:port/bigdata/counters</dt> -<dd>A <a href="/bigdata/counters"> performance counters</a> page.</dd> -</dl> - -<p> -Where <i>hostname</i> is the name of this host and <i>port</i> is the port at -which this page was accessed. -</p> - -<!-- Note: Some applications (firefox 7) can not handle a GET with a very long - URL. For that reason ONLY this operation defaults to a POST. You SHOULD - use GET for database queries since they are, by and large, idempotent. - --> -<h2><a href="http://www.w3.org/TR/sparql11-query/" - title="W3C SPARQL 1.1 Query Recommendation" - target="_blank" - > SPARQL Query </a></h2> -<FORM action="/bigdata/sparql" method="post" name="QUERY"> - <P> - <TEXTAREA name="query" rows="10" cols="80" title="Enter SPARQL Query." - >SELECT * { ?s ?p ?o } LIMIT 1</TEXTAREA> - </P><P> - Tenant Namespace - <INPUT type="text" name="namespace" title="Tenant namespace." - > (leave empty for default KB) - </P><P> - <INPUT type="submit" value="Send" title="Submit query."> - <INPUT type="checkbox" name="explain" value="true" - title="Explain query plan rather than returning the query results." - > Explain - (<INPUT type="checkbox" name="explain" value="details" - title="Explain query plan rather than returning the query results (with extra details)." - > Details) - <INPUT type="checkbox" name="analytic" value="true" - title="Enable the analytic query package." 
- > Analytic -<!-- TODO Uncomment to reveal the RTO option. --> - <INPUT type="checkbox" name="RTO" value="true" - title="Enable the Runtime Query Optimizer (RTO) - This is an alpha feature." - > RTO (Alpha) -<!-- --> - <INPUT type="checkbox" name="xhtml" value="true" - title="Request XHTML response (results formatted as table)." - checked="checked" - > XHTML - </P> -</FORM> -<h2><a href="http://www.w3.org/TR/sparql11-update/" - title="W3C SPARQL Update Recommendation" - target="_blank" - >SPARQL Update</a></h2> -<FORM action="/bigdata/sparql" method="post"> - <P> - <TEXTAREA name="update" rows="10" cols="80" title="Enter SPARQL Update." - > -PREFIX dc: <http://purl.org/dc/elements/1.1/> -INSERT DATA -{ - <http://example/book1> dc:title "A new book" ; - dc:creator "A.N.Other" . -}</TEXTAREA> - </P><P> - Tenant Namespace - <INPUT type="text" name="namespace" title="Tenant namespace." - > (leave empty for default KB) - </P><P> - <INPUT type="submit" value="Send" title="Submit Update."> - <!--INPUT type="checkbox" name="explain" value="true" - title="Explain query plan rather than returning the query results." - > Explain--> - <INPUT type="checkbox" name="analytic" value="true" - title="Enable the analytic query package." - > Analytic - <INPUT type="checkbox" name="monitor" value="true" - title="Monitor the execution of the UPDATE request." - checked="checked" - > Monitor - </P> -</FORM> -<p> -<!-- Note: Some common characters need to be escaped here and also in the SPARQL - examples above. - --> -Here are some useful namespaces: -</p> -<pre> -prefix dc: <http://purl.org/dc/elements/1.1/> -prefix xsd: <http://www.w3.org/2001/XMLSchema#> -prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> -prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> -prefix owl: <http://www.w3.org/2002/07/owl#> -prefix foaf: <http://xmlns.com/foaf/0.1/> -prefix hint: <http://www.bigdata.com/queryHints#> -prefix bd: <http://www.bigdata.com/rdf#> -prefix bds: <http://www.bigdata.com/rdf/search#> -</pre> -<!-- Note: Use SPARQL Update "LOAD" instead. 
-<h2>Upload Data (URL):</h2> -<form action="sparql" method="post"> - <p> - <textarea name="uri" rows="1" cols="100">file:/</textarea> - </p><p> - <input type="submit" value="Upload"> - </p> -</form> ---> -</body> -</html> \ No newline at end of file +<!DOCTYPE html> +<html lang="en"> + <head profile="http://www.w3.org/2005/10/profile"> + <link rel="icon" + type="image/png" + href="/bigdata/html/favicon.ico" /> + <!-- meta charset="utf-8" --> + <meta http-equiv="Content-Type" content="text/html;charset=utf-8" > + <title>Bigdata Workbench</title> + <link rel="stylesheet" href="/bigdata/html/css/style.css"> + </head> + + <body> + <div id="container"> + + <div id="top"> + <img src="/bigdata/html/images/logo.png" id="logo"> + <form id="search-form"><label for="search-text">Search:</label> <input type="text" id="search-text"></form> + <p>Current namespace: <span id="current-namespace"></span></p> + </div> + + <div id="tab-selector"> + <a data-target="query" class="active">Query</a> + <a data-target="load">Load</a> + <a data-target="explore">Explore</a> + <a data-target="status">Status</a> + <a data-target="performance">Performance</a> + <a data-target="namespaces">Namespaces</a> + </div> + + <div class="tab" id="load-tab"> + + <div class="box"> + + <div class="namespace-shortcuts"> + <ul> + <li data-ns="prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>">RDF</li> + <li data-ns="prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>">RDFS</li> + <li data-ns="prefix owl: <http://www.w3.org/2002/07/owl#>">OWL</li> + <li data-ns="prefix bd: <http://www.bigdata.com/rdf#> ">BD</li> + <li data-ns="prefix bds: <http://www.bigdata.com/rdf/search#> ">BDS</li> + <li data-ns="prefix foaf: <http://xmlns.com/foaf/0.1/>">FOAF</li> + <li data-ns="prefix hint: <http://www.bigdata.com/queryHints#>">HINT</li> + <li data-ns="prefix dc: <http://purl.org/dc/elements/1.1/>">DC</li> + <li data-ns="prefix xsd: <http://www.w3.org/2001/XMLSchema#>">XSD</li> + </ul> + </div> + + <textarea id="load-box" placeholder="(Type in or drag a file containing RDF data, a SPARQL update or a file path or URL)"></textarea> + <p id="large-file-message">Your file is too large to display here, but will be uploaded as normal. 
<a href="#" id="clear-file">Remove file</a></p> + <p> + <input type="file" id="load-file"><br> + <label for="load-type">Type:</label> + <select id="load-type"> + <option value="sparql" selected="selected">SPARQL</option> + <option value="rdf">RDF</option> + <option value="path">File path</option> + </select> + <span id="rdf-type-container"> + <label for="rdf-type">Format:</label> + <select id="rdf-type"> + <option value="">Select RDF format</option> + <option value="n-quads">N-Quads</option> + <option value="n-triples">N-Triples</option> + <option value="n3">Notation3</option> + <option value="rdf/xml">RDF/XML</option> + <option value="trig">TriG</option> + <option value="trix">TriX</option> + <option value="turtle">Turtle</option> + </select> + </span> + </p> + <hr class="shadow"> + <button id="load-load">Load</button> + </div> + + <div class="box"> + <pre id="load-response"></pre> + </div> + + <div class="bottom"> + <button id="load-clear">Clear output</button> + </div> + + </div> + + <div class="tab" id="query-tab"> + <div class="box"> + + <div class="namespace-shortcuts"> + <ul> + <li data-ns="prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>">RDF</li> + <li data-ns="prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>">RDFS</li> + <li data-ns="prefix owl: <http://www.w3.org/2002/07/owl#>">OWL</li> + <li data-ns="prefix bd: <http://www.bigdata.com/rdf#> ">BD</li> + <li data-ns="prefix bds: <http://www.bigdata.com/rdf/search#> ">BDS</li> + <li data-ns="prefix foaf: <http://xmlns.com/foaf/0.1/>">FOAF</li> + <li data-ns="prefix hint: <http://www.bigdata.com/queryHints#>">HINT</li> + <li data-ns="prefix dc: <http://purl.org/dc/elements/1.1/>">DC</li> + <li data-ns="prefix xsd: <http://www.w3.org/2001/XMLSchema#>">XSD</li> + </ul> + </div> + + <form id="query-form"> + <textarea id="query-box" name="query" placeholder="(Input a SPARQL query)"></textarea> + + <a href="#" id="advanced-features-toggle">Advanced features</a> + + <div id="advanced-features"> + <input type="checkbox" id="query-explain"> <label for="query-explain">Explain</label> + <input type="checkbox" name="analytic" value="true" id="query-analytic"> <label for="query-analytic">Analytic</label> + <input type="checkbox" name="RTO" value="true" id="query-rto"> <label for="query-rto">Runtime Query Optimizer</label> + </div> + + <hr class="shadow"> + + <div id="load-buttons"> + <input type="submit" value="Execute"> + <input type="reset" value="Clear"> + </div> + </form> + + </div> + + <div id="query-response" class="box"> + </div> + + <div id="query-explanation" class="box"> + </div> + + <div class="bottom"> + <button id="query-export-csv">Export CSV</button> + <button id="query-export-json">Export JSON</button> + <button id="query-export-xml">Export XML</button> + <button id="query-response-clear">Clear</button> + </div> + + </div> + + <div class="tab" id="explore-tab"> + + <div class="box"> + <p>Enter a URI to begin navigation <form id="explore-form"><input type="text"> <input type="submit"></form></p> + </div> + + <div id="explore-results"> + <div class="box" id="explore-header"></div> + <div class="box" id="explore-incoming"></div> + <div class="box" id="explore-outgoing"></div> + <div class="box" id="explore-attributes"></div> + </div> + + </div> + + <div class="tab" id="status-tab"> + + <div class="box"> + <p>Accepted query count: <span id="accepted-query-count"></span></p> + <p>Running query count: <span id="running-query-count"></span></p> + <p>Show <a href="#" id="show-queries">queries</a>, <a href="#" 
id="show-query-details">query details</a>.</p> + <pre id="status-numbers"></pre> + <ul id="running-queries"></ul> + </div> + + </div> + + <div class="tab" id="performance-tab"> + + <div class="box"></div> + + </div> + + <div class="tab" id="namespaces-tab"> + + <div class="box"> + <h1>Namespaces</h1> + <ul id="namespaces-list"></ul> + <br> + <a href="/bigdata/namespace">Download VoID description of all namespaces</a> + </div> + + <div id="namespace-properties" class="box"> + <h1></h1> + <table></table> + </div> + + <div class="box"> + <form id="namespace-create"><input type="text"> <input type="submit" value="Create namespace"></form> + </div> + + </div> + + </div> + + <!--[if IE]><script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script><![endif]--> + <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> + <script>window.jQuery || document.write('<script src="/bigdata/html/js/vendor/jquery.min.js"><\/script>')</script> + <script src="/bigdata/html/js/vendor/jquery.hotkeys.js"></script> + <script src="/bigdata/html/js/workbench.js"></script> + </body> +</html> Copied: branches/RDR/bigdata-war/src/html/old.html (from rev 8028, branches/RDR/bigdata-war/src/html/index.html) =================================================================== --- branches/RDR/bigdata-war/src/html/old.html (rev 0) +++ branches/RDR/bigdata-war/src/html/old.html 2014-04-05 00:35:08 UTC (rev 8056) @@ -0,0 +1,144 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" +"http://www.w3.org/TR/html4/loose.dtd"> +<html> +<head profile="http://www.w3.org/2005/10/profile"> +<link rel="icon" + type="image/png" + href="/bigdata/html/favicon.ico" /> +<meta http-equiv="Content-Type" content="text/html;charset=utf-8" > +<title>bigdata® NanoSparqlServer</title> +<!-- $Id$ --> +<!-- junit test marker: index.html --> +</head> +<body> + +<h2>Welcome to bigdata®.</h2> +<p>Please consult the +<a href="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer" + target="_blank" + > documentation</a> for information on using the NanoSparqlServer's REST Api. </br>See the + <a href="http://wiki.bigdata.com/wiki/index.php/Main_Page" + target="_blank" + >wiki</a> for help on query optimization, bigdata SPARQL extensions, etc. +</p> + +<p> +The following URLs should be active when deployed in the default configuration: +</p> +<dl> +<dt>http://hostname:port/bigdata</dt> +<dd>This page.</dd> +<dt>http://hostname:port/bigdata/sparql</dt> +<dd>The SPARQL REST API (<a href="/bigdata/sparql">Service Description + VoID Description</a>).</dd> +<dt>http://hostname:port/bigdata/namespace</dt> +<dd>VoID <a href="/bigdata/namespace">graph of available KBs</a> from this service.</dd> +<dt>http://hostname:port/bigdata/status</dt> +<dd>A <a href="/bigdata/status">status</a> page.</dd> +<dt>http://hostname:port/bigdata/counters</dt> +<dd>A <a href="/bigdata/counters"> performance counters</a> page.</dd> +</dl> + +<p> +Where <i>hostname</i> is the name of this host and <i>port</i> is the port at +which this page was accessed. +</p> + +<!-- Note: Some applications (firefox 7) can not handle a GET with a very long + URL. For that reason ONLY this operation defaults to a POST. You SHOULD + use GET for database queries since they are, by and large, idempotent. 
+ --> +<h2><a href="http://www.w3.org/TR/sparql11-query/" + title="W3C SPARQL 1.1 Query Recommendation" + target="_blank" + > SPARQL Query </a></h2> +<FORM action="/bigdata/sparql" method="post" name="QUERY"> + <P> + <TEXTAREA name="query" rows="10" cols="80" title="Enter SPARQL Query." + >SELECT * { ?s ?p ?o } LIMIT 1</TEXTAREA> + </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." + > (leave empty for default KB) + </P><P> + <INPUT type="submit" value="Send" title="Submit query."> + <INPUT type="checkbox" name="explain" value="true" + title="Explain query plan rather than returning the query results." + > Explain + (<INPUT type="checkbox" name="explain" value="details" + title="Explain query plan rather than returning the query results (with extra details)." + > Details) + <INPUT type="checkbox" name="analytic" value="true" + title="Enable the analytic query package." + > Analytic +<!-- TODO Uncomment to reveal the RTO option. --> + <INPUT type="checkbox" name="RTO" value="true" + title="Enable the Runtime Query Optimizer (RTO) - This is an alpha feature." + > RTO (Alpha) +<!-- --> + <INPUT type="checkbox" name="xhtml" value="true" + title="Request XHTML response (results formatted as table)." + checked="checked" + > XHTML + </P> +</FORM> +<h2><a href="http://www.w3.org/TR/sparql11-update/" + title="W3C SPARQL Update Recommendation" + target="_blank" + >SPARQL Update</a></h2> +<FORM action="/bigdata/sparql" method="post"> + <P> + <TEXTAREA name="update" rows="10" cols="80" title="Enter SPARQL Update." + > +PREFIX dc: <http://purl.org/dc/elements/1.1/> +INSERT DATA +{ + <http://example/book1> dc:title "A new book" ; + dc:creator "A.N.Other" . +}</TEXTAREA> + </P><P> + Tenant Namespace + <INPUT type="text" name="namespace" title="Tenant namespace." + > (leave empty for default KB) + </P><P> + <INPUT type="submit" value="Send" title="Submit Update."> + <!--INPUT type="checkbox" name="explain" value="true" + title="Explain query plan rather than returning the query results." + > Explain--> + <INPUT type="checkbox" name="analytic" value="true" + title="Enable the analytic query package." + > Analytic + <INPUT type="checkbox" name="monitor" value="true" + title="Monitor the execution of the UPDATE request." + checked="checked" + > Monitor + </P> +</FORM> +<p> +<!-- Note: Some common characters need to be escaped here and also in the SPARQL + examples above. + --> +Here are some useful namespaces: +</p> +<pre> +prefix dc: <http://purl.org/dc/elements/1.1/> +prefix xsd: <http://www.w3.org/2001/XMLSchema#> +prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> +prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +prefix owl: <http://www.w3.org/2002/07/owl#> +prefix foaf: <http://xmlns.com/foaf/0.1/> +prefix hint: <http://www.bigdata.com/queryHints#> +prefix bd: <http://www.bigdata.com/rdf#> +prefix bds: <http://www.bigdata.com/rdf/search#> +</pre> +<!-- Note: Use SPARQL Update "LOAD" instead. +<h2>Upload Data (URL):</h2> +<form action="sparql" method="post"> + <p> + <textarea name="uri" rows="1" cols="100">file:/</textarea> + </p><p> + <input type="submit" value="Upload"> + </p> +</form> +--> +</body> +</html> \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-04-04 19:56:45
Revision: 8055 http://sourceforge.net/p/bigdata/code/8055 Author: mrpersonick Date: 2014-04-04 19:56:42 +0000 (Fri, 04 Apr 2014) Log Message: ----------- added support for multiple bindings to an output variable on a gas program service call Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IBinder.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq Added Paths: ----------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java Added: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java (rev 0) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -0,0 +1,70 @@ +/** + Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package com.bigdata.rdf.graph; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.openrdf.model.Value; +import org.openrdf.model.ValueFactory; + +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IVariable; + +/** + * A base class for IBinders. + */ +public abstract class BinderBase<VS, ES, ST> implements IBinder<VS, ES, ST> { + + /** + * The ordinal index of the variable that is bound by this + * {@link BinderBase}. By convention, index ZERO is the vertex. Indices + * greater than ZERO are typically aspects of the state of the vertex. + */ + @Override + public abstract int getIndex(); + + /** + * Subclasses can implement this method if they follow the old single + * bind paradigm. + */ + public abstract Value bind(ValueFactory vf, final IGASState<VS, ES, ST> state, Value v); + + /** + * Call {@link #bind(ValueFactory, IGASState, Value)}. 
+ */ + @Override + @SuppressWarnings("unchecked") + public List<Value> bind(final ValueFactory vf, final IGASState<VS, ES, ST> state, + final Value u, final IVariable<?>[] outVars, final IBindingSet bs) { + + final Value val = bind(vf, state, u); + + if (val == null) { + + return Collections.EMPTY_LIST; + + } else { + + return Arrays.asList(new Value[] { val }); + + } + + } + +} + Property changes on: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/BinderBase.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IBinder.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IBinder.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IBinder.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -15,9 +15,14 @@ */ package com.bigdata.rdf.graph; +import java.util.List; + import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IVariable; + /** * An interface that may be used to extract variable bindings for the * vertices visited by the algorithm. @@ -35,17 +40,35 @@ int getIndex(); /** + * New multi-binding strategy allows binders to bind multiple values to + * a given output variable (multiplying the number of solutions by the + * number of bindings). + * * @param vf * The {@link ValueFactory} used to create the return * {@link Value}. + * + * @param state + * The {@link IGASState}. + * * @param u - * The vertex. + * The vertex. * + * @param outVars + * The array of output variables. + * + * @param bs + * The current binding set. Can be used to conditionally bind + * values based on the current solution. + * * @return The {@link Value} for that ordinal variable or * <code>null</code> if there is no binding for that ordinal * variable. 
*/ - Value bind(ValueFactory vf, final IGASState<VS, ES, ST> state, Value u); + List<Value> bind(ValueFactory vf, IGASState<VS, ES, ST> state, + Value u, IVariable<?>[] outVars, IBindingSet bs); +// Value bind(ValueFactory vf, final IGASState<VS, ES, ST> state, Value u); + } Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/BFS.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -25,6 +25,7 @@ import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -282,7 +283,7 @@ final List<IBinder<BFS.VS, BFS.ES, Void>> tmp = super.getBinderList(); - tmp.add(new IBinder<BFS.VS, BFS.ES, Void>() { + tmp.add(new BinderBase<BFS.VS, BFS.ES, Void>() { @Override public int getIndex() { @@ -296,9 +297,10 @@ return vf.createLiteral(state.getState(u).depth.get()); } + }); - tmp.add(new IBinder<BFS.VS, BFS.ES, Void>() { + tmp.add(new BinderBase<BFS.VS, BFS.ES, Void>() { @Override public int getIndex() { @@ -312,6 +314,7 @@ return state.getState(u).predecessor.get(); } + }); return tmp; Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/CC.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -27,6 +27,7 @@ import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -319,7 +320,7 @@ final List<IBinder<CC.VS, CC.ES, Value>> tmp = super.getBinderList(); - tmp.add(new IBinder<CC.VS, CC.ES, Value>() { + tmp.add(new BinderBase<CC.VS, CC.ES, Value>() { @Override public int getIndex() { @@ -333,6 +334,7 @@ return state.getState(u).label.get(); } + }); return tmp; Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -15,20 +15,25 @@ */ package com.bigdata.rdf.graph.analytics; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.log4j.Logger; import org.openrdf.model.Statement; +import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IVariable; +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -39,14 
+44,15 @@ import com.bigdata.rdf.graph.IGASState; import com.bigdata.rdf.graph.IPredecessor; import com.bigdata.rdf.graph.impl.BaseGASProgram; +import com.bigdata.rdf.internal.IV; /** - * Breadth First Search (BFS) is an iterative graph traversal primitive. The - * frontier is expanded iteratively until no new vertices are discovered. Each - * visited vertex is marked with the round (origin ZERO) in which it was - * visited. This is its distance from the initial frontier. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * PATHS is an iterative graph traversal operation. The frontier is expanded + * iteratively until no new vertices are discovered, or until the target + * vertices have all been reached. Each vertex is marked with its depth and with + * a list of all predecessors and their edges to the vertex. This algorithm is + * useful for creating a complete connected subgraph between a source and a set + * of targets. */ public class PATHS extends BaseGASProgram<PATHS.VS, PATHS.ES, Void> implements IPredecessor<PATHS.VS, PATHS.ES, Void> { @@ -70,11 +76,12 @@ private final AtomicInteger depth = new AtomicInteger(-1); /** - * The predecessors are the first source vertex to visit a given target - * vertex. + * The predecessors are the source vertices to visit a given target + * vertex. Each one has a list of edges along which they were able to + * reach this vertex. */ - private final Set<Value> predecessors = - Collections.synchronizedSet(new LinkedHashSet<Value>()); + private final Map<Value, Set<URI>> predecessors = + Collections.synchronizedMap(new LinkedHashMap<Value, Set<URI>>()); /** * The depth at which this vertex was first visited (origin ZERO) and @@ -87,13 +94,33 @@ } /** - * Return the first vertex to discover this vertex during BFS traversal. + * Return the vertices that discovered this vertex during BFS traversal. */ - public Set<Value> predecessors() { + public Map<Value, Set<URI>> predecessors() { return predecessors; } + + /** + * Add a predecessor (might have already been added) and the edge + * along which the predecessor discovered this vertex. + */ + public synchronized void addPredecessor(final Value pred, final URI edge) { + + Set<URI> edges = predecessors.get(pred); + + if (edges == null) { + + edges = new LinkedHashSet<URI>(); + + predecessors.put(pred, edges); + + } + + edges.add(edge); + + } /** * Note: This marks the vertex at the current traversal depth. @@ -103,21 +130,20 @@ * first visited the vertex (this helps to avoid multiple * scheduling of a vertex). */ - public boolean visit(final int depth, final Value predecessor) { - if (predecessor != null) - this.predecessors.add(predecessor); + public boolean visit(final int depth, final Value pred, final URI edge) { + + if (pred != null) { +// this.predecessors.add(pred); + addPredecessor(pred, edge); + } + if (this.depth.compareAndSet(-1/* expect */, depth/* newValue */)) { // Scheduled by this thread. 
return true; } + return false; -// synchronized (this) { -// if (this.depth == -1) { -// this.depth = depth; -// return true; -// } -// return false; -// } + } @Override @@ -187,7 +213,7 @@ public void initVertex(final IGASContext<PATHS.VS, PATHS.ES, Void> ctx, final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { - state.getState(u).visit(0, null/* predecessor */); + state.getState(u).visit(0, null/* predecessor */, null/* edge */); } @@ -245,11 +271,6 @@ public void scatter(final IGASState<PATHS.VS, PATHS.ES, Void> state, final IGASScheduler sch, final Value u, final Statement e) { -// if (state.getTargetVertices().contains(u)) { -// // don't schedule any more vertices, we've hit a target -// return; -// } - // remote vertex state. final Value v = state.getOtherVertex(u, e); @@ -257,7 +278,7 @@ // final VS otherState = state.getState(e.getObject()/* v */); // visit. - if (otherState.visit(state.round() + 1, u/* predecessor */)) { + if (otherState.visit(state.round() + 1, u/* predecessor */, e.getPredicate())) { /* * This is the first visit for the remote vertex. Add it to the @@ -284,9 +305,12 @@ * <dt>{@value Bindings#DEPTH}</dt> * <dd>The depth at which the vertex was first encountered during traversal. * </dd> - * <dt>{@value Bindings#PREDECESSOR}</dt> - * <dd>The predecessor is the first vertex that discovers a given vertex + * <dt>{@value Bindings#PREDECESSORS}</dt> + * <dd>The predecessors are all the vertices that discovers a given vertex * during traversal.</dd> + * <dt>{@value Bindings#EDGES}</dt> + * <dd>These are the edges along which each predecessor discovered a given + * vertex during traversal.</dd> * </dl> */ @Override @@ -294,7 +318,7 @@ final List<IBinder<PATHS.VS, PATHS.ES, Void>> tmp = super.getBinderList(); - tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { + tmp.add(new BinderBase<PATHS.VS, PATHS.ES, Void>() { @Override public int getIndex() { @@ -308,28 +332,89 @@ return vf.createLiteral(state.getState(u).depth.get()); } + }); tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { @Override public int getIndex() { - return Bindings.PREDECESSOR; + return Bindings.PREDECESSORS; } @Override - public Value bind(final ValueFactory vf, - final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { + public List<Value> bind(final ValueFactory vf, + final IGASState<PATHS.VS, PATHS.ES, Void> state, + final Value u, final IVariable<?>[] outVars, + final IBindingSet bs) { - final String s = Arrays.toString(state.getState(u).predecessors.toArray()); + final VS vs = state.getState(u); - if (log.isTraceEnabled()) { - log.trace(s); + return new LinkedList<Value>(vs.predecessors().keySet()); + + } + + }); + + tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { + + @Override + public int getIndex() { + return Bindings.EDGES; + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public List<Value> bind(final ValueFactory vf, + final IGASState<PATHS.VS, PATHS.ES, Void> state, + final Value u, final IVariable<?>[] outVars, + final IBindingSet bs) { + + /* + * We want to return a different set of edges depending on + * which predecessor the caller is asking about. We can + * find that information in the binding set. 
+ */ + + final IVariable<?> var = outVars[Bindings.PREDECESSORS]; + + if (!bs.isBound(var)) { + + if (log.isTraceEnabled()) { + log.trace("no predecessors"); + } + + return Collections.EMPTY_LIST; + } - return vf.createLiteral(s); + final IV predIV = (IV) bs.get(var).get(); + + final Value predVal; + + if (predIV instanceof Value) { + + predVal = (Value) predIV; + + } else if (predIV.hasValue()) { + + predVal = predIV.getValue(); + + } else { + + throw new RuntimeException("FIXME"); + + } + + final VS vs = state.getState(u); + + /* + * Return the edges for this predecessor. + */ + return new LinkedList<Value>(vs.predecessors().get(predVal)); } + }); return tmp; @@ -349,70 +434,18 @@ int DEPTH = 1; /** - * The BFS predecessor is the first vertex to discover a given vertex. + * The predecessors are all vertices to discover a given vertex. * */ - int PREDECESSOR = 2; + int PREDECESSORS = 2; + /** + * The edges along which each predecessor discovered a given vertex. + */ + int EDGES = 3; + } -// /** -// * Reduce the active vertex state, returning a histogram reporting the #of -// * vertices at each distance from the starting vertex. There will always be -// * one vertex at depth zero - this is the starting vertex. For each -// * successive depth, the #of vertices that were labeled at that depth is -// * reported. This is essentially the same as reporting the size of the -// * frontier in each round of the traversal, but the histograph is reported -// * based on the vertex state. -// * -// * @author <a href="mailto:tho...@us...">Bryan -// * Thompson</a> -// */ -// protected static class HistogramReducer implements -// IReducer<VS, ES, Void, Map<Integer, AtomicLong>> { -// -// private final ConcurrentHashMap<Integer, AtomicLong> values = new ConcurrentHashMap<Integer, AtomicLong>(); -// -// @Override -// public void visit(final IGASState<VS, ES, Void> state, final Value u) { -// -// final VS us = state.getState(u); -// -// if (us != null) { -// -// final Integer depth = Integer.valueOf(us.depth()); -// -// AtomicLong newval = values.get(depth); -// -// if (newval == null) { -// -// final AtomicLong oldval = values.putIfAbsent(depth, -// newval = new AtomicLong()); -// -// if (oldval != null) { -// -// // lost data race. -// newval = oldval; -// -// } -// -// } -// -// newval.incrementAndGet(); -// -// } -// -// } -// -// @Override -// public Map<Integer, AtomicLong> get() { -// -// return Collections.unmodifiableMap(values); -// -// } -// -// } - /* * TODO Do this in parallel for each specified target vertex. */ @@ -428,10 +461,6 @@ final IGASState<PATHS.VS, PATHS.ES, Void> gasState = ctx.getGASState(); -// for (Value v : gasState.values()) { -// log.trace(v); -// } - final Set<Value> retainSet = new HashSet<Value>(); for (Value v : targetVertices) { @@ -450,20 +479,6 @@ visitPredecessors(gasState, v, retainSet); -// Value current = v; -// -// while (current != null) { -// -// retainSet.add(current); -// -// final PATHS.VS currentState = gasState.getState(current); -// -// final Value predecessor = currentState.predecessor(); -// -// current = predecessor; -// -// } - } // next target vertex. 
gasState.retainAll(retainSet); @@ -476,7 +491,7 @@ final PATHS.VS currentState = gasState.getState(v); - for (Value pred : currentState.predecessors()) { + for (Value pred : currentState.predecessors().keySet()) { if (pred == null) { @@ -498,71 +513,4 @@ } -// @Override -// public <T> IReducer<VS, ES, Void, T> getDefaultAfterOp() { -// -// class NV implements Comparable<NV> { -// public final int n; -// public final long v; -// public NV(final int n, final long v) { -// this.n = n; -// this.v = v; -// } -// @Override -// public int compareTo(final NV o) { -// if (o.n > this.n) -// return -1; -// if (o.n < this.n) -// return 1; -// return 0; -// } -// } -// -// final IReducer<VS, ES, Void, T> outerReducer = new IReducer<VS, ES, Void, T>() { -// -// final HistogramReducer innerReducer = new HistogramReducer(); -// -// @Override -// public void visit(IGASState<VS, ES, Void> state, Value u) { -// -// innerReducer.visit(state, u); -// -// } -// -// @Override -// public T get() { -// -// final Map<Integer, AtomicLong> h = innerReducer.get(); -// -// final NV[] a = new NV[h.size()]; -// -// int i = 0; -// -// for (Map.Entry<Integer, AtomicLong> e : h.entrySet()) { -// -// a[i++] = new NV(e.getKey().intValue(), e.getValue().get()); -// -// } -// -// Arrays.sort(a); -// -// System.out.println("distance, frontierSize, sumFrontierSize"); -// long sum = 0L; -// for (NV t : a) { -// -// System.out.println(t.n + ", " + t.v + ", " + sum); -// -// sum += t.v; -// -// } -// -// return null; -// } -// -// }; -// -// return outerReducer; -// -// } - } Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -25,6 +25,7 @@ import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -352,7 +353,7 @@ final List<IBinder<PR.VS, PR.ES, Double>> tmp = super.getBinderList(); - tmp.add(new IBinder<PR.VS, PR.ES, Double>() { + tmp.add(new BinderBase<PR.VS, PR.ES, Double>() { @Override public int getIndex() { @@ -366,6 +367,7 @@ return vf.createLiteral(state.getState(u).getValue()); } + }); return tmp; Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -25,6 +25,11 @@ import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; +import sun.reflect.generics.reflectiveObjects.NotImplementedException; + +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IVariable; +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -441,7 +446,7 @@ final List<IBinder<SSSP.VS, SSSP.ES, Integer>> tmp = super .getBinderList(); - tmp.add(new IBinder<SSSP.VS, SSSP.ES, Integer>() { + tmp.add(new BinderBase<SSSP.VS, SSSP.ES, Integer>() { @Override public int getIndex() { @@ -456,9 +461,10 @@ 
return vf.createLiteral(state.getState(u).dist()); } + }); - tmp.add(new IBinder<SSSP.VS, SSSP.ES, Integer>() { + tmp.add(new BinderBase<SSSP.VS, SSSP.ES, Integer>() { @Override public int getIndex() { @@ -472,6 +478,7 @@ return state.getState(u).predecessor.get(); } + }); return tmp; Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/BaseGASProgram.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -26,6 +26,8 @@ import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; + +import com.bigdata.rdf.graph.BinderBase; import com.bigdata.rdf.graph.EdgesEnum; import com.bigdata.rdf.graph.Factory; import com.bigdata.rdf.graph.FrontierEnum; @@ -237,7 +239,7 @@ final List<IBinder<VS, ES, ST>> tmp = new LinkedList<IBinder<VS, ES, ST>>(); - tmp.add(new IBinder<VS, ES, ST>() { + tmp.add(new BinderBase<VS, ES, ST>() { @Override public int getIndex() { Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-04 19:56:42 UTC (rev 8055) @@ -30,6 +30,7 @@ import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; +import java.util.ListIterator; import java.util.Set; import org.apache.log4j.Logger; @@ -995,8 +996,10 @@ @Override public void visit(final IGASState<VS, ES, ST> state, final Value u) { - final IBindingSet bs = new ListBindingSet(); + final List<IBindingSet> bSets = new LinkedList<IBindingSet>(); + bSets.add(new ListBindingSet()); + for (IBinder<VS, ES, ST> b : binderList) { // The variable for this binder. @@ -1004,65 +1007,118 @@ if(var == null) continue; - - /* - * TODO This does too much work. The API is defined in terms - * of openrdf Value objects rather than IVs because it is in - * a different package (not bigdata specific). The - * getBinderList() method should be moved to the code that - * exposes the service (this class) so it can do bigdata - * specific things and DO LESS WORK. This would be a good - * thing to do at the time that we add support for FuzzySSSP - * (which is not an IGASProgram and hence breaks the model - * anyway). - */ - final Value val = b.bind(vf, state, u); - if (val == null) - continue; + final Iterator<IBindingSet> it = bSets.iterator(); + + final List<IBindingSet> bSets2 = new LinkedList<IBindingSet>(); + + while (it.hasNext()) { + + final IBindingSet parent = it.next(); + + if (log.isTraceEnabled()) + log.trace("parent: " + parent); + + final List<Value> vals = + b.bind(vf, state, u, outVars, parent); + + if (vals.size() == 0) { + + // do nothing, leave the parent in the bSets + + } else if (vals.size() == 1) { + + /* + * Bind the single value, leave the parent in the + * bSets. 
+ */ + + final Value val = vals.get(0); + + bind(var, val, parent); + + if (log.isTraceEnabled()) + log.trace("parent (after bind): " + parent); + + } else { + + /* + * Remove the parent from the bSets, for each new + * value, clone the parent, bind the value, and add + * the new solution to the bSets + */ + + for (Value val : vals) { + + final IBindingSet child = parent.clone(); + + bind(var, val, child); + + if (log.isTraceEnabled()) + log.trace("child: " + child); + + bSets2.add(child); + + } + + it.remove(); + + } + + } - if (val instanceof IV) { + bSets.addAll(bSets2); + + } - // The value is already an IV. - bs.set(var, new Constant((IV) val)); + // Add to the set of generated solutions. + tmp.addAll(bSets); - } else { + } - /* - * The Value is a BigdataValueImpl (if the bind() method - * used the supplied ValueFactory). We need to convert - * it to an IV and this code ASSUMES that we can do this - * using an inline IV with the as configured KB. (This - * will work for anything numeric, but not for strings.) - */ - final IV<BigdataValueImpl, ?> iv = lex - .getLexiconConfiguration().createInlineIV(val); + @SuppressWarnings({ "unchecked", "rawtypes" }) + protected void bind(final IVariable<?> var, final Value val, final IBindingSet bs) { + + if (val == null) + return; - if (iv != null) { + if (val instanceof IV) { - iv.setValue((BigdataValueImpl) val); + // The value is already an IV. + bs.set(var, new Constant((IV) val)); - bs.set(var, new Constant(iv)); - - } else if (val instanceof BigdataValue) { - - bs.set(var, new Constant(DummyConstantNode.toDummyIV((BigdataValue) val))); - - } else { - - throw new RuntimeException("FIXME"); - - } + } else { + /* + * The Value is a BigdataValueImpl (if the bind() method + * used the supplied ValueFactory). We need to convert + * it to an IV and this code ASSUMES that we can do this + * using an inline IV with the as configured KB. (This + * will work for anything numeric, but not for strings.) + */ + final IV<BigdataValueImpl, ?> iv = lex + .getLexiconConfiguration().createInlineIV(val); + + if (iv != null) { + + iv.setValue((BigdataValueImpl) val); + + bs.set(var, new Constant(iv)); + + } else if (val instanceof BigdataValue) { + + bs.set(var, new Constant(DummyConstantNode.toDummyIV((BigdataValue) val))); + + } else { + + throw new RuntimeException("FIXME"); + } } - // Add to the set of generated solutions. - tmp.add(bs); - } - + @Override public IBindingSet[] get() { Modified: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq 2014-04-04 17:15:05 UTC (rev 8054) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq 2014-04-04 19:56:42 UTC (rev 8055) @@ -4,7 +4,7 @@ gas:program gas:gasClass "com.bigdata.rdf.graph.analytics.PATHS" . gas:program gas:in </:target> . # starting point gas:program gas:target </:source1> . # target vertices - gas:program gas:target </:source2> . # target vertices + # gas:program gas:target </:source2> . # target vertices gas:program gas:traversalDirection "Reverse" . # gas:program gas:maxIterations 2 . gas:program gas:maxIterationsAfterTargets 0 . @@ -12,7 +12,9 @@ gas:program gas:maxVisited 100000 . gas:program gas:out ?s . # bound to the visited vertices. gas:program gas:out1 ?depth . # bound to the depth + gas:program gas:out2 ?o . # bound to the pred + gas:program gas:out3 ?p . # bound to the edge } - ?s </:edge> ?o . 
- filter(?s != </:target>) . + #?s </:edge> ?o . + #filter(?s != </:target>) . } order by ?depth \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
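The multi-binding strategy described in r8055 multiplies solutions: when a binder returns n values for its output variable, the parent binding set is cloned once per value, and a binder that returns no values leaves the parent solution untouched. A rough JavaScript sketch of that expansion (`expandSolutions` and its argument names are invented for illustration; the actual implementation is the Java in GASService.java above):

```javascript
// Expand one set of solutions: for each parent binding set, a binder may
// return zero or more values for its output variable. Zero values keeps
// the parent as-is; n values produce n cloned solutions.
function expandSolutions(solutions, varName, binder) {
  var out = [];
  solutions.forEach(function(parent) {
    var vals = binder(parent);        // e.g. all predecessors of a vertex
    if (vals.length === 0) {
      out.push(parent);               // nothing to bind; keep the parent
    } else {
      vals.forEach(function(val) {
        var child = Object.assign({}, parent);  // clone, then bind
        child[varName] = val;
        out.push(child);
      });
    }
  });
  return out;
}

// A vertex discovered by two predecessors yields two solutions:
var solutions = [{ s: ':v1', depth: 2 }];
solutions = expandSolutions(solutions, 'pred', function() { return [':a', ':b']; });
console.log(solutions.length); // 2
```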
From: <tho...@us...> - 2014-04-04 17:15:10
Revision: 8054 http://sourceforge.net/p/bigdata/code/8054 Author: thompsonbry Date: 2014-04-04 17:15:05 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Replacing the version in SVN that had disabled the code path to write the cache blocks on the HALog. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-04 17:06:57 UTC (rev 8053) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-04 17:15:05 UTC (rev 8054) @@ -1250,9 +1250,8 @@ throws IllegalStateException, InterruptedException, ExecutionException, IOException { -// if (quorum == null || !quorum.isHighlyAvailable() + if (quorum == null) { //|| !quorum.isHighlyAvailable() // || !quorum.getClient().isLeader(quorumToken)) { - if (quorum == null) { return; } @@ -1346,20 +1345,20 @@ private void writeCacheBlock(final WriteCache cache) throws InterruptedException, ExecutionException, IOException { - /* - * IFF HA - * - * TODO isHA should be true even if the quorum is not highly - * available since there still could be other services in the write - * pipeline (e.g., replication to an offline HAJournalServer prior - * to changing over into an HA3 quorum or off-site replication). The - * unit tests need to be updated to specify [isHighlyAvailable] for - * ALL quorum based test runs. - */ - final boolean isHA = quorum != null && quorum.isHighlyAvailable(); +// /* +// * IFF HA +// * +// * TODO isHA should be true even if the quorum is not highly +// * available since there still could be other services in the write +// * pipeline (e.g., replication to an offline HAJournalServer prior +// * to changing over into an HA3 quorum or off-site replication). The +// * unit tests need to be updated to specify [isHighlyAvailable] for +// * ALL quorum based test runs. +// */ +// final boolean isHA = quorum != null && quorum.isHighlyAvailable(); // IFF HA and this is the quorum leader. - final boolean isHALeader = isHA + final boolean isHALeader = quorum != null && quorum.getClient().isLeader(quorumToken); /* @@ -1440,14 +1439,16 @@ * then clean up the documentation here (see the commented * out version of this line below). */ - quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); - - // ASYNC MSG RMI + NIO XFER. + quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); + if (quorum.replicationFactor() > 1) { - remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(), - pkg.getData().duplicate()); - - counters.get().nsend++; + + // ASYNC MSG RMI + NIO XFER. + remoteWriteFuture = quorumMember.replicate(null/* req */, + pkg.getMessage(), pkg.getData().duplicate()); + + counters.get().nsend++; + } /* This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
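Sketched below is the control flow that r8054 restores, in JavaScript for brevity (the object and method names are taken loosely from the diff and are illustrative, not the real API): the quorum leader always writes the cache block to the HALog, and replicates down the pipeline only when the replication factor exceeds one.

```javascript
// Illustrative control flow only, not the real bigdata API: log the write
// cache block on the HALog whenever this service is the quorum leader, and
// replicate it down the pipeline only for replication factors > 1.
function writeCacheBlock(quorum, quorumMember, pkg) {
  var isHALeader = quorum !== null && quorum.getClient().isLeader(quorum.token);
  if (!isHALeader) {
    return;
  }
  quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData()); // always log
  if (quorum.replicationFactor() > 1) {
    quorumMember.replicate(null, pkg.getMessage(), pkg.getData()); // async send
  }
}
```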
From: <tob...@us...> - 2014-04-04 17:07:00
Revision: 8053  http://sourceforge.net/p/bigdata/code/8053
Author: tobycraig
Date: 2014-04-04 17:06:57 +0000 (Fri, 04 Apr 2014)

Log Message:
-----------
#873 - Fixed error on reloading of explore

Modified Paths:
--------------
branches/RDR/bigdata-war/src/html/js/workbench.js

```diff
Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-04 16:55:54 UTC (rev 8052)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-04 17:06:57 UTC (rev 8053)
@@ -86,12 +86,17 @@
   $('.namespace-service-description').click(function(e) {
     return confirm('This can be an expensive operation. Proceed anyway?');
   });
+  NAMESPACES_READY = true;
 });
 }
 
 function selectNamespace(name) {
   // for programmatically selecting a namespace with just its name
-  $('#namespaces-list li[data-name=' + name + '] a.use-namespace').click();
+  if(!NAMESPACES_READY) {
+    setTimeout(function() { selectNamespace(name); }, 10);
+  } else {
+    $('#namespaces-list li[data-name=' + name + '] a.use-namespace').click();
+  }
 }
 
 function useNamespace(name, url) {
@@ -164,7 +169,7 @@
     useNamespace(DEFAULT_NAMESPACE, url);
   });
 }
-var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, fileContents;
+var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, fileContents;
 
 getDefaultNamespace();
@@ -773,11 +778,19 @@
   $('#explore-results a').click(function(e) {
     e.preventDefault();
     var components = parseHash(this.hash);
-    selectNamespace(components[2]);
-    explore(components[3]);
+    exploreNamespacedURI(components[2], components[3]);
   });
 }
 
+function exploreNamespacedURI(namespace, uri, nopush) {
+  if(!NAMESPACES_READY) {
+    setTimeout(function() { exploreNamespacedURI(namespace, uri, nopush); }, 10);
+  } else {
+    selectNamespace(namespace);
+    explore(uri, nopush);
+  }
+}
+
 function explore(uri, nopush) {
   $('#explore-form input[type=text]').val(uri);
   $('#explore-form').submit();
@@ -802,8 +815,7 @@
     $('#tab-selector a:first').click();
   } else {
     if(hash[1] == 'explore') {
-      selectNamespace(hash[2]);
-      explore(hash[3], true);
+      exploreNamespacedURI(hash[2], hash[3], true);
     } else {
       $('a[data-target=' + hash[1] + ']').click();
     }
```
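The fix in r8053 gates namespace-dependent actions behind a NAMESPACES_READY flag, retrying on a short setTimeout until the asynchronous namespace list has loaded. The same idiom, factored into a single helper (`whenNamespacesReady` is a hypothetical name, not in the codebase):

```javascript
// Retry every 10 ms until the namespace list has loaded, then run the action.
// Generalizes the guard used by selectNamespace and exploreNamespacedURI above.
function whenNamespacesReady(action) {
  if (!NAMESPACES_READY) {
    setTimeout(function() { whenNamespacesReady(action); }, 10);
  } else {
    action();
  }
}

whenNamespacesReady(function() { selectNamespace('kb'); });
```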
From: <tho...@us...> - 2014-04-04 16:55:58
Revision: 8052  http://sourceforge.net/p/bigdata/code/8052
Author: thompsonbry
Date: 2014-04-04 16:55:54 +0000 (Fri, 04 Apr 2014)

Log Message:
-----------
Missed 2 files in the last commit.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA

```diff
Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA (from rev 8047, branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA	(rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA	2014-04-04 16:55:54 UTC (rev 8052)
@@ -0,0 +1,46 @@
+# Environment for bigdata HA services.
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Note: You MUST provide the location of the executable scripts and the
+# pid file that is written by $binDir/startHAServices. These SHOULD be
+# absolute path names.
+
+#binDir=
+#pidFile=
+
+##
+# The following variables configure the startHAServices script, which
+# passes them through to HAJournal.config.
+##
+
+# Name of the bigdata gederation of services. Override for real install.
+export FEDNAME=bigdataInstallTest
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication cluster, then just
+# given each such cluster its own name.
+export LOGICAL_SERVICE_ID=HAJournalServer-1
+
+# Local directory where the service will store its state.
+export FED_DIR=/var/bigdata/${FEDNAME}
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="$FEDNAME"
+#export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/"
+
+# Apache ZooKeeper - NO default.
+#export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081";
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export JETTY_PORT=8080
+#export JETTY_XML=var/jetty/WEB-INF/jetty.xml
+#export JETTY_RESOURCE_BASE=var/jetty
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA	2014-04-04 16:52:01 UTC (rev 8051)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA	2014-04-04 16:55:54 UTC (rev 8052)
@@ -49,7 +49,7 @@
 #/sbin/sysctl -w vm.swappiness=0
 
 # Setup the environment.
-source /etc/default/bigdata/bigdataHA
+source /etc/default/bigdataHA
 
 if [ -z "$binDir" ]; then
     echo $"$0 : environment not setup: binDir is undefined."
```
From: <tho...@us...> - 2014-04-04 16:52:04
Revision: 8051 http://sourceforge.net/p/bigdata/code/8051
Author: thompsonbry
Date: 2014-04-04 16:52:01 +0000 (Fri, 04 Apr 2014)

Log Message:
-----------
Changed the location of the bigdataHA defaults from /etc/default/bigdata/bigdataHA to /etc/default/bigdataHA. That is, there is no longer a /etc/default/bigdata directory.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.xml

Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-04-04 16:12:05 UTC (rev 8050)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-04-04 16:52:01 UTC (rev 8051)
@@ -1085,8 +1085,8 @@
 todir="${dist.dir}/etc/init.d" />
 <chmod file="${dist.dir}/etc/init.d/bigdataHA" perm="755" />
- <copy file="${src.resources}/etc/default/bigdata/bigdataHA"
- todir="${dist.dir}/etc/default/bigdata" />
+ <copy file="${src.resources}/etc/default/bigdataHA"
+ todir="${dist.dir}/etc/default" />
 <copy file="${src.resources}/bin/config/browser.config"
 todir="${dist.bin.config}" />
@@ -1245,7 +1245,7 @@
 bigdata/doc/NOTICE - copyright NOTICE files.
 bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page)
 bigdata/etc/init.d/bigdataHA - HA services start/stop script.
- bigdata/etc/default/bigdata/bigdataHA - HA services required config file.
+ bigdata/etc/default/bigdataHA - HA services required config file.
 Note: This directory structure is currently reused for the rpm, but
 the top-level of the rpm directory structure includes the release version as
From: <tho...@us...> - 2014-04-04 16:12:11
Revision: 8050 http://sourceforge.net/p/bigdata/code/8050 Author: thompsonbry Date: 2014-04-04 16:12:05 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Modified the HARestoreUtility to optionally accept the name of the file onto which the most recent snapshot (or the specified snapshot) will be decompressed. This is to support automatic restore scenarios, e.g., to SSD on EC2 from EBS. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-04 16:05:53 UTC (rev 8049) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java 2014-04-04 16:12:05 UTC (rev 8050) @@ -48,8 +48,14 @@ /** * Utility class may be used to apply HALog files to a {@link Journal}, rolling - * it forward to a specific commit point. + * it forward to a specific commit point. This class can decompress a snapshot + * file for processing. It can also identify the most recent snapshot in the + * snapshot directory, and then decompress that snapshot for processing. When + * starting with a snapshot, the target journal file may be specified on the + * command line. * + * @see #main(String[]) + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ public class HARestore { @@ -384,17 +390,31 @@ * @param args * <code>[options] journalOrSnapshotFileOrSnapshotDir haLogDir</code> * <br> - * where <code>journalFile</code> is the name of the journal file<br> + * where <code>journalOrSnapshotFileOrSnapshotDir</code> is the + * name of the journal file (ending in <code>.jnl</code>), the + * name of a specific snapshot file (ending in + * <code>.jnl.gz</code>), or the name of the snapshot directory + * (this is generally a directory named <code>snapshot</code> + * that is a child of the service directory) <br> * where <code>haLogDir</code> is the name of a directory - * containing zero or more HALog files<br> + * containing zero or more HALog files (this is generally a + * directory name <code>HALog</code> that is a child of the + * service directory)<br> * where <code>options</code> are any of: * <dl> - * <dt>-l</dt> <dd>List available commit points, but do not apply + * <dt>-l</dt> + * <dd>List available commit points, but do not apply * them. This option provides information about the current * commit point on the journal and the commit points available in - * the HALog files.</dd> <dt>-h commitCounter</dt> <dd>The last - * commit counter that will be applied (halting point for - * restore).</dd> + * the HALog files.</dd> + * <dt>-h commitCounter</dt> + * <dd>The last commit counter that will be applied (halting + * point for restore).</dd> + * <dt>-o journalFile</dt> + * <dd>When restoring from a snapshot, this parameter specifies + * the name of the journal file to be created. It is an error + * if the file exists (this utility will not overwrite an existing + * journal file).</dd> * </dl> * * @return <code>0</code> iff the operation was fully successful. @@ -424,6 +444,8 @@ int i = 0; boolean listCommitPoints = false; + + String decompressTargetFile = null; // Defaults to Long.MAX_VALUE. 
long haltingCommitCounter = Long.MAX_VALUE; @@ -447,10 +469,16 @@ else if (arg.equals("-h")) { - haltingCommitCounter = Long.parseLong(args[i + 1]); + haltingCommitCounter = Long.parseLong(args[++i]); } + else if (arg.equals("-o")) { + + decompressTargetFile = args[++i]; + + } + else throw new RuntimeException("Unknown argument: " + arg); @@ -475,10 +503,12 @@ /* * File is a directory. * + * We assume that it is the snapshot directory. + * * Locate the most recent snapshot in that directory structure. */ - File tmp = CommitCounterUtility.findGreatestCommitCounter( + final File tmp = CommitCounterUtility.findGreatestCommitCounter( journalFile, SnapshotManager.SNAPSHOT_FILTER); if (tmp == null) { @@ -507,8 +537,8 @@ /* * File is a snapshot. * - * Decompress the snapshot onto a temporary file in the current - * working directory. + * Decompress the snapshot onto either a temporary file or the file + * specified by the caller (in which case the file must not exist). */ // source is the snapshot. @@ -517,10 +547,24 @@ final long commitCounter = SnapshotManager .parseCommitCounterFile(journalFile.getName()); - // temporary file in the same directory as the snapshot. - final File out = File.createTempFile("" + commitCounter + "-", - Journal.Options.JNL, journalFile.getAbsoluteFile() - .getParentFile()); + final File out; + if (decompressTargetFile == null) { + /* + * Temporary file in the current working directory + */ + out = File.createTempFile("restored-from-snapshot" + "-" + + commitCounter + "-", Journal.Options.JNL, journalFile + .getAbsoluteFile().getParentFile()); + } else { + /* + * Decompress onto a file specified by the caller. + */ + out = new File(decompressTargetFile); + if (out.exists()) { + // Do not decompress onto an existing file. + throw new IOException("File exists: " + out); + } + } System.out.println("Decompressing " + in + " to " + out); @@ -531,7 +575,9 @@ } - // Validate journal file. + /* + * Log some metadata about the journal file. + */ { System.out.println("Journal File: " + journalFile); @@ -559,6 +605,9 @@ } + /* + * Open the journal. + */ try { final Properties properties = new Properties(); @@ -579,6 +628,9 @@ try { + /* + * Apply zero or more HALog files to roll forward the journal. + */ final HARestore util = new HARestore(journal, haLogDir); util.restore(listCommitPoints, haltingCommitCounter); @@ -601,7 +653,7 @@ private static void usage(final String[] args) { - System.err.println("usage: (-l|-h commitPoint) <journalFile> haLogDir"); + System.err.println("usage: (-l|-h haltingCommitPoint|-o outputJournalFile) <journalFile|snapshotFile|snapshotDir> haLogDir"); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
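A detail of this revision worth calling out is the argument handling: options that consume a value now advance the loop index with args[++i], where the previous args[i + 1] form read the value but left the index pointing at it, so the next pass of the loop would apparently have re-parsed the value as an unknown option. A self-contained sketch of the corrected pattern (the class is illustrative, not bigdata code, though the option names and variables follow the diff):

```java
public class ArgParseSketch {

    public static void main(final String[] args) {

        boolean listCommitPoints = false;
        long haltingCommitCounter = Long.MAX_VALUE;
        String decompressTargetFile = null;

        int i = 0;
        for (; i < args.length; i++) {
            final String arg = args[i];
            if (!arg.startsWith("-"))
                break; // first positional argument (journal/snapshot/dir)
            if (arg.equals("-l")) {
                listCommitPoints = true;
            } else if (arg.equals("-h")) {
                // ++i consumes the option's value so it is not re-parsed.
                haltingCommitCounter = Long.parseLong(args[++i]);
            } else if (arg.equals("-o")) {
                decompressTargetFile = args[++i];
            } else {
                throw new RuntimeException("Unknown argument: " + arg);
            }
        }

        System.out.println("list=" + listCommitPoints
                + " halt=" + haltingCommitCounter
                + " out=" + decompressTargetFile
                + " positionals=" + (args.length - i));
    }
}
```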
From: <mar...@us...> - 2014-04-04 16:05:57
Revision: 8049 http://sourceforge.net/p/bigdata/code/8049 Author: martyncutcher Date: 2014-04-04 16:05:53 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Rollback isHA check to fix CI errors, and review for HA1 Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-04 15:24:46 UTC (rev 8048) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-04 16:05:53 UTC (rev 8049) @@ -1356,7 +1356,7 @@ * unit tests need to be updated to specify [isHighlyAvailable] for * ALL quorum based test runs. */ - final boolean isHA = quorum != null; + final boolean isHA = quorum != null && quorum.isHighlyAvailable(); // IFF HA and this is the quorum leader. final boolean isHALeader = isHA This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-04 15:24:49
Revision: 8048 http://sourceforge.net/p/bigdata/code/8048 Author: thompsonbry Date: 2014-04-04 15:24:46 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Fixes to build.xml for changed location of the configuration file for the init.d start of bigdata HA services to /etc/default/bigdata/bigdataHA. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-04-04 15:19:44 UTC (rev 8047) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-04-04 15:24:46 UTC (rev 8048) @@ -1085,8 +1085,8 @@ todir="${dist.dir}/etc/init.d" /> <chmod file="${dist.dir}/etc/init.d/bigdataHA" perm="755" /> - <copy file="${src.resources}/etc/bigdata/bigdataHA.config" - todir="${dist.dir}/etc/bigdata" /> + <copy file="${src.resources}/etc/default/bigdata/bigdataHA" + todir="${dist.dir}/etc/default/bigdata" /> <copy file="${src.resources}/bin/config/browser.config" todir="${dist.bin.config}" /> @@ -1245,7 +1245,7 @@ bigdata/doc/NOTICE - copyright NOTICE files. bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page) bigdata/etc/init.d/bigdataHA - HA services start/stop script. - bigdata/etc/bigdata/bigdataHA.config - HA services required config file. + bigdata/etc/default/bigdata/bigdataHA - HA services required config file. Note: This directory structure is currently reused for the rpm, but the top-level of the rpm directory structure includes the release version as This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-04 15:19:49
Revision: 8047 http://sourceforge.net/p/bigdata/code/8047
Author: thompsonbry
Date: 2014-04-04 15:19:44 +0000 (Fri, 04 Apr 2014)

Log Message:
-----------
Relocate and rename the configuration file for the init.d script for HA to /etc/default/bigdata/bigdataHA to be in keeping with conventions.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA

Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA.config

Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA (from rev 7912, branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA 2014-04-04 15:19:44 UTC (rev 8047)
@@ -0,0 +1,46 @@
+# Environment for bigdata HA services.
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Note: You MUST provide the location of the executable scripts and the
+# pid file that is written by $binDir/startHAServices. These SHOULD be
+# absolute path names.
+
+#binDir=
+#pidFile=
+
+##
+# The following variables configure the startHAServices script, which
+# passes them through to HAJournal.config.
+##
+
+# Name of the bigdata federation of services. Override for real install.
+export FEDNAME=bigdataInstallTest
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication clusters, then just
+# give each such cluster its own name.
+export LOGICAL_SERVICE_ID=HAJournalServer-1
+
+# Local directory where the service will store its state.
+export FED_DIR=/var/bigdata/${FEDNAME}
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="$FEDNAME"
+#export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/"
+
+# Apache ZooKeeper - NO default.
+#export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081";
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export JETTY_PORT=8080
+#export JETTY_XML=var/jetty/WEB-INF/jetty.xml
+#export JETTY_RESOURCE_BASE=var/jetty
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=

Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config 2014-03-04 22:43:01 UTC (rev 7912)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdata/bigdataHA.config 2014-04-04 15:19:44 UTC (rev 8047)
@@ -1,46 +0,0 @@
-# Environment for bigdata HA services.
-#
-# binDir - The directory containing the installed scripts.
-# pidFile - The pid is written on this file.
-#
-# Note: You MUST provide the location of the executable scripts and the
-# pid file that is written by $binDir/startHAServices. These SHOULD be
-# absolute path names.
-
-#binDir=
-#pidFile=
-
-##
-# The following variables configure the startHAServices script, which
-# passes them through to HAJournal.config.
-##
-
-# Name of the bigdata federation of services. Override for real install.
-export FEDNAME=bigdataInstallTest
-
-# This is different for each HA replication cluster in the same federation
-# of services. If you have multiple such replication clusters, then just
-# give each such cluster its own name.
-export LOGICAL_SERVICE_ID=HAJournalServer-1
-
-# Local directory where the service will store its state.
-export FED_DIR=/var/bigdata/${FEDNAME}
-
-# Apache River - NO default for "LOCATORS".
-export GROUPS="$FEDNAME"
-#export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/"
-
-# Apache ZooKeeper - NO default.
-#export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081";
-
-# All of these have defaults. Override as necessary.
-#export REPLICATION_FACTOR=3
-#export HA_PORT=9090
-#export JETTY_PORT=8080
-#export JETTY_XML=var/jetty/WEB-INF/jetty.xml
-#export JETTY_RESOURCE_BASE=var/jetty
-#export COLLECT_QUEUE_STATISTICS=
-#export COLLECT_PLATFORM_STATISTICS=
-#export GANGLIA_REPORT=
-#export GANGLIA_LISTENER=
-#export SYSSTAT_DIR=

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-04 15:08:58 UTC (rev 8046)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-04 15:19:44 UTC (rev 8047)
@@ -49,7 +49,7 @@
 #/sbin/sysctl -w vm.swappiness=0

 # Setup the environment.
-source bigdata/bigdataHA.config
+source /etc/default/bigdata/bigdataHA

 if [ -z "$binDir" ]; then
 echo $"$0 : environment not setup: binDir is undefined."
From: <tho...@us...> - 2014-04-04 15:09:01
Revision: 8046 http://sourceforge.net/p/bigdata/code/8046 Author: thompsonbry Date: 2014-04-04 15:08:58 +0000 (Fri, 04 Apr 2014) Log Message: ----------- The GROUPS, LOCATORS, and ZK_SERVERS environment variables were not being passed through by the startHAServices script. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-04-04 14:53:14 UTC (rev 8045) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-04-04 15:08:58 UTC (rev 8046) @@ -77,6 +77,9 @@ -DFED_DIR=${FED_DIR}\ -DDATA_DIR=${DATA_DIR}\ -DREPLICATION_FACTOR=${REPLICATION_FACTOR}\ + -DGROUPS=${GROUPS}\ + -DLOCATORS=${LOCATORS}\ + -DZK_SERVERS=${ZK_SERVERS}\ -DHA_PORT=${HA_PORT}\ "-Djetty.port=${JETTY_PORT}"\ "-Djetty.resourceBase=${JETTY_RESOURCE_BASE}"\ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-04 14:53:17
Revision: 8045 http://sourceforge.net/p/bigdata/code/8045 Author: thompsonbry Date: 2014-04-04 14:53:14 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Fix to syntax in HAJournal.config Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-04 14:50:43 UTC (rev 8044) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-04 14:53:14 UTC (rev 8045) @@ -83,7 +83,7 @@ private static serviceDir = new File(fedDir,logicalServiceId+File.separator+"HAJournalServer"); // journal data directory. - private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",serviceDir.toString())); + private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",""+serviceDir)); // HA log directory. private static haLogDir = new File(serviceDir,"HALog"); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
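Together with r8044 below, the expression finally settled on is string concatenation (""+serviceDir) rather than an instance method call (serviceDir.toString()). In plain Java the two spellings are equivalent for a non-null File, as the sketch below confirms; the concatenation form is presumably what the Jini/River configuration parser accepts at this point, which is an assumption on our part since the log message only says the syntax was fixed:

```java
import java.io.File;

public class ToStringEquivalence {
    public static void main(String[] args) {
        final File serviceDir = new File("/var/bigdata/fed/HAJournalServer-1");
        // "" + f compiles to String.valueOf(f), which returns f.toString()
        // for a non-null f (and the string "null" when f is null).
        System.out.println(serviceDir.toString().equals("" + serviceDir)); // true
    }
}
```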
From: <tho...@us...> - 2014-04-04 14:50:46
Revision: 8044 http://sourceforge.net/p/bigdata/code/8044 Author: thompsonbry Date: 2014-04-04 14:50:43 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Fix to syntax in HAJournal.config Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-04 14:48:24 UTC (rev 8043) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-04 14:50:43 UTC (rev 8044) @@ -83,7 +83,7 @@ private static serviceDir = new File(fedDir,logicalServiceId+File.separator+"HAJournalServer"); // journal data directory. - private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",serviceDir.toString()); + private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",serviceDir.toString())); // HA log directory. private static haLogDir = new File(serviceDir,"HALog"); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-04-04 14:48:26
Revision: 8043 http://sourceforge.net/p/bigdata/code/8043 Author: thompsonbry Date: 2014-04-04 14:48:24 +0000 (Fri, 04 Apr 2014) Log Message: ----------- Define DATA_DIR as the location of the journal file as an environment variable that is passed through from startHAServices and used by HAJournal.config. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-03 23:39:45 UTC (rev 8042) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-04-04 14:48:24 UTC (rev 8043) @@ -83,7 +83,7 @@ private static serviceDir = new File(fedDir,logicalServiceId+File.separator+"HAJournalServer"); // journal data directory. - private static dataDir = serviceDir; + private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",serviceDir.toString()); // HA log directory. private static haLogDir = new File(serviceDir,"HALog"); Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-04-03 23:39:45 UTC (rev 8042) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2014-04-04 14:48:24 UTC (rev 8043) @@ -75,6 +75,7 @@ -DFEDNAME=${FEDNAME}\ -DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\ -DFED_DIR=${FED_DIR}\ + -DDATA_DIR=${DATA_DIR}\ -DREPLICATION_FACTOR=${REPLICATION_FACTOR}\ -DHA_PORT=${HA_PORT}\ "-Djetty.port=${JETTY_PORT}"\ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
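The chain introduced here runs: the shell exports DATA_DIR, startHAServices forwards it to the JVM as -DDATA_DIR=${DATA_DIR}, and HAJournal.config reads it back with the service directory as the default. A minimal sketch of the same lookup using the standard System.getProperty, on the assumption that ConfigMath.getProperty behaves analogously:

```java
import java.io.File;

public class DataDirLookup {
    public static void main(String[] args) {
        final File serviceDir = new File("/var/bigdata/fed/HAJournalServer-1");
        // Equivalent in spirit to:
        //   new File(ConfigMath.getProperty("DATA_DIR", "" + serviceDir))
        final File dataDir =
                new File(System.getProperty("DATA_DIR", "" + serviceDir));
        System.out.println("journal data directory: " + dataDir);
    }
}
```

Running java -DDATA_DIR=/ssd/bigdata DataDirLookup overrides the default, which matches the restore-to-SSD scenario mentioned in r8050. One caveat of this pass-through style: if DATA_DIR is unset in the shell, -DDATA_DIR=${DATA_DIR} still defines the property as an empty string, and at least under the System.getProperty semantics shown here an empty string is not the same as an absent property.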
From: <tob...@us...> - 2014-04-03 23:39:47
Revision: 8042 http://sourceforge.net/p/bigdata/code/8042 Author: tobycraig Date: 2014-04-03 23:39:45 +0000 (Thu, 03 Apr 2014) Log Message: ----------- #873 - Fixed double history entry for SID component clicks Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-03 23:05:18 UTC (rev 8041) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-03 23:39:45 UTC (rev 8042) @@ -638,10 +638,6 @@ var match = uri.match(re); if(match) { $('#explore-header').html('<h1><< <<a href="' + buildExploreHash(match[1]) + '">' + match[1] + '</a>><br><<a href="' + buildExploreHash(match[2]) + '">' + match[2] + '</a> ><br><<a href="' + buildExploreHash(match[3]) + '">' + match[3] + '</a> > >></h1>'); - $('#explore-header h1 a').click(function(e) { - e.preventDefault(); - explore(this.text); - }); } else { $('#explore-header').html('<h1>' + uri + '</h1>'); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-04-03 23:05:23
Revision: 8041 http://sourceforge.net/p/bigdata/code/8041 Author: tobycraig Date: 2014-04-03 23:05:18 +0000 (Thu, 03 Apr 2014) Log Message: ----------- #873 - Added history capability so user can browse around their exploration history. Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-03 22:01:57 UTC (rev 8040) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-03 23:05:18 UTC (rev 8041) @@ -86,8 +86,6 @@ $('.namespace-service-description').click(function(e) { return confirm('This can be an expensive operation. Proceed anyway?'); }); - - READY = true; }); } @@ -166,7 +164,7 @@ useNamespace(DEFAULT_NAMESPACE, url); }); } -var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, READY, fileContents; +var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, fileContents; getDefaultNamespace(); @@ -591,13 +589,12 @@ } else { var text = binding.value; } - text = escapeHTML(text); - text = text.replace(/\n/g, '<br>'); + linkText = escapeHTML(text).replace(/\n/g, '<br>'); if(binding.type == 'typed-literal') { var tdData = ' class="literal" data-datatype="' + binding.datatype + '"'; } else { if(binding.type == 'uri' || binding.type == 'sid') { - text = '<a href="#">' + text + '</a>'; + text = '<a href="' + buildExploreHash(text) + '">' + linkText + '</a>'; } var tdData = ' class="' + binding.type + '"'; if(binding['xml:lang']) { @@ -640,7 +637,7 @@ var re = /<< *<([^<>]*)> *<([^<>]*)> *<([^<>]*)> *>>/; var match = uri.match(re); if(match) { - $('#explore-header').html('<h1><< <<a href="#">' + match[1] + '</a>><br><<a href="#">' + match[2] + '</a> ><br><<a href="#">' + match[3] + '</a> > >></h1>'); + $('#explore-header').html('<h1><< <<a href="' + buildExploreHash(match[1]) + '">' + match[1] + '</a>><br><<a href="' + buildExploreHash(match[2]) + '">' + match[2] + '</a> ><br><<a href="' + buildExploreHash(match[3]) + '">' + match[3] + '</a> > >></h1>'); $('#explore-header h1 a').click(function(e) { e.preventDefault(); explore(this.text); @@ -651,6 +648,10 @@ } }); +function buildExploreHash(uri) { + return '#explore:' + NAMESPACE + ':' + uri; +} + function loadURI(target) { // identify if this is a vertex or a SID target = target.trim().replace(/\n/g, ' '); @@ -706,8 +707,6 @@ } else { var query = edgeQuery.replace('SID', target); } - console.log('Explore query for ' + (vertex ? 
'vertex ' : 'edge ') + target); - console.log(query); var settings = { type: 'POST', data: 'query=' + encodeURI(query), @@ -720,8 +719,6 @@ } function updateExploreStart(data) { - console.log('Explore results'); - console.log(data); var results = data.results.bindings.length > 0; // clear tables @@ -731,13 +728,13 @@ $.each(data.results.bindings, function(i, binding) { var cols = [binding.col1, binding.col2].map(function(col) { if(col.type == 'sid') { - var output = getSID(col); + var uri = getSID(col); } else { - var output = col.value; + var uri = col.value; } - output = escapeHTML(output).replace(/\n/g, '<br>'); + output = escapeHTML(uri).replace(/\n/g, '<br>'); if(col.type == 'uri' || col.type == 'sid') { - output = '<a href="#">' + output + '</a>'; + output = '<a href="' + buildExploreHash(uri) + '">' + output + '</a>'; } return output; }); @@ -748,7 +745,7 @@ } else { var sid = '<< <' + $('#explore-form input[type=text]').val() + '> <' + binding.col1.value + '> <' + binding.col2.value + '> >>'; } - star = '<a href="#" data-sid="' + sid + '"><< * (' + star + ') >></a>'; + star = '<a href="' + buildExploreHash(sid) + '"><< * (' + star + ') >></a>'; } else { star = ''; } @@ -779,16 +776,44 @@ $('#explore-results a').click(function(e) { e.preventDefault(); - explore($(this).data('sid') ? $(this).data('sid') : this.text); + var components = parseHash(this.hash); + selectNamespace(components[2]); + explore(components[3]); }); } -function explore(uri) { +function explore(uri, nopush) { $('#explore-form input[type=text]').val(uri); $('#explore-form').submit(); showTab('explore'); + if(!nopush) { + history.pushState(null, null, '#explore:' + NAMESPACE + ':' + uri); + } } +function parseHash(hash) { + // match #tab:namespace:uri + // :namespace:uri group optional + // namespace optional + var re = /#([^:]+)(?::([^:]*):(.+))?/; + return hash.match(re); +} + +// handle history buttons and initial display of first tab +window.addEventListener("popstate", function(e) { + var hash = parseHash(this.location.hash); + if(!hash) { + $('#tab-selector a:first').click(); + } else { + if(hash[1] == 'explore') { + selectNamespace(hash[2]); + explore(hash[3], true); + } else { + $('a[data-target=' + hash[1] + ']').click(); + } + } +}); + function updateExploreError(jqXHR, textStatus, errorThrown) { $('#explore-results .box').html(''); $('#explore-header').html('Error! 
' + textStatus + ' ' + errorThrown); @@ -927,40 +952,4 @@ return $('<div/>').text(text).html(); } -function initialExplore(namespace, uri) { - if(!READY) { - setTimeout(function() { initialExplore(namespace, uri); }, 10); - } else { - if(namespace != '') { - selectNamespace(namespace); - } - explore(uri); - } -} - -if(window.location.hash) { - // remove # and see if there is some data to retrieve for this hash - var hash = window.location.hash.substring(1); - var i = hash.indexOf(':'); - if(i != -1) { - var data = hash.substring(i + 1); - hash = hash.substring(0, i); - // currently only the explore tab uses this - // data is in the form namespace:uri - // if no namespace is specified, use the default one - // TODO: this may need to be rethought if we start remembering the namespace the user selects - if(hash == 'explore') { - i = data.indexOf(':'); - var namespace = data.substring(0, i); - var uri = data.substring(i + 1); - - // wait for namespaces to be retrieved - initialExplore(namespace, uri); - } - } - $('a[data-target=' + hash + ']').click(); -} else { - $('#tab-selector a:first').click(); -} - }); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
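The heart of this change is parseHash, which accepts hashes of the form #tab or #tab:namespace:uri, with the namespace/uri pair optional and the uri itself free to contain further colons. The same pattern exercised against Java's regex engine, with hypothetical sample hashes:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParseHashSketch {
    // Same grammar as the workbench's parseHash: #tab[:namespace:uri].
    private static final Pattern HASH =
            Pattern.compile("#([^:]+)(?::([^:]*):(.+))?");

    public static void main(String[] args) {
        final String[] samples = {
                "#query",
                "#explore:kb:http://example.org/x"
        };
        for (String hash : samples) {
            final Matcher m = HASH.matcher(hash);
            if (m.matches()) {
                System.out.println("tab=" + m.group(1)
                        + " namespace=" + m.group(2)   // null when omitted
                        + " uri=" + m.group(3));
            }
        }
    }
}
```

Note that group(2) and group(3) come back null when the optional group is absent, which mirrors the popstate handler's fallback to a plain tab click when no namespace/uri was captured.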
From: <mrp...@us...> - 2014-04-03 22:02:00
Revision: 8040 http://sourceforge.net/p/bigdata/code/8040 Author: mrpersonick Date: 2014-04-03 22:01:57 +0000 (Thu, 03 Apr 2014) Log Message: ----------- bind the predecessors list to an outvar Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-03 22:01:30 UTC (rev 8039) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-03 22:01:57 UTC (rev 8040) @@ -15,6 +15,7 @@ */ package com.bigdata.rdf.graph.analytics; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; @@ -309,22 +310,28 @@ } }); -// tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { -// -// @Override -// public int getIndex() { -// return Bindings.PREDECESSOR; -// } -// -// @Override -// public Value bind(final ValueFactory vf, -// final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { -// -// return state.getState(u).predecessor.get(); -// -// } -// }); + tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { + + @Override + public int getIndex() { + return Bindings.PREDECESSOR; + } + + @Override + public Value bind(final ValueFactory vf, + final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { + final String s = Arrays.toString(state.getState(u).predecessors.toArray()); + + if (log.isTraceEnabled()) { + log.trace(s); + } + + return vf.createLiteral(s); + + } + }); + return tmp; } @@ -341,11 +348,11 @@ */ int DEPTH = 1; -// /** -// * The BFS predecessor is the first vertex to discover a given vertex. -// * -// */ -// int PREDECESSOR = 2; + /** + * The BFS predecessor is the first vertex to discover a given vertex. + * + */ + int PREDECESSOR = 2; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
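Since the binder serializes the predecessor set with Arrays.toString, the output variable receives one flat literal naming all predecessors rather than a set of RDF terms. The serialized form, sketched standalone (the real set holds openrdf Value objects; plain strings and example URIs stand in here):

```java
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class PredecessorLiteral {
    public static void main(String[] args) {
        final Set<String> predecessors = new LinkedHashSet<>();
        predecessors.add("http://example.org/a");
        predecessors.add("http://example.org/b");
        // Same serialization as the binder above: one flat literal.
        System.out.println(Arrays.toString(predecessors.toArray()));
        // prints: [http://example.org/a, http://example.org/b]
    }
}
```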
From: <mrp...@us...> - 2014-04-03 22:01:35
Revision: 8039 http://sourceforge.net/p/bigdata/code/8039 Author: mrpersonick Date: 2014-04-03 22:01:30 +0000 (Thu, 03 Apr 2014) Log Message: ----------- allow strings to be produced as output and bound to outvars Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-03 21:32:58 UTC (rev 8038) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-03 22:01:30 UTC (rev 8039) @@ -67,6 +67,7 @@ import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.model.BigdataValueImpl; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sparql.ast.DummyConstantNode; import com.bigdata.rdf.sparql.ast.GraphPatternGroup; import com.bigdata.rdf.sparql.ast.IGroupMemberNode; import com.bigdata.rdf.sparql.ast.StatementPatternNode; @@ -1037,10 +1038,22 @@ final IV<BigdataValueImpl, ?> iv = lex .getLexiconConfiguration().createInlineIV(val); - iv.setValue((BigdataValueImpl) val); + if (iv != null) { - bs.set(var, new Constant(iv)); + iv.setValue((BigdataValueImpl) val); + bs.set(var, new Constant(iv)); + + } else if (val instanceof BigdataValue) { + + bs.set(var, new Constant(DummyConstantNode.toDummyIV((BigdataValue) val))); + + } else { + + throw new RuntimeException("FIXME"); + + } + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
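The shape of this fix is a factory that returns null when it cannot handle the input (createInlineIV for values that cannot be inlined), with the caller supplying a fallback representation (a dummy IV). Reduced to its essentials with hypothetical names:

```java
public class FallbackEncoding {
    // Stand-in for createInlineIV: only "small" values inline successfully.
    static Integer tryInline(final String value) {
        return value.length() <= 8 ? value.hashCode() : null;
    }

    // Stand-in for the IV-or-dummy decision in GASService.
    static Object encode(final String value) {
        final Integer inline = tryInline(value);
        if (inline != null) {
            return inline;            // fast path: inline representation
        }
        return "dummy:" + value;      // fallback: wrapped, non-inline form
    }

    public static void main(String[] args) {
        System.out.println(encode("abc"));
        System.out.println(encode("a much longer literal value"));
    }
}
```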
From: <mrp...@us...> - 2014-04-03 21:33:00
Revision: 8038 http://sourceforge.net/p/bigdata/code/8038 Author: mrpersonick Date: 2014-04-03 21:32:58 +0000 (Thu, 03 Apr 2014) Log Message: ----------- added an analytic to produce a connected subgraph between a source and one or more targets Added Paths: ----------- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.ttl Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,220 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 16, 2009 + */ + +package com.bigdata.rdf.sail.graph; + +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; + +import org.apache.commons.io.IOUtils; +import org.apache.log4j.Logger; +import org.openrdf.model.URI; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.rio.RDFFormat; + +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSailRepository; +import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.ProxyBigdataSailTestCase; +import com.bigdata.rdf.vocab.BaseVocabulary; +import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.rdf.vocab.RDFSVocabulary; +import com.bigdata.rdf.vocab.VocabularyDecl; + +public class TestPaths extends ProxyBigdataSailTestCase { + + protected static final Logger log = Logger.getLogger(TestPaths.class); + + protected static final boolean INFO = log.isInfoEnabled(); + + @Override + public Properties getProperties() { + + Properties props = super.getProperties(); + + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); + + return props; + + } + + /** + * + */ + public TestPaths() { + } + + /** + * @param arg0 + */ + public TestPaths(String arg0) { + super(arg0); + } + + protected void load(final BigdataSailRepositoryConnection cxn, final String resource) throws Exception { + + final InputStream is = getClass().getResourceAsStream(resource); + + cxn.add(is, "", RDFFormat.TURTLE); + + } + + protected InputStream open(final String resource) throws Exception { + + return getClass().getResourceAsStream(resource); + + } + +// public void testSimpleBFS() throws Exception { +// +// final BigdataSail sail = getSail(); +// sail.initialize(); +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// +// final BigdataSailRepositoryConnection cxn = repo.getConnection(); +// cxn.setAutoCommit(false); +// +// try { +// +// cxn.add(open("paths1.ttl"), "", RDFFormat.TURTLE); +// cxn.commit(); +// +// log.trace("\n"+sail.getDatabase().dumpStore()); +// +// final String query = IOUtils.toString(open("paths1.rq")); +// +// log.trace("\n"+query); +// +// final TupleQuery tqr = cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); +// +// final TupleQueryResult result = tqr.evaluate(); +// +// while (result.hasNext()) { +// +// log.trace(result.next()); +// +// } +// +// result.close(); +// +// } finally { +// cxn.close(); +// sail.__tearDownUnitTest(); +// } +// +// } +// +// public void testSimpleSSSP() throws Exception { +// +// final BigdataSail sail = getSail(); +// sail.initialize(); +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// +// final BigdataSailRepositoryConnection cxn = repo.getConnection(); +// cxn.setAutoCommit(false); +// +// try { +// +// 
cxn.add(open("paths2.ttl"), "", RDFFormat.TURTLE); +// cxn.commit(); +// +// log.trace("\n"+sail.getDatabase().dumpStore()); +// +// final String query = IOUtils.toString(open("paths2.rq")); +// +// log.trace("\n"+query); +// +// final TupleQuery tqr = cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); +// +// final TupleQueryResult result = tqr.evaluate(); +// +// while (result.hasNext()) { +// +// log.trace(result.next()); +// +// } +// +// result.close(); +// +// } finally { +// cxn.close(); +// sail.__tearDownUnitTest(); +// } +// +// } + + public void testPaths() throws Exception { + + final BigdataSail sail = getSail(); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + + final BigdataSailRepositoryConnection cxn = repo.getConnection(); + cxn.setAutoCommit(false); + + try { + + cxn.add(open("paths4.ttl"), "", RDFFormat.TURTLE); + cxn.commit(); + + log.trace("\n"+sail.getDatabase().dumpStore()); + + final String query = IOUtils.toString(open("paths4.rq")); + + log.trace("\n"+query); + + final TupleQuery tqr = cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + + final TupleQueryResult result = tqr.evaluate(); + + while (result.hasNext()) { + + log.trace(result.next()); + + } + + result.close(); + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + +} Property changes on: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/TestPaths.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.rq =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.rq (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.rq 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,11 @@ +PREFIX gas: <http://www.bigdata.com/rdf/gas#> +SELECT * { + SERVICE gas:service { + gas:program gas:gasClass "com.bigdata.rdf.graph.analytics.BFS" . + gas:program gas:in </:source> . # starting point + gas:program gas:target </:target> . # target vertices + gas:program gas:out ?v . # bound to the visited vertices. + gas:program gas:out1 ?depth . # bound to the depth of the visited vertices. + gas:program gas:out2 ?pred . # bound to the predecessor. + } +} order by ?depth \ No newline at end of file Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.ttl =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.ttl (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths1.ttl 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,13 @@ +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix bd: <http://www.bigdata.com/rdf#> . +@prefix : <:> . + +# first path +:source :edge :a . +:a :edge :b . +:b :edge :target . + +# second path +:source :edge :c . +:c :edge :d . +:d :edge :target . 
Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.rq =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.rq (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.rq 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,11 @@ +PREFIX gas: <http://www.bigdata.com/rdf/gas#> +SELECT * { + SERVICE gas:service { + gas:program gas:gasClass "com.bigdata.rdf.graph.analytics.SSSP" . + gas:program gas:in </:source> . # starting point + gas:program gas:target </:target> . # target vertices + gas:program gas:out ?s . # bound to the visited vertices. + gas:program gas:out1 ?distance . # bound to the distance of the visited vertices. + gas:program gas:out2 ?pred . # bound to the predecessor + } +} order by ?distance \ No newline at end of file Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.ttl =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.ttl (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths2.ttl 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,22 @@ +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix bd: <http://www.bigdata.com/rdf#> . +@prefix : <:> . + +# first path, length = 2 +:source :edge :a . +:a :edge :b . +:b :edge :target . + +# second path, length = 2 +:source :edge :c . +:c :edge :d . +:d :edge :target . + +# third path, length = 3 +:source :edge :e . +:e :edge :f . +:f :edge :g . +:g :edge :target . + +# extraneous +:target :edge :extraneous . Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.rq =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.rq (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.rq 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,16 @@ +PREFIX gas: <http://www.bigdata.com/rdf/gas#> +SELECT * { + SERVICE gas:service { + gas:program gas:gasClass "com.bigdata.rdf.graph.analytics.PATHS" . + gas:program gas:in </:target> . # starting point + gas:program gas:target </:source> . # target vertices + gas:program gas:traversalDirection "Reverse" . + gas:program gas:maxIterationsAfterTargets 2 . + # set this to something that reasonably constrains the runtime + gas:program gas:maxVisited 100000 . + gas:program gas:out ?s . # bound to the visited vertices. + gas:program gas:out1 ?depth . # bound to the depth + } + #?s </:edge> ?o . + #filter(!sameTerm(?s,</:target>)) . +} order by ?depth \ No newline at end of file Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.ttl =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.ttl (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths3.ttl 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,33 @@ +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix bd: <http://www.bigdata.com/rdf#> . +@prefix : <:> . + +# first path, length = 2 +:source :edge :a . +:a :edge :b . +:b :edge :target . + +# second path, length = 2 +:source :edge :c . +:c :edge :d . +:d :edge :target . + +# third path, length = 3 +:source :edge :e . +:e :edge :f . +:f :edge :g . +:g :edge :target . + +# third path, length = 4 +:source :edge :h . +:h :edge :i . +:i :edge :j . 
+:j :edge :k . +:k :edge :target . + +# extraneous +:target :edge :extraneous . + +# cycle +:b :edge :cycle . +:cycle :edge :a . Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.rq 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,18 @@ +PREFIX gas: <http://www.bigdata.com/rdf/gas#> +SELECT * { + SERVICE gas:service { + gas:program gas:gasClass "com.bigdata.rdf.graph.analytics.PATHS" . + gas:program gas:in </:target> . # starting point + gas:program gas:target </:source1> . # target vertices + gas:program gas:target </:source2> . # target vertices + gas:program gas:traversalDirection "Reverse" . + # gas:program gas:maxIterations 2 . + gas:program gas:maxIterationsAfterTargets 0 . + # set this to something that reasonably constrains the runtime + gas:program gas:maxVisited 100000 . + gas:program gas:out ?s . # bound to the visited vertices. + gas:program gas:out1 ?depth . # bound to the depth + } + ?s </:edge> ?o . + filter(?s != </:target>) . +} order by ?depth \ No newline at end of file Added: branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.ttl =================================================================== --- branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.ttl (rev 0) +++ branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/graph/paths4.ttl 2014-04-03 21:32:58 UTC (rev 8038) @@ -0,0 +1,36 @@ +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix bd: <http://www.bigdata.com/rdf#> . +@prefix : <:> . + +# first path from source1, length = 2 +:source1 :edge :b . +:b :edge :a . +:a :edge :target . + +# second path from source1, length = 3 +:source1 :edge :e . +:e :edge :d . +:d :edge :c . +:c :edge :target . + +# first path from source2, length = 3 +:source2 :edge :h . +:h :edge :g . +:g :edge :f . +:f :edge :target . + +# second path from source2, length = 4 +:source2 :edge :l . +:l :edge :k . +:k :edge :j . +:j :edge :i . +:i :edge :target . + +# only path from source3, length = 5 +:source3 :edge :z . +:z :edge :y . +:y :edge :x . +:x :edge :w . +:w :edge :v . +:v :edge :target . + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-04-03 21:20:28
Revision: 8037 http://sourceforge.net/p/bigdata/code/8037 Author: mrpersonick Date: 2014-04-03 21:20:25 +0000 (Thu, 03 Apr 2014) Log Message: ----------- added a missing synchronized block Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java 2014-04-03 21:17:40 UTC (rev 8036) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java 2014-04-03 21:20:25 UTC (rev 8037) @@ -200,8 +200,12 @@ * program N rounds from now where * N = maxIterationsAfterTargets. */ - this.maxIterations.set(Math.min(getMaxIterations(), - (int) total.getNRounds() + getMaxIterationsAfterTargets())); + synchronized(this.maxIterations) { + + this.maxIterations.set(Math.min(getMaxIterations(), + (int) total.getNRounds() + getMaxIterationsAfterTargets())); + + } if (log.isTraceEnabled()) { log.trace("All targets reached at round " + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
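The race being closed here: getMaxIterations() followed by set(...) is a compound read-modify-write, so two threads computing the minimum concurrently could interleave and the looser bound could win. Synchronizing on the AtomicInteger serializes the update, provided every writer takes the same lock. A lock-free alternative is a compareAndSet loop that atomically lowers the bound, sketched below as an illustration rather than as the project's implementation:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class AtomicMinSketch {
    static void lowerBound(final AtomicInteger maxIterations, final int candidate) {
        int current;
        do {
            current = maxIterations.get();
            if (candidate >= current) {
                return; // existing bound is already at least as tight
            }
        } while (!maxIterations.compareAndSet(current, candidate));
    }

    public static void main(String[] args) {
        final AtomicInteger maxIterations = new AtomicInteger(100);
        lowerBound(maxIterations, 7);
        System.out.println(maxIterations.get()); // 7
    }
}
```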
From: <mrp...@us...> - 2014-04-03 21:17:43
Revision: 8036 http://sourceforge.net/p/bigdata/code/8036 Author: mrpersonick Date: 2014-04-03 21:17:40 +0000 (Thu, 03 Apr 2014) Log Message: ----------- added an analytic to produce a connected subgraph between a source and one or more targets Added Paths: ----------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java Added: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java (rev 0) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java 2014-04-03 21:17:40 UTC (rev 8036) @@ -0,0 +1,561 @@ +/** + Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package com.bigdata.rdf.graph.analytics; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.log4j.Logger; +import org.openrdf.model.Statement; +import org.openrdf.model.Value; +import org.openrdf.model.ValueFactory; + +import com.bigdata.rdf.graph.EdgesEnum; +import com.bigdata.rdf.graph.Factory; +import com.bigdata.rdf.graph.FrontierEnum; +import com.bigdata.rdf.graph.IBinder; +import com.bigdata.rdf.graph.IBindingExtractor; +import com.bigdata.rdf.graph.IGASContext; +import com.bigdata.rdf.graph.IGASScheduler; +import com.bigdata.rdf.graph.IGASState; +import com.bigdata.rdf.graph.IPredecessor; +import com.bigdata.rdf.graph.impl.BaseGASProgram; + +/** + * Breadth First Search (BFS) is an iterative graph traversal primitive. The + * frontier is expanded iteratively until no new vertices are discovered. Each + * visited vertex is marked with the round (origin ZERO) in which it was + * visited. This is its distance from the initial frontier. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class PATHS extends BaseGASProgram<PATHS.VS, PATHS.ES, Void> implements + IPredecessor<PATHS.VS, PATHS.ES, Void> { + + private static final Logger log = Logger.getLogger(PATHS.class); + + public static class VS { + + /** + * <code>-1</code> until visited. When visited, set to the current round + * in order to assign each vertex its traversal depth. + * <p> + * Note: It is possible that the same vertex may be visited multiple + * times in a given expansion (from one or more source vertices that all + * target the same destination vertex). However, in this case the same + * value will be assigned by each visitor. Thus, synchronization is only + * required for visibility of the update within the round. As long as + * one thread reports that it modified the depth, the vertex will be + * scheduled. + */ + private final AtomicInteger depth = new AtomicInteger(-1); + + /** + * The predecessors are the first source vertex to visit a given target + * vertex. 
+ */ + private final Set<Value> predecessors = + Collections.synchronizedSet(new LinkedHashSet<Value>()); + + /** + * The depth at which this vertex was first visited (origin ZERO) and + * <code>-1</code> if the vertex has not been visited. + */ + public int depth() { +// synchronized (this) { + return depth.get(); +// } + } + + /** + * Return the first vertex to discover this vertex during BFS traversal. + */ + public Set<Value> predecessors() { + + return predecessors; + + } + + /** + * Note: This marks the vertex at the current traversal depth. + * + * @return <code>true</code> if the vertex was visited for the first + * time in this round and the calling thread is the thread that + * first visited the vertex (this helps to avoid multiple + * scheduling of a vertex). + */ + public boolean visit(final int depth, final Value predecessor) { + if (predecessor != null) + this.predecessors.add(predecessor); + if (this.depth.compareAndSet(-1/* expect */, depth/* newValue */)) { + // Scheduled by this thread. + return true; + } + return false; +// synchronized (this) { +// if (this.depth == -1) { +// this.depth = depth; +// return true; +// } +// return false; +// } + } + + @Override + public String toString() { + return "{depth=" + depth() + "}"; + } + + }// class VS + + /** + * Edge state is not used. + */ + public static class ES { + + } + + private static final Factory<Value, PATHS.VS> vertexStateFactory = new Factory<Value, PATHS.VS>() { + + @Override + public PATHS.VS initialValue(final Value value) { + + return new VS(); + + } + + }; + + @Override + public Factory<Value, PATHS.VS> getVertexStateFactory() { + + return vertexStateFactory; + + } + + @Override + public Factory<Statement, PATHS.ES> getEdgeStateFactory() { + + return null; + + } + + @Override + public FrontierEnum getInitialFrontierEnum() { + + return FrontierEnum.SingleVertex; + + } + + @Override + public EdgesEnum getGatherEdges() { + + return EdgesEnum.NoEdges; + + } + + @Override + public EdgesEnum getScatterEdges() { + + return EdgesEnum.OutEdges; + + } + + /** + * Not used. + */ + @Override + public void initVertex(final IGASContext<PATHS.VS, PATHS.ES, Void> ctx, + final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { + + state.getState(u).visit(0, null/* predecessor */); + + } + + /** + * Not used. + */ + @Override + public Void gather(IGASState<PATHS.VS, PATHS.ES, Void> state, Value u, Statement e) { + + throw new UnsupportedOperationException(); + + } + + /** + * Not used. + */ + @Override + public Void sum(final IGASState<PATHS.VS, PATHS.ES, Void> state, + final Void left, final Void right) { + + throw new UnsupportedOperationException(); + + } + + /** + * NOP + */ + @Override + public PATHS.VS apply(final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u, + final Void sum) { + + return null; + + } + + /** + * Returns <code>true</code>. + */ + @Override + public boolean isChanged(IGASState<VS, ES, Void> state, Value u) { + + return true; + + } + + /** + * The remote vertex is scheduled for activation unless it has already been + * visited. + * <p> + * Note: We are scattering to out-edges. Therefore, this vertex is + * {@link Statement#getSubject()}. The remote vertex is + * {@link Statement#getObject()}. 
+ */ + @Override + public void scatter(final IGASState<PATHS.VS, PATHS.ES, Void> state, + final IGASScheduler sch, final Value u, final Statement e) { + +// if (state.getTargetVertices().contains(u)) { +// // don't schedule any more vertices, we've hit a target +// return; +// } + + // remote vertex state. + final Value v = state.getOtherVertex(u, e); + + final VS otherState = state.getState(v); +// final VS otherState = state.getState(e.getObject()/* v */); + + // visit. + if (otherState.visit(state.round() + 1, u/* predecessor */)) { + + /* + * This is the first visit for the remote vertex. Add it to the + * schedule for the next iteration. + */ + + sch.schedule(v); + + } + + } + + @Override + public boolean nextRound(final IGASContext<PATHS.VS, PATHS.ES, Void> ctx) { + + return true; + + } + + /** + * {@inheritDoc} + * <p> + * <dl> + * <dt>{@value Bindings#DEPTH}</dt> + * <dd>The depth at which the vertex was first encountered during traversal. + * </dd> + * <dt>{@value Bindings#PREDECESSOR}</dt> + * <dd>The predecessor is the first vertex that discovers a given vertex + * during traversal.</dd> + * </dl> + */ + @Override + public List<IBinder<PATHS.VS, PATHS.ES, Void>> getBinderList() { + + final List<IBinder<PATHS.VS, PATHS.ES, Void>> tmp = super.getBinderList(); + + tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { + + @Override + public int getIndex() { + return Bindings.DEPTH; + } + + @Override + public Value bind(final ValueFactory vf, + final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { + + return vf.createLiteral(state.getState(u).depth.get()); + + } + }); + +// tmp.add(new IBinder<PATHS.VS, PATHS.ES, Void>() { +// +// @Override +// public int getIndex() { +// return Bindings.PREDECESSOR; +// } +// +// @Override +// public Value bind(final ValueFactory vf, +// final IGASState<PATHS.VS, PATHS.ES, Void> state, final Value u) { +// +// return state.getState(u).predecessor.get(); +// +// } +// }); + + return tmp; + + } + + /** + * Additional {@link IBindingExtractor.IBinder}s exposed by {@link PATHS}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + public interface Bindings extends BaseGASProgram.Bindings { + + /** + * The depth at which the vertex was visited. + */ + int DEPTH = 1; + +// /** +// * The BFS predecessor is the first vertex to discover a given vertex. +// * +// */ +// int PREDECESSOR = 2; + + } + +// /** +// * Reduce the active vertex state, returning a histogram reporting the #of +// * vertices at each distance from the starting vertex. There will always be +// * one vertex at depth zero - this is the starting vertex. For each +// * successive depth, the #of vertices that were labeled at that depth is +// * reported. This is essentially the same as reporting the size of the +// * frontier in each round of the traversal, but the histograph is reported +// * based on the vertex state. 
+// * +// * @author <a href="mailto:tho...@us...">Bryan +// * Thompson</a> +// */ +// protected static class HistogramReducer implements +// IReducer<VS, ES, Void, Map<Integer, AtomicLong>> { +// +// private final ConcurrentHashMap<Integer, AtomicLong> values = new ConcurrentHashMap<Integer, AtomicLong>(); +// +// @Override +// public void visit(final IGASState<VS, ES, Void> state, final Value u) { +// +// final VS us = state.getState(u); +// +// if (us != null) { +// +// final Integer depth = Integer.valueOf(us.depth()); +// +// AtomicLong newval = values.get(depth); +// +// if (newval == null) { +// +// final AtomicLong oldval = values.putIfAbsent(depth, +// newval = new AtomicLong()); +// +// if (oldval != null) { +// +// // lost data race. +// newval = oldval; +// +// } +// +// } +// +// newval.incrementAndGet(); +// +// } +// +// } +// +// @Override +// public Map<Integer, AtomicLong> get() { +// +// return Collections.unmodifiableMap(values); +// +// } +// +// } + + /* + * TODO Do this in parallel for each specified target vertex. + */ + @Override + public void prunePaths(final IGASContext<VS, ES, Void> ctx, + final Value[] targetVertices) { + + if (ctx == null) + throw new IllegalArgumentException(); + + if (targetVertices == null) + throw new IllegalArgumentException(); + + final IGASState<PATHS.VS, PATHS.ES, Void> gasState = ctx.getGASState(); + +// for (Value v : gasState.values()) { +// log.trace(v); +// } + + final Set<Value> retainSet = new HashSet<Value>(); + + for (Value v : targetVertices) { + + if (!gasState.isVisited(v)) { + + // This target was not reachable. + continue; + + } + + /* + * Walk the predecessors back to a starting vertex. + */ + retainSet.add(v); + + visitPredecessors(gasState, v, retainSet); + +// Value current = v; +// +// while (current != null) { +// +// retainSet.add(current); +// +// final PATHS.VS currentState = gasState.getState(current); +// +// final Value predecessor = currentState.predecessor(); +// +// current = predecessor; +// +// } + + } // next target vertex. 
+ + gasState.retainAll(retainSet); + + } + + protected void visitPredecessors( + final IGASState<PATHS.VS, PATHS.ES, Void> gasState, final Value v, + final Set<Value> retainSet) { + + final PATHS.VS currentState = gasState.getState(v); + + for (Value pred : currentState.predecessors()) { + + if (pred == null) { + + continue; + + } + + if (retainSet.contains(pred)) { + + continue; + + } + + retainSet.add(pred); + + visitPredecessors(gasState, pred, retainSet); + + } + + } + +// @Override +// public <T> IReducer<VS, ES, Void, T> getDefaultAfterOp() { +// +// class NV implements Comparable<NV> { +// public final int n; +// public final long v; +// public NV(final int n, final long v) { +// this.n = n; +// this.v = v; +// } +// @Override +// public int compareTo(final NV o) { +// if (o.n > this.n) +// return -1; +// if (o.n < this.n) +// return 1; +// return 0; +// } +// } +// +// final IReducer<VS, ES, Void, T> outerReducer = new IReducer<VS, ES, Void, T>() { +// +// final HistogramReducer innerReducer = new HistogramReducer(); +// +// @Override +// public void visit(IGASState<VS, ES, Void> state, Value u) { +// +// innerReducer.visit(state, u); +// +// } +// +// @Override +// public T get() { +// +// final Map<Integer, AtomicLong> h = innerReducer.get(); +// +// final NV[] a = new NV[h.size()]; +// +// int i = 0; +// +// for (Map.Entry<Integer, AtomicLong> e : h.entrySet()) { +// +// a[i++] = new NV(e.getKey().intValue(), e.getValue().get()); +// +// } +// +// Arrays.sort(a); +// +// System.out.println("distance, frontierSize, sumFrontierSize"); +// long sum = 0L; +// for (NV t : a) { +// +// System.out.println(t.n + ", " + t.v + ", " + sum); +// +// sum += t.v; +// +// } +// +// return null; +// } +// +// }; +// +// return outerReducer; +// +// } + +} Property changes on: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PATHS.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property |
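[A note on the visit() logic in the PATHS vertex state above: the compareAndSet() on the AtomicInteger depth field is what makes concurrent scatter calls safe. Exactly one thread can transition the depth from -1, and only that thread schedules the vertex, while every caller still records its predecessor. Below is a minimal, self-contained sketch of that pattern; the class, thread count, and vertex names are invented for illustration and are not part of the commit.]

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

public class VisitSketch {

    // -1 means "not yet visited", mirroring PATHS.VS.
    private final AtomicInteger depth = new AtomicInteger(-1);

    private final Set<String> predecessors =
            Collections.synchronizedSet(new LinkedHashSet<String>());

    /** Returns true iff the calling thread performed the first visit. */
    public boolean visit(final int d, final String predecessor) {
        if (predecessor != null)
            predecessors.add(predecessor);
        // Only one thread can transition -1 -> d.
        return depth.compareAndSet(-1/* expect */, d/* newValue */);
    }

    public static void main(final String[] args) throws InterruptedException {
        final VisitSketch vs = new VisitSketch();
        final AtomicInteger firstVisits = new AtomicInteger();
        final Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            final String pred = "v" + i;
            workers[i] = new Thread(new Runnable() {
                public void run() {
                    if (vs.visit(1, pred))
                        firstVisits.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread t : workers)
            t.join();
        // Exactly one worker wins the first visit, but all four
        // predecessors are recorded - which is why PATHS can report
        // multiple paths into the same vertex.
        System.out.println("first visits = " + firstVisits.get());      // 1
        System.out.println("predecessors = " + vs.predecessors.size()); // 4
    }
}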
From: <mrp...@us...> - 2014-04-03 21:17:06
|
Revision: 8035 http://sourceforge.net/p/bigdata/code/8035 Author: mrpersonick Date: 2014-04-03 21:17:03 +0000 (Thu, 03 Apr 2014) Log Message: ----------- added the ability to halt a program once the "target" vertices have been visited, either immediately or a specified number of iterations afterwards Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java 2014-04-03 21:15:33 UTC (rev 8034) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASContext.java 2014-04-03 21:17:03 UTC (rev 8035) @@ -15,6 +15,7 @@ */ package com.bigdata.rdf.graph; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; @@ -174,7 +175,7 @@ * the attributes for the edges). */ void setLinkAttributeType(URI linkType); - + /** * Set an optional {@link IReducer} that will run after the * {@link IGASProgram} is terminated. This may be used to extract results * @@ -227,5 +228,35 @@ */ @Override IGASStats call() throws Exception; + + + /** + * Set the target vertices for the program (if any). + */ + void setTargetVertices(Value[] targetVertices); + /** + * Get the target vertices for the program (if any). + * @return The target vertices. + */ + Set<Value> getTargetVertices(); + + /** + * Specify the maximum number of iterations for the algorithm to continue + * once all the target vertices have been reached. Default is for the + * program to run until completion without regard to whether the target + * vertices have been reached or not. A value of ZERO will stop the program + * exactly when all target vertices are found in the frontier. + * + * @param newValue + * The maximum number of iterations past the target vertices. + */ + void setMaxIterationsAfterTargets(int newValue); + + /** + * Return the maximum number of iterations for the algorithm to continue + * once all the target vertices have been reached. + */ + int getMaxIterationsAfterTargets(); + } \ No newline at end of file Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2014-04-03 21:15:33 UTC (rev 8034) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/IGASState.java 2014-04-03 21:17:03 UTC (rev 8035) @@ -119,6 +119,18 @@ boolean isVisited(Value v); /** + * Return <code>true</code> iff the specified vertices all have an associated + * vertex state object - this is interpreted as meaning that the vertices have + * been "visited". + * + * @param v + * The vertices. + * @return <code>true</code> iff there is vertex state associated with all + * specified vertices. + */ + boolean isVisited(Set<Value> v); + + /** * The current frontier. 
*/ IStaticFrontier frontier(); Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java 2014-04-03 21:15:33 UTC (rev 8034) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASContext.java 2014-04-03 21:17:03 UTC (rev 8035) @@ -15,7 +15,11 @@ */ package com.bigdata.rdf.graph.impl; +import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -100,6 +104,20 @@ null); /** + * A collection of target vertices for the program to reach. + */ + private final Set<Value> targetVertices = + Collections.synchronizedSet(new LinkedHashSet<Value>()); + + /** + * The maximum number of iterations after the target vertices have been + * reached. Default behavior is to continue on even after the targets have + * been reached. + */ + private final AtomicInteger maxIterationsAfterTargets = new AtomicInteger( + Integer.MAX_VALUE); + + /** * * @param namespace * The namespace of the graph (KB instance). @@ -158,6 +176,11 @@ program.before(this); + if (log.isTraceEnabled()) { + log.trace("# of targets: " + targetVertices.size()); + log.trace("max iterations after targets: " + maxIterationsAfterTargets.get()); + } + while (!gasState.frontier().isEmpty()) { /* @@ -167,7 +190,30 @@ * GASStats. */ - if (total.getNRounds() + 1 >= getMaxIterations()) { + if (targetVertices.size() > 0 && + getMaxIterationsAfterTargets() < Integer.MAX_VALUE) { + + if (gasState.isVisited(targetVertices)) { + + /* + * If we've reached all target vertices then halt the + * program N rounds from now where + * N = maxIterationsAfterTargets. 
+ */ + this.maxIterations.set(Math.min(getMaxIterations(), + (int) total.getNRounds() + getMaxIterationsAfterTargets())); + + if (log.isTraceEnabled()) { + log.trace("All targets reached at round " + + total.getNRounds() + ", halting at round " + + this.maxIterations.get()); + } + + } + + } + + if (total.getNRounds() + 1 > getMaxIterations()) { log.warn("Halting: maxIterations=" + getMaxIterations() + ", #rounds=" + total.getNRounds()); @@ -882,7 +928,40 @@ this.linkAttributeType.set(linkAttributeType); } + + @Override + public void setTargetVertices(final Value[] targetVertices) { + + this.targetVertices.addAll(Arrays.asList(targetVertices)); + + } + @Override + public Set<Value> getTargetVertices() { + + return this.targetVertices; + + } + + @Override + public void setMaxIterationsAfterTargets(final int newValue) { + + if (newValue < 0) + throw new IllegalArgumentException(); + + this.maxIterationsAfterTargets.set(newValue); + + } + + @Override + public int getMaxIterationsAfterTargets() { + + return maxIterationsAfterTargets.get(); + + } + + + // /** // * {@inheritDoc} // * <p> Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java 2014-04-03 21:15:33 UTC (rev 8034) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/impl/GASState.java 2014-04-03 21:17:03 UTC (rev 8035) @@ -217,6 +217,13 @@ } @Override + public boolean isVisited(final Set<Value> v) { + + return vertexState.keySet().containsAll(v); + + } + + @Override public ES getState(final Statement e) { if (edgeState == null) @@ -453,4 +460,10 @@ } +// public Set<Value> values() { +// +// return vertexState.keySet(); +// +// } + } Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-03 21:15:33 UTC (rev 8034) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/GASService.java 2014-04-03 21:17:03 UTC (rev 8035) @@ -194,6 +194,19 @@ int DEFAULT_MAX_ITERATIONS = Integer.MAX_VALUE; /** + * The maximum #of iterations for the GAS program after the targets + * have been reached (optional, default + * {@value #DEFAULT_MAX_ITERATIONS_AFTER_TARGETS}). Default behavior + * is to not stop once the targets are reached. + * + * @see #DEFAULT_MAX_ITERATIONS_AFTER_TARGETS + * @see IGASContext#setMaxIterationsAfterTargets(int) + */ + URI MAX_ITERATIONS_AFTER_TARGETS = new URIImpl(NAMESPACE + "maxIterationsAfterTargets"); + + int DEFAULT_MAX_ITERATIONS_AFTER_TARGETS = Integer.MAX_VALUE; + + /** * The maximum #of vertices in the visited set for the GAS program * (optional, default {@value #DEFAULT_MAX_VISITED}). 
 * @@ -406,6 +419,7 @@ private final int nthreads; private final TraversalDirectionEnum traversalDirection; private final int maxIterations; + private final int maxIterationsAfterTargets; private final int maxVisited; private final URI linkType, linkAttrType; private final Class<IGASProgram<VS, ES, ST>> gasClass; @@ -452,6 +466,11 @@ .createLiteral(Options.DEFAULT_MAX_ITERATIONS))) .intValue(); + this.maxIterationsAfterTargets = ((Literal) getOnlyArg(Options.PROGRAM, + Options.MAX_ITERATIONS_AFTER_TARGETS, store.getValueFactory() + .createLiteral(Options.DEFAULT_MAX_ITERATIONS_AFTER_TARGETS))) + .intValue(); + this.maxVisited = ((Literal) getOnlyArg( Options.PROGRAM, Options.MAX_VISITED, @@ -774,8 +793,16 @@ gasContext.setMaxIterations(maxIterations); + gasContext.setMaxIterationsAfterTargets(maxIterationsAfterTargets); + gasContext.setMaxVisited(maxVisited); + + if (targetVertices != null) { + gasContext.setTargetVertices(toIV(targetVertices)); + + } + // Optional link type constraint. if (linkType != null) gasContext.setLinkType(linkType); @@ -803,7 +830,7 @@ gasState.setFrontier(gasContext, tmp); } - + // Run the analytic. final IGASStats stats = (IGASStats) gasContext.call(); |
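[Taken together, the new IGASContext methods in r8035 give callers a way to run an analytic only as far as a set of target vertices. The sketch below shows how a caller might use them; it assumes an already-constructed IGASContext and target Value instances (obtaining those is engine-specific and elided), and the class and method names are invented for illustration.]

import org.openrdf.model.Value;

import com.bigdata.rdf.graph.IGASContext;
import com.bigdata.rdf.graph.IGASStats;

public class TargetHaltSketch {

    /**
     * Run the analytic, halting as soon as every target vertex has
     * been visited.
     */
    public static IGASStats runUntilTargets(
            final IGASContext<?, ?, ?> ctx, final Value[] targets)
            throws Exception {

        ctx.setTargetVertices(targets);

        // Per the javadoc above: a value of ZERO stops the program
        // exactly when all target vertices are found in the frontier.
        ctx.setMaxIterationsAfterTargets(0);

        // Run the analytic; returns the traversal statistics.
        return ctx.call();
    }
}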
From: <mrp...@us...> - 2014-04-03 21:15:38
|
Revision: 8034 http://sourceforge.net/p/bigdata/code/8034 Author: mrpersonick Date: 2014-04-03 21:15:33 +0000 (Thu, 03 Apr 2014) Log Message: ----------- added a predecessor field and implemented the IPredecessor interface Modified Paths: -------------- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-04-03 13:23:17 UTC (rev 8033) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/SSSP.java 2014-04-03 21:15:33 UTC (rev 8034) @@ -15,7 +15,9 @@ */ package com.bigdata.rdf.graph.analytics; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import org.apache.log4j.Logger; @@ -31,6 +33,7 @@ import com.bigdata.rdf.graph.IGASContext; import com.bigdata.rdf.graph.IGASScheduler; import com.bigdata.rdf.graph.IGASState; +import com.bigdata.rdf.graph.IPredecessor; import com.bigdata.rdf.graph.impl.BaseGASProgram; /** @@ -46,7 +49,8 @@ * so we need a different data structure to collect them (we need to * store the predecessor when we run SSSP to do this). */ -public class SSSP extends BaseGASProgram<SSSP.VS, SSSP.ES, Integer/* dist */> { +public class SSSP extends BaseGASProgram<SSSP.VS, SSSP.ES, Integer/* dist */> + implements IPredecessor<SSSP.VS, SSSP.ES, Integer/* dist */> { private static final Logger log = Logger.getLogger(SSSP.class); @@ -101,6 +105,15 @@ */ private final AtomicReference<Value> predecessor = new AtomicReference<Value>(); + /** + * Return the vertex preceding this vertex on the shortest path. + */ + public Value predecessor() { + + return predecessor.get(); + + } + // /** // * Set the distance for the vertex to ZERO. This is done for the // * starting vertex. @@ -445,6 +458,22 @@ } }); + tmp.add(new IBinder<SSSP.VS, SSSP.ES, Integer>() { + + @Override + public int getIndex() { + return Bindings.PREDECESSOR; + } + + @Override + public Value bind(final ValueFactory vf, + final IGASState<SSSP.VS, SSSP.ES, Integer> state, final Value u) { + + return state.getState(u).predecessor.get(); + + } + }); + return tmp; } @@ -461,6 +490,58 @@ */ int DISTANCE = 1; + /** + * The predecessor vertex on a shortest path. + * + */ + int PREDECESSOR = 2; + } + @Override + public void prunePaths(IGASContext<VS, ES, Integer> ctx, + Value[] targetVertices) { + + if (ctx == null) + throw new IllegalArgumentException(); + + if (targetVertices == null) + throw new IllegalArgumentException(); + + final IGASState<SSSP.VS, SSSP.ES, Integer> gasState = ctx.getGASState(); + + final Set<Value> retainSet = new HashSet<Value>(); + + for (Value v : targetVertices) { + + if (!gasState.isVisited(v)) { + + // This target was not reachable. + continue; + + } + + /* + * Walk the predecessors back to a starting vertex. + */ + Value current = v; + + while (current != null) { + + retainSet.add(current); + + final SSSP.VS currentState = gasState.getState(current); + + final Value predecessor = currentState.predecessor(); + + current = predecessor; + + } + + } // next target vertex. + + gasState.retainAll(retainSet); + + } + } |
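[The prunePaths() implementation above walks a single predecessor pointer back from each reachable target, so the retained set is exactly the union of one shortest path per target. The same walk in miniature, using a plain map in place of the GAS vertex state; the vertex names and the helper class are made up for illustration.]

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PrunePathsSketch {

    /**
     * Walk the predecessor chain back from each target, collecting the
     * vertices to retain. A missing key means the target was never
     * visited; a null value marks the starting vertex.
     */
    public static Set<String> retainSet(
            final Map<String, String> predecessor, final String... targets) {

        final Set<String> retain = new HashSet<String>();

        for (String target : targets) {

            if (!predecessor.containsKey(target))
                continue; // This target was not reachable.

            for (String cur = target; cur != null; cur = predecessor.get(cur))
                retain.add(cur);
        }

        return retain;
    }

    public static void main(final String[] args) {
        final Map<String, String> pred = new HashMap<String, String>();
        pred.put("a", null); // "a" is the starting vertex.
        pred.put("b", "a");
        pred.put("c", "b");
        pred.put("d", "a"); // visited, but not on the path to "c".
        // Prints a set containing a, b and c; "d" is pruned.
        System.out.println(retainSet(pred, "c"));
    }
}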
From: <mar...@us...> - 2014-04-03 13:23:21
|
Revision: 8033 http://sourceforge.net/p/bigdata/code/8033 Author: martyncutcher Date: 2014-04-03 13:23:17 +0000 (Thu, 03 Apr 2014) Log Message: ----------- Remove sleep added to pass HA1 snapshot tests Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 09:48:19 UTC (rev 8032) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-03 13:23:17 UTC (rev 8033) @@ -3429,13 +3429,12 @@ return; if (!quorum.isHighlyAvailable()) { - // FIXME: Find the reason why this delay is needed and remove it! - // - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - e.printStackTrace(); - } + // FIXME: Find the reason why this delay is needed to pass HA1 snapshot tests +// try { +// Thread.sleep(1000); +// } catch (InterruptedException e) { +// e.printStackTrace(); +// } return; } |
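[The commented-out delay above papers over an unexplained race in the non-HA code path. If the HA1 snapshot tests are in fact waiting on an asynchronous condition, a bounded poll is usually a more robust stopgap than a fixed Thread.sleep(). The sketch below is a generic illustration of that idea only, not a fix taken from the codebase; the Condition interface, poll interval, and timeout are invented for the example.]

public class AwaitSketch {

    /** A condition to poll for; invented for this example. */
    public interface Condition {
        boolean isTrue();
    }

    /**
     * Poll until the condition holds or the timeout elapses. Returns
     * true iff the condition held before the deadline.
     */
    public static boolean await(final Condition c, final long timeoutMillis)
            throws InterruptedException {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!c.isTrue()) {
            if (System.currentTimeMillis() >= deadline)
                return false;
            Thread.sleep(10); // small poll interval.
        }
        return true;
    }
}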