From: <tho...@us...> - 2012-09-27 16:27:57
Revision: 6633
          http://bigdata.svn.sourceforge.net/bigdata/?rev=6633&view=rev
Author:   thompsonbry
Date:     2012-09-27 16:27:50 +0000 (Thu, 27 Sep 2012)

Log Message:
-----------
Various changes to support the solution set cache integrated with the backing
Journal. Tests and SPARQL UPDATE/QUERY through the NSS are now running.
Tx-based tests still fail as I have not yet addressed tx isolation for
solution set access and update. The cache is still turned off by default.

Added the ability to invoke DumpJournal from the NSS status page using a URL
query parameter. This capability is not advertised on the page.

@see https://sourceforge.net/apps/trac/bigdata/ticket/531 (Solution Set Cache)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
    branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
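To try the unadvertised status-page hook described above, a client only needs to request the status page with the new parameters. A minimal sketch follows; the host, port, and /bigdata/status context path are deployment assumptions, not part of this change. Only the dumpJournal and dumpPages request parameters are introduced by the patch, and the servlet tests merely for their presence, so no parameter values are required.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class DumpJournalStatusExample {

    public static void main(String[] args) throws Exception {

        // Assumed deployment URL; adjust host/port/context to your NSS install.
        final URL url = new URL(
                "http://localhost:8080/bigdata/status?dumpJournal&dumpPages");

        final BufferedReader r = new BufferedReader(new InputStreamReader(
                url.openStream()));
        try {
            String line;
            while ((line = r.readLine()) != null) {
                // The response is the XHTML status page with the journal dump
                // embedded in a <pre> section.
                System.out.println(line);
            }
        } finally {
            r.close();
        }
    }
}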
Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -47,6 +47,7 @@
 import com.bigdata.rdf.internal.encoder.SolutionSetStreamDecoder;
 import com.bigdata.rdf.internal.encoder.SolutionSetStreamEncoder;
 import com.bigdata.rdf.sparql.ast.ISolutionSetStats;
+import com.bigdata.rdf.sparql.ast.SolutionSetStats;
 import com.bigdata.rwstore.IPSOutputStream;
 import com.bigdata.stream.Stream;
 import com.bigdata.striterator.Chunkerator;
@@ -190,11 +191,34 @@
      * @return The {@link ISolutionSetStats}.
      */
     public ISolutionSetStats getStats() {
-
+
+        /*
+         * Note: This field is set by setCheckpoint().
+         */
+
         return solutionSetStats;
 
     }
 
+    /**
+     * Return the address of the {@link SolutionSetStats} to be written into the
+     * next {@link Checkpoint} record. The caller must have {@link #flush()} the
+     * {@link SolutionSetStream} as a pre-condition (to ensure that the stats
+     * have been written out). If the {@link SolutionSetStats} are not loaded,
+     * then the address from the last {@link Checkpoint} record is returned.
+     */
+    public long getStatsAddr() {
+
+        if (solutionSetStats != null) {
+
+            return solutionSetStats.addr;
+
+        }
+
+        return getCheckpoint().getBloomFilterAddr();
+
+    }
+
     public ICloseableIterator<IBindingSet[]> get() {
 
         if (rootAddr == IRawStore.NULL)
@@ -375,9 +399,10 @@
          */
 
         if (solutionSetStats != null
-                && solutionSetStats.addr != getCheckpoint()
-                        .getBloomFilterAddr()) {
-
+                && (solutionSetStats.addr == IRawStore.NULL //
+                        || solutionSetStats.addr != getCheckpoint().getBloomFilterAddr())//
+        ) {
+
             solutionSetStats.addr = getStore()
                     .write(ByteBuffer.wrap(SerializerUtil
                             .serialize(solutionSetStats.delegate)));

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/Checkpoint.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -29,6 +29,7 @@
 import java.io.ObjectOutput;
 import java.nio.ByteBuffer;
 
+import com.bigdata.bop.solutions.SolutionSetStream;
 import com.bigdata.htree.HTree;
 import com.bigdata.io.SerializerUtil;
 import com.bigdata.journal.AbstractJournal;
@@ -489,12 +490,12 @@
                  * is defined but the bloom filter has been disabled, then we
                  * also write a 0L so that the bloom filter is no longer
                  * reachable from the new checkpoint.
+                 *
+                 * FIXME GIST : The SolutionSetStats are hacked into the
+                 * bloom filter addr for the Stream.
                  */
-//                (htree.bloomFilter == null ? htree.getCheckpoint()
-//                        .getBloomFilterAddr()
-//                        : htree.bloomFilter.isEnabled() ? htree.bloomFilter
-//                                .getAddr() : 0L),//
-                0L, // TODO No bloom filter yet. Do we want to support this?
+                ((SolutionSetStream)stream).getStatsAddr(),//
+                // 0, // htree.height,// Note: HTree is not balanced (height not uniform)
                 0L,//stream.getNodeCount(),//
                 0L,//stream.getLeafCount(),//
@@ -831,6 +832,9 @@
         case HTree:
             ndx = HTree.load(store, checkpointAddr, readOnly);
             break;
+        case Stream:
+            ndx = Stream.load(store, checkpointAddr, readOnly);
+            break;
         default:
             throw new AssertionError("Unknown: " + checkpoint.getIndexType());
         }
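The new case Stream above rounds out the generic checkpoint-based loader, so a persisted Stream is re-materialized through the same dispatch as a BTree or HTree. A minimal sketch of that entry point, assuming Checkpoint.loadFromCheckpoint(IRawStore, long, boolean) is the public method enclosing this switch in the patched tree; the journal file path is an assumption, and a real checkpoint address would come from a commit record rather than the placeholder used here.

import java.util.Properties;

import com.bigdata.btree.Checkpoint;
import com.bigdata.btree.ICheckpointProtocol;
import com.bigdata.journal.Journal;
import com.bigdata.journal.Options;

public class LoadFromCheckpointExample {

    public static void main(String[] args) {

        final Properties p = new Properties();
        p.setProperty(Options.FILE, "/var/data/bigdata.jnl"); // assumed path

        final Journal journal = new Journal(p);
        try {
            // Placeholder only: a real checkpoint address would be resolved
            // from a commit record / Name2Addr entry.
            final long checkpointAddr = 0L;

            // Dispatches on IndexTypeEnum: BTree, HTree, and (with this
            // change) Stream all load through the same entry point.
            final ICheckpointProtocol ndx = Checkpoint.loadFromCheckpoint(
                    journal, checkpointAddr, true/* readOnly */);

            System.out.println(ndx.getCheckpoint());
        } finally {
            journal.close();
        }
    }
}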
Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -3776,7 +3776,7 @@
             }
 
             // Resolve the index against that commit record.
-            ndx = (BTree) getIndexWithCommitRecord(name, commitRecord);
+            ndx = (ICheckpointProtocol) getIndexWithCommitRecord(name, commitRecord);
 
             if (ndx == null) {

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/DumpJournal.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -30,6 +30,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.util.Date;
 import java.util.Iterator;
@@ -71,9 +72,6 @@
  *
  *          TODO add an option to dump only as of a specified commitTime?
  *
- *          TODO add an option to restrict the names of the indices to be dumped
- *          (-name=<regex>).
- *
  *          TODO GIST : Support all types of indices.
  *
  * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585"> GIST
@@ -107,22 +105,29 @@
//    }
 
     /**
-     * Dump one or more journal files.
+     * Dump one or more journal files:
      *
-     * @param args
-     *            The name(s) of the journal file to open.
-     *            <dl>
-     *            <dt>-history </dt>
-     *            <dd>Dump metadata for indices in all commit records (default
-     *            only dumps the metadata for the indices as of the most current
-     *            committed state).</dd>
-     *            <dt>-indices</dt>
-     *            <dd>Dump the indices (does not show the tuples by default).</dd>
-     *            <dt>-pages</dt>
-     *            <dd>Dump the pages of the indices and reports some information on the page size.</dd>
-     *            <dt>-tuples</dt>
-     *            <dd>Dump the records in the indices.</dd>
-     *            </dl>
+     * <pre>
+     * usage: (option*) filename+
+     * </pre>
+     *
+     * where <i>option</i> is any of:
+     * <dl>
+     * <dt>-namespace</dt>
+     * <dd>Dump only those indices having the specified namespace prefix.</dd>
+     * <dt>-history</dt>
+     * <dd>Dump metadata for indices in all commit records (default only dumps
+     * the metadata for the indices as of the most current committed state).</dd>
+     * <dt>-indices</dt>
+     * <dd>Dump the indices (does not show the tuples by default).</dd>
+     * <dt>-pages</dt>
+     * <dd>Dump the pages of the indices and reports some information on the
+     * page size.</dd>
+     * <dt>-tuples</dt>
+     * <dd>Dump the records in the indices.</dd>
+     * </dl>
+     *
+     * where <i>filename</i> is one or more journal file names.
      */
// FIXME feature is not finished. Must differentiate different address types.
//            * <dt>-addr ADDR</dt>
@@ -139,6 +144,10 @@
 
         int i = 0;
 
+        // Zero or more namespaces to be dumped. All are dumped if none are
+        // specified.
+        final List<String> namespaces = new LinkedList<String>();
+
         boolean dumpHistory = false;
 
         boolean dumpIndices = false;
@@ -166,6 +175,12 @@
 
             }
 
+            else if(arg.equals("-namespace")) {
+
+                namespaces.add(args[i + 1]);
+
+            }
+
             else if(arg.equals("-indices")) {
 
                 dumpIndices = true;
@@ -196,9 +211,9 @@
                 throw new RuntimeException("Unknown argument: " + arg);
 
             }
-        
-        for(; i<args.length; i++) {
-        
+
+        for (; i < args.length; i++) {
+
             final File file = new File(args[i]);
 
             try {
@@ -253,8 +268,8 @@
 
                    final DumpJournal dumpJournal = new DumpJournal(journal);
 
-                    dumpJournal.dumpJournal(dumpHistory, dumpPages,
-                            dumpIndices, showTuples);
+                    dumpJournal.dumpJournal(System.out, namespaces,
+                            dumpHistory, dumpPages, dumpIndices, showTuples);
 
                     for(Long addr : addrs) {
@@ -304,6 +319,33 @@
     public void dumpJournal(final boolean dumpHistory, final boolean dumpPages,
             final boolean dumpIndices, final boolean showTuples) {
 
+        dumpJournal(System.out, null/* namespaces */, dumpHistory, dumpPages,
+                dumpIndices, showTuples);
+
+    }
+
+    /**
+     * @param out
+     *            Where to write the output.
+     * @param namespaces
+     *            When non-empty and non-<code>null</code>, dump only those
+     *            indices having any of the specified namespaces.
+     * @param dumpHistory
+     *            Dump metadata for indices in all commit records (default only
+     *            dumps the metadata for the indices as of the most current
+     *            committed state).
+     * @param dumpPages
+     *            Dump the pages of the indices and reports some information on
+     *            the page size.
+     * @param dumpIndices
+     *            Dump the indices (does not show the tuples by default).
+     * @param showTuples
+     *            Dump the records in the indices.
+     */
+    public void dumpJournal(final PrintStream out, final List<String> namespaces,
+            final boolean dumpHistory, final boolean dumpPages,
+            final boolean dumpIndices, final boolean showTuples) {
+
         final FileMetadata fmd = journal.getFileMetadata();
 
         if (fmd != null) {
@@ -314,8 +356,8 @@
              */
 
            // dump the MAGIC and VERSION.
-            System.out.println("magic=" + Integer.toHexString(fmd.magic));
-            System.out.println("version="
+            out.println("magic=" + Integer.toHexString(fmd.magic));
+            out.println("version="
                    + Integer.toHexString(fmd.version));
 
            /*
@@ -329,7 +371,7 @@
 
            final long bytesAvailable = (fmd.userExtent - fmd.nextOffset);
 
-            System.out.println("extent=" + fmd.extent + "(" + fmd.extent
+            out.println("extent=" + fmd.extent + "(" + fmd.extent
                    / Bytes.megabyte + "M)" + ", userExtent="
                    + fmd.userExtent + "(" + fmd.userExtent
                    / Bytes.megabyte + "M)" + ", bytesAvailable="
@@ -356,7 +398,7 @@
 
                if (rootBlock0 != null) {
 
-                    System.out.println(new RootBlockView(
+                    out.println(new RootBlockView(
                            true/* rootBlock0 */, rootBlock0,
                            new ChecksumUtility()).toString());
 
@@ -371,18 +413,18 @@
 
                if (rootBlock1 != null) {
 
-                    System.out.println(new RootBlockView(
+                    out.println(new RootBlockView(
                            false/* rootBlock0 */, rootBlock1,
                            new ChecksumUtility()).toString());
 
                }
 
            }
 
-            // System.out.println(fmd.rootBlock0.toString());
-            // System.out.println(fmd.rootBlock1.toString());
+            // out.println(fmd.rootBlock0.toString());
+            // out.println(fmd.rootBlock1.toString());
 
            // report on which root block is the current root block.
-            System.out.println("The current root block is #"
+            out.println("The current root block is #"
                    + (journal.getRootBlockView().isRootBlock0() ? 0 : 1));
 
        }
@@ -398,7 +440,7 @@
 
                store.showAllocators(sb);
 
-                System.out.println(sb);
+                out.println(sb);
 
            }
 
@@ -407,7 +449,7 @@
 
                final DeleteBlockStats stats = store.checkDeleteBlocks(journal);
 
-                System.out.println(stats.toString(store));
+                out.println(stats.toString(store));
 
                final Set<Integer> duplicateAddrs = stats
                        .getDuplicateAddresses();
@@ -453,18 +495,18 @@
            final CommitRecordIndex commitRecordIndex = journal
                    .getCommitRecordIndex();
 
-            System.out.println("There are " + commitRecordIndex.getEntryCount()
+            out.println("There are " + commitRecordIndex.getEntryCount()
                    + " commit points.");
 
            if (dumpGRS) {
 
-                dumpGlobalRowStore();
+                dumpGlobalRowStore(out);
 
            }
 
            if (dumpHistory) {
 
-                System.out.println("Historical commit points follow in temporal sequence (first to last):");
+                out.println("Historical commit points follow in temporal sequence (first to last):");
 
//                final IKeyBuilder keyBuilder = KeyBuilder.newInstance(Bytes.SIZEOF_LONG);
//
@@ -488,20 +530,20 @@
 
                while(itr.hasNext()) {
 
-                    System.out.println("----");
+                    out.println("----");
 
                    final CommitRecordIndex.Entry entry = itr.next().getObject();
 
-                    System.out.print("Commit Record: " + entry.commitTime
+                    out.print("Commit Record: " + entry.commitTime
                            + ", addr=" + journal.toString(entry.addr)+", ");
 
                    final ICommitRecord commitRecord = journal
                            .getCommitRecord(entry.commitTime);
 
-                    System.out.println(commitRecord.toString());
+                    out.println(commitRecord.toString());
 
-                    dumpNamedIndicesMetadata(commitRecord, dumpPages,
-                            dumpIndices, showTuples);
+                    dumpNamedIndicesMetadata(out, namespaces, commitRecord,
+                            dumpPages, dumpIndices, showTuples);
 
                }
 
@@ -513,11 +555,11 @@
 
                final ICommitRecord commitRecord = journal.getCommitRecord();
 
-                System.out.println(commitRecord.toString());
+                out.println(commitRecord.toString());
 
-                dumpNamedIndicesMetadata(commitRecord, dumpPages, dumpIndices,
-                        showTuples);
-
+                dumpNamedIndicesMetadata(out, namespaces, commitRecord,
+                        dumpPages, dumpIndices, showTuples);
+
            }
 
        }
@@ -533,7 +575,7 @@
 
    }
 
-    public void dumpGlobalRowStore() {
+    public void dumpGlobalRowStore(final PrintStream out) {
 
        final SparseRowStore grs = journal.getGlobalRowStore(journal
                .getLastCommitTime());
@@ -546,7 +588,7 @@
 
                final ITPS tps = itr.next();
 
-                System.out.println(tps.toString());
+                out.println(tps.toString());
 
            }
 
@@ -562,7 +604,7 @@
 
                final ITPS tps = itr.next();
 
-                System.out.println(tps.toString());
+                out.println(tps.toString());
 
            }
 
@@ -576,7 +618,8 @@
     * @param journal
     * @param commitRecord
     */
-    private void dumpNamedIndicesMetadata(final ICommitRecord commitRecord,
+    private void dumpNamedIndicesMetadata(final PrintStream out,
+            final List<String> namespaces, final ICommitRecord commitRecord,
            final boolean dumpPages, final boolean dumpIndices,
            final boolean showTuples) {
 
@@ -590,8 +633,33 @@
 
            // a registered index.
            final String name = nitr.next();
+
+            if (namespaces != null && !namespaces.isEmpty()) {
+
+                boolean found = false;
+
+                for(String namespace : namespaces) {
+
+                    if (name.startsWith(namespace)) {
+
+                        found = true;
+
+                        break;
+
+                    }
+
+                }
+
+                if (!found) {
+
+                    // Skip this index. Not a desired namespace.
+                    continue;
+
+                }
+
+            }
 
-            System.out.println("name=" + name);
+            out.println("name=" + name);
 
            // load index from its checkpoint record.
            final ICheckpointProtocol ndx;
@@ -622,10 +690,10 @@
            }
 
            // show checkpoint record.
-            System.out.println("\t" + ndx.getCheckpoint());
+            out.println("\t" + ndx.getCheckpoint());
 
            // show metadata record.
-            System.out.println("\t" + ndx.getIndexMetadata());
+            out.println("\t" + ndx.getIndexMetadata());
 
            /*
             * Collect statistics on the page usage for the index.
@@ -642,7 +710,7 @@
                    final PageStats stats = ((ISimpleTreeIndexAccess) ndx)
                            .dumpPages();
 
-                    System.out.println("\t" + stats);
+                    out.println("\t" + stats);
 
                    pageStats.put(name, stats);
 
@@ -671,7 +739,7 @@
                /*
                 * Write out the header.
                 */
-                System.out.println(PageStats.getHeaderRow());
+                out.println(PageStats.getHeaderRow());
 
                for (Map.Entry<String, PageStats> e : pageStats.entrySet()) {
 
@@ -688,7 +756,7 @@
                    final ICheckpointProtocol tmp = journal
                            .getIndexWithCommitRecord(name, commitRecord);
 
-                    System.out.println("name: " + name + ", class="
+                    out.println("name: " + name + ", class="
                            + tmp.getClass() + ", checkpoint="
                            + tmp.getCheckpoint());
 
@@ -700,7 +768,7 @@
                    /*
                     * Write out the stats for this index.
                     */
-                    System.out.println(stats.getDataRow());
+                    out.println(stats.getDataRow());
 
                }
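The DumpJournal changes above add a -namespace filter and route all output through a caller-supplied PrintStream. A sketch of driving the new overload programmatically; the journal file path and the "kb." namespace prefix are illustrative assumptions. The equivalent command line would be: java com.bigdata.journal.DumpJournal -namespace kb. -pages /var/data/bigdata.jnl

import java.util.Collections;
import java.util.Properties;

import com.bigdata.journal.DumpJournal;
import com.bigdata.journal.Journal;
import com.bigdata.journal.Options;

public class DumpNamespaceExample {

    public static void main(String[] args) {

        final Properties p = new Properties();
        // Assumed journal file location.
        p.setProperty(Options.FILE, "/var/data/bigdata.jnl");

        final Journal journal = new Journal(p);
        try {
            // Dump only indices whose names start with "kb." (an assumed
            // namespace prefix), including page-level statistics.
            new DumpJournal(journal).dumpJournal(System.out,
                    Collections.singletonList("kb."), false/* dumpHistory */,
                    true/* dumpPages */, false/* dumpIndices */,
                    false/* showTuples */);
        } finally {
            journal.close();
        }
    }
}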
Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -229,6 +229,8 @@
//        this.counter = new AtomicLong( checkpoint.getCounter() );
 
         this.recordVersion = checkpoint.getRecordVersion();
+
+        this.rootAddr = checkpoint.getRootAddr();
 
     }

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -133,7 +133,7 @@
      * journal instance, then the commit protocol should be taking care of
      * things for us.
      */
-    private static final boolean useMainDatabaseForCache = false;
+    private static final boolean useMainDatabaseForCache = true;
 
     private IIndexManager getLocalIndexManager() {

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -332,6 +332,9 @@
                     timestamp);
 
         }
+
+        // Note: Forces all access to be unisolated.
+//        return (SolutionSetStream) cache.getStore().getUnisolatedIndex(fqn);
 
     }
 
@@ -479,8 +482,14 @@
 
         if (sset != null) {
 
-            return sset.getStats();
+            final ISolutionSetStats stats = sset.getStats();
 
+            if (stats == null)
+                throw new RuntimeException("No statistics? solutionSet="
+                        + solutionSet);
+
+            return stats;
+
         }
 
         return null;

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -1675,7 +1675,11 @@
         } else if (xhtml) {
             switch (queryType) {
             case ASK:
-                // Should be all we need to do.
+                /*
+                 * TODO This is just sending back text/plain. If we want to keep
+                 * to the XHTML semantics, then we should send back XML with an
+                 * XSL style sheet.
+                 */
                 acceptStr = BooleanQueryResultFormat.TEXT.getDefaultMIMEType();
                 break;
             case SELECT:
@@ -1690,7 +1694,10 @@
                 break;
             case DESCRIBE:
             case CONSTRUCT:
-                // Generate RDF/XML so we can apply XSLT transform.
+                /* Generate RDF/XML so we can apply XSLT transform.
+                 *
+                 * FIXME This should be sending back RDFs or using a lens.
+                 */
                 acceptStr = RDFFormat.RDFXML.getDefaultMIMEType();
                 break;
             default:

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java	2012-09-27 12:16:36 UTC (rev 6632)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java	2012-09-27 16:27:50 UTC (rev 6633)
@@ -24,7 +24,9 @@
 
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.io.PrintStream;
 import java.io.Writer;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -50,7 +52,10 @@
 import com.bigdata.bop.engine.QueryLog;
 import com.bigdata.bop.fed.QueryEngineFactory;
 import com.bigdata.counters.CounterSet;
+import com.bigdata.journal.AbstractJournal;
+import com.bigdata.journal.DumpJournal;
 import com.bigdata.journal.IIndexManager;
+import com.bigdata.journal.Journal;
 import com.bigdata.rdf.sail.sparql.ast.SimpleNode;
 import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery;
 import com.bigdata.rdf.sparql.ast.ASTContainer;
@@ -85,8 +90,23 @@
      * which could be served.
      */
     private static final String SHOW_NAMESPACES = "showNamespaces";
-    
+
     /**
+     * Request a low-level dump of the journal.
+     *
+     * @see DumpJournal
+     */
+    private static final String DUMP_JOURNAL = "dumpJournal";
+
+    /**
+     * Request a low-level dump of the pages in the indices for the journal. The
+     * {@link #DUMP_JOURNAL} option MUST also be specified.
+     *
+     * @see DumpJournal
+     */
+    private static final String DUMP_PAGES = "dumpPages";
+
+    /**
      * The name of a request parameter used to request a display of the
     * currently running queries. Legal values for this request parameter are
     * either {@value #DETAILS} or no value.
@@ -234,6 +254,10 @@
      * <dt>showNamespaces</dt>
      * <dd>List the namespaces for the registered {@link AbstractTripleStore}s.</dd>
      * </dl>
+     * <dt>dumpJournal</dt>
+     * <dd>Provides low-level information about the backing {@link Journal} (if
+     * any).</dd>
+     * </dl>
      * </p>
      *
      * @todo This status page combines information about the addressed KB and
@@ -290,8 +314,47 @@
         }
 
         // open the body
-        current = current.node("body");
+        current = current.node("body","");
 
+        // Dump Journal?
+        final boolean dumpJournal = req.getParameter(DUMP_JOURNAL) != null;
+
+        if(dumpJournal && getIndexManager() instanceof AbstractJournal) {
+
+            current.node("h1", "Dump Journal").node("p", "Running...");
+
+//            final XMLBuilder.Node section = current.node("pre");
+
+            // flush writer before writing on PrintStream.
+            w.flush();
+
+            // dump onto the response.
+            final PrintStream out = new PrintStream(resp.getOutputStream());
+
+            out.print("<pre>\n");
+
+            final DumpJournal dump = new DumpJournal((Journal) getIndexManager());
+
+            final List<String> namespaces = Collections.emptyList();
+
+            final boolean dumpHistory = false;
+
+            final boolean dumpPages = req.getParameter(DUMP_PAGES) != null;
+
+            final boolean dumpIndices = false;
+
+            final boolean dumpTuples = false;
+
+            dump.dumpJournal(out, namespaces, dumpHistory, dumpPages, dumpIndices, dumpTuples);
+
+            // flush PrintStream before resuming writes on Writer.
+            out.flush();
+
+            // close section.
+//            section.close();
+            out.print("\n</pre>");
+
+        }
+
         current.node("br", "Accepted query count="
                 + getBigdataRDFContext().getQueryIdFactory().get());