This list is closed; nobody may subscribe to it.
Message archive by month (number of messages in each month):

| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2010 | | | | | | | 139 | 94 | 232 | 143 | 138 | 55 |
| 2011 | 127 | 90 | 101 | 74 | 148 | 241 | 169 | 121 | 157 | 199 | 281 | 75 |
| 2012 | 107 | 122 | 184 | 73 | 14 | 49 | 26 | 103 | 133 | 61 | 51 | 55 |
| 2013 | 59 | 72 | 99 | 62 | 92 | 19 | 31 | 138 | 47 | 83 | 95 | 111 |
| 2014 | 125 | 60 | 119 | 136 | 270 | 83 | 88 | 30 | 47 | 27 | 23 | |
| 2015 | | | | | | | | | 3 | | | |
| 2016 | | | 4 | 1 | | | | | | | | |
From: <tob...@us...> - 2014-06-10 20:59:51
Revision: 8459 http://sourceforge.net/p/bigdata/code/8459 Author: tobycraig Date: 2014-06-10 20:59:43 +0000 (Tue, 10 Jun 2014) Log Message: ----------- Fixed file drag & drop on update pane Modified Paths: -------------- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-10 14:36:56 UTC (rev 8458) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-10 20:59:43 UTC (rev 8459) @@ -322,22 +322,28 @@ /* Update */ -function handleDragOver(e) { +function handleDragOver(cm, e) { e.stopPropagation(); e.preventDefault(); - e.originalEvent.dataTransfer.dropEffect = 'copy'; + e.dataTransfer.dropEffect = 'copy'; } -function handleFile(e) { +function handleDrop(cm, e) { e.stopPropagation(); e.preventDefault(); + var files = e.dataTransfer.files; + handleFile(files); +} - if(e.type == 'drop') { - var files = e.originalEvent.dataTransfer.files; - } else { - var files = e.originalEvent.target.files; - } - +function handleFileInput(e) { + e.stopPropagation(); + e.preventDefault(); + var files = e.originalEvent.target.files; + handleFile(files); + $('#update-file').val(''); +} + +function handleFile(files) { // only one file supported if(files.length > 1) { alert('Ignoring all but first file'); @@ -348,31 +354,29 @@ // if file is too large, tell user to supply local path if(f.size > 1048576 * 100) { alert('File too large, enter local path to file'); - $('#update-box').val('/path/to/' + f.name); + EDITORS.update.setValue('/path/to/' + f.name); setType('path'); - $('#update-box').prop('disabled', false) + EDITORS.update.setOption('readOnly', false) $('#large-file-message, #clear-file').hide(); } else { var fr = new FileReader(); - fr.onload = function(e2) { + fr.onload = function(e) { if(f.size > 10240) { // do not use textarea - $('#update-box').prop('disabled', true) + EDITORS.update.setOption('readOnly', true) $('#filename').html(f.name); $('#large-file-message, #clear-file').show() - $('#update-box').val(''); - FILE_CONTENTS = e2.target.result; + EDITORS.update.setValue(''); + FILE_CONTENTS = e.target.result; } else { // display file contents in the textarea clearFile(); - $('#update-box').val(e2.target.result); + EDITORS.update.setValue(e.target.result); } - guessType(f.name.split('.').pop().toLowerCase(), e2.target.result); + guessType(f.name.split('.').pop().toLowerCase(), e.target.result); }; fr.readAsText(f); } - - $('#update-file').val(''); } function clearFile(e) { @@ -490,11 +494,11 @@ var sparql_update_commands = ['INSERT', 'DELETE', 'LOAD', 'CLEAR']; -$('#update-file').change(handleFile); -$('#update-box').on('dragover', handleDragOver) - .on('drop', handleFile) - .on('paste', handlePaste) - .on('input propertychange', function() { $('#update-errors').hide(); }); +$('#update-file').change(handleFileInput); +// $('#update-box').on('dragover', handleDragOver) +// .on('drop', handleFile) +// .on('paste', handlePaste) +// .on('input propertychange', function() { $('#update-errors').hide(); }); $('#clear-file').click(clearFile); $('#update-update').click(submitUpdate); @@ -506,6 +510,9 @@ ERROR_CHARACTER_MARKERS.update.clear(); } }); +EDITORS.update.on('dragover', handleDragOver); +EDITORS.update.on('drop', handleDrop); +EDITORS.update.on('paste', handlePaste); EDITORS.update.addKeyMap({'Ctrl-Enter': submitUpdate}); function 
submitUpdate(e) {
From: <tho...@us...> - 2014-06-10 14:37:11
Revision: 8458  http://sourceforge.net/p/bigdata/code/8458
Author:   thompsonbry
Date:     2014-06-10 14:36:56 +0000 (Tue, 10 Jun 2014)

Log Message:
-----------
reduced developer log levels for haLog, txLog from ALL to INFO.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties	2014-06-10 13:08:37 UTC (rev 8457)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j-dev.properties	2014-06-10 14:36:56 UTC (rev 8458)
@@ -282,9 +282,9 @@
 # Normal data loader (single threaded).
 log4j.logger.com.bigdata.rdf.store.DataLoader=INFO
-log4j.logger.com.bigdata.ha=ALL
-log4j.logger.com.bigdata.txLog=ALL
-log4j.logger.com.bigdata.haLog=ALL
+log4j.logger.com.bigdata.ha=INFO
+log4j.logger.com.bigdata.txLog=INFO
+log4j.logger.com.bigdata.haLog=INFO
 #log4j.logger.com.bigdata.rwstore=ALL
 log4j.logger.com.bigdata.journal=INFO
 #log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL
From: <mar...@us...> - 2014-06-10 13:09:26
Revision: 8457  http://sourceforge.net/p/bigdata/code/8457
Author:   martyncutcher
Date:     2014-06-10 13:08:37 +0000 (Tue, 10 Jun 2014)

Log Message:
-----------
Branch to test new RWStore defensive commit protocol

Added Paths:
-----------
    branches/RWSTORE_COMMITSTATE_973/
From: <mar...@us...> - 2014-06-10 13:01:48
Revision: 8456 http://sourceforge.net/p/bigdata/code/8456 Author: martyncutcher Date: 2014-06-10 13:01:39 +0000 (Tue, 10 Jun 2014) Log Message: ----------- Implements a demi-space metaBits allocation to remove problems associated with storing the metaBits in a FixedAllocator slot. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-09 19:30:26 UTC (rev 8455) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-10 13:01:39 UTC (rev 8456) @@ -306,6 +306,11 @@ * should be tuned to target perhaps 80% of an 8k page in order to have * only a small number of pages that spill over into blobs. * + * TODO: We should consider a more adaptable BLOB approach where we + * specify the maximum "slop" in an allocation as the means to determine + * a blob boundary. So, for example, a 5.5K allocation, with maximum slop of + * 1K, would be allocated as a blob of 4K + 2K and not an 8K slot. + * * @see #ALLOCATION_SIZES */ String DEFAULT_ALLOCATION_SIZES = "1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128"; @@ -321,12 +326,25 @@ * <p> * Note: A value of <code>9</code> may be used to stress the logic which * is responsible for the growth in the meta bits region. + * <p> + * This has now been deprecated since it adds complexity with no significant benefit */ - String META_BITS_SIZE = RWStore.class.getName() + ".metaBitsSize"; + @Deprecated String META_BITS_SIZE = RWStore.class.getName() + ".metaBitsSize"; - String DEFAULT_META_BITS_SIZE = "9"; + @Deprecated String DEFAULT_META_BITS_SIZE = "9"; /** + * Defines whether the metabits should be allocated an explicit demispace (default) + * or if not, then to use a standard Allocation (which limits the metabits size to + * the maximum FixedAllocator slot size). + * <p> + * The value should be either "true" or "false" + */ + String META_BITS_DEMI_SPACE = RWStore.class.getName() + ".metabitsDemispace"; + + String DEFAULT_META_BITS_DEMI_SPACE = "true"; + + /** * Defines the number of bits that must be free in a FixedAllocator for * it to be added to the free list. This is used to ensure a level * of locality when making large numbers of allocations within a single @@ -398,7 +416,7 @@ static final int OFFSET_BITS = 13; static final int OFFSET_BITS_MASK = 0x1FFF; // was 0xFFFF - static final int ALLOCATION_SCALEUP = 16; // multiplier to convert allocations based on minimum allocation of 32k + static final int ALLOCATION_SCALEUP = 16; // multiplier to convert allocations based on minimum allocation of 64k static private final int META_ALLOCATION = 8; // 8 * 32K is size of meta Allocation // If required, then allocate 1M direct buffers @@ -771,16 +789,29 @@ log.info(AbstractTransactionService.Options.MIN_RELEASE_AGE + "=" + m_minReleaseAge); - cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( - Options.META_BITS_SIZE, - Options.DEFAULT_META_BITS_SIZE)); + // Remove parameterisation, we want to use fixed Allocator block sizing + // there is no significant advantage to parameterize this since file cache + // locality is handled by size of the allocation - 256K is a reasonable + // number as 32 * 8 * 1K size. + // + // Equally there is no benefit to increasing the size of the Allocators beyond + // 1K. 
+// cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( +// Options.META_BITS_SIZE, +// Options.DEFAULT_META_BITS_SIZE)); + +// cDefaultMetaBitsSize = 9; - if (cDefaultMetaBitsSize < 9) - throw new IllegalArgumentException(Options.META_BITS_SIZE - + " : Must be GTE 9"); +// if (cDefaultMetaBitsSize < 9) +// throw new IllegalArgumentException(Options.META_BITS_SIZE +// + " : Must be GTE 9"); m_metaBitsSize = cDefaultMetaBitsSize; + m_useMetabitsDemispace = Boolean.valueOf(fileMetadata.getProperty( + Options.META_BITS_DEMI_SPACE, + Options.DEFAULT_META_BITS_DEMI_SPACE)); + cDefaultFreeBitsThreshold = Integer.valueOf(fileMetadata.getProperty( Options.FREE_BITS_THRESHOLD, Options.DEFAULT_FREE_BITS_THRESHOLD)); @@ -1419,6 +1450,14 @@ * allocators. So, 16-bits gives us up 64k * 32 = 2M allocators. * Except, that the total #of allocators is reduced by the presence * of a startAddr every N positions in the metaBits[]. + * + * The theoretical maximum number is also reduced since the number + * of "committed" bits could be half the total number of bits. + * + * The theoretical restriction is also limited by the maximum indexable + * allocator, since only 19 bits is available to the index, which, once + * the sign is removed reduces the maximum number of addressable + * allocators to 256K. */ final int metaBitsStore = (int) (rawmbaddr & 0xFFFF); @@ -1445,7 +1484,9 @@ + storeVersion + ", cVersion=" + cVersion); } m_lastDeferredReleaseTime = strBuf.readLong(); - cDefaultMetaBitsSize = strBuf.readInt(); + if (strBuf.readInt() != cDefaultMetaBitsSize) { + throw new IllegalStateException("Store opened with unsupported metabits size"); + } final int allocBlocks = strBuf.readInt(); m_storageStatsAddr = strBuf.readLong(); @@ -1483,12 +1524,6 @@ readAllocationBlocks(); - // clearOutstandingDeferrels(deferredFreeListAddr, deferredFreeListEntries); - - if (physicalAddress(m_metaBitsAddr) == 0) { - throw new IllegalStateException("Free/Invalid metaBitsAddr on load"); - } - } if (log.isInfoEnabled()) @@ -2963,6 +2998,25 @@ * last one being the allocation for the metabits themselves (allowing for * an extension!). * + * Ticket #936: The meta-bits allocation is currently made from the FixedAllocator + * region. This works well providing the required allocation bits is less than + * the maximum FixedAllocator slot size. While this is neat, there are problems at scale + * for maximum slot sizes less than 64K. + * + * To address the 8K bits in a 1K alloctor, 13 bits are required, this leaves 19 bits + * to index an Allocator, or 18 bits without the sign => 256K maximum index. + * + * To be able to commit changes to all 256K allocators requires 512K metabits => 64K bytes. + * We would like to associate the 64K allocations with the root block, so a single 128K + * allocation would be split into 64K demi-spaces, one for each root block. + * + * While a negative address indicates a standard RW allocation a ositive address can be used + * to indicate an explicitly allocated region. The trick is to ensure that the region is + * allocated on a 128K boundary, then the lower bits can indicate which demi-space is used with + * a simple XOR. + * + * Note that we must ensure that any previous demi-space write is removed from the WCS. + * * @throws IOException */ private void writeMetaBits() throws IOException { @@ -3013,7 +3067,8 @@ * Note: this address is set by commit() prior to calling * writeMetaBits(). 
*/ - final long addr = physicalAddress(m_metaBitsAddr); + //final long addr = physicalAddress(m_metaBitsAddr); + final long addr = ((long) m_metaBitsAddr) << ALLOCATION_SCALEUP; if (addr == 0) { throw new IllegalStateException("Invalid metabits address: " + m_metaBitsAddr); } @@ -3024,7 +3079,9 @@ if (log.isDebugEnabled()) log.debug("writing metabits at: " + addr); - m_writeCacheService.write(addr, ByteBuffer.wrap(buf), 0/*chk*/, false/*useChecksum*/, m_metaBitsAddr/*latchedAddr*/); + // Similar to writeMetaBits, we are no longer writing to a FixedAllocator managed region, + // so no latched address is provided + m_writeCacheService.write(addr, ByteBuffer.wrap(buf), 0/*chk*/, false/*useChecksum*/, 0 /*latchedAddr*/); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -3077,19 +3134,41 @@ * that we do not need to reallocate the metabits region when we are * writing out the updated versions of the FixedAllocators). */ - final long oldMetaBits = m_metaBitsAddr; - final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; - m_metaBitsAddr = alloc(getRequiredMetaBitsStorage(), null); +// final long oldMetaBits = m_metaBitsAddr; +// final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; +// m_metaBitsAddr = alloc(getRequiredMetaBitsStorage(), null); - // DEBUG SANITY CHECK! + /* + * If m_metaBitsAddr < 0 then was allocated from FixedAllocators (for existing-store compatibility) + */ + if (m_metaBitsAddr < 0) { if (physicalAddress(m_metaBitsAddr) == 0) { throw new IllegalStateException("Returned MetaBits Address not valid!"); } + final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; // Call immediateFree - no need to defer freeof metaBits, this // has to stop somewhere! // No more allocations must be made - immediateFree((int) oldMetaBits, oldMetaBitsSize); + immediateFree((int) m_metaBitsAddr, oldMetaBitsSize); + + m_metaBitsAddr = 0; + } + + if (m_metaBitsAddr == 0) { + // Allocate special region to be able to store maximum metabits (128k of 2 64K demi-space + // Must be aligned on 128K boundary and allocations are made in units of 64K. + while (m_nextAllocation % 2 != 0) { + m_nextAllocation--; + } + m_metaBitsAddr = -m_nextAllocation; // must be positive to differentiate from FixedAllocator address + m_nextAllocation -= 2; // allocate 2 * 64K + } else { // remove previous write from WCS + m_writeCacheService.removeWriteToAddr(convertAddr(-m_metaBitsAddr), 0); + } + + // Now "toggle" m_metaBitsAddr - 64K boundary + m_metaBitsAddr ^= 0x01; // toggle zero or 64K offset // There must be no buffered deferred frees // assert m_deferredFreeOut.getBytesWritten() == 0; @@ -3397,11 +3476,13 @@ /** * @see Options#META_BITS_SIZE */ - private int cDefaultMetaBitsSize; + final private int cDefaultMetaBitsSize = 9; /** * @see Options#META_BITS_SIZE */ volatile private int m_metaBitsSize; + + volatile private boolean m_useMetabitsDemispace = true; /** * Package private since is uded by FixedAllocators * @@ -4146,11 +4227,17 @@ } /** - * The + * Since we need to store the absolute address and the size can be + * a maximum of 64K, the absolute address is limited to 48 bits, setting + * the maximum address as 140T, which is sufficient. 
+ * * @return long representation of metaBitsAddr PLUS the size */ public long getMetaBitsAddr() { - long ret = physicalAddress((int) m_metaBitsAddr); + assert m_metaBitsAddr > 0; + + // long ret = physicalAddress((int) m_metaBitsAddr); + long ret = convertAddr(-m_metaBitsAddr); // maximum 48 bit address range ret <<= 16; // include space for version, allocSizes and deferred free info AND cDefaultMetaBitsSize @@ -4166,6 +4253,14 @@ } /** + * + * @return the address of the metaBits demi-space + */ + public long getMetaBitsDemiSpace() { + return convertAddr(-m_metaBitsAddr); + } + + /** * @return long representation of metaStartAddr PLUS the size where addr + * size is fileSize (not necessarily physical size) */ @@ -4180,6 +4275,10 @@ */ public long getNextOffset() { long ret = -m_nextAllocation; + if (m_metaBitsAddr > 0) { + // FIX for sign use in m_metaBitsAddr when packing into long + ret++; + } ret <<= 32; ret += -m_metaBitsAddr; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
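The demi-space scheme described in the commit above is easier to follow in isolation. The sketch below restates the core address arithmetic from the diff (the 128K alignment, the positive-address convention, the XOR toggle between the two 64K halves, and the 16-bit shift into a byte offset). The field names mirror RWStore, but the class, the method, and the starting values are illustrative only and are not part of the revision.

```java
/**
 * Illustrative sketch of the metabits demi-space addressing from r8456.
 * Field names mirror RWStore; the class itself is hypothetical.
 */
public class MetaBitsDemiSpaceSketch {

    /** Allocation units are 64K, so shifting by 16 bits converts units to a byte offset. */
    private static final int ALLOCATION_SCALEUP = 16;

    /** Allocation cursor in (negative) 64K units; the starting value here is arbitrary. */
    private int m_nextAllocation = -4;

    /** Positive value = demi-space address in 64K units; 0 = not yet allocated. */
    private int m_metaBitsAddr = 0;

    /** Returns the byte offset at which the next metabits write should land. */
    public long nextMetaBitsWriteOffset() {
        if (m_metaBitsAddr == 0) {
            // Align the cursor on a 128K (2 x 64K) boundary.
            while (m_nextAllocation % 2 != 0) {
                m_nextAllocation--;
            }
            // A positive address distinguishes the demi-space from FixedAllocator addresses.
            m_metaBitsAddr = -m_nextAllocation;
            m_nextAllocation -= 2; // reserve 2 x 64K
        }
        // Toggle between the two 64K halves so each commit writes the other half.
        m_metaBitsAddr ^= 0x01;
        // Convert 64K units into an absolute byte offset, as writeMetaBits() does.
        return ((long) m_metaBitsAddr) << ALLOCATION_SCALEUP;
    }

    public static void main(String[] args) {
        final MetaBitsDemiSpaceSketch s = new MetaBitsDemiSpaceSketch();
        System.out.println(s.nextMetaBitsWriteOffset()); // one 64K half
        System.out.println(s.nextMetaBitsWriteOffset()); // the other 64K half
    }
}
```

Successive calls alternate between the two 64K offsets, which is what lets each root block reference its own copy of the metabits; the real code additionally removes the previous demi-space write from the write cache before toggling, as noted in the diff.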
From: <tho...@us...> - 2014-06-09 19:30:32
Revision: 8455 http://sourceforge.net/p/bigdata/code/8455 Author: thompsonbry Date: 2014-06-09 19:30:26 +0000 (Mon, 09 Jun 2014) Log Message: ----------- Added an HALOG_DIR and a SNAPSHOT_DIR to allow these things to be set independently of the DATA_DIR (which is where the journal lives). Updated the wiki page at http://wiki.bigdata.com/wiki/index.php/HAJournalServer#Durable_Data to declare these new variables and document others. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-06-09 17:42:45 UTC (rev 8454) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config 2014-06-09 19:30:26 UTC (rev 8455) @@ -89,10 +89,12 @@ private static dataDir = new File(ConfigMath.getProperty("DATA_DIR",""+serviceDir)); // HA log directory. - private static haLogDir = new File(serviceDir,"HALog"); + private static haLogDir = new File(ConfigMath.getProperty("HALOG_DIR",""+serviceDir+File.separator+"HALog")); + //private static haLogDir = new File(serviceDir,"HALog"); // Snapshot directory. - private static snapshotDir = new File(serviceDir,"snapshot"); + private static snapshot = new File(ConfigMath.getProperty("SNAPSHOT_DIR",""+serviceDir+File.separator+"snapshot")); + //private static snapshotDir = new File(serviceDir,"snapshot"); /* Snapshot policy. Choose one. * @@ -332,7 +334,7 @@ new NV(Options.BUFFER_MODE,""+BufferMode.DiskRW), - new NV(Options.WRITE_CACHE_BUFFER_COUNT,"2000"), + new NV(Options.WRITE_CACHE_BUFFER_COUNT,ConfigMath.getProperty("WRITE_CACHE_BUFFER_COUNT","2000")), new NV(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY,"4000"), Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-06-09 17:42:45 UTC (rev 8454) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/default/bigdataHA 2014-06-09 19:30:26 UTC (rev 8455) @@ -55,6 +55,10 @@ #export JETTY_PORT=8080 #export JETTY_XML=var/jetty/WEB-INF/jetty.xml #export JETTY_RESOURCE_BASE=var/jetty +#export WRITE_CACHE_BUFFER_COUNT=2000 +#export DATA_DIR= +#export HALOG_DIR= +#export SNAPSHOT_DIR= #export COLLECT_QUEUE_STATISTICS= #export COLLECT_PLATFORM_STATISTICS= #export GANGLIA_REPORT= This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-06-09 17:42:50
Revision: 8454 http://sourceforge.net/p/bigdata/code/8454 Author: thompsonbry Date: 2014-06-09 17:42:45 +0000 (Mon, 09 Jun 2014) Log Message: ----------- Added logic to collect the #of raw records and the IRawStore level storage bytes associated with those raw records (the backing store may have additional overhead). We have observed some cases where a lot of raw records were appearing in the ID2TERMS index. Prior to this commit those raw records were not directly visible using DumpJournal, but only in the creation of a large number of small (64 or 128 byte) allocations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2014-06-09 13:30:41 UTC (rev 8453) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/PageStats.java 2014-06-09 17:42:45 UTC (rev 8454) @@ -24,6 +24,7 @@ package com.bigdata.btree; import com.bigdata.btree.data.IAbstractNodeData; +import com.bigdata.btree.data.ILeafData; import com.bigdata.rawstore.IRawStore; /** @@ -63,6 +64,14 @@ */ public static final int[] SLOT_SIZES = new int[] { 64, 128, 192, 320, 512, 768, 1024, 2048, 3072, 4096, 8192 }; + /** + * The number of raw record allocations and the byte size of those raw + * record allocations. + * + * TODO We could also use a histogram over this information (raw records + * sizes). + */ + public long nrawRecs = 0, rawRecBytes; public PageStats() { @@ -100,7 +109,7 @@ /** Return {@link #nodeBytes} plus {@link #leafBytes}. */ public long getTotalBytes() { - return nodeBytes + leafBytes; + return nodeBytes + leafBytes + rawRecBytes; } /** The average bytes per node. */ @@ -113,6 +122,11 @@ return (nleaves == 0 ? 0 : leafBytes / nleaves); } + /** The average bytes per raw record. */ + public long getBytesPerRawRecord() { + return (nrawRecs== 0 ? 
0 : rawRecBytes / nrawRecs); + } + public String toString() { final StringBuilder sb = new StringBuilder(); sb.append(getClass().getName()); @@ -120,14 +134,17 @@ sb.append(",m=" + m); sb.append(",nnodes=" + nnodes); sb.append(",nleaves=" + nleaves); + sb.append(",nrawRecs=" + nrawRecs); sb.append(",nodeBytes=" + nodeBytes); sb.append(",minNodeBytes=" + minNodeBytes); sb.append(",maxNodeBytes=" + maxNodeBytes); sb.append(",leafBytes=" + leafBytes); sb.append(",minLeafBytes=" + minLeafBytes); sb.append(",maxLeafBytes=" + maxLeafBytes); + sb.append(",rawRecBytes=" + rawRecBytes); sb.append(",bytesPerNode=" + getBytesPerNode()); sb.append(",bytesPerLeaf=" + getBytesPerLeaf()); + sb.append(",bytesPerRawRec=" + getBytesPerRawRecord()); sb.append(",nerrors=" + nerrors); final long npages = (nleaves + nnodes); for (int i = 0; i < SLOT_SIZES.length; i++) { @@ -177,18 +194,24 @@ sb.append('\t'); sb.append("nentries"); sb.append('\t'); + sb.append("nrawRecs"); + sb.append('\t'); sb.append("nerrors"); sb.append('\t'); sb.append("nodeBytes"); sb.append('\t'); sb.append("leafBytes"); sb.append('\t'); + sb.append("rawRecBytes"); + sb.append('\t'); sb.append("totalBytes"); sb.append('\t'); sb.append("avgNodeBytes"); sb.append('\t'); sb.append("avgLeafBytes"); sb.append('\t'); + sb.append("avgRawRecBytes"); + sb.append('\t'); sb.append("minNodeBytes"); sb.append('\t'); sb.append("maxNodeBytes"); @@ -246,18 +269,24 @@ sb.append('\t'); sb.append(stats.ntuples); sb.append('\t'); + sb.append(stats.nrawRecs); + sb.append('\t'); sb.append(stats.nerrors); sb.append('\t'); sb.append(stats.nodeBytes); sb.append('\t'); sb.append(stats.leafBytes); sb.append('\t'); + sb.append(stats.rawRecBytes); + sb.append('\t'); sb.append(stats.getTotalBytes()); sb.append('\t'); sb.append(stats.getBytesPerNode()); sb.append('\t'); sb.append(stats.getBytesPerLeaf()); sb.append('\t'); + sb.append(stats.getBytesPerRawRecord()); + sb.append('\t'); sb.append(stats.minNodeBytes); sb.append('\t'); sb.append(stats.maxNodeBytes); @@ -363,6 +392,20 @@ if (stats.maxLeafBytes < nbytes) stats.maxLeafBytes = nbytes; + if (node instanceof ILeafData) { + final ILeafData data = (ILeafData) node; + if(data.hasRawRecords()) { + for (int i = 0; i < data.getKeys().size(); i++) { + final long rawAddr = data.getRawRecord(i); + if (rawAddr != IRawStore.NULL) { + stats.nrawRecs++; + stats.rawRecBytes += store + .getByteCount(rawAddr); + } + } + } + } + } else { stats.nnodes++; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
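The raw-record accounting added here is spread across a fairly long diff, so the leaf-scanning part is restated below in isolation. ILeafData, IRawStore and the PageStats counters are the types used in the commit; the wrapping class and method are hypothetical.

```java
import com.bigdata.btree.PageStats;
import com.bigdata.btree.data.ILeafData;
import com.bigdata.rawstore.IRawStore;

// Sketch only: restates the leaf handling from r8454; not the actual page visitor.
public class RawRecordTallySketch {

    static void tallyRawRecords(final ILeafData leaf, final IRawStore store,
            final PageStats stats) {
        if (!leaf.hasRawRecords())
            return; // no values stored outside this leaf
        for (int i = 0; i < leaf.getKeys().size(); i++) {
            final long rawAddr = leaf.getRawRecord(i);
            if (rawAddr != IRawStore.NULL) {
                stats.nrawRecs++;                                  // one more raw record
                stats.rawRecBytes += store.getByteCount(rawAddr);  // bytes the store reports for it
            }
        }
    }
}
```

With those counters populated, getTotalBytes() now includes rawRecBytes and getBytesPerRawRecord() reports the average, guarding against division by zero when an index has no raw records.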
From: <tho...@us...> - 2014-06-09 13:30:49
Revision: 8453 http://sourceforge.net/p/bigdata/code/8453 Author: thompsonbry Date: 2014-06-09 13:30:41 +0000 (Mon, 09 Jun 2014) Log Message: ----------- javadoc clarification for raw record support. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-06 09:23:17 UTC (rev 8452) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2014-06-09 13:30:41 UTC (rev 8453) @@ -1547,16 +1547,19 @@ } - /** - * When {@link #getRawRecords()} returns <code>true</code>, this method - * returns the maximum byte length of a <code>byte[]</code> value which may - * be stored in a B+Tree leaf (default {@link Options#MAX_REC_LEN}. Values - * larger than this will be automatically converted into raw record - * references. - * - * @see Options#MAX_REC_LEN - * @see Options#DEFAULT_MAX_REC_LEN - */ + /** + * When {@link #getRawRecords()} returns <code>true</code>, this method + * returns the maximum byte length of a <code>byte[]</code> value will be be + * stored in a B+Tree leaf (default {@link Options#MAX_REC_LEN}) while + * values larger than this will be automatically converted into raw record + * references. Note that this method returns the configured value regardless + * of the value of {@link #getRawRecords()} - the caller must check + * {@link #getRawRecords()} in order to correctly interpret the value + * returned by this method. + * + * @see Options#MAX_REC_LEN + * @see Options#DEFAULT_MAX_REC_LEN + */ public final int getMaxRecLen() {return maxRecLen;} /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
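The clarified javadoc says the configured maximum record length is returned regardless of whether raw records are enabled, so callers must check getRawRecords() first. The fragment below is one possible way a caller might fold the two getters together; the method and the "no threshold" interpretation are illustrative, not part of the commit.

```java
import com.bigdata.btree.IndexMetadata;

// Sketch: the caller-side check the revised javadoc calls for.
public class MaxRecLenSketch {

    /** Returns the byte length above which values become raw records, or "never" if disabled. */
    static int effectiveMaxRecLen(final IndexMetadata md) {
        // getMaxRecLen() always returns the configured value; it only has meaning
        // when raw records are actually enabled for the index.
        return md.getRawRecords() ? md.getMaxRecLen() : Integer.MAX_VALUE;
    }
}
```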
From: <mar...@us...> - 2014-06-06 09:23:20
Revision: 8452 http://sourceforge.net/p/bigdata/code/8452 Author: martyncutcher Date: 2014-06-06 09:23:17 +0000 (Fri, 06 Jun 2014) Log Message: ----------- Fix to nextOffset encoding to handle that the metaBitsAddr can now be positive. Modified Paths: -------------- branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/META_BITS_936/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java Modified: branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-05 23:58:49 UTC (rev 8451) +++ branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-06 09:23:17 UTC (rev 8452) @@ -334,6 +334,17 @@ @Deprecated String DEFAULT_META_BITS_SIZE = "9"; /** + * Defines whether the metabits should be allocated an explicit demispace (default) + * or if not, then to use a standard Allocation (which limits the metabits size to + * the maximum FixedAllocator slot size). + * <p> + * The value should be either "true" or "false" + */ + String META_BITS_DEMI_SPACE = RWStore.class.getName() + ".metabitsDemispace"; + + String DEFAULT_META_BITS_DEMI_SPACE = "true"; + + /** * Defines the number of bits that must be free in a FixedAllocator for * it to be added to the free list. This is used to ensure a level * of locality when making large numbers of allocations within a single @@ -797,6 +808,10 @@ m_metaBitsSize = cDefaultMetaBitsSize; + m_useMetabitsDemispace = Boolean.valueOf(fileMetadata.getProperty( + Options.META_BITS_DEMI_SPACE, + Options.DEFAULT_META_BITS_DEMI_SPACE)); + cDefaultFreeBitsThreshold = Integer.valueOf(fileMetadata.getProperty( Options.FREE_BITS_THRESHOLD, Options.DEFAULT_FREE_BITS_THRESHOLD)); @@ -809,8 +824,7 @@ m_metaBits = new int[m_metaBitsSize]; m_metaTransientBits = new int[m_metaBitsSize]; - - + m_quorum = quorum; m_fd = fileMetadata.file; @@ -3208,7 +3222,7 @@ // Now remember the committed next allocation that will be checked in reset() m_committedNextAllocation = m_nextAllocation; - + // Should not write rootBlock, this is responsibility of client // to provide control // writeFileSpec(); @@ -3466,6 +3480,8 @@ * @see Options#META_BITS_SIZE */ volatile private int m_metaBitsSize; + + volatile private boolean m_useMetabitsDemispace = true; /** * Package private since is uded by FixedAllocators * @@ -4258,6 +4274,10 @@ */ public long getNextOffset() { long ret = -m_nextAllocation; + if (m_metaBitsAddr > 0) { + // FIX for sign use in m_metaBitsAddr when packing into long + ret++; + } ret <<= 32; ret += -m_metaBitsAddr; @@ -5103,7 +5123,7 @@ checkRootBlock(rootBlock); assertOpen(); - + if (log.isTraceEnabled()) { log.trace("Writing new rootblock with commitCounter: " + rootBlock.getCommitCounter() + ", commitRecordAddr: " Modified: branches/META_BITS_936/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java =================================================================== --- branches/META_BITS_936/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2014-06-05 23:58:49 UTC (rev 8451) +++ branches/META_BITS_936/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3ChangeLeader.java 2014-06-06 09:23:17 UTC (rev 8452) @@ -89,14 +89,19 @@ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); - /* + // Check store states on fully met quorum + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); 
+ + /* * Now go through sevearl commit points with a met quorum. The HALog * files should be retained at the final commit point. */ for (int t = 0; t < ntrans; t++) { simpleTransaction(); - } - + // Check store states + assertStoreStates(new HAGlue[] { serverA, serverB, serverC }); + } + shutdownA(); final long token2 = awaitNextQuorumMeet(token1); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
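For reference, the META_BITS_DEMI_SPACE option shown in this diff can be controlled through the store's configuration properties; the key is built from RWStore.class.getName() + ".metabitsDemispace". The snippet below is only a sketch of how such a property might be supplied, and the way it reaches RWStore (via the journal's Properties) is assumed rather than taken from this commit.

```java
import java.util.Properties;

// Sketch: configuring the metabits demi-space option. The property key comes from
// the diff above; everything else here is illustrative.
public class MetaBitsOptionSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        // Default is "true"; "false" falls back to a FixedAllocator-managed metabits
        // region, which is limited to the maximum FixedAllocator slot size.
        props.setProperty("com.bigdata.rwstore.RWStore.metabitsDemispace", "false");
        // These properties would then be handed to the journal that backs the RWStore.
        System.out.println(props);
    }
}
```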
From: <tob...@us...> - 2014-06-05 23:58:55
Revision: 8451 http://sourceforge.net/p/bigdata/code/8451 Author: tobycraig Date: 2014-06-05 23:58:49 +0000 (Thu, 05 Jun 2014) Log Message: ----------- Added namespace column to query history Modified Paths: -------------- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html =================================================================== --- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html 2014-06-05 23:23:41 UTC (rev 8450) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html 2014-06-05 23:58:49 UTC (rev 8451) @@ -137,6 +137,7 @@ <thead> <tr> <th>Time</th> + <th>Namespace</th> <th>Query</th> <th>Results</th> </tr> Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-05 23:23:41 UTC (rev 8450) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-05 23:58:49 UTC (rev 8451) @@ -636,6 +636,7 @@ function loadHistory() { EDITORS.query.setValue(this.innerText); + useNamespace($(this).prev('.query-namespace').text()); EDITORS.query.focus(); } @@ -657,7 +658,7 @@ // see if this query is already in the history $('#query-history tbody tr').each(function(i, row) { - if($(row).find('.query')[0].innerText == query) { + if($(row).find('.query')[0].innerText == query && $(row).find('.query-namespace').text() == NAMESPACE) { // clear the old results and set the time to now $(row).find('.query-time').text(new Date().toISOString()); $(row).find('.query-results').text('...'); @@ -672,6 +673,7 @@ // add this query to the history var row = $('<tr>').prependTo($('#query-history tbody')); row.append('<td class="query-time">' + new Date().toISOString() + '</td>'); + row.append('<td class="query-namespace">' + NAMESPACE + '</td>'); var cell = $('<td class="query">').appendTo(row); cell.text(query); cell.html(cell.html().replace('\n', '<br>')); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-06-05 23:23:46
Revision: 8450 http://sourceforge.net/p/bigdata/code/8450 Author: tobycraig Date: 2014-06-05 23:23:41 +0000 (Thu, 05 Jun 2014) Log Message: ----------- Modified Paths: -------------- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js Added Paths: ----------- branches/WORKBENCH_QUERY_HISTORY/ Index: branches/WORKBENCH_QUERY_HISTORY =================================================================== --- branches/BIGDATA_RELEASE_1_3_0 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY 2014-06-05 23:23:41 UTC (rev 8450) Property changes on: branches/WORKBENCH_QUERY_HISTORY ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,31 ## +ant-build +src +bin +bigdata*.jar +ant-release +standalone +test* +countersfinal.xml +events.jnl +.settings +*.jnl +TestInsertRate.out +SYSTAP-BBT-result.txt +U10load+query +*.hprof +com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv +commit-log.txt +eventLog +dist +bigdata-test +com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv +DIST.bigdata-*.tgz +REL.bigdata-*.tgz +queryLog* +queryRunState* +sparql.txt +benchmark +CI +bsbm10-dataset.nt.gz +bsbm10-dataset.nt.zip +benchmark* Added: svn:mergeinfo ## -0,0 +1,20 ## +/branches/BIGDATA_MGC_HA1_HA5:8025-8122 +/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 +/branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BTREE_BUFFER_BRANCH:2004-2045 +/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 +/branches/INT64_BRANCH:4486-4522 +/branches/JOURNAL_HA_BRANCH:2596-4066 +/branches/LARGE_LITERALS_REFACTOR:4175-4387 +/branches/LEXICON_REFACTOR_BRANCH:2633-3304 +/branches/MGC_1_3_0:7609-7752 +/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 +/branches/RDR:7665-8159 +/branches/READ_CACHE:7215-7271 +/branches/RWSTORE_1_1_0_DEBUG:5896-5935 +/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 +/branches/ZK_DISCONNECT_HANDLING:7465-7484 +/branches/bugfix-btm:2594-3237 +/branches/dev-btm:2574-2730 +/branches/fko:3150-3194 +/trunk:3392-3437,3656-4061 \ No newline at end of property Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css 2014-06-05 23:23:41 UTC (rev 8450) @@ -247,7 +247,7 @@ border: none; } -.advanced-features, #query-response, #query-pagination, #query-explanation, #query-export-container, #update-response, #update-clear-container, #explore-results, #namespace-properties { +.advanced-features, #query-response, #query-pagination, #query-explanation, #query-history, #query-export-container, #update-response, #update-clear-container, #explore-results, #namespace-properties { display: none; } @@ -325,6 +325,10 @@ border: 1px solid #e1e1e1; } +#query-history .query { + white-space: pre; +} + #query-export-container { text-align: right; } Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-05 14:48:07 UTC (rev 8449) +++ 
branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html 2014-06-05 23:23:41 UTC (rev 8450) @@ -132,6 +132,19 @@ <div id="query-explanation" class="box"> </div> + <div id="query-history" class="box"> + <table> + <thead> + <tr> + <th>Time</th> + <th>Query</th> + <th>Results</th> + </tr> + </thead> + <tbody></tbody> + </table> + </div> + <div id="query-export-container" class="box"> <button id="query-export">Export</button> <button id="query-response-clear">Clear</button> Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-05 23:23:41 UTC (rev 8450) @@ -250,7 +250,7 @@ data: data, contentType: 'application/xml', success: function() { $('#new-namespace-name').val(''); getNamespaces(); }, - error: function(jqXHR, textStatus, errorThrown) { debugger;alert(jqXHR.responseText); } + error: function(jqXHR, textStatus, errorThrown) { alert(jqXHR.responseText); } }; $.ajax(RW_URL_PREFIX + 'namespace', settings); } @@ -632,6 +632,13 @@ }); EDITORS.query.addKeyMap({'Ctrl-Enter': submitQuery}); +$('#query-history').on('click', '.query', loadHistory); + +function loadHistory() { + EDITORS.query.setValue(this.innerText); + EDITORS.query.focus(); +} + function submitQuery(e) { try { e.preventDefault(); @@ -641,10 +648,38 @@ EDITORS.query.save(); // do nothing if query is empty - if($('#query-box').val().trim() == '') { + var query = $('#query-box').val().trim(); + if(query == '') { return; } + var queryExists = false; + + // see if this query is already in the history + $('#query-history tbody tr').each(function(i, row) { + if($(row).find('.query')[0].innerText == query) { + // clear the old results and set the time to now + $(row).find('.query-time').text(new Date().toISOString()); + $(row).find('.query-results').text('...'); + // move it to the top + $(row).prependTo('#query-history tbody'); + queryExists = true; + return false; + } + }); + + if(!queryExists) { + // add this query to the history + var row = $('<tr>').prependTo($('#query-history tbody')); + row.append('<td class="query-time">' + new Date().toISOString() + '</td>'); + var cell = $('<td class="query">').appendTo(row); + cell.text(query); + cell.html(cell.html().replace('\n', '<br>')); + row.append('<td class="query-results">...</td>'); + } + + $('#query-history').show(); + var url = RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', @@ -804,6 +839,10 @@ $('#download-link').remove(); } +function updateResultCount(count) { + $('#query-history tbody tr:first td.query-results').text(count); +} + function showQueryResults(data) { $('#query-response').empty(); $('#query-export-rdf').hide(); @@ -833,6 +872,7 @@ table.append(tr); } } + updateResultCount(rows.length); } else { // JSON // save data for export and pagination @@ -841,6 +881,7 @@ if(typeof(data.boolean) != 'undefined') { // ASK query table.append('<tr><td>' + data.boolean + '</td></tr>').addClass('boolean'); + updateResultCount('' + data.boolean); return; } @@ -882,6 +923,7 @@ table.append(thead); $('#total-results').html(data.results.bindings.length); + updateResultCount(data.results.bindings.length); setNumberOfPages(); showPage(1); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2014-06-05 14:48:14
Revision: 8449 http://sourceforge.net/p/bigdata/code/8449 Author: martyncutcher Date: 2014-06-05 14:48:07 +0000 (Thu, 05 Jun 2014) Log Message: ----------- Fix for #936 to support larger metabits allocations through definition of an explicit demi-space allocated directly from the store, not from the FixedAllocators. Modified Paths: -------------- branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-05 14:45:32 UTC (rev 8448) +++ branches/META_BITS_936/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-06-05 14:48:07 UTC (rev 8449) @@ -306,6 +306,11 @@ * should be tuned to target perhaps 80% of an 8k page in order to have * only a small number of pages that spill over into blobs. * + * TODO: We should consider a more adaptable BLOB approach where we + * specify the maximum "slop" in an allocation as the means to determine + * a blob boundary. So, for example, a 5.5K allocation, with maximum slop of + * 1K, would be allocated as a blob of 4K + 2K and not an 8K slot. + * * @see #ALLOCATION_SIZES */ String DEFAULT_ALLOCATION_SIZES = "1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128"; @@ -321,10 +326,12 @@ * <p> * Note: A value of <code>9</code> may be used to stress the logic which * is responsible for the growth in the meta bits region. + * <p> + * This has now been deprecated since it adds complexity with no significant benefit */ - String META_BITS_SIZE = RWStore.class.getName() + ".metaBitsSize"; + @Deprecated String META_BITS_SIZE = RWStore.class.getName() + ".metaBitsSize"; - String DEFAULT_META_BITS_SIZE = "9"; + @Deprecated String DEFAULT_META_BITS_SIZE = "9"; /** * Defines the number of bits that must be free in a FixedAllocator for @@ -398,7 +405,7 @@ static final int OFFSET_BITS = 13; static final int OFFSET_BITS_MASK = 0x1FFF; // was 0xFFFF - static final int ALLOCATION_SCALEUP = 16; // multiplier to convert allocations based on minimum allocation of 32k + static final int ALLOCATION_SCALEUP = 16; // multiplier to convert allocations based on minimum allocation of 64k static private final int META_ALLOCATION = 8; // 8 * 32K is size of meta Allocation // If required, then allocate 1M direct buffers @@ -771,13 +778,22 @@ log.info(AbstractTransactionService.Options.MIN_RELEASE_AGE + "=" + m_minReleaseAge); - cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( - Options.META_BITS_SIZE, - Options.DEFAULT_META_BITS_SIZE)); + // Remove parameterisation, we want to use fixed Allocator block sizing + // there is no significant advantage to parameterize this since file cache + // locality is handled by size of the allocation - 256K is a reasonable + // number as 32 * 8 * 1K size. + // + // Equally there is no benefit to increasing the size of the Allocators beyond + // 1K. +// cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( +// Options.META_BITS_SIZE, +// Options.DEFAULT_META_BITS_SIZE)); + +// cDefaultMetaBitsSize = 9; - if (cDefaultMetaBitsSize < 9) - throw new IllegalArgumentException(Options.META_BITS_SIZE - + " : Must be GTE 9"); +// if (cDefaultMetaBitsSize < 9) +// throw new IllegalArgumentException(Options.META_BITS_SIZE +// + " : Must be GTE 9"); m_metaBitsSize = cDefaultMetaBitsSize; @@ -1419,6 +1435,14 @@ * allocators. So, 16-bits gives us up 64k * 32 = 2M allocators. 
* Except, that the total #of allocators is reduced by the presence * of a startAddr every N positions in the metaBits[]. + * + * The theoretical maximum number is also reduced since the number + * of "committed" bits could be half the total number of bits. + * + * The theoretical restriction is also limited by the maximum indexable + * allocator, since only 19 bits is available to the index, which, once + * the sign is removed reduces the maximum number of addressable + * allocators to 256K. */ final int metaBitsStore = (int) (rawmbaddr & 0xFFFF); @@ -1445,7 +1469,9 @@ + storeVersion + ", cVersion=" + cVersion); } m_lastDeferredReleaseTime = strBuf.readLong(); - cDefaultMetaBitsSize = strBuf.readInt(); + if (strBuf.readInt() != cDefaultMetaBitsSize) { + throw new IllegalStateException("Store opened with unsupported metabits size"); + } final int allocBlocks = strBuf.readInt(); m_storageStatsAddr = strBuf.readLong(); @@ -1483,12 +1509,6 @@ readAllocationBlocks(); - // clearOutstandingDeferrels(deferredFreeListAddr, deferredFreeListEntries); - - if (physicalAddress(m_metaBitsAddr) == 0) { - throw new IllegalStateException("Free/Invalid metaBitsAddr on load"); - } - } if (log.isInfoEnabled()) @@ -2963,6 +2983,25 @@ * last one being the allocation for the metabits themselves (allowing for * an extension!). * + * Ticket #936: The meta-bits allocation is currently made from the FixedAllocator + * region. This works well providing the required allocation bits is less than + * the maximum FixedAllocator slot size. While this is neat, there are problems at scale + * for maximum slot sizes less than 64K. + * + * To address the 8K bits in a 1K alloctor, 13 bits are required, this leaves 19 bits + * to index an Allocator, or 18 bits without the sign => 256K maximum index. + * + * To be able to commit changes to all 256K allocators requires 512K metabits => 64K bytes. + * We would like to associate the 64K allocations with the root block, so a single 128K + * allocation would be split into 64K demi-spaces, one for each root block. + * + * While a negative address indicates a standard RW allocation a ositive address can be used + * to indicate an explicitly allocated region. The trick is to ensure that the region is + * allocated on a 128K boundary, then the lower bits can indicate which demi-space is used with + * a simple XOR. + * + * Note that we must ensure that any previous demi-space write is removed from the WCS. + * * @throws IOException */ private void writeMetaBits() throws IOException { @@ -3013,7 +3052,8 @@ * Note: this address is set by commit() prior to calling * writeMetaBits(). 
*/ - final long addr = physicalAddress(m_metaBitsAddr); + //final long addr = physicalAddress(m_metaBitsAddr); + final long addr = ((long) m_metaBitsAddr) << ALLOCATION_SCALEUP; if (addr == 0) { throw new IllegalStateException("Invalid metabits address: " + m_metaBitsAddr); } @@ -3024,7 +3064,9 @@ if (log.isDebugEnabled()) log.debug("writing metabits at: " + addr); - m_writeCacheService.write(addr, ByteBuffer.wrap(buf), 0/*chk*/, false/*useChecksum*/, m_metaBitsAddr/*latchedAddr*/); + // Similar to writeMetaBits, we are no longer writing to a FixedAllocator managed region, + // so no latched address is provided + m_writeCacheService.write(addr, ByteBuffer.wrap(buf), 0/*chk*/, false/*useChecksum*/, 0 /*latchedAddr*/); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -3077,20 +3119,42 @@ * that we do not need to reallocate the metabits region when we are * writing out the updated versions of the FixedAllocators). */ - final long oldMetaBits = m_metaBitsAddr; - final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; - m_metaBitsAddr = alloc(getRequiredMetaBitsStorage(), null); - - // DEBUG SANITY CHECK! - if (physicalAddress(m_metaBitsAddr) == 0) { - throw new IllegalStateException("Returned MetaBits Address not valid!"); +// final long oldMetaBits = m_metaBitsAddr; +// final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; +// m_metaBitsAddr = alloc(getRequiredMetaBitsStorage(), null); + + /* + * If m_metaBitsAddr < 0 then was allocated from FixedAllocators (for existing-store compatibility) + */ + if (m_metaBitsAddr < 0) { + if (physicalAddress(m_metaBitsAddr) == 0) { + throw new IllegalStateException("Returned MetaBits Address not valid!"); + } + + final int oldMetaBitsSize = (m_metaBits.length + m_allocSizes.length + 1) * 4; + // Call immediateFree - no need to defer freeof metaBits, this + // has to stop somewhere! + // No more allocations must be made + immediateFree((int) m_metaBitsAddr, oldMetaBitsSize); + + m_metaBitsAddr = 0; } - // Call immediateFree - no need to defer freeof metaBits, this - // has to stop somewhere! - // No more allocations must be made - immediateFree((int) oldMetaBits, oldMetaBitsSize); - + if (m_metaBitsAddr == 0) { + // Allocate special region to be able to store maximum metabits (128k of 2 64K demi-space + // Must be aligned on 128K boundary and allocations are made in units of 64K. + while (m_nextAllocation % 2 != 0) { + m_nextAllocation--; + } + m_metaBitsAddr = -m_nextAllocation; // must be positive to differentiate from FixedAllocator address + m_nextAllocation -= 2; // allocate 2 * 64K + } else { // remove previous write from WCS + m_writeCacheService.removeWriteToAddr(convertAddr(-m_metaBitsAddr), 0); + } + + // Now "toggle" m_metaBitsAddr - 64K boundary + m_metaBitsAddr ^= 0x01; // toggle zero or 64K offset + // There must be no buffered deferred frees // assert m_deferredFreeOut.getBytesWritten() == 0; @@ -3397,7 +3461,7 @@ /** * @see Options#META_BITS_SIZE */ - private int cDefaultMetaBitsSize; + final private int cDefaultMetaBitsSize = 9; /** * @see Options#META_BITS_SIZE */ @@ -4146,11 +4210,17 @@ } /** - * The + * Since we need to store the absolute address and the size can be + * a maximum of 64K, the absolute address is limited to 48 bits, setting + * the maximum address as 140T, which is sufficient. 
+ * * @return long representation of metaBitsAddr PLUS the size */ public long getMetaBitsAddr() { - long ret = physicalAddress((int) m_metaBitsAddr); + assert m_metaBitsAddr > 0; + + // long ret = physicalAddress((int) m_metaBitsAddr); + long ret = convertAddr(-m_metaBitsAddr); // maximum 48 bit address range ret <<= 16; // include space for version, allocSizes and deferred free info AND cDefaultMetaBitsSize @@ -4166,6 +4236,14 @@ } /** + * + * @return the address of the metaBits demi-space + */ + public long getMetaBitsDemiSpace() { + return convertAddr(-m_metaBitsAddr); + } + + /** * @return long representation of metaStartAddr PLUS the size where addr + * size is fileSize (not necessarily physical size) */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2014-06-05 14:45:37
Revision: 8448  http://sourceforge.net/p/bigdata/code/8448
Author:   martyncutcher
Date:     2014-06-05 14:45:32 +0000 (Thu, 05 Jun 2014)

Log Message:
-----------
Branch to test new meta bits allocations for #936: Support Larger Metabits Allocations

Added Paths:
-----------
    branches/META_BITS_936/
From: <tho...@us...> - 2014-06-05 12:30:03
Revision: 8447 http://sourceforge.net/p/bigdata/code/8447 Author: thompsonbry Date: 2014-06-05 12:29:44 +0000 (Thu, 05 Jun 2014) Log Message: ----------- Added javadoc for the correct handling of the try/finally pattern for BigdataSail.getUnisolatedConnection() and to the BigdataSailRepository getConnection() and getUnisolatedConnection() methods as well. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-04 20:32:18 UTC (rev 8446) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-05 12:29:44 UTC (rev 8447) @@ -1269,10 +1269,35 @@ * returns the unisolated view of the database. Note that truth maintenance * requires only one connection at a time and is therefore not compatible * with full read/write transactions. + * <p> + * The correct pattern for obtaining an updatable connection, doing work + * with that connection, and committing or rolling back that update is as + * follows. + * + * <pre> + * + * BigdataSailConnection conn = null; + * boolean ok = false; + * try { + * conn = sail.getConnection(); + * doWork(conn); + * conn.commit(); + * ok = true; + * } finally { + * if (conn != null) { + * if (!ok) { + * conn.rollback(); + * } + * conn.close(); + * } + * } + * </pre> + * + * This pattern can also be used with {@link #getUnisolatedConnection()}. */ @Override public BigdataSailConnection getConnection() throws SailException { - + return (BigdataSailConnection) super.getConnection(); } @@ -1297,25 +1322,49 @@ */ final private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(false/*fair*/); - /** - * Return an unisolated connection to the database. The unisolated - * connection supports fast, scalable updates against the database. The - * unisolated connection is ACID when used with a local {@link Journal} and - * shard-wise ACID when used with an {@link IBigdataFederation}. - * <p> - * In order to guarantee that operations against the unisolated connection - * are ACID, only one of unisolated connection is permitted at a time for a - * {@link Journal} and this method will block until the connection is - * available. If there is an open unisolated connection against a local - * {@link Journal}, then the open connection must be closed before a new - * connection can be returned by this method. - * <p> - * This constraint that there can be only one unisolated connection is not - * enforced in scale-out since unisolated operations in scale-out are only - * shard-wise ACID. - * - * @return The unisolated connection to the database - */ + /** + * Return an unisolated connection to the database. The unisolated + * connection supports fast, scalable updates against the database. The + * unisolated connection is ACID when used with a local {@link Journal} and + * shard-wise ACID when used with an {@link IBigdataFederation}. + * <p> + * In order to guarantee that operations against the unisolated connection + * are ACID, only one of unisolated connection is permitted at a time for a + * {@link Journal} and this method will block until the connection is + * available. 
If there is an open unisolated connection against a local + * {@link Journal}, then the open connection must be closed before a new + * connection can be returned by this method. + * <p> + * This constraint that there can be only one unisolated connection is not + * enforced in scale-out since unisolated operations in scale-out are only + * shard-wise ACID. + * <p> + * The correct pattern for obtaining an updatable connection, doing work + * with that connection, and committing or rolling back that update is as + * follows. + * + * <pre> + * BigdataSailConnection conn = null; + * boolean ok = false; + * try { + * conn = sail.getUnisolatedConnection(); + * doWork(conn); + * conn.commit(); + * ok = true; + * } finally { + * if (conn != null) { + * if (!ok) { + * conn.rollback(); + * } + * conn.close(); + * } + * } + * </pre> + * + * @return The unisolated connection to the database + * + * @see #getConnection() + */ public BigdataSailConnection getUnisolatedConnection() throws InterruptedException { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java 2014-06-04 20:32:18 UTC (rev 8446) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepository.java 2014-06-05 12:29:44 UTC (rev 8447) @@ -33,6 +33,35 @@ // // } + /** + * {@inheritDoc} + * <p> + * The correct pattern for obtaining an updatable connection, doing work + * with that connection, and committing or rolling back that update is as + * follows. + * + * <pre> + * + * BigdataSailConnection conn = null; + * boolean ok = false; + * try { + * conn = repo.getConnection(); + * doWork(conn); + * conn.commit(); + * ok = true; + * } finally { + * if (conn != null) { + * if (!ok) { + * conn.rollback(); + * } + * conn.close(); + * } + * } + * </pre> + * + * @see BigdataSail#getConnection() + * @see #getUnisolatedConnection() + */ @Override public BigdataSailRepositoryConnection getConnection() throws RepositoryException { @@ -105,12 +134,36 @@ } /** - * Return an unisolated connection to the database. Only one of these + * Return an unisolated connection to the database. Only one of these * allowed at a time. + * <p> + * The correct pattern for obtaining an updatable connection, doing work + * with that connection, and committing or rolling back that update is as + * follows. * + * <pre> + * + * BigdataSailConnection conn = null; + * boolean ok = false; + * try { + * conn = repo.getConnection(); + * doWork(conn); + * conn.commit(); + * ok = true; + * } finally { + * if (conn != null) { + * if (!ok) { + * conn.rollback(); + * } + * conn.close(); + * } + * } + * </pre> + * * @return unisolated connection to the database * * @see BigdataSail#getUnisolatedConnection() + * @see #getConnection() */ public BigdataSailRepositoryConnection getUnisolatedConnection() throws RepositoryException { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
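The javadoc added in this revision prescribes a specific try/finally shape for mutating connections. The self-contained sketch below repeats that pattern outside of javadoc; doWork() is a placeholder for the caller's own logic, and the class is illustrative rather than part of the commit.

```java
import com.bigdata.rdf.sail.BigdataSail;
import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection;

// Sketch: the commit/rollback/close discipline documented in r8447.
public class UnisolatedUpdateSketch {

    void applyUpdate(final BigdataSail sail) throws Exception {
        BigdataSailConnection conn = null;
        boolean ok = false;
        try {
            conn = sail.getUnisolatedConnection(); // blocks until the single writer is free
            doWork(conn);                          // placeholder for the actual mutation
            conn.commit();
            ok = true;
        } finally {
            if (conn != null) {
                if (!ok) {
                    conn.rollback(); // discard the partial update on any failure
                }
                conn.close();
            }
        }
    }

    private void doWork(final BigdataSailConnection conn) {
        // hypothetical: add/remove statements, etc.
    }
}
```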
From: <tob...@us...> - 2014-06-04 20:32:23

Revision: 8446 http://sourceforge.net/p/bigdata/code/8446 Author: tobycraig Date: 2014-06-04 20:32:18 +0000 (Wed, 04 Jun 2014) Log Message: ----------- Merged in new workbench features Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-06-04 15:37:13 UTC (rev 8445) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-06-04 20:32:18 UTC (rev 8446) @@ -188,31 +188,15 @@ overflow: hidden; } +#query-form, #update-box-container { + clear: both; +} + .namespace-shortcuts { float: right; margin-bottom: 20px; } -.namespace-shortcuts li { - display: inline-block; - border: 1px solid #e4e4e4; - padding: 5px; - margin-left: 5px; - cursor: pointer; - width: 40px; - text-align: center; -} - -.namespace-shortcuts li:hover { - border-color: #b7b7b7; - background-color: #b7b7b7; - color: #ededed; -} - -#query-form, #update-box-container { - clear: both; -} - #large-file-message { display: none; margin: 5px 0; @@ -228,7 +212,6 @@ .CodeMirror { margin: 5px 0; border: 1px solid #e1e1e1; - font-size: 125%; } .CodeMirror-placeholder { @@ -281,6 +264,19 @@ font-family: monospace; } +/* make cursor visible in error highlighting */ +div.CodeMirror-cursors { + z-index: 3; +} + +.error-line { + background: red; +} + +.error-character { + background: green; +} + #page-selector { float: right; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-04 15:37:13 UTC (rev 8445) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-04 20:32:18 UTC (rev 8446) @@ -204,7 +204,14 @@ </div> <div class="box"> - <form id="namespace-create"><input type="text"> <input type="submit" value="Create namespace"></form> + <h1>Create namespace</h1> + <form id="namespace-create"> + <label for="new-namespace-name">Name:</label> <input type="text" id="new-namespace-name"><br> + <label for="new-namespace-index">Index:</label> <input type="checkbox" id="new-namespace-index"><br> + <label for="new-namespace-truth-maintenance">Truth maintenance:</label> <input type="checkbox" id="new-namespace-truth-maintenance"><br> + <label for="new-namespace-quads">Quads:</label> <input type="checkbox" id="new-namespace-quads"><br> + <input type="submit" value="Create namespace"> + </form> </div> </div> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-04 15:37:13 UTC (rev 8445) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-04 20:32:18 UTC (rev 8446) @@ -1,10 +1,29 @@ $(function() { // global variables -var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; -var QUERY_EDITOR, UPDATE_EDITOR; +var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; +// LBS URLs do not currently work with non-HA and HA1 setups. 
Set this to true to use LBS URLs +if(false) { + var RW_URL_PREFIX = '/bigdata/LBS/leader/', RO_URL_PREFIX = '/bigdata/LBS/read/'; +} else { + var RW_URL_PREFIX = '/bigdata/', RO_URL_PREFIX = '/bigdata/'; +} +var CODEMIRROR_DEFAULTS, EDITORS = {}, ERROR_LINE_MARKERS = {}, ERROR_CHARACTER_MARKERS = {}; var PAGE_SIZE = 50, TOTAL_PAGES, CURRENT_PAGE; +var NAMESPACE_PARAMS = { + 'name': 'com.bigdata.rdf.sail.namespace', + 'index': 'com.bigdata.search.FullTextIndex.fieldsEnabled', + 'truthMaintenance': 'com.bigdata.rdf.sail.truthMaintenance', + 'quads': 'com.bigdata.rdf.store.AbstractTripleStore.quads' +}; + +CODEMIRROR_DEFAULTS = { + lineNumbers: true, + mode: 'sparql', + extraKeys: {'Ctrl-,': moveTabLeft, 'Ctrl-.': moveTabRight} +}; + // debug to access closure variables $('html, textarea, select').bind('keydown', 'ctrl+d', function() { debugger; }); @@ -29,7 +48,7 @@ return; } var query = 'select ?s ?p ?o { ?o bds:search "' + term + '" . ?s ?p ?o . }' - $('#query-box').val(query); + EDITORS.query.setValue(query); $('#query-errors').hide(); $('#query-form').submit(); showTab('query'); @@ -49,10 +68,8 @@ if(!nohash && window.location.hash.substring(1).indexOf(tab) != 0) { window.location.hash = tab; } - if(tab == 'query') { - QUERY_EDITOR.refresh(); - } else if(tab == 'update') { - UPDATE_EDITOR.refresh(); + if(EDITORS[tab]) { + EDITORS[tab].refresh(); } } @@ -89,7 +106,7 @@ /* Namespaces */ function getNamespaces() { - $.get('/bigdata/namespace?describe-each-named-graph=false', function(data) { + $.get(RO_URL_PREFIX + 'namespace?describe-each-named-graph=false', function(data) { $('#namespaces-list').empty(); var rdf = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'; var namespaces = namespaces = data.getElementsByTagNameNS(rdf, 'Description') @@ -103,11 +120,11 @@ } else { use = '<a href="#" class="use-namespace">Use</a>'; } - $('#namespaces-list').append('<li data-name="' + title + '" data-url="' + url + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> - <a href="/bigdata/namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); + $('#namespaces-list').append('<li data-name="' + title + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> - <a href="' + RO_URL_PREFIX + 'namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); } $('.use-namespace').click(function(e) { e.preventDefault(); - useNamespace($(this).parent().data('name'), $(this).parent().data('url')); + useNamespace($(this).parent().data('name')); }); $('.delete-namespace').click(function(e) { e.preventDefault(); @@ -117,6 +134,14 @@ e.preventDefault(); getNamespaceProperties($(this).parent().data('name')); }); + $('.namespace-properties-java').click(function(e) { + e.preventDefault(); + getNamespaceProperties($(this).parent().data('name'), 'java'); + }); + $('.clone-namespace').click(function(e) { + e.preventDefault(); + cloneNamespace($(this).parent().data('name')); + }); $('.namespace-service-description').click(function(e) { return confirm('This can be an expensive operation. 
Proceed anyway?'); }); @@ -133,10 +158,9 @@ } } -function useNamespace(name, url) { +function useNamespace(name) { $('#current-namespace').html(name); NAMESPACE = name; - NAMESPACE_URL = url; getNamespaces(); } @@ -151,7 +175,7 @@ if(namespace == NAMESPACE) { // FIXME: what is the desired behaviour when deleting the current namespace? } - var url = '/bigdata/namespace/' + namespace; + var url = RW_URL_PREFIX + 'namespace/' + namespace; var settings = { type: 'DELETE', success: getNamespaces, @@ -161,46 +185,84 @@ } } -function getNamespaceProperties(namespace) { - $('#namespace-properties h1').html(namespace); - $('#namespace-properties table').empty(); - $('#namespace-properties').show(); - var url = '/bigdata/namespace/' + namespace + '/properties'; +function getNamespaceProperties(namespace, download) { + var url = RO_URL_PREFIX + 'namespace/' + namespace + '/properties'; + if(!download) { + $('#namespace-properties h1').html(namespace); + $('#namespace-properties table').empty(); + $('#namespace-properties').show(); + } $.get(url, function(data) { + var java = ''; $.each(data.getElementsByTagName('entry'), function(i, entry) { - $('#namespace-properties table').append('<tr><td>' + entry.getAttribute('key') + '</td><td>' + entry.textContent + '</td></tr>'); + if(download) { + java += entry.getAttribute('key') + '=' + entry.textContent + '\n'; + } else { + $('#namespace-properties table').append('<tr><td>' + entry.getAttribute('key') + '</td><td>' + entry.textContent + '</td></tr>'); + } }); + if(download) { + downloadFile(java, 'text/x-java-properties', this.url.split('/')[3] + '.properties'); + } }); } +function cloneNamespace(namespace) { + var url = RO_URL_PREFIX + 'namespace/' + namespace + '/properties'; + $.get(url, function(data) { + var reversed_params = {}; + for(var key in NAMESPACE_PARAMS) { + reversed_params[NAMESPACE_PARAMS[key]] = key; + } + $.each(data.getElementsByTagName('entry'), function(i, entry) { + var key = entry.getAttribute('key'); + if(reversed_params[key] == 'name') { + return; + } + if(key in reversed_params) { + $('#new-namespace-' + reversed_params[key]).prop('checked', entry.textContent.trim() == 'true'); + } + }); + $('#new-namespace-name').focus(); + }); +} + function createNamespace(e) { e.preventDefault(); - var input = $(this).find('input[type=text]'); - var namespace = input.val(); - if(!namespace) { + // get new namespace name and config options + var params = {}; + params.name = $('#new-namespace-name').val().trim(); + if(!params.name) { return; } + params.index = $('#new-namespace-index').is(':checked'); + params.truthMaintenance = $('#new-namespace-truth-maintenance').is(':checked'); + params.quads = $('#new-namespace-quads').is(':checked'); // TODO: validate namespace // TODO: allow for other options to be specified - var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n<properties>\n<entry key="com.bigdata.rdf.sail.namespace">' + namespace + '</entry>\n</properties>'; + var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n<properties>\n'; + for(key in NAMESPACE_PARAMS) { + data += '<entry key="' + NAMESPACE_PARAMS[key] + '">' + params[key] + '</entry>\n'; + } + data += '</properties>'; var settings = { type: 'POST', data: data, contentType: 'application/xml', - success: function() { input.val(''); getNamespaces(); }, - error: function(jqXHR, textStatus, errorThrown) { 
alert(jqXHR.statusText); } + success: function() { $('#new-namespace-name').val(''); getNamespaces(); }, + error: function(jqXHR, textStatus, errorThrown) { debugger;alert(jqXHR.responseText); } }; - $.ajax('/bigdata/namespace', settings); + $.ajax(RW_URL_PREFIX + 'namespace', settings); } $('#namespace-create').submit(createNamespace); function getDefaultNamespace() { - $.get('/bigdata/namespace?describe-each-named-graph=false&describe-default-namespace=true', function(data) { + $.get(RO_URL_PREFIX + 'namespace?describe-each-named-graph=false&describe-default-namespace=true', function(data) { // Chrome does not work with rdf\:Description, so look for Description too var defaultDataset = $(data).find('rdf\\:Description, Description'); DEFAULT_NAMESPACE = defaultDataset.find('title')[0].textContent; var url = defaultDataset.find('sparqlEndpoint')[0].attributes['rdf:resource'].textContent; - useNamespace(DEFAULT_NAMESPACE, url); + useNamespace(DEFAULT_NAMESPACE); }); } @@ -210,34 +272,51 @@ /* Namespace shortcuts */ NAMESPACE_SHORTCUTS = { - 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', - 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', - 'owl': 'http://www.w3.org/2002/07/owl#', - 'bd': 'http://www.bigdata.com/rdf#', - 'bds': 'http://www.bigdata.com/rdf/search#', - 'gas': 'http://www.bigdata.com/rdf/gas#', - 'foaf': 'http://xmlns.com/foaf/0.1/', - 'hint': 'http://www.bigdata.com/queryHints#', - 'dc': 'http://purl.org/dc/elements/1.1/', - 'xsd': 'http://www.w3.org/2001/XMLSchema#' + 'Bigdata': { + 'bd': 'http://www.bigdata.com/rdf#', + 'bds': 'http://www.bigdata.com/rdf/search#', + 'gas': 'http://www.bigdata.com/rdf/gas#', + 'hint': 'http://www.bigdata.com/queryHints#' + }, + 'W3C': { + 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', + 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', + 'owl': 'http://www.w3.org/2002/07/owl#', + 'skos': 'http://www.w3.org/2004/02/skos/core#', + 'xsd': 'http://www.w3.org/2001/XMLSchema#' + }, + 'Dublic Core': { + 'dc': 'http://purl.org/dc/elements/1.1/', + 'dcterm': 'http://purl.org/dc/terms/', + 'void': 'http://rdfs.org/ns/void#' + }, + 'Social/Other': { + 'foaf': 'http://xmlns.com/foaf/0.1/', + 'schema': 'http://schema.org/', + 'sioc': 'http://rdfs.org/sioc/ns#' + } }; -$('.namespace-shortcuts').html('<ul>'); -for(var ns in NAMESPACE_SHORTCUTS) { - // cannot use data-ns attribute on li, as jQuery mangles namespaces that don't end with #, adding </li> to them - var li = $('<li>' + ns.toUpperCase() + '</li>'); - li.data('ns', 'prefix ' + ns + ': <' + NAMESPACE_SHORTCUTS[ns] + '>'); - li.appendTo('.namespace-shortcuts ul'); +$('.namespace-shortcuts').html(''); +for(var category in NAMESPACE_SHORTCUTS) { + var select = $('<select><option>' + category + '</option></select>').appendTo($('.namespace-shortcuts')); + for(var ns in NAMESPACE_SHORTCUTS[category]) { + select.append('<option value="' + NAMESPACE_SHORTCUTS[category][ns] + '">' + ns + '</option>'); + } } -$('.namespace-shortcuts li').click(function() { - var textarea = $(this).parents('.tab').find('textarea'); - var current = textarea.val(); - var ns = $(this).data('ns'); +$('.namespace-shortcuts select').change(function() { + var uri = this.value; + var tab = $(this).parents('.tab').attr('id').split('-')[0]; + var current = EDITORS[tab].getValue(); - if(current.indexOf(ns) == -1) { - textarea.val(ns + '\n' + current); + if(current.indexOf(uri) == -1) { + var ns = $(this).find(':selected').text(); + EDITORS[tab].setValue('prefix ' + ns + ': <' + uri + '>\n' + current); } + + // reselect group 
label + this.selectedIndex = 0; }); @@ -379,7 +458,7 @@ mode = rdf_modes[type]; } } - UPDATE_EDITOR.setOption('mode', mode); + EDITORS.update.setOption('mode', mode); } // .xml is used for both RDF and TriX, assume it's RDF @@ -420,9 +499,14 @@ $('#update-update').click(submitUpdate); -UPDATE_EDITOR = CodeMirror.fromTextArea($('#update-box')[0], {lineNumbers: true, mode: 'sparql', - extraKeys: {'Ctrl-Enter': submitUpdate, 'Ctrl-,': moveTabLeft, 'Ctrl-.': moveTabRight} +EDITORS.update = CodeMirror.fromTextArea($('#update-box')[0], CODEMIRROR_DEFAULTS); +EDITORS.update.on('change', function() { + if(ERROR_LINE_MARKERS.update) { + ERROR_LINE_MARKERS.update.clear(); + ERROR_CHARACTER_MARKERS.update.clear(); + } }); +EDITORS.update.addKeyMap({'Ctrl-Enter': submitUpdate}); function submitUpdate(e) { // Updates are submitted as a regular form for SPARQL updates in monitor mode, and via AJAX for non-monitor SPARQL, RDF & file path updates. @@ -435,9 +519,10 @@ $('#update-response').show(); + var url = RW_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', - data: FILE_CONTENTS == null ? UPDATE_EDITOR.getValue() : FILE_CONTENTS, + data: FILE_CONTENTS == null ? EDITORS.update.getValue() : FILE_CONTENTS, success: updateResponseXML, error: updateResponseError } @@ -449,7 +534,7 @@ if($('#update-monitor').is(':checked')) { // create form and submit it, sending output to the iframe var form = $('<form method="POST" target="update-response-container">') - .attr('action', NAMESPACE_URL) + .attr('action', url) .append($('<input name="update">').val(settings.data)) .append('<input name="monitor" value="true">'); if($('#update-analytic').is(':checked')) { @@ -485,7 +570,7 @@ $('#update-response pre').show().html('Data loading...'); - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); } $('#update-clear').click(function() { @@ -538,9 +623,14 @@ } }); -QUERY_EDITOR = CodeMirror.fromTextArea($('#query-box')[0], {lineNumbers: true, mode: 'sparql', - extraKeys: {'Ctrl-Enter': submitQuery, 'Ctrl-,': moveTabLeft, 'Ctrl-.': moveTabRight} +EDITORS.query = CodeMirror.fromTextArea($('#query-box')[0], CODEMIRROR_DEFAULTS); +EDITORS.query.on('change', function() { + if(ERROR_LINE_MARKERS.query) { + ERROR_LINE_MARKERS.query.clear(); + ERROR_CHARACTER_MARKERS.query.clear(); + } }); +EDITORS.query.addKeyMap({'Ctrl-Enter': submitQuery}); function submitQuery(e) { try { @@ -548,8 +638,14 @@ } catch(e) {} // transfer CodeMirror content to textarea - QUERY_EDITOR.save(); + EDITORS.query.save(); + // do nothing if query is empty + if($('#query-box').val().trim() == '') { + return; + } + + var url = RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', data: $('#query-form').serialize(), @@ -561,7 +657,7 @@ $('#query-response').show().html('Query running...'); $('#query-pagination').hide(); - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); $('#query-explanation').empty(); if($('#query-explain').is(':checked')) { @@ -572,7 +668,7 @@ success: showQueryExplanation, error: queryResultsError }; - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); } else { $('#query-explanation').hide(); } @@ -630,7 +726,7 @@ success: function() { downloadFile(data, dataType, filename); }, error: downloadRDFError }; - $.ajax('/bigdata/sparql?workbench&convert', settings); + $.ajax(RO_URL_PREFIX + 'sparql?workbench&convert', settings); } else { // not RDF export_extensions[dataType][3](filename); @@ -811,21 +907,9 @@ if(match) { // highlight character at error position 
var line = match[1] - 1; - var column = match[2] - 1; - var input = $('#' + pane + '-box').val(); - var lines = input.split('\n'); - var container = '#' + pane + '-errors'; - $(container).html(''); - for(var i=0; i<line; i++) { - var p = $('<p>').text(lines[i]); - $(container).append(p); - } - $(container).append('<p class="error-line">'); - $(container + ' .error-line').append(document.createTextNode(lines[line].substr(0, column))); - $(container + ' .error-line').append($('<span class="error-character">').text(lines[line].charAt(column) || ' ')); - $(container + ' .error-line').append(document.createTextNode(lines[line].substr(column + 1))); - $(container).show(); - $('#' + pane + '-box').scrollTop(0); + var character = match[2] - 1; + ERROR_LINE_MARKERS[pane] = EDITORS.query.doc.markText({line: line, ch: 0}, {line: line}, {className: 'error-line'}); + ERROR_CHARACTER_MARKERS[pane] = EDITORS.query.doc.markText({line: line, ch: character}, {line: line, ch: character + 1}, {className: 'error-character'}); } } @@ -925,8 +1009,16 @@ $('#explore-form').submit(function(e) { e.preventDefault(); - var uri = $(this).find('input').val(); + var uri = $(this).find('input[type="text"]').val().trim(); if(uri) { + // add < > if they're not present and this is not a namespaced URI + if(uri[0] != '<' && uri.match(/^\w+:\//)) { + uri = '<' + uri; + if(uri.slice(-1) != '>') { + uri += '>'; + } + $(this).find('input[type="text"]').val(uri); + } loadURI(uri); // if this is a SID, make the components clickable @@ -1015,7 +1107,7 @@ success: updateExploreStart, error: updateExploreError }; - $.ajax(NAMESPACE_URL, settings); + $.ajax(RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql', settings); } function updateExploreStart(data) { @@ -1142,7 +1234,7 @@ if(e) { e.preventDefault(); } - $.get('/bigdata/status', function(data) { + $.get(RO_URL_PREFIX + 'status', function(data) { // get data inside a jQuery object data = $('<div>').append(data); getStatusNumbers(data); @@ -1151,8 +1243,8 @@ function getStatusNumbers(data) { $('#status-text').html(data); - $('#status-text a[href*="status"]').eq(0).click(function(e) { e.preventDefault(); showQueries(false); return false; }); - $('#status-text a[href*="status"]').eq(1).click(function(e) { e.preventDefault(); showQueries(true); return false; }); + $('#status-text a').eq(1).click(function(e) { e.preventDefault(); showQueries(false); return false; }); + $('#status-text a').eq(2).click(function(e) { e.preventDefault(); showQueries(true); return false; }); } $('#show-queries').click(function(e) { @@ -1166,7 +1258,7 @@ }); function showQueries(details) { - var url = '/bigdata/status?showQueries'; + var url = RO_URL_PREFIX + 'status?showQueries'; if(details) { url += '=details'; } @@ -1218,7 +1310,7 @@ e.preventDefault(); if(confirm('Cancel query?')) { var id = $(this).data('queryId'); - $.post('/bigdata/status?cancelQuery&queryId=' + id, function() { getStatus(); }); + $.post(RW_URL_PREFIX + 'status?cancelQuery&queryId=' + id, function() { getStatus(); }); $(this).parents('li').remove(); } } @@ -1226,7 +1318,7 @@ function getQueryDetails(e) { e.preventDefault(); var id = $(this).data('queryId'); - $.ajax({url: '/bigdata/status?showQueries=details&queryId=' + id, + $.ajax({url: RO_URL_PREFIX + 'status?showQueries=details&queryId=' + id, success: function(data) { // get data inside a jQuery object data = $('<div>').append(data); @@ -1247,7 +1339,7 @@ /* Performance */ $('#tab-selector a[data-target=performance]').click(function(e) { - $.get('/bigdata/counters', 
function(data) { + $.get(RO_URL_PREFIX + 'counters', function(data) { $('#performance-tab .box').html(data); }); }); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
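For context on what the new create-namespace form in this revision submits, here is a minimal sketch of the same POST issued outside the browser. The endpoint path, content type, and property keys are the ones used by the workbench code above; the host, port, and concrete values (myNamespace and the boolean flags) are assumptions for illustration only:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class CreateNamespaceExample {

        public static void main(final String[] args) throws Exception {

            // Same XML properties document the workbench builds from the form fields.
            final String xml =
                  "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
                + "<!DOCTYPE properties SYSTEM \"http://java.sun.com/dtd/properties.dtd\">\n"
                + "<properties>\n"
                + "<entry key=\"com.bigdata.rdf.sail.namespace\">myNamespace</entry>\n"
                + "<entry key=\"com.bigdata.search.FullTextIndex.fieldsEnabled\">false</entry>\n"
                + "<entry key=\"com.bigdata.rdf.sail.truthMaintenance\">false</entry>\n"
                + "<entry key=\"com.bigdata.rdf.store.AbstractTripleStore.quads\">true</entry>\n"
                + "</properties>";

            // Host and port are placeholders; the path mirrors RW_URL_PREFIX + 'namespace'.
            final URL url = new URL("http://localhost:9999/bigdata/namespace");
            final HttpURLConnection http = (HttpURLConnection) url.openConnection();
            http.setRequestMethod("POST");
            http.setRequestProperty("Content-Type", "application/xml");
            http.setDoOutput(true);
            try (OutputStream out = http.getOutputStream()) {
                out.write(xml.getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("HTTP " + http.getResponseCode());
            http.disconnect();
        }
    }

On success the workbench simply refreshes its namespace list; on failure it surfaces the response text, as in the error handler shown in the diff above.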
From: <tho...@us...> - 2014-06-04 15:37:18
Revision: 8445 http://sourceforge.net/p/bigdata/code/8445 Author: thompsonbry Date: 2014-06-04 15:37:13 +0000 (Wed, 04 Jun 2014) Log Message: ----------- We have extensively modified the TestMROWTransaction test suite. This class is designed to test for problems where there is a single writer and concurrent readers. We have modified the class to force spurious failures in the BTree.writeNodeOrLeaf() method. These failures directly simulate the behavior on the system of an exception in DefaultNodeCoder.encodeLive(). That method does not have a side-effect. It either succeeds, in which case the caller applies the side-effect, or it fails, in which case there is no side effect. We have demonstrated that the Sail level rollback correctly discards the partial update of the index and that new writer threads continue to make progress and that readers do not observe errors. This effectively disproves the hypothesis that rollback() was failing to discard some state. See #855 (AssertionError: Child does not have persistent identity) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2014-06-04 10:55:01 UTC (rev 8444) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2014-06-04 15:37:13 UTC (rev 8445) @@ -1,460 +1,652 @@ -package com.bigdata.rdf.sail; - -import java.util.Properties; -import java.util.Random; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - -import org.apache.log4j.Logger; -import org.openrdf.model.URI; -import org.openrdf.model.impl.URIImpl; - -import com.bigdata.btree.IndexMetadata; -import com.bigdata.counters.CAT; -import com.bigdata.journal.BufferMode; -import com.bigdata.journal.ITx; -import com.bigdata.journal.Journal; -import com.bigdata.rdf.axioms.NoAxioms; -import com.bigdata.rdf.sail.BigdataSail.Options; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BD; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.rdf.vocab.NoVocabulary; -import com.bigdata.service.AbstractTransactionService; -import com.bigdata.util.InnerCause; -import com.bigdata.util.concurrent.DaemonThreadFactory; - -abstract public class TestMROWTransactions extends ProxyBigdataSailTestCase { - - private static final Logger txLog = Logger.getLogger("com.bigdata.txLog"); - - TestMROWTransactions() { - } - - TestMROWTransactions(String arg0) { - super(arg0); - } - - void domultiple_csem_transaction_onethread(final int retentionMillis) throws Exception { - - domultiple_csem_transaction_onethread(retentionMillis, 2000, 50); - - } - - void domultiple_csem_transaction(final int retentionMillis) throws Exception { - - domultiple_csem_transaction2(retentionMillis, 2/* 
nreaderThreads */, - 1000/* nwriters */, 20 * 1000/* nreaders */); - - } - - /** - * - * @param retentionMillis - * The retention time (milliseconds). - * @param nreaderThreads - * The #of threads running reader tasks. Increase nreaderThreads - * to increase chance startup condition and decrement to increase - * chance of commit point with no open read-only transaction (no - * sessions). Value is in [1:...]. - * @param nwriters - * The #of writer tasks (there is only one writer thread). - * @param nreaders - * The #of reader tasks. - * - * @throws Exception - */ - void domultiple_csem_transaction2(final int retentionMillis, - final int nreaderThreads, final int nwriters, final int nreaders) - throws Exception { - - /** - * The most likely problem is related to the session protection in the - * RWStore. In development we saw problems when concurrent transactions - * had reduced the open/active transactions to zero, therefore releasing - * session protection. If the protocol works correctly we should never - * release session protection if any transaction has been initialized. - * - * The message of "invalid address" would be generated if an allocation - * has been freed and is no longer protected from recycling when an - * attempt is made to read from it. - * - * TODO Experiment with different values of [nthreads] for the with and - * w/o history variations of this test. Consider lifting that parameter - * into the signature of this method. - */ - final int nuris = 2000; // number of unique subject/objects - final int npreds = 50; // - // final PseudoRandom r = new PseudoRandom(2000); - // r.next(1500); - final Random r = new Random(); - - final CAT commits = new CAT(); - final CAT nreadersDone = new CAT(); - final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null); - // Set [true] iff there are no failures by the time we cancel the running tasks. - final AtomicBoolean success = new AtomicBoolean(false); - final BigdataSail sail = getSail(getProperties(retentionMillis)); - // log.warn("Journal: "+sail.getDatabase().getIndexManager()+", file="+((Journal)sail.getDatabase().getIndexManager()).getFile()); - try { - - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - final AbstractTripleStore origStore = repo.getDatabase(); - - final URI[] subs = new URI[nuris]; - for (int i = 0; i < nuris; i++) { - subs[i] = uri("uri:" + i); - } - final URI[] preds = new URI[npreds]; - for (int i = 0; i < npreds; i++) { - preds[i] = uri("pred:" + i); - } - - // Writer task adds nwrites statements then commits - class Writer implements Callable<Long> { - final int nwrites; - - Writer(final int nwrites) { - this.nwrites = nwrites; - } - - public Long call() throws Exception { - try { - final boolean isQuads = origStore.isQuads(); - // Thread.sleep(r.nextInt(2000) + 500); - try { - - for (int i = 0; i < nwrites; i++) { - origStore - .addStatement( - subs[r.nextInt(nuris)], - preds[r.nextInt(npreds)], - subs[r.nextInt(nuris)], - isQuads ? 
subs[r.nextInt(nuris)] - : null); - // System.out.print('.'); - } - // System.out.println("\n"); - commits.increment(); - - } finally { - origStore.commit(); - if (log.isInfoEnabled()) { - log.info("Commit #" + commits); - } - } - } catch (Throwable ise) { - if (!InnerCause.isInnerCause(ise, - InterruptedException.class)) { - if (failex - .compareAndSet(null/* expected */, ise/* newValue */)) { - log.error("firstCause:" + ise, ise); - } else { - if (log.isInfoEnabled()) - log.info("Other error: " + ise, ise); - } - } else { - // Ignore. - } - } - return null; - } - - } - - // ReaderTask makes nreads and closes - class Reader implements Callable<Long> { - final int nreads; - - Reader(final int nwrites) { - this.nreads = nwrites; - } - - public Long call() throws Exception { - try { - final Long txId = ((Journal) origStore - .getIndexManager()).newTx(ITx.READ_COMMITTED); - - try { - /* - * Note: This sleep makes it much easier to hit the - * bug documented here. However, the sleep can also - * cause the test to really stretch out. So the - * sleep is only used until the writers are done. - * - * https://sourceforge.net/apps/trac/bigdata/ticket/467 - */ - if (commits.get() < nwriters) - Thread.sleep(2000/* millis */); - txLog.info("Reading with tx: " + txId); - final AbstractTripleStore readstore = (AbstractTripleStore) origStore - .getIndexManager().getResourceLocator() - .locate(origStore.getNamespace(), txId); - - for (int i = 0; i < nreads; i++) { - final BigdataStatementIterator stats = readstore - .getStatements(subs[r.nextInt(nuris)], - null, null); - try { - while (stats.hasNext()) { - stats.next(); - } - } finally { - stats.close(); - } - } - - txLog.info("Finished with tx: " + txId); - } catch (IllegalStateException ise) { - txLog.info("IllegalStateException tx: " + txId); - failex.compareAndSet(null, ise); - } catch (Exception e) { - txLog.info("UnexpectedException tx: " + txId); - failex.compareAndSet(null, e); - throw e; - } finally { - txLog.info("Aborting tx: " + txId); - ((Journal) origStore.getIndexManager()).abort(txId); - nreadersDone.increment(); - } - } catch (Throwable ise) { - if (!InnerCause.isInnerCause(ise, - InterruptedException.class)) { - if (failex - .compareAndSet(null/* expected */, ise/* newValue */)) { - log.error("firstCause:" + ise, ise); - } else { - if (log.isInfoEnabled()) - log.info("Other error: " + ise, ise); - } - } else { - // Ignore. - } - } - return null; - } - - } - - ExecutorService writers = null; - ExecutorService readers = null; - try { - - writers = Executors - .newSingleThreadExecutor(new DaemonThreadFactory( - "test-writer-pool")); - - readers = Executors.newFixedThreadPool(nreaderThreads, - new DaemonThreadFactory("test-reader-pool")); - - // let's schedule a few writers and readers (more than needed) - // writers.submit(new Writer(5000000/* nwrite */)); - Future<Long> lastWriterFuture = null; - Future<Long> lastReaderFuture = null; - for (int i = 0; i < nwriters; i++) { - lastWriterFuture = writers - .submit(new Writer(500/* nwrite */)); - } - for (int rdrs = 0; rdrs < nreaders; rdrs++) { - lastReaderFuture = readers - .submit(new Reader(60/* nread */)); - } - - // let the writers run riot for a time, checking for failure - while (true) { - final boolean bothDone = lastWriterFuture.isDone() - && lastReaderFuture.isDone(); - if (bothDone) - break; - if (failex.get() != null) { - // Something errored. 
- break; - } - Thread.sleep(1000/* ms */); - } - // for (int i = 0; i < 600; i++) { - // Thread.sleep(1000); - // if (failex.get() != null) - // break; - // } - if (failex.get() == null) { - /* - * Note whether or not there are failures before we - * interrupt the running tasks. - */ - success.set(true); - } - writers.shutdownNow(); - readers.shutdownNow(); - writers.awaitTermination(5, TimeUnit.SECONDS); - readers.awaitTermination(5, TimeUnit.SECONDS); - if (!success.get()) { - final Throwable ex = failex.get(); - if (ex != null) { - fail("Test failed: firstCause=" + ex - + ", retentionMillis=" + retentionMillis - + ", nreaderThreads=" + nreaderThreads - + ", nwriters=" + nwriters + ", nreaders=" - + nreaders + ", indexManager=" - + repo.getDatabase().getIndexManager(), ex); - } - } - if (log.isInfoEnabled()) - log.info("Writers committed: " + commits.get() - + ", readers done: " + nreadersDone.get()); - } finally { - if (writers != null) - writers.shutdownNow(); - if (readers != null) - readers.shutdownNow(); - } - } finally { - - sail.__tearDownUnitTest(); - - } - - } - - void domultiple_csem_transaction_onethread(final int retention, final int nuris, final int npreds) throws Exception { - - // final PseudoRandom r = new PseudoRandom(20000 /*10000*/); - final Random r = new Random(); - - final CAT writes = new CAT(); - final CAT reads = new CAT(); -// final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null); - // Set [true] iff there are no failures by the time we cancel the - // running tasks. - // final AtomicBoolean success = new AtomicBoolean(false); - final BigdataSail sail = getSail(getProperties(retention)); - try { - - sail.initialize(); - final BigdataSailRepository repo = new BigdataSailRepository(sail); - final AbstractTripleStore origStore = repo.getDatabase(); - - final URI[] subs = new URI[nuris]; - for (int i = 0; i < nuris; i++) { - subs[i] = uri("uri:" + i); - } - final URI[] preds = new URI[npreds + 20]; - for (int i = 0; i < npreds; i++) { - preds[i] = uri("pred:" + i); - } - final int nwrites = 600; - final int nreads = 50; - final int ntrials = 20; - final boolean isQuads = origStore.isQuads(); - - for (int loop = 0; loop < ntrials; loop++) { - final Long txId = ((Journal) origStore.getIndexManager()) - .newTx(ITx.READ_COMMITTED); - try { - // System.err.println("READ_STATE: " + txId); - final AbstractTripleStore readstore = (AbstractTripleStore) origStore - .getIndexManager().getResourceLocator() - .locate(origStore.getNamespace(), txId); - for (int i = 0; i < nreads; i++) { - final BigdataStatementIterator stats = readstore - // .getStatements(subs[nuris/2 + loop], null, - // null); - .getStatements(subs[r.nextInt(nuris)], null, - null); - try { - while (stats.hasNext()) { - stats.next(); - reads.increment(); - } - } finally { - stats.close(); - } - } - - // Thread.sleep(r.nextInt(1000) + 500); - try { - - for (int i = 0; i < nwrites; i++) { - origStore.addStatement(subs[r.nextInt(nuris)], - preds[r.nextInt(npreds)], - subs[r.nextInt(nuris)], - isQuads ? subs[r.nextInt(nuris)] : null); - // origStore.addStatement(subs[nuris/2 + loop], - // preds[npreds/2 + loop], - // subs[nuris/2 - loop], - // isQuads ? 
subs[nuris/2 + loop] : null); - writes.increment(); - // System.out.print('.'); - } - // System.out.println("\n"); - - } finally { - origStore.commit(); - log.warn("Commit: " + loop); - // if (log.isInfoEnabled()) - // log.info("Commit"); - } - // Close Read Connection - ((Journal) readstore.getIndexManager()).abort(txId); - - } catch (Throwable ise) { - log.error("firstCause:" + ise, ise); - throw new Exception(ise); - } - } - - } finally { - - sail.__tearDownUnitTest(); - - } - - } - - protected URI uri(String s) { - return new URIImpl(BD.NAMESPACE + s); - } - - @Override - public Properties getProperties() { - - Properties props = super.getProperties(); - - props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true"); - props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); - props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName()); - props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName()); - props.setProperty(BigdataSail.Options.JUSTIFY, "false"); - props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); - // props.setProperty(Options.WRITE_CACHE_BUFFER_COUNT, "3"); - - // ensure using RWStore - props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString()); - // props.setProperty(RWStore.Options.MAINTAIN_BLACKLIST, "false"); - // props.setProperty(RWStore.Options.OVERWRITE_DELETE, "true"); - // props.setProperty(Options.CREATE_TEMP_FILE, "false"); - // props.setProperty(Options.FILE, "/Volumes/SSDData/csem.jnl"); - - // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "20"); - // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "0"); - props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "500"); - props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "10"); - - return props; - - } - - protected Properties getProperties(int retention) { - final Properties props = getProperties(); - props.setProperty(AbstractTransactionService.Options.MIN_RELEASE_AGE, "" + retention); - - return props; - } - -} +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Dec 19, 2006 + */ +package com.bigdata.rdf.sail; + +import info.aduna.iteration.CloseableIteration; + +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.sail.SailException; + +import com.bigdata.btree.AbstractNode; +import com.bigdata.btree.BTree; +import com.bigdata.btree.Checkpoint; +import com.bigdata.btree.IndexMetadata; +import com.bigdata.counters.CAT; +import com.bigdata.journal.BufferMode; +import com.bigdata.rawstore.IRawStore; +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sail.BigdataSail.Options; +import com.bigdata.rdf.store.BD; +import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.service.AbstractTransactionService; +import com.bigdata.util.InnerCause; +import com.bigdata.util.concurrent.DaemonThreadFactory; + +/** + * TestCase to test single writer/mutiple transaction committed readers with + * SAIL interface. + * + * @author Martyn Cutcher + */ +abstract public class TestMROWTransactions extends ProxyBigdataSailTestCase { + +// private static final Logger txLog = Logger.getLogger("com.bigdata.txLog"); + + TestMROWTransactions() { + } + + TestMROWTransactions(final String arg0) { + super(arg0); + } + +// void domultiple_csem_transaction_onethread(final int retentionMillis) throws Exception { +// +// domultiple_csem_transaction_onethread(retentionMillis, 2000, 50); +// +// } +// +// void domultiple_csem_transaction(final int retentionMillis) throws Exception { +// +// domultiple_csem_transaction2(retentionMillis, 2/* nreaderThreads */, +// 1000/* nwriters */, 20 * 1000/* nreaders */); +// +// } + + /** + * + * @param retentionMillis + * The retention time (milliseconds). + * @param nreaderThreads + * The #of threads running reader tasks. Increase nreaderThreads + * to increase chance startup condition and decrement to increase + * chance of commit point with no open read-only transaction (no + * sessions). Value is in [1:...]. + * @param nwriters + * The #of writer tasks (there is only one writer thread). + * @param nreaders + * The #of reader tasks. + * @param isolatableIndice + * When <code>true</code> the writers will use read/write + * transactions. Otherwise they will use the unisolated + * connection. 
+ * @throws Exception + */ + void domultiple_csem_transaction2(final int retentionMillis, + final int nreaderThreads, final int nwriters, final int nreaders, + final boolean isolatableIndices) throws Exception { + + if (log.isInfoEnabled()) { + log.info("================================================================================="); + log.info("retentionMillis=" + retentionMillis + ", nreaderThreads=" + + nreaderThreads + ", nwriters=" + nwriters + ", nreaders=" + + nreaders + ", isolatableIndices=" + isolatableIndices); + log.info("================================================================================="); + } + + /** + * The most likely problem is related to the session protection in the + * RWStore. In development we saw problems when concurrent transactions + * had reduced the open/active transactions to zero, therefore releasing + * session protection. If the protocol works correctly we should never + * release session protection if any transaction has been initialized. + * + * The message of "invalid address" would be generated if an allocation + * has been freed and is no longer protected from recycling when an + * attempt is made to read from it. + * + * TODO Experiment with different values of [nthreads] for the with and + * w/o history variations of this test. Consider lifting that parameter + * into the signature of this method. + */ + final int nuris = 2000; // number of unique subject/objects + final int npreds = 50; // + // final PseudoRandom r = new PseudoRandom(2000); + // r.next(1500); + final Random r = new Random(); + + final int maxAborts = 100; + + final CAT commits = new CAT(); + final CAT aborts = new CAT(); + final CAT nreadersDone = new CAT(); + final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null); + // Set [true] iff there are no failures by the time we cancel the running tasks. + final AtomicBoolean success = new AtomicBoolean(false); + final BigdataSail sail = getSail(getProperties(retentionMillis, + isolatableIndices)); + // log.warn("Journal: "+sail.getDatabase().getIndexManager()+", file="+((Journal)sail.getDatabase().getIndexManager()).getFile()); + try { + + sail.initialize(); + // TODO Force an initial commit? 
+ +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// final AbstractTripleStore origStore = repo.getDatabase(); + + final URI[] subs = new URI[nuris]; + for (int i = 0; i < nuris; i++) { + subs[i] = uri("uri:" + i); + } + final URI[] preds = new URI[npreds]; + for (int i = 0; i < npreds; i++) { + preds[i] = uri("pred:" + i); + } + + ExecutorService writers = null; + ExecutorService readers = null; + try { + + writers = Executors + .newSingleThreadExecutor(new DaemonThreadFactory( + "test-writer-pool")); + + readers = Executors.newFixedThreadPool(nreaderThreads, + new DaemonThreadFactory("test-reader-pool")); + + // let's schedule a few writers and readers (more than needed) + // writers.submit(new Writer(5000000/* nwrite */)); + Future<Long> lastWriterFuture = null; + @SuppressWarnings("unused") + Future<Long> lastReaderFuture = null; + + for (int i = 0; i < nwriters; i++) { + + lastWriterFuture = writers.submit(new Writer(r, + 500/* nwrites */, sail, commits, aborts, + maxAborts, failex, subs, preds)); + + } + + for (int rdrs = 0; rdrs < nreaders; rdrs++) { + + lastReaderFuture = readers.submit(new Reader(r, + 60/* nread */, nwriters, sail, failex, + commits, nreadersDone, subs)); + + } + + // let the writers run riot for a time, checking for failure + while (true) { +// final boolean bothDone = lastWriterFuture.isDone() +// && lastReaderFuture.isDone(); +// if (bothDone) +// break; + if(lastWriterFuture.isDone()) { + // End test when the writers are done. + break; + } + if (failex.get() != null) { + // Something errored. + break; + } + Thread.sleep(250/* ms */); + } + if (failex.get() == null) { + /* + * Note whether or not there are failures before we + * interrupt the running tasks. + */ + success.set(true); + } + writers.shutdownNow(); + readers.shutdownNow(); + writers.awaitTermination(5, TimeUnit.SECONDS); + readers.awaitTermination(5, TimeUnit.SECONDS); + if (!success.get()) { + final Throwable ex = failex.get(); + if (ex != null) { + fail("Test failed: firstCause=" + ex + + ", retentionMillis=" + retentionMillis + + ", nreaderThreads=" + nreaderThreads + + ", nwriters=" + nwriters + ", nreaders=" + + nreaders + ", indexManager=" + + sail.getDatabase().getIndexManager(), ex); + } + } + if (log.isInfoEnabled()) + log.info("Writers committed: " + commits.get() + + ", writers aborted: " + aborts.get() + + ", readers done: " + nreadersDone.get()); + } finally { + if (writers != null) + writers.shutdownNow(); + if (readers != null) + readers.shutdownNow(); + } + } finally { + try { + sail.__tearDownUnitTest(); + } catch (Throwable t) { + /* + * FIXME The test helper tear down should not throw anything, + * but it can do so if a tx has been asynchronously closed. This + * has to do with the logic that openrdf uses to close open + * transactions when the sail is shutdown by the caller. 
+ */ + log.error("Problem with test shutdown: " + t, t); + } + + } + + } + + /** Writer task adds nwrites statements then commits */ + static private class Writer implements Callable<Long> { + + final Random r; + final int nwrites; + final BigdataSail sail; + final CAT commits; + final CAT aborts; + final int maxAborts; + final AtomicReference<Throwable> failex; + final int nuris; + final int npreds; + final URI[] subs; + final URI[] preds; + + Writer(final Random r, final int nwrites, + final BigdataSail sail, final CAT commits, + final CAT aborts, final int maxAborts, + final AtomicReference<Throwable> failex, final URI[] subs, + final URI[] preds) { + + this.r = r; + this.nwrites = nwrites; + this.sail = sail; + this.commits = commits; + this.aborts = aborts; + this.maxAborts = maxAborts; + this.failex = failex; + this.nuris = subs.length; + this.npreds = preds.length; + this.subs = subs; + this.preds = preds; + + } + + @Override + public Long call() throws Exception { + final boolean isQuads = sail.isQuads(); + // Thread.sleep(r.nextInt(2000) + 500); + BigdataSailConnection con = null; + boolean ok = false; + try { + con = sail.getConnection(); + for (int i = 0; i < nwrites; i++) { + con.addStatement(subs[r.nextInt(nuris)], + preds[r.nextInt(npreds)], subs[r.nextInt(nuris)], + isQuads ? subs[r.nextInt(nuris)] : null); + // System.out.print('.'); + } + // System.out.println("\n"); + con.commit(); + ok = true; + commits.increment(); + if (log.isInfoEnabled()) + log.info("Commit #" + commits); + + } catch (Throwable ise) { + log.warn(ise, ise); + if (InnerCause.isInnerCause(ise, InterruptedException.class)) { + // ignore + } else if (InnerCause.isInnerCause(ise, MyBTreeException.class) + && aborts.get() < maxAborts) { + // ignore + } else { + // Set the first cause (but not for the forced abort). + if (failex + .compareAndSet(null/* expected */, ise/* newValue */)) { + log.error("firstCause:" + ise, ise); + } + } + } finally { + if (con != null) { + if (!ok) { + con.rollback(); + aborts.increment(); + log.error("Abort #" + aborts + " (with " + + commits.get() + " commits)"); + } + con.close(); + } + } + return null; + } + + } // Writer + + /** ReaderTask makes nreads and closes. */ + private static class Reader implements Callable<Long> { + + final Random r; + final int nreads; + final int nwriters; + final BigdataSail sail; + final AtomicReference<Throwable> failex; + final CAT commits; + final CAT nreadersDone; + final int nuris; + final URI[] subs; + + Reader(final Random r, final int nreads, final int nwriters, + final BigdataSail sail, + final AtomicReference<Throwable> failex, final CAT commits, + final CAT nreadersDone, final URI[] subs) { + this.r = r; + this.nreads = nreads; + this.nwriters = nwriters; + this.sail = sail; + this.failex = failex; + this.commits = commits; + this.nreadersDone = nreadersDone; + this.nuris = subs.length; + this.subs = subs; + } + + @Override + public Long call() throws Exception { + BigdataSailConnection con = null; + try { + con = sail.getReadOnlyConnection(); + /* + * Note: This sleep makes it much easier to hit the bug + * documented here. However, the sleep can also cause the test + * to really stretch out. So the sleep is only used until the + * writers are done. + * + * https://sourceforge.net/apps/trac/bigdata/ticket/467 + */ + if (commits.get() < Math.max(nwriters, 5)) + Thread.sleep(2000/* millis */); + + for (int i = 0; i < nreads; i++) { + final CloseableIteration<? 
extends Statement, SailException> stats = con + .getStatements(subs[r.nextInt(nuris)], (URI) null, + (Value) null, (Resource) null); + try { + while (stats.hasNext()) { + stats.next(); + } + } finally { + stats.close(); + } + } + } catch (Throwable ise) { + if (InnerCause.isInnerCause(ise, InterruptedException.class)) { + // Ignore. + } else { + if (failex + .compareAndSet(null/* expected */, ise/* newValue */)) { + log.error("firstCause:" + ise, ise); + } else { + if (log.isInfoEnabled()) + log.info("Other error: " + ise, ise); + } + } + } finally { + if (con != null) { + con.rollback(); + con.close(); + } + nreadersDone.increment(); + } + return null; + } + + } // Reader + + +// void domultiple_csem_transaction_onethread(final int retention, final int nuris, final int npreds) throws Exception { +// +// // final PseudoRandom r = new PseudoRandom(20000 /*10000*/); +// final Random r = new Random(); +// +// final CAT writes = new CAT(); +// final CAT reads = new CAT(); +//// final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null); +// // Set [true] iff there are no failures by the time we cancel the +// // running tasks. +// // final AtomicBoolean success = new AtomicBoolean(false); +// final boolean isolatableIndices = false; +// final BigdataSail sail = getSail(getProperties(retention,isolatableIndices)); +// try { +// +// sail.initialize(); +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// final AbstractTripleStore origStore = repo.getDatabase(); +// +// final URI[] subs = new URI[nuris]; +// for (int i = 0; i < nuris; i++) { +// subs[i] = uri("uri:" + i); +// } +// final URI[] preds = new URI[npreds + 20]; +// for (int i = 0; i < npreds; i++) { +// preds[i] = uri("pred:" + i); +// } +// final int nwrites = 600; +// final int nreads = 50; +// final int ntrials = 20; +// final boolean isQuads = origStore.isQuads(); +// +// for (int loop = 0; loop < ntrials; loop++) { +// final Long txId = ((Journal) origStore.getIndexManager()) +// .newTx(ITx.READ_COMMITTED); +// try { +// // System.err.println("READ_STATE: " + txId); +// final AbstractTripleStore readstore = (AbstractTripleStore) origStore +// .getIndexManager().getResourceLocator() +// .locate(origStore.getNamespace(), txId); +// for (int i = 0; i < nreads; i++) { +// final BigdataStatementIterator stats = readstore +// // .getStatements(subs[nuris/2 + loop], null, +// // null); +// .getStatements(subs[r.nextInt(nuris)], null, +// null); +// try { +// while (stats.hasNext()) { +// stats.next(); +// reads.increment(); +// } +// } finally { +// stats.close(); +// } +// } +// +// // Thread.sleep(r.nextInt(1000) + 500); +// try { +// +// for (int i = 0; i < nwrites; i++) { +// origStore.addStatement(subs[r.nextInt(nuris)], +// preds[r.nextInt(npreds)], +// subs[r.nextInt(nuris)], +// isQuads ? subs[r.nextInt(nuris)] : null); +// // origStore.addStatement(subs[nuris/2 + loop], +// // preds[npreds/2 + loop], +// // subs[nuris/2 - loop], +// // isQuads ? 
subs[nuris/2 + loop] : null); +// writes.increment(); +// // System.out.print('.'); +// } +// // System.out.println("\n"); +// +// } finally { +// origStore.commit(); +// log.warn("Commit: " + loop); +// // if (log.isInfoEnabled()) +// // log.info("Commit"); +// } +// // Close Read Connection +// ((Journal) readstore.getIndexManager()).abort(txId); +// +// } catch (Throwable ise) { +// log.error("firstCause:" + ise, ise); +// throw new Exception(ise); +// } +// } +// +// } finally { +// +// sail.__tearDownUnitTest(); +// +// } +// +// } + + protected URI uri(String s) { + return new URIImpl(BD.NAMESPACE + s); + } + + @Override + public Properties getProperties() { + + final Properties props = super.getProperties(); + + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); + // props.setProperty(Options.WRITE_CACHE_BUFFER_COUNT, "3"); + + // ensure using RWStore + props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString()); + // props.setProperty(RWStore.Options.MAINTAIN_BLACKLIST, "false"); + // props.setProperty(RWStore.Options.OVERWRITE_DELETE, "true"); + // props.setProperty(Options.CREATE_TEMP_FILE, "false"); + // props.setProperty(Options.FILE, "/Volumes/SSDData/csem.jnl"); + + // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "20"); + // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "0"); + props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "500"); + props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "10"); + + return props; + + } + + protected Properties getProperties(final int retention, + final boolean isolatableIndices) { + + final Properties props = getProperties(); + + props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, + Boolean.toString(isolatableIndices)); + + props.setProperty(AbstractTransactionService.Options.MIN_RELEASE_AGE, + "" + retention); + + final boolean isQuads = Boolean.valueOf(props.getProperty( + Options.QUADS_MODE, "false")); + + /** + * Force override of the BTree on one index to occasionally prompt + * errors during the test run. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: + * Child does not have persistent identity </a>. + */ + if (!isolatableIndices) { + /* + * Note: if this is used with read/write tx for the updates then we + * do not observe the desired exception in the Writer class when we + * call con.commit(). This causes the test to fail, but it is + * failing in an uninteresting manner. Hence, the forced abort of + * the B+Tree update is only present at this time for the unisolated + * indices. This is where the problem is reported for ticket #855. + */ + final String name = isQuads ? "SPOC" : "SPO"; + props.setProperty("com.bigdata.namespace.kb.spo." + name + + ".com.bigdata.btree.BTree.className", + MyBTree.class.getName()); + } + return props; + } + + /** + * Helper class for force abort of a B+Tree write. + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a>. 
+ */ + public static class MyBTree extends BTree { + + private final Random r = new Random(12L); + + public MyBTree(IRawStore store, Checkpoint checkpoint, + IndexMetadata metadata, boolean readOnly) { + + super(store, checkpoint, metadata, readOnly); + + } + + @Override + protected long writeNodeOrLeaf(final AbstractNode<?> node) { + + if (node.isLeaf() && r.nextInt(500) == 0) { + + throw new MyBTreeException("Forcing abort: " + this); + + } + + final long addr = super.writeNodeOrLeaf(node); + + return addr; + + } + + } + /** Marker exception for a force abort of a B+Tree write. */ + private static class MyBTreeException extends RuntimeException { + + public MyBTreeException(final String string) { + super(string); + } + + /** + * + */ + private static final long serialVersionUID = 1L; + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java 2014-06-04 10:55:01 UTC (rev 8444) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java 2014-06-04 15:37:13 UTC (rev 8445) @@ -1,3 +1,29 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Dec 19, 2006 + */ package com.bigdata.rdf.sail; import java.util.Random; @@ -7,106 +33,142 @@ * SAIL interface. 
* * @author Martyn Cutcher - * */ public class TestMROWTransactionsNoHistory extends TestMROWTransactions { - /** - * - */ - public TestMROWTransactionsNoHistory() { - } + public TestMROWTransactionsNoHistory() { + } - /** - * @param arg0 - */ - public TestMROWTransactionsNoHistory(String arg0) { - super(arg0); - } + public TestMROWTransactionsNoHistory(final String arg0) { + super(arg0); + } - @Override + @Override protected void setUp() throws Exception { super.setUp(); } - + @Override protected void tearDown() throws Exception { super.tearDown(); } - -// // similar to test_multiple_transactions but uses direct AbsractTripleStore -// // manipulations rather than RepositoryConnections -// public void test_multiple_csem_transaction_nohistory() throws Exception { -// -//// domultiple_csem_transaction(0); -// -// domultiple_csem_transaction2(0/* retentionMillis */, -// 2/* nreaderThreads */, 1000/* nwriters */, 20 * 1000/* nreaders */); -// -// } -// -// public void test_multiple_csem_transaction_nohistory_oneReaderThread() throws Exception { -// -// domultiple_csem_transaction2(0/* retentionMillis */, -// 1/* nreaderThreads */, 1000/* nwriters */, 20 * 1000/* nreaders */); -// -// } - - public void test_multiple_csem_transaction_nohistory_stress() throws Exception { + + /** + * I do observe problems with the "no-history" version of this test. The + * RWStore has known issues and a minimum retention time of zero is not + * supported at this time. + * + * <pre> + * junit.framework.AssertionFailedError: Test failed: firstCause=java.lang.RuntimeException: java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220}, retentionMillis=0, nreaderThreads=19, nwriters=100, nreaders=400, indexManager=com.bigdata.journal.Journal@327556d1 + * at junit.framework.TestCase2.fail(TestCase2.java:90) + * at com.bigdata.rdf.sail.TestMROWTransactions.domultiple_csem_transaction2(TestMROWTransactions.java:237) + * at com.bigdata.rdf.sail.TestMROWTransactionsNoHistory.test_multiple_csem_transaction_no_history_stress(TestMROWTransactionsNoHistory.java:66) + * at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) + * at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) + * at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) + * at java.lang.reflect.Method.invoke(Method.java:606) + * at junit.framework.TestCase.runTest(TestCase.java:154) + * at junit.framework.TestCase.runBare(TestCase.java:127) + * at junit.framework.TestResult$1.protect(TestResult.java:106) + * at junit.framework.TestResult.runProtected(TestResult.java:124) + * at junit.framework.TestResult.run(TestResult.java:109) + * at junit.framework.TestCase.run(TestCase.java:118) + * at junit.framework.TestSuite.runTest(TestSuite.java:208) + * at junit.framework.TestSuite.run(TestSuite.java:203) + * at org.eclipse.jdt.internal.junit.runner.junit3.JUnit3TestReference.run(JUnit3TestReference.java:130) + * at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38) + * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467) + * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683) + * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390) + * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197) + * Caused by: java.lang.RuntimeException: 
java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220} + * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1861) + * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1722) + * at com.bigdata.rdf.store.AbstractTripleStore.getAccessPath(AbstractTripleStore.java:2868) + * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3534) + * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3470) + * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3433) + * at com.bigdata.rdf.sail.TestMROWTransactions$Reader.call(TestMROWTransactions.java:404) + * at com.bigdata.rdf.sail.TestMROWTransactions$Reader.call(TestMROWTransactions.java:1) + * at java.util.concurrent.FutureTask.run(FutureTask.java:262) + * at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) + * at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) + * at java.lang.Thread.run(Thread.java:745) + * Caused by: java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220} + * at com.bigdata.btree.Checkpoint.loadFromCheckpoint(Checkpoint.java:756) + * at com.bigdata.journal.AbstractJournal.getIndexWithCheckpointAddr(AbstractJournal.java:5288) + * at com.bigdata.journal.AbstractJournal.getIndexWithCommitRecord(AbstractJournal.java:5135) + * at com.bigdata.journal.AbstractJournal.getIndexLocal(AbstractJournal.java:5005) + * at com.bigdata.journal.AbstractJournal.getIndex(AbstractJournal.java:4897) + * at com.bigdata.journal.Journal.getIndexSources(Journal.java:2656) + * at com.bigdata.journal.Journal.getIndex(Journal.java:2892) + * at com.bigdata.journal.Journal.getIndex(Journal.java:1) + * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:238) + * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:198) + * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:166) + * at com.bigdata.rdf.lexicon.LexiconRelation.getTerm2IdIndex(LexiconRelation.java:984) + * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1857) + * ... 11 more + * Caused by: java.lang.RuntimeException: addr=-8196 : cause=com.bigdata.util.ChecksumError: offset=852288,nbytes=224,expected=721420255,actual=-1747893185 + * at com.bigdata.rwstore.RWStore.getData(RWStore.java:1899) + * at com.bigdata.journal.RWStrategy.readFromLocalStore(RWStrategy.java:727) + * at com.bigdata.journal.RWStrategy.read(RWStrategy.java:154) + * at com.bigdata.journal.AbstractJournal.read(AbstractJournal.java:4043) + * at com.bigdata.btree.Checkpoint.load(Checkpoint.java:575) + * at com.bigdata.btree.Checkpoint.loadFromCheckpoint(Checkpoint.java:754) + * ... 
23 more + * Caused by: com.bigdata.util.ChecksumError: offset=852288,nbytes=224,expected=721420255,actual=-1747893185 + * at com.bigdata.io.writecache.WriteCacheService._readFromLocalDiskIntoNewHeapByteBuffer(WriteCacheService.java:3706) + * at com.bigdata.io.writecache.WriteCacheService._getRecord(WriteCacheService.java:3521) + * at com.bigdata.io.writecache.WriteCacheService.access$1(WriteCacheService.java:3493) + * at com.bigdata.io.writecache.WriteCacheService$1.compute(WriteCacheService.java:3358) + * at com.bigdata.io.writecache.WriteCacheService$1.compute(WriteCacheService.java:1) + * at com.bigdata.util.concurrent.Memoizer$1.call(Memoizer.java:77) + * at java.util.concurrent.FutureTask.run(FutureTask.java:262) + * at com.bigdata.util.concurrent.Memoizer.compute(Memoizer.java:92) + * at com.bigdata.io.writecache.WriteCacheService.loadRecord(WriteCacheService.java:3463) + * at com.bigdata.io.writecache.WriteCacheService.read(WriteCacheService.java:3182) + * at com.bigdata.rwstore.RWStore.getData(RWStore.java:1890) + * ... 28 more + * </pre> + * + * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child + * does not have persistent identity </a>. + */ + // Note: This test is disabled since there are known issues when retentionMillis:=0. + public void _test_multiple_csem_transaction_no_history_stress() throws Exception { - final Random r = new Random(); - - for (int i = 0; i < 10; i++) { + final Random r = new Random(); + + for (int i = 0; i < 10; i++) { - final int nreaderThreads = r.nextInt(19) + 1; - - log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads); + final int nreaderThreads = r.nextInt(19) + 1; + + log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads); - domultiple_csem_transaction2(0/* retentionMillis */, - nreaderThreads, 20/* nwriters */, 400/* nreaders */); + domultiple_csem_transaction2(0/* retentionMillis */, + nreaderThreads, 100/* nwriters */, 400/* nreaders */, false/* isolatableIndices */); - } - - } - -// public void notest_stress_multiple_csem_transaction_nohistory() throws Exception { -// -// final int retentionMillis = 0; -// -// for (int i = 0; i< 50; i++) { -// -// domultiple_csem_transaction2(retentionMillis, 2/* nreaderThreads */, -// 1000/* nwriters */, 20 * 1000/* nreaders */); -// -// } -// -// } -// -// public void test_multiple_csem_transaction_onethread_nohistory() throws Exception { -// -// domultiple_csem_transaction_onethread(0); -// -// } -// -//// Open a read committed transaction -// //do reads -// //do write without closing read -// //commit write -// //close read -// //repeat -// public void notest_multiple_csem_transaction_onethread_nohistory_debug() throws Exception { -// PseudoRandom r = new PseudoRandom(2000); -// -// for (int run = 0; run < 200; run++) { -// final int uris = 1 + r.nextInt(599); -// final int preds = 1 + r.nextInt(49); -// try { -// System.err.println("Testing with " + uris + " uris, " + preds + " preds"); -// domultiple_csem_transaction_onethread(0, uris, preds); -// } catch (Exception e) { -// System.err.println("problem with " + uris + " uris, " + preds + " preds"); -// throw e; -// } -// } -// } + } + + } + + public void test_multiple_csem_transaction_no_history_stress_readWriteTx() + throws Exception { + + final Random r = new Random(); + + for (int i = 0; i < 10; i++) { + + final int nreaderThreads = r.nextInt(19) + 1; + + log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads); + + domultiple_csem_transaction2(0/* retentionMillis */, + nreaderThreads, 100/* nwriters */, 
400/* nreaders */, true/* isolatableIndices */); + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java 201... [truncated message content] |
From: <tho...@us...> - 2014-06-04 10:55:09
Revision: 8444 http://sourceforge.net/p/bigdata/code/8444 Author: thompsonbry Date: 2014-06-04 10:55:01 +0000 (Wed, 04 Jun 2014) Log Message: ----------- Disabled the unbuffered rules.log file by default. This addresses an often reported issue with file system permissions and also a potential bottleneck for truth maintenance on small transactions. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_4/bigdata-war/src/resources/log4j.properties Modified: branches/BIGDATA_RELEASE_1_2_4/bigdata-war/src/resources/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_2_4/bigdata-war/src/resources/log4j.properties 2014-06-03 23:26:41 UTC (rev 8443) +++ branches/BIGDATA_RELEASE_1_2_4/bigdata-war/src/resources/log4j.properties 2014-06-04 10:55:01 UTC (rev 8444) @@ -35,8 +35,8 @@ log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n ## -# Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +# Rule execution log. Uncomment the next line to enable. This is a formatted log file (comma delimited). +# log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
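Deployments that still want the rule execution log can turn it back on by uncommenting the logger line shown above; pointing the appender at a directory the servlet container can actually write to avoids the file permission issue mentioned in the log message. A minimal sketch (the file location below is illustrative, not a project default):

    # re-enable the comma-delimited rule execution log
    log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
    # illustrative path; choose any directory writable by the JVM
    log4j.appender.ruleLog.File=/var/log/bigdata/rules.log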
From: <tob...@us...> - 2014-06-03 23:26:49
Revision: 8443 http://sourceforge.net/p/bigdata/code/8443 Author: tobycraig Date: 2014-06-03 23:26:41 +0000 (Tue, 03 Jun 2014) Log Message: ----------- #938 - Added LBS support to workbench Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-06-03 14:58:09 UTC (rev 8442) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-06-03 23:26:41 UTC (rev 8443) @@ -1,7 +1,8 @@ $(function() { // global variables -var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; +var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; +var RW_URL_PREFIX = '/bigdata/LBS/leader/', RO_URL_PREFIX = '/bigdata/LBS/read/'; var CODEMIRROR_DEFAULTS, EDITORS = {}, ERROR_LINE_MARKERS = {}, ERROR_CHARACTER_MARKERS = {}; var PAGE_SIZE = 50, TOTAL_PAGES, CURRENT_PAGE; var NAMESPACE_PARAMS = { @@ -100,7 +101,7 @@ /* Namespaces */ function getNamespaces() { - $.get('/bigdata/namespace?describe-each-named-graph=false', function(data) { + $.get(RO_URL_PREFIX + 'namespace?describe-each-named-graph=false', function(data) { $('#namespaces-list').empty(); var rdf = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'; var namespaces = namespaces = data.getElementsByTagNameNS(rdf, 'Description') @@ -114,11 +115,11 @@ } else { use = '<a href="#" class="use-namespace">Use</a>'; } - $('#namespaces-list').append('<li data-name="' + title + '" data-url="' + url + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> (Download <a href="/bigdata/namespace/' + title + '/properties" download="' + title + '.xml">XML</a>/<a href="#" class="namespace-properties-java">Java</a>) - <a href="#" class="clone-namespace">Clone</a> - <a href="/bigdata/namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); + $('#namespaces-list').append('<li data-name="' + title + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> - <a href="' + RO_URL_PREFIX + 'namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); } $('.use-namespace').click(function(e) { e.preventDefault(); - useNamespace($(this).parent().data('name'), $(this).parent().data('url')); + useNamespace($(this).parent().data('name')); }); $('.delete-namespace').click(function(e) { e.preventDefault(); @@ -152,10 +153,9 @@ } } -function useNamespace(name, url) { +function useNamespace(name) { $('#current-namespace').html(name); NAMESPACE = name; - NAMESPACE_URL = url; getNamespaces(); } @@ -170,7 +170,7 @@ if(namespace == NAMESPACE) { // FIXME: what is the desired behaviour when deleting the current namespace? 
} - var url = '/bigdata/namespace/' + namespace; + var url = RW_URL_PREFIX + 'namespace/' + namespace; var settings = { type: 'DELETE', success: getNamespaces, @@ -181,7 +181,7 @@ } function getNamespaceProperties(namespace, download) { - var url = '/bigdata/namespace/' + namespace + '/properties'; + var url = RO_URL_PREFIX + 'namespace/' + namespace + '/properties'; if(!download) { $('#namespace-properties h1').html(namespace); $('#namespace-properties table').empty(); @@ -203,7 +203,7 @@ } function cloneNamespace(namespace) { - var url = '/bigdata/namespace/' + namespace + '/properties'; + var url = RO_URL_PREFIX + 'namespace/' + namespace + '/properties'; $.get(url, function(data) { var reversed_params = {}; for(var key in NAMESPACE_PARAMS) { @@ -247,17 +247,17 @@ success: function() { $('#new-namespace-name').val(''); getNamespaces(); }, error: function(jqXHR, textStatus, errorThrown) { debugger;alert(jqXHR.responseText); } }; - $.ajax('/bigdata/namespace', settings); + $.ajax(RW_URL_PREFIX + 'namespace', settings); } $('#namespace-create').submit(createNamespace); function getDefaultNamespace() { - $.get('/bigdata/namespace?describe-each-named-graph=false&describe-default-namespace=true', function(data) { + $.get(RO_URL_PREFIX + 'namespace?describe-each-named-graph=false&describe-default-namespace=true', function(data) { // Chrome does not work with rdf\:Description, so look for Description too var defaultDataset = $(data).find('rdf\\:Description, Description'); DEFAULT_NAMESPACE = defaultDataset.find('title')[0].textContent; var url = defaultDataset.find('sparqlEndpoint')[0].attributes['rdf:resource'].textContent; - useNamespace(DEFAULT_NAMESPACE, url); + useNamespace(DEFAULT_NAMESPACE); }); } @@ -514,6 +514,7 @@ $('#update-response').show(); + var url = RW_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', data: FILE_CONTENTS == null ? 
EDITORS.update.getValue() : FILE_CONTENTS, @@ -528,7 +529,7 @@ if($('#update-monitor').is(':checked')) { // create form and submit it, sending output to the iframe var form = $('<form method="POST" target="update-response-container">') - .attr('action', NAMESPACE_URL) + .attr('action', url) .append($('<input name="update">').val(settings.data)) .append('<input name="monitor" value="true">'); if($('#update-analytic').is(':checked')) { @@ -564,7 +565,7 @@ $('#update-response pre').show().html('Data loading...'); - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); } $('#update-clear').click(function() { @@ -639,6 +640,7 @@ return; } + var url = RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', data: $('#query-form').serialize(), @@ -650,7 +652,7 @@ $('#query-response').show().html('Query running...'); $('#query-pagination').hide(); - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); $('#query-explanation').empty(); if($('#query-explain').is(':checked')) { @@ -661,7 +663,7 @@ success: showQueryExplanation, error: queryResultsError }; - $.ajax(NAMESPACE_URL, settings); + $.ajax(url, settings); } else { $('#query-explanation').hide(); } @@ -719,7 +721,7 @@ success: function() { downloadFile(data, dataType, filename); }, error: downloadRDFError }; - $.ajax('/bigdata/sparql?workbench&convert', settings); + $.ajax(RO_URL_PREFIX + 'sparql?workbench&convert', settings); } else { // not RDF export_extensions[dataType][3](filename); @@ -1100,7 +1102,7 @@ success: updateExploreStart, error: updateExploreError }; - $.ajax(NAMESPACE_URL, settings); + $.ajax(RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql', settings); } function updateExploreStart(data) { @@ -1227,7 +1229,7 @@ if(e) { e.preventDefault(); } - $.get('/bigdata/status', function(data) { + $.get(RO_URL_PREFIX + 'status', function(data) { // get data inside a jQuery object data = $('<div>').append(data); getStatusNumbers(data); @@ -1251,7 +1253,7 @@ }); function showQueries(details) { - var url = '/bigdata/status?showQueries'; + var url = RO_URL_PREFIX + 'status?showQueries'; if(details) { url += '=details'; } @@ -1303,7 +1305,7 @@ e.preventDefault(); if(confirm('Cancel query?')) { var id = $(this).data('queryId'); - $.post('/bigdata/status?cancelQuery&queryId=' + id, function() { getStatus(); }); + $.post(RW_URL_PREFIX + 'status?cancelQuery&queryId=' + id, function() { getStatus(); }); $(this).parents('li').remove(); } } @@ -1311,7 +1313,7 @@ function getQueryDetails(e) { e.preventDefault(); var id = $(this).data('queryId'); - $.ajax({url: '/bigdata/status?showQueries=details&queryId=' + id, + $.ajax({url: RO_URL_PREFIX + 'status?showQueries=details&queryId=' + id, success: function(data) { // get data inside a jQuery object data = $('<div>').append(data); @@ -1332,7 +1334,7 @@ /* Performance */ $('#tab-selector a[data-target=performance]').click(function(e) { - $.get('/bigdata/counters', function(data) { + $.get(RO_URL_PREFIX + 'counters', function(data) { $('#performance-tab .box').html(data); }); }); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
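A non-workbench client can follow the same convention the workbench now uses: reads go through the load balancer's read path, mutations through the leader path. The sketch below is illustrative only (it is not part of this commit); "host", the port, and the "kb" namespace are placeholders, and the /bigdata/LBS/... prefixes assume the HA load balancer servlet is deployed.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    public class LbsUrlSketch {
        // read path: any joined service; leader path: the quorum leader
        static final String RO = "http://host:8080/bigdata/LBS/read/";
        static final String RW = "http://host:8080/bigdata/LBS/leader/";

        public static void main(String[] args) throws Exception {
            // Read: SPARQL query against the "kb" namespace via the read path.
            final String q = URLEncoder.encode("SELECT * { ?s ?p ?o } LIMIT 1", "UTF-8");
            final HttpURLConnection read = (HttpURLConnection) new URL(
                    RO + "namespace/kb/sparql?query=" + q).openConnection();
            System.out.println("query status: " + read.getResponseCode());

            // Write: SPARQL UPDATE against the same namespace via the leader path.
            final HttpURLConnection write = (HttpURLConnection) new URL(
                    RW + "namespace/kb/sparql").openConnection();
            write.setRequestMethod("POST");
            write.setDoOutput(true);
            write.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
            final OutputStream os = write.getOutputStream();
            os.write(("update=" + URLEncoder.encode(
                    "INSERT DATA { <http://example.org/s> <http://example.org/p> \"o\" }",
                    "UTF-8")).getBytes("UTF-8"));
            os.close();
            System.out.println("update status: " + write.getResponseCode());
        }
    }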
From: <tho...@us...> - 2014-06-03 14:58:12
Revision: 8442 http://sourceforge.net/p/bigdata/code/8442 Author: thompsonbry Date: 2014-06-03 14:58:09 +0000 (Tue, 03 Jun 2014) Log Message: ----------- Documentation for the start-bigdata ant task. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-06-03 14:40:41 UTC (rev 8441) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-06-03 14:58:09 UTC (rev 8442) @@ -2628,7 +2628,7 @@ </java> </target> - <target name="start-bigdata" depends="compile" description="Start the Bigdata Server."> + <target name="start-bigdata" depends="compile" description="Start the Bigdata Server (triples mode)."> <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" failonerror="true" fork="true" logerror="true"> <classpath refid="runtime.classpath" /> <jvmarg value="-server"/> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
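For reference, the documented target is invoked from the directory containing build.xml as "ant start-bigdata"; the "(triples mode)" qualifier added to the description signals that a server launched this way runs with a triples-mode (rather than quads) configuration by default.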
From: <tho...@us...> - 2014-06-03 14:40:48
Revision: 8441 http://sourceforge.net/p/bigdata/code/8441 Author: thompsonbry Date: 2014-06-03 14:40:41 +0000 (Tue, 03 Jun 2014) Log Message: ----------- Redefined the status code for the create of a pre-existing namespace operation as 409 (Conflict). Was previously 400 (Bad request). Redefined the status code for the success of the create namespace operation as 201 (Created). Was previously 200 (Ok). The REST API has been updated to make the expected status codes explicit (they were not previously specified). Updated the test suite to check the new status code for a pre-existing namespace. See #971 (Clarify HTTP Status codes for CREATE NAMESPACE operation) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-03 12:59:47 UTC (rev 8440) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-03 14:40:41 UTC (rev 8441) @@ -355,9 +355,12 @@ if (tripleStore != null) { /* - * Already exists. + * The namespace already exists. + * + * Note: The response code is defined as 409 (Conflict) since + * 1.3.2. */ - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + buildResponse(resp, HttpServletResponse.SC_CONFLICT, MIME_TEXT_PLAIN, "EXISTS: " + namespace); return; } @@ -395,12 +398,15 @@ } - buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN, "CREATED: " - + namespace); + /* + * Note: The response code is defined as 201 (Created) since 1.3.2. + */ + buildResponse(resp, HttpServletResponse.SC_CREATED, + MIME_TEXT_PLAIN, "CREATED: " + namespace); } catch (Throwable e) { - throw launderThrowable(e, resp, ""); + throw launderThrowable(e, resp, "namespace=" + namespace); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2014-06-03 12:59:47 UTC (rev 8440) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2014-06-03 14:40:41 UTC (rev 8441) @@ -10,6 +10,8 @@ import java.util.TreeMap; import java.util.UUID; +import javax.servlet.http.HttpServletResponse; + import org.openrdf.model.Graph; import org.openrdf.model.Literal; import org.openrdf.model.Resource; @@ -346,11 +348,11 @@ m_repo.createRepository(namespace2, properties); - fail("Expecting: " + BigdataServlet.HTTP_BADREQUEST); + fail("Expecting: " + HttpServletResponse.SC_CONFLICT); } catch (HttpException ex) { - assertEquals(BigdataServlet.HTTP_BADREQUEST, ex.getStatusCode()); + assertEquals(HttpServletResponse.SC_CONFLICT, ex.getStatusCode()); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
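With this change a REST client can distinguish "namespace created" from "namespace already exists" by status code alone. The sketch below is not from the commit; host, port and the namespace name are placeholders, and the payload is the same properties XML format the workbench sends to the /bigdata/namespace endpoint.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CreateNamespaceSketch {
        public static void main(String[] args) throws Exception {
            final String props =
                  "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
                + "<!DOCTYPE properties SYSTEM \"http://java.sun.com/dtd/properties.dtd\">\n"
                + "<properties>\n"
                + "<entry key=\"com.bigdata.rdf.sail.namespace\">myNamespace</entry>\n"
                + "</properties>\n";

            // Placeholder endpoint; adjust host/port for the actual deployment.
            final HttpURLConnection conn = (HttpURLConnection) new URL(
                    "http://host:8080/bigdata/namespace").openConnection();
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "application/xml");
            final OutputStream os = conn.getOutputStream();
            os.write(props.getBytes("UTF-8"));
            os.close();

            final int rc = conn.getResponseCode();
            if (rc == 201) {
                System.out.println("created (SC_CREATED, was 200 before 1.3.2)");
            } else if (rc == 409) {
                System.out.println("already exists (SC_CONFLICT, was 400 before 1.3.2)");
            } else {
                System.out.println("unexpected status: " + rc);
            }
        }
    }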
From: <tho...@us...> - 2014-06-03 12:59:53
Revision: 8440 http://sourceforge.net/p/bigdata/code/8440 Author: thompsonbry Date: 2014-06-03 12:59:47 +0000 (Tue, 03 Jun 2014) Log Message: ----------- javadoc Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-06-03 00:31:26 UTC (rev 8439) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-06-03 12:59:47 UTC (rev 8440) @@ -328,6 +328,8 @@ snapshotIndex = SnapshotIndex.createTransient(); + // Note: Caller MUST invoke init() Callable. + } @Override @@ -340,7 +342,11 @@ /** * Task that is used to initialize the {@link SnapshotManager}. * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start() + * (optimization) </a> */ private class InitTask implements Callable<Void> { @@ -367,9 +373,6 @@ private void doRunWithLock() throws IOException, InterruptedException, ExecutionException { - if (log.isInfoEnabled()) - log.info("Starting cleanup."); - /* * Delete any temporary files that were left lying around in the * snapshot directory. @@ -381,6 +384,9 @@ * the times for these different scans so I can get a better sense * of the latencies involved. */ + if (log.isInfoEnabled()) + log.info("Starting cleanup."); + CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, getSnapshotDir(), TEMP_FILE_FILTER); @@ -413,12 +419,29 @@ } /** - * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex} - * from the root blocks in snapshot files found in that directory. + * Scans the {@link SnapshotManager#getSnapshotDir()} and populates the + * {@link SnapshotIndex} from the root blocks in snapshot files found in + * that directory. * * @throws IOException * @throws ExecutionException * @throws InterruptedException + * + * TODO Follow the code pattern for the HALogNexus and + * provide robust error handling for snapshot files. Note + * that snapshots are taken locally based on various + * criteria (including the size of the delta, the #of + * HALogs, etc.). As long as we have all HALogs the services + * should be able to make a purely local decisions about + * what to do if we have a bad snapshot file. One option is + * to force a snapshot when the service starts. That option + * is only available of course if the service can join with + * the quorum. + * <p> + * Note: If the service CAN NOT do a point in time recovery + * because it lacks a combination of valid HALog files and + * snapshots, then a failover to that service will degrade + * the availability of the cluster. */ private void populateIndexRecursive(final LatchedExecutor executor, final File f, final FileFilter fileFilter, final int depth) @@ -474,6 +497,20 @@ /* * Await futures, obtaining snapshot records for the current * leaf directory. + * + * TODO If the root blocks are bad, then this will throw an + * IOException and that will prevent the startup of the + * HAJournalServer. 
However, if we start up the server with + * a known bad snapshot *and* the snapshot is the earliest + * snapshot, then we can not restore commit points which + * depend on that earliest snapshot (we can still restore + * commit points that are GTE the first useable snapshot). + * + * TODO A similar problem exists if any of the HALog files + * GTE the earliest snapshot are missing, have bad root + * blocks, etc. We will not be able to restore the commit + * point associated with that HALog file unless it also + * happens to correspond to a snapshot. */ final List<SnapshotRecord> records = new ArrayList<SnapshotRecord>( children.length); @@ -493,6 +530,22 @@ snapshotIndex.add(r); + final long nentries = snapshotIndex.getEntryCount(); + + if (nentries % 1000 == 0) { + + /* + * Provide an indication that the server is doing + * work during startup (it would be unusual to have + * a lot of snapshot files, but this provides + * symmetry with the HALog startup procedure). + */ + + haLog.warn("Indexed " + nentries + + " snapshot files"); + + } + } } finally { @@ -533,7 +586,7 @@ } - } + } // class InitTask private void ensureSnapshotDirExists() throws IOException { @@ -642,20 +695,6 @@ * if the file can not be read. * @throws ChecksumError * if there is a checksum problem with the root blocks. - * - * TODO If the root blocks are bad, then this will throw an - * IOException and that will prevent the startup of the - * HAJournalServer. However, if we start up the server with a - * known bad snapshot *and* the snapshot is the earliest - * snapshot, then we can not restore commit points which depend - * on that earliest snapshot (we can still restore commit points - * that are GTE the first useable snapshot). - * - * TODO A similar problem exists if any of the HALog files GTE - * the earliest snapshot are missing, have bad root blocks, etc. - * We will not be able to restore the commit point associated - * with that HALog file unless it also happens to correspond to - * a snapshot. */ private SnapshotRecord getSnapshotRecord(final File file) throws IOException { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-06-03 00:31:33
Revision: 8439 http://sourceforge.net/p/bigdata/code/8439 Author: mrpersonick Date: 2014-06-03 00:31:26 +0000 (Tue, 03 Jun 2014) Log Message: ----------- added a JSON parser and writer for construct. Modified Paths: -------------- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONWriterBase.java Added Paths: ----------- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstruct.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstructFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstruct.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstructFactory.java Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-06-02 17:18:22 UTC (rev 8438) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -40,7 +40,9 @@ import com.bigdata.rdf.model.StatementEnum; import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserFactory; +import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserForConstructFactory; import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactory; +import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterForConstructFactory; import com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserFactory; import com.bigdata.rdf.rio.turtle.BigdataTurtleParserFactory; import com.bigdata.rdf.rio.turtle.BigdataTurtleWriterFactory; @@ -125,7 +127,7 @@ * Allows parsing of JSON SPARQL Results with an {s,p,o,[c]} header. * RDR-enabled. 
*/ -// r.add(new BigdataSPARQLResultsJSONParserFactory()); + r.add(new BigdataSPARQLResultsJSONParserForConstructFactory()); } @@ -157,7 +159,7 @@ r.add(new BigdataTurtleWriterFactory()); // RDR-enabled -// r.add(new BigdataSPARQLResultsJSONWriterFactory()); + r.add(new BigdataSPARQLResultsJSONWriterForConstructFactory()); } Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-06-02 17:18:22 UTC (rev 8438) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -48,7 +48,7 @@ */ public class BigdataSPARQLResultsJSONParser extends SPARQLJSONParserBase implements TupleQueryResultParser { - public static final String STATEMENT = "statement"; + public static final String SID = "sid"; public static final String SUBJECT = "subject"; @@ -156,7 +156,7 @@ } // added for Sids support - if (type.equals(STATEMENT)) { + if (type.equals(SID)) { final Resource s = (Resource) sid.get(SUBJECT); final URI p = (URI) sid.get(PREDICATE); Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstruct.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstruct.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstruct.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -0,0 +1,147 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. See the LICENSE.txt file distributed with this work + * for the full License. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ +package com.bigdata.rdf.rio.json; + +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.util.List; + +import org.apache.commons.io.input.ReaderInputStream; +import org.openrdf.model.Resource; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.ValueFactory; +import org.openrdf.query.BindingSet; +import org.openrdf.query.QueryResultHandlerException; +import org.openrdf.query.TupleQueryResultHandler; +import org.openrdf.query.TupleQueryResultHandlerException; +import org.openrdf.query.resultio.QueryResultParseException; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFParseException; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.helpers.RDFParserBase; + +/** + * Parser for SPARQL-1.1 JSON Results Format documents + * + * @see <a href="http://www.w3.org/TR/sparql11-results-json/">SPARQL 1.1 Query + * Results JSON Format</a> + * @author Peter Ansell + */ +public class BigdataSPARQLResultsJSONParserForConstruct extends RDFParserBase + implements RDFParser, TupleQueryResultHandler { + + private final BigdataSPARQLResultsJSONParser parser; + + public BigdataSPARQLResultsJSONParserForConstruct() { + this.parser = new BigdataSPARQLResultsJSONParser(); + } + + public BigdataSPARQLResultsJSONParserForConstruct(final ValueFactory vf) { + this.parser = new BigdataSPARQLResultsJSONParser(vf); + this.parser.setQueryResultHandler(this); + } + + @Override + public RDFFormat getRDFFormat() { + return BigdataSPARQLResultsJSONWriterForConstructFactory.JSON; + } + + @Override + public void parse(InputStream in, String baseURI) throws IOException, + RDFParseException, RDFHandlerException { + try { + parser.parseQueryResult(in); + } catch (QueryResultParseException e) { + throw new RDFParseException(e); + } catch (QueryResultHandlerException e) { + throw new RDFHandlerException(e); + } + } + + @Override + public void parse(Reader reader, String baseURI) throws IOException, + RDFParseException, RDFHandlerException { + parse(new ReaderInputStream(reader), baseURI); + } + + @Override + public void handleBoolean(boolean value) throws QueryResultHandlerException { + // do nothing + } + + @Override + public void handleLinks(List<String> linkUrls) + throws QueryResultHandlerException { + // do nothing + } + + @Override + public void startQueryResult(List<String> bindingNames) + throws TupleQueryResultHandlerException { + try { + getRDFHandler().startRDF(); + } catch (RDFHandlerException e) { + throw new TupleQueryResultHandlerException(e); + } + } + + @Override + public void endQueryResult() throws TupleQueryResultHandlerException { + try { + getRDFHandler().endRDF(); + } catch (RDFHandlerException e) { + throw new TupleQueryResultHandlerException(e); + } + } + + @Override + public void handleSolution(BindingSet bs) + throws TupleQueryResultHandlerException { + + if (!bs.hasBinding("subject")) { + throw new TupleQueryResultHandlerException("no subject: " + bs); + } + if (!bs.hasBinding("predicate")) { + throw new TupleQueryResultHandlerException("no predicate: " + bs); + } + if (!bs.hasBinding("object")) { + throw new TupleQueryResultHandlerException("no object: " + bs); + } + + final Resource s = (Resource) bs.getValue("subject"); + final URI p = (URI) bs.getValue("predicate"); + final Value o = (Value) bs.getValue("object"); + final Resource c = bs.hasBinding("context") ? 
+ (Resource) bs.getBinding("context") : null; + + final Statement stmt = valueFactory.createStatement(s, p, o, c); + + try { + getRDFHandler().handleStatement(stmt); + } catch (RDFHandlerException e) { + throw new TupleQueryResultHandlerException(e); + } + + } + + + +} Property changes on: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstruct.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstructFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstructFactory.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstructFactory.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -0,0 +1,44 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. See the LICENSE.txt file distributed with this work + * for the full License. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package com.bigdata.rdf.rio.json; + +import org.openrdf.query.resultio.TupleQueryResultParserFactory; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParserFactory; + +/** + * A {@link TupleQueryResultParserFactory} for parsers of SPARQL-1.1 JSON Tuple + * Query Results. 
+ * + * @author Peter Ansell + */ +public class BigdataSPARQLResultsJSONParserForConstructFactory implements RDFParserFactory { + + public static final RDFFormat JSON = BigdataSPARQLResultsJSONWriterForConstructFactory.JSON; + + @Override + public RDFParser getParser() { + return new BigdataSPARQLResultsJSONParserForConstruct(); + } + + @Override + public RDFFormat getRDFFormat() { + return JSON; + } + +} Property changes on: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserForConstructFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-06-02 17:18:22 UTC (rev 8438) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -18,13 +18,10 @@ import java.io.IOException; import java.io.OutputStream; +import java.io.Writer; -import org.openrdf.model.BNode; -import org.openrdf.model.Literal; -import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.query.QueryResultHandlerException; -import org.openrdf.query.TupleQueryResultHandlerException; import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.query.resultio.TupleQueryResultWriter; @@ -42,6 +39,10 @@ * Constructors * *--------------*/ + public BigdataSPARQLResultsJSONWriter(Writer writer) { + super(writer); + } + public BigdataSPARQLResultsJSONWriter(OutputStream out) { super(out); } @@ -82,21 +83,21 @@ jg.writeStartObject(); - jg.writeStringField("type", "statement"); + jg.writeStringField("type", BigdataSPARQLResultsJSONParser.SID); final BigdataStatement stmt = sid.getStatement(); - jg.writeFieldName("subject"); + jg.writeFieldName(BigdataSPARQLResultsJSONParser.SUBJECT); writeValue(stmt.getSubject()); - jg.writeFieldName("predicate"); - writeValue(stmt.getSubject()); + jg.writeFieldName(BigdataSPARQLResultsJSONParser.PREDICATE); + writeValue(stmt.getPredicate()); - jg.writeFieldName("object"); - writeValue(stmt.getSubject()); + jg.writeFieldName(BigdataSPARQLResultsJSONParser.OBJECT); + writeValue(stmt.getObject()); if (stmt.getContext() != null) { - jg.writeFieldName("context"); + jg.writeFieldName(BigdataSPARQLResultsJSONParser.CONTEXT); writeValue(stmt.getContext()); } Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstruct.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstruct.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstruct.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -0,0 +1,134 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. 
See the LICENSE.txt file distributed with this work + * for the full License. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package com.bigdata.rdf.rio.json; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.Writer; +import java.util.Arrays; +import java.util.Collection; + +import org.openrdf.model.Statement; +import org.openrdf.query.QueryResultHandlerException; +import org.openrdf.query.TupleQueryResultHandlerException; +import org.openrdf.query.impl.MapBindingSet; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RioSetting; +import org.openrdf.rio.WriterConfig; + +/** + * A TupleQueryResultWriter that writes query results in the <a + * href="http://www.w3.org/TR/rdf-sparql-json-res/">SPARQL Query Results JSON + * Format</a>. + */ +public class BigdataSPARQLResultsJSONWriterForConstruct implements RDFWriter { + + private final BigdataSPARQLResultsJSONWriter writer; + + /*--------------* + * Constructors * + *--------------*/ + + public BigdataSPARQLResultsJSONWriterForConstruct(final Writer writer) { + this.writer = new BigdataSPARQLResultsJSONWriter(writer); + } + + public BigdataSPARQLResultsJSONWriterForConstruct(final OutputStream out) { + this.writer = new BigdataSPARQLResultsJSONWriter(out); + } + + /*---------* + * Methods * + *---------*/ + + @Override + public RDFFormat getRDFFormat() { + return BigdataSPARQLResultsJSONWriterForConstructFactory.JSON; + } + + + @Override + public void startRDF() throws RDFHandlerException { + try { + writer.startDocument(); + writer.startHeader(); + writer.startQueryResult(Arrays.asList(new String[] { + "subject", "predicate", "object", "context" + })); + writer.endHeader(); + } catch (QueryResultHandlerException e) { + throw new RDFHandlerException(e); + } + } + + @Override + public void endRDF() throws RDFHandlerException { + try { + writer.endDocument(); + } catch (IOException e) { + throw new RDFHandlerException(e); + } + } + + @Override + public void handleNamespace(String prefix, String uri) + throws RDFHandlerException { + try { + writer.handleNamespace(prefix, uri); + } catch (QueryResultHandlerException e) { + throw new RDFHandlerException(e); + } + } + + @Override + public void handleStatement(Statement st) throws RDFHandlerException { + final MapBindingSet bs = new MapBindingSet(); + bs.addBinding("subject", st.getSubject()); + bs.addBinding("predicate", st.getPredicate()); + bs.addBinding("object", st.getObject()); + if (st.getContext() != null) + bs.addBinding("context", st.getContext()); + try { + writer.handleSolution(bs); + } catch (TupleQueryResultHandlerException e) { + throw new RDFHandlerException(e); + } + } + + @Override + public void handleComment(String comment) throws RDFHandlerException { + // do nothing + } + + @Override + public void setWriterConfig(WriterConfig config) { + writer.setWriterConfig(config); + } + + @Override + public WriterConfig getWriterConfig() { + return writer.getWriterConfig(); + } + + @Override + public Collection<RioSetting<?>> getSupportedSettings() { + return writer.getSupportedSettings(); + } + +} Property changes on: 
branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstruct.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstructFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstructFactory.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstructFactory.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -0,0 +1,63 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. See the LICENSE.txt file distributed with this work + * for the full License. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package com.bigdata.rdf.rio.json; + +import java.io.OutputStream; +import java.io.Writer; +import java.nio.charset.Charset; +import java.util.Arrays; + +import org.openrdf.query.resultio.TupleQueryResultWriterFactory; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RDFWriterFactory; + + +/** + * A {@link TupleQueryResultWriterFactory} for writers of SPARQL/JSON query + * results. + * + * @author Arjohn Kampman + */ +public class BigdataSPARQLResultsJSONWriterForConstructFactory implements RDFWriterFactory { + +// public static final RDFFormat JSON = new RDFFormat("N-Triples", "text/plain", +// Charset.forName("US-ASCII"), "nt", NO_NAMESPACES, NO_CONTEXTS); + + /** + * SPARQL Query Results JSON Format. 
+ */ + public static final RDFFormat JSON = new RDFFormat("SPARQL/JSON", Arrays.asList( + "application/sparql-results+json", "application/json"), Charset.forName("UTF-8"), Arrays.asList( + "srj", "json"), RDFFormat.NO_NAMESPACES, RDFFormat.SUPPORTS_CONTEXTS); + + + @Override + public RDFFormat getRDFFormat() { + return JSON; + } + + @Override + public RDFWriter getWriter(final Writer writer) { + return new BigdataSPARQLResultsJSONWriterForConstruct(writer); + } + + @Override + public RDFWriter getWriter(final OutputStream out) { + return new BigdataSPARQLResultsJSONWriterForConstruct(out); + } +} Property changes on: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterForConstructFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONWriterBase.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONWriterBase.java 2014-06-02 17:18:22 UTC (rev 8438) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONWriterBase.java 2014-06-03 00:31:26 UTC (rev 8439) @@ -19,6 +19,7 @@ import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; +import java.io.Writer; import java.nio.charset.Charset; import java.util.Collection; import java.util.HashSet; @@ -26,9 +27,6 @@ import java.util.List; import java.util.Set; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; - import org.openrdf.model.BNode; import org.openrdf.model.Literal; import org.openrdf.model.URI; @@ -43,6 +41,9 @@ import org.openrdf.rio.RioSetting; import org.openrdf.rio.helpers.BasicWriterSettings; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; + /** * An abstract class to implement the base functionality for both * SPARQLBooleanJSONWriter and SPARQLResultsJSONWriter. @@ -86,6 +87,15 @@ protected final JsonGenerator jg; + public SPARQLJSONWriterBase(Writer writer) { + try { + jg = JSON_FACTORY.createJsonGenerator(writer); + } + catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + public SPARQLJSONWriterBase(OutputStream out) { try { jg = JSON_FACTORY.createJsonGenerator(new OutputStreamWriter(out, Charset.forName("UTF-8"))); @@ -413,7 +423,7 @@ // Ignored by SPARQLJSONWriterBase } - protected void endDocument() + public void endDocument() throws IOException { jg.writeEndObject(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
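A minimal usage sketch for the new CONSTRUCT-oriented writer follows. It is illustrative only: the example statement and writing to System.out are assumptions, and in the server the writer is normally selected through the RIO service registry (see the ServiceProviderHook change above) rather than instantiated directly.

    import org.openrdf.model.Statement;
    import org.openrdf.model.ValueFactory;
    import org.openrdf.model.impl.ValueFactoryImpl;

    import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterForConstruct;

    public class JsonConstructSketch {
        public static void main(String[] args) throws Exception {
            final ValueFactory vf = ValueFactoryImpl.getInstance();
            final Statement stmt = vf.createStatement(
                    vf.createURI("http://example.org/s"),
                    vf.createURI("http://example.org/p"),
                    vf.createLiteral("o"));

            final BigdataSPARQLResultsJSONWriterForConstruct writer =
                    new BigdataSPARQLResultsJSONWriterForConstruct(System.out);
            writer.startRDF();            // writes the subject/predicate/object/context header
            writer.handleStatement(stmt); // each statement becomes one JSON solution row
            writer.endRDF();              // closes the JSON document
        }
    }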
From: <tob...@us...> - 2014-06-02 17:18:24
Revision: 8438 http://sourceforge.net/p/bigdata/code/8438 Author: tobycraig Date: 2014-06-02 17:18:22 +0000 (Mon, 02 Jun 2014) Log Message: ----------- #960 & #961 - Fixed wrong variable names Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-06-02 17:09:07 UTC (rev 8437) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-06-02 17:18:22 UTC (rev 8438) @@ -7,7 +7,7 @@ var NAMESPACE_PARAMS = { 'name': 'com.bigdata.rdf.sail.namespace', 'index': 'com.bigdata.search.FullTextIndex.fieldsEnabled', - 'truth-maintenance': 'com.bigdata.rdf.sail.truthMaintenance', + 'truthMaintenance': 'com.bigdata.rdf.sail.truthMaintenance', 'quads': 'com.bigdata.rdf.store.AbstractTripleStore.quads' }; @@ -237,7 +237,7 @@ // TODO: allow for other options to be specified var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n<properties>\n'; for(key in NAMESPACE_PARAMS) { - data += '<entry key="' + keys[key] + '">' + params[key] + '</entry>\n'; + data += '<entry key="' + NAMESPACE_PARAMS[key] + '">' + params[key] + '</entry>\n'; } data += '</properties>'; var settings = { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-06-02 17:09:13
Revision: 8437
          http://sourceforge.net/p/bigdata/code/8437
Author:   tobycraig
Date:     2014-06-02 17:09:07 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
#960 & #961 - Added namespace properties export and namespace clone functionality

Modified Paths:
--------------
    branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/index.html
    branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js

Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/index.html
===================================================================
--- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/index.html	2014-06-02 16:43:56 UTC (rev 8436)
+++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/index.html	2014-06-02 17:09:07 UTC (rev 8437)
@@ -204,7 +204,14 @@
       </div>

       <div class="box">
-         <form id="namespace-create"><input type="text"> <input type="submit" value="Create namespace"></form>
+         <h1>Create namespace</h1>
+         <form id="namespace-create">
+            <label for="new-namespace-name">Name:</label> <input type="text" id="new-namespace-name"><br>
+            <label for="new-namespace-index">Index:</label> <input type="checkbox" id="new-namespace-index"><br>
+            <label for="new-namespace-truth-maintenance">Truth maintenance:</label> <input type="checkbox" id="new-namespace-truth-maintenance"><br>
+            <label for="new-namespace-quads">Quads:</label> <input type="checkbox" id="new-namespace-quads"><br>
+            <input type="submit" value="Create namespace">
+         </form>
       </div>

    </div>

Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js	2014-06-02 16:43:56 UTC (rev 8436)
+++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js	2014-06-02 17:09:07 UTC (rev 8437)
@@ -4,7 +4,14 @@
 var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS;
 var CODEMIRROR_DEFAULTS, EDITORS = {}, ERROR_LINE_MARKERS = {}, ERROR_CHARACTER_MARKERS = {};
 var PAGE_SIZE = 50, TOTAL_PAGES, CURRENT_PAGE;
+var NAMESPACE_PARAMS = {
+  'name': 'com.bigdata.rdf.sail.namespace',
+  'index': 'com.bigdata.search.FullTextIndex.fieldsEnabled',
+  'truth-maintenance': 'com.bigdata.rdf.sail.truthMaintenance',
+  'quads': 'com.bigdata.rdf.store.AbstractTripleStore.quads'
+};
+

 CODEMIRROR_DEFAULTS = {
   lineNumbers: true,
   mode: 'sparql',
@@ -195,22 +202,50 @@
   });
 }

+function cloneNamespace(namespace) {
+  var url = '/bigdata/namespace/' + namespace + '/properties';
+  $.get(url, function(data) {
+    var reversed_params = {};
+    for(var key in NAMESPACE_PARAMS) {
+      reversed_params[NAMESPACE_PARAMS[key]] = key;
+    }
+    $.each(data.getElementsByTagName('entry'), function(i, entry) {
+      var key = entry.getAttribute('key');
+      if(reversed_params[key] == 'name') {
+        return;
+      }
+      if(key in reversed_params) {
+        $('#new-namespace-' + reversed_params[key]).prop('checked', entry.textContent.trim() == 'true');
+      }
+    });
+    $('#new-namespace-name').focus();
+  });
+}
+
 function createNamespace(e) {
   e.preventDefault();
-  var input = $(this).find('input[type=text]');
-  var namespace = input.val();
-  if(!namespace) {
+  // get new namespace name and config options
+  var params = {};
+  params.name = $('#new-namespace-name').val().trim();
+  if(!params.name) {
     return;
   }
+  params.index = $('#new-namespace-index').is(':checked');
+  params.truthMaintenance = $('#new-namespace-truth-maintenance').is(':checked');
+  params.quads = $('#new-namespace-quads').is(':checked');
   // TODO: validate namespace
   // TODO: allow for other options to be specified
-  var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n<properties>\n<entry key="com.bigdata.rdf.sail.namespace">' + namespace + '</entry>\n</properties>';
+  var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n<properties>\n';
+  for(key in NAMESPACE_PARAMS) {
+    data += '<entry key="' + keys[key] + '">' + params[key] + '</entry>\n';
+  }
+  data += '</properties>';
   var settings = {
     type: 'POST',
     data: data,
     contentType: 'application/xml',
-    success: function() { input.val(''); getNamespaces(); },
-    error: function(jqXHR, textStatus, errorThrown) { alert(jqXHR.statusText); }
+    success: function() { $('#new-namespace-name').val(''); getNamespaces(); },
+    error: function(jqXHR, textStatus, errorThrown) { debugger;alert(jqXHR.responseText); }
   };
   $.ajax('/bigdata/namespace', settings);
 }
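As committed, the properties-building loop in createNamespace refers to keys[key], an identifier that is not defined anywhere in this revision, and the debugger; statement left in the error handler looks like stray debugging code. A minimal sketch of what that loop was presumably meant to do; buildNamespaceProperties is a hypothetical helper, not part of r8437, and it assumes the caller supplies values keyed like NAMESPACE_PARAMS itself ('name', 'index', 'truth-maintenance', 'quads'), which the committed params object (params.truthMaintenance) does not quite match:

{{{
// Sketch only: assumes the loop was meant to read from NAMESPACE_PARAMS.
function buildNamespaceProperties(params) {
  var data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n' +
    '<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">\n' +
    '<properties>\n';
  for(var key in NAMESPACE_PARAMS) {
    // e.g. <entry key="com.bigdata.rdf.store.AbstractTripleStore.quads">true</entry>
    data += '<entry key="' + NAMESPACE_PARAMS[key] + '">' + params[key] + '</entry>\n';
  }
  return data + '</properties>';
}
}}}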
From: <tho...@us...> - 2014-06-02 16:44:02
Revision: 8436 http://sourceforge.net/p/bigdata/code/8436 Author: thompsonbry Date: 2014-06-02 16:43:56 +0000 (Mon, 02 Jun 2014) Log Message: ----------- See #966 (Failed to get namespace list under concurrent update) Martyn and I worked through the REST API transaction semantics and have found and fixed a few issues. He is going to continue a review to normalize: - use of launderThrowable() - patterns for try/finally for methods that perform mutations. The desired pattern looks like this: {{{ } finally { if (conn != null) { if (!success) conn.rollback(); conn.close(); } } } catch (Throwable t) { throw BigdataRDFServlet.launderThrowable(t, resp, ""/*summary-of-REST_API_CALL*/); } }}} This commit includes the following changes: - DefaultResourceLocator: identified and marked a possible hotspot. - GlobalRowStoreHelper: get(timestamp) now invokes getGlobalRowStore() when timestamp==ITx.UNISOLATED. getGlobalRowStore() has implicit creation semantics for the GRS. This way the two methods have the same semantics for that timestamp. - AbstractTripleStore: @Override annotations. - TestLocalTripleStoreDestroy: modified to check post-conditions after calling tripleStore.commit() - BigdataSail.createLTS(): fixed issues with some abnormal code paths which could leave the global semaphore or the write lock held and thus block further updates against the DB/SAIL. Webapp: - MultiTenancyServlet: fixed some issues with failure to hold a transaction open across the operation that was the root cause of this ticket. - Documentation and throwable handling fixes to several servlets. Martyn will continue to work on this aspect of the ticket. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:41:49 UTC (rev 8435) +++ 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -482,7 +482,7 @@ protected Properties locateResource(final String namespace, final long timestamp, final AtomicReference<IIndexManager> foundOn) { - synchronized (seeAlso) { + synchronized (seeAlso) { // FIXME Probably a read/write lock since [seeAlso] normally empty. for (IIndexManager indexManager : seeAlso.keySet()) { @@ -1126,7 +1126,7 @@ * * @see #locateResource(String) */ - public void add(IIndexManager indexManager) { + public void add(final IIndexManager indexManager) { if (indexManager == null) throw new IllegalArgumentException(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -178,6 +178,13 @@ if (log.isInfoEnabled()) log.info(TimestampUtility.toString(timestamp)); + if (timestamp == ITx.UNISOLATED) { + + /* This version does an implicit create if the GRS does not exist. */ + return getGlobalRowStore(); + + } + final IIndex ndx; /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -1730,7 +1730,8 @@ } - public void destroy() { + @Override + final public void destroy() { assertWritable(); @@ -2142,6 +2143,7 @@ * @throws IllegalStateException * if the view is read only. */ + @Override public long commit() { if (isReadOnly()) @@ -2163,6 +2165,7 @@ } + @Override final public long getTermCount() { long rangeCount = 0L; @@ -2175,6 +2178,7 @@ } + @Override final public long getURICount() { long rangeCount = 0L; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreDestroy.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -41,6 +41,7 @@ import com.bigdata.relation.RelationSchema; import com.bigdata.relation.locator.DefaultResourceLocator; import com.bigdata.sparse.ITPS; +import com.bigdata.sparse.SparseRowStore; /** * Test suite to verify the semantics of destroying a {@link LocalTripleStore}, @@ -94,12 +95,19 @@ try { + final long lastCommitTime = store.getIndexManager().getLastCommitTime(); + // Note: Will be in lexical order for Unicode. - final String[] namespaces = getNamespaces(indexManager).toArray( - new String[] {}); + assertEquals( + new String[] { namespace }, + getNamespaces(indexManager, ITx.UNISOLATED).toArray( + new String[] {})); + // Note found before the create. 
+ assertEquals( + new String[] {}, + getNamespaces(indexManager, lastCommitTime - 1).toArray( + new String[] {})); - assertEquals(new String[] { namespace }, namespaces); - assertTrue(store == indexManager.getResourceLocator().locate( store.getNamespace(), ITx.UNISOLATED)); assertTrue(store.getLexiconRelation() == indexManager @@ -118,9 +126,16 @@ */ store.destroy(); + // Did not go through a commit on the LTS. + assertEquals(lastCommitTime, store.getIndexManager() + .getLastCommitTime()); + // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager, ITx.UNISOLATED).isEmpty()); + // but not in the last commited view. + assertFalse(getNamespaces(indexManager, lastCommitTime).isEmpty()); + // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( namespace, ITx.UNISOLATED)); @@ -134,7 +149,19 @@ ITx.UNISOLATED)); assertNull(indexManager.getIndex(primaryStatementIndexName, ITx.UNISOLATED)); + // but not at the last commit time. + assertNotNull(indexManager.getIndex(primaryStatementIndexName, + lastCommitTime)); + + /* + * Commit. + */ + store.commit(); + // No longer present at the last commit time. + assertTrue(getNamespaces(indexManager, + store.getIndexManager().getLastCommitTime()).isEmpty()); + } finally { indexManager.destroy(); @@ -175,8 +202,8 @@ store.addTerm(store.getValueFactory().createLiteral("bigdata")); // Note: Will be in lexical order for Unicode. - final String[] namespaces = getNamespaces(indexManager).toArray( - new String[] {}); + final String[] namespaces = getNamespaces(indexManager, + ITx.UNISOLATED).toArray(new String[] {}); assertEquals(new String[] { namespace }, namespaces); @@ -202,7 +229,7 @@ store.destroy(); // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( @@ -222,6 +249,32 @@ assertNotNull(indexManager.getResourceLocator().locate(namespace, commitTime-1)); + /* + * Commit the destroy. + */ + store.commit(); + + + // global row store entry is gone. + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); + + // resources can not be located. + assertTrue(null == indexManager.getResourceLocator().locate( + namespace, ITx.UNISOLATED)); + assertTrue(null == indexManager.getResourceLocator().locate( + namespaceLexiconRelation, ITx.UNISOLATED)); + assertTrue(null == indexManager.getResourceLocator().locate( + namespaceSPORelation, ITx.UNISOLATED)); + + // indicies are gone. + assertNull(indexManager.getIndex(lexiconRelationIndexName, + ITx.UNISOLATED)); + assertNull(indexManager.getIndex(primaryStatementIndexName, + ITx.UNISOLATED)); + + // The committed version of the triple store remains visible. + assertNotNull(indexManager.getResourceLocator().locate(namespace, + commitTime-1)); } finally { indexManager.destroy(); @@ -234,15 +287,24 @@ * Return a list of the namespaces for the {@link AbstractTripleStore}s * registered against the bigdata instance. */ - static private List<String> getNamespaces(final IIndexManager indexManager) { + static private List<String> getNamespaces(final IIndexManager indexManager, + final long timestamp) { // the triple store namespaces. 
final List<String> namespaces = new LinkedList<String>(); + final SparseRowStore grs = indexManager.getGlobalRowStore(timestamp); + + if (grs == null) { + + return namespaces; + + } + // scan the relation schema in the global row store. @SuppressWarnings("unchecked") - final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager - .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE); + final Iterator<ITPS> itr = (Iterator<ITPS>) grs + .rangeIterator(RelationSchema.INSTANCE); while (itr.hasNext()) { @@ -348,7 +410,7 @@ * * Note: Will be in lexical order for Unicode. */ - final String[] namespaces = getNamespaces(indexManager) + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED) .toArray(new String[] {}); assertEquals(new String[] { namespace, namespace1 }, namespaces); @@ -404,7 +466,7 @@ kb.destroy(); // global row store entry is gone. - final String[] namespaces = getNamespaces(indexManager).toArray( + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray( new String[] {}); assertEquals(new String[] { namespace1 }, namespaces); @@ -438,7 +500,7 @@ * * Note: Will be in lexical order for Unicode. */ - final String[] namespaces = getNamespaces(indexManager).toArray( + final String[] namespaces = getNamespaces(indexManager,ITx.UNISOLATED).toArray( new String[] {}); assertEquals(new String[] { namespace1 }, namespaces); @@ -477,7 +539,7 @@ kb1.destroy(); // global row store entry is gone. - assertTrue(getNamespaces(indexManager).isEmpty()); + assertTrue(getNamespaces(indexManager,ITx.UNISOLATED).isEmpty()); // resources can not be located. assertTrue(null == indexManager.getResourceLocator().locate( Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -698,23 +698,16 @@ * during the middle of a BigdataSailConnection level operation (or visa * versa). */ + boolean acquiredConnection = false; try { - // acquire the unisolated connection permit. - journal.acquireUnisolatedConnection(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - try { + try { + // acquire the unisolated connection permit. + journal.acquireUnisolatedConnection(); + acquiredConnection = true; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } -// final boolean create; -// final long tx0 = txService.newTx(ITx.READ_COMMITTED); -// try { -// // verify kb does not exist (can not be located). -// create = journal.getResourceLocator().locate(namespace, tx0) == null; -// } finally { -// txService.abort(tx0); -// } - // Check for pre-existing instance. { @@ -730,29 +723,50 @@ } // Create a new instance. -// if (create) { - final LocalTripleStore lts = new LocalTripleStore( - journal, namespace, ITx.UNISOLATED, properties); - if (Boolean.parseBoolean(properties.getProperty( BigdataSail.Options.ISOLATABLE_INDICES, BigdataSail.Options.DEFAULT_ISOLATABLE_INDICES))) { + /* + * Isolatable indices: requires the use of a tx to create + * the KB instance. 
+ */ + final long txCreate = txService.newTx(ITx.UNISOLATED); - - final AbstractTripleStore txCreateView = new LocalTripleStore( - journal, namespace, Long.valueOf(txCreate), properties); - - // create the kb instance within the tx. - txCreateView.create(); - - // commit the tx. - txService.commit(txCreate); + + boolean ok = false; + try { + + final AbstractTripleStore txCreateView = new LocalTripleStore( + journal, namespace, Long.valueOf(txCreate), + properties); + + // create the kb instance within the tx. + txCreateView.create(); + + // commit the tx. + txService.commit(txCreate); + + ok = true; + + } finally { + + if (!ok) + txService.abort(txCreate); + + } } else { + /* + * Create KB without isolatable indices. + */ + + final LocalTripleStore lts = new LocalTripleStore( + journal, namespace, ITx.UNISOLATED, properties); + lts.create(); } @@ -790,7 +804,8 @@ } finally { - journal.releaseUnisolatedConnection(); + if (acquiredConnection) + journal.releaseUnisolatedConnection(); } @@ -1314,22 +1329,40 @@ "UNISOLATED connection is not reentrant."); } - if (getDatabase().getIndexManager() instanceof Journal) { - // acquire permit from Journal. - ((Journal) getDatabase().getIndexManager()) - .acquireUnisolatedConnection(); - } + boolean acquiredConnection = false; + Lock writeLock = null; + BigdataSailConnection conn = null; + try { + if (getDatabase().getIndexManager() instanceof Journal) { + // acquire permit from Journal. + ((Journal) getDatabase().getIndexManager()) + .acquireUnisolatedConnection(); + acquiredConnection = true; + } - // acquire the write lock. - final Lock writeLock = lock.writeLock(); - writeLock.lock(); + // acquire the write lock. + writeLock = lock.writeLock(); + writeLock.lock(); - // new writable connection. - final BigdataSailConnection conn = new BigdataSailConnection(database, - writeLock, true/* unisolated */).startConn(); + // new writable connection. + conn = new BigdataSailConnection(database, writeLock, true/* unisolated */) + .startConn(); + } finally { + if (conn == null) { + // Did not obtain connection. + if (writeLock != null) { + // release write lock. + writeLock.unlock(); + } + if (acquiredConnection) { + // release permit. + ((Journal) getDatabase().getIndexManager()) + .releaseUnisolatedConnection(); + } + } + } + return conn; - return conn; - } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -2167,7 +2167,7 @@ * @param namespace * The namespace. * @param timestamp - * The timestamp. + * A timestamp -or- a tx identifier. * * @return The {@link AbstractTripleStore} -or- <code>null</code> if none is * found for that namespace and timestamp. @@ -2205,7 +2205,7 @@ * * @throws RepositoryException */ - public BigdataSailRepositoryConnection getUnisolatedConnection( + public BigdataSailRepositoryConnection getUnisolatedConnection( // FIXME REVIEW CALLERS final String namespace) throws SailException, RepositoryException { // resolve the default namespace. 
@@ -2247,7 +2247,7 @@ try { - return getNamespaces(timestamp, tx); + return getNamespacesTx(tx); } finally { @@ -2257,25 +2257,25 @@ } - private List<String> getNamespaces(long timestamp, final long tx) { + /*package*/ List<String> getNamespacesTx(final long tx) { - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. - timestamp = getIndexManager().getLastCommitTime(); - - } - // the triple store namespaces. final List<String> namespaces = new LinkedList<String>(); final SparseRowStore grs = getIndexManager().getGlobalRowStore( - timestamp); + tx); if (grs == null) { - log.warn("No GRS @ timestamp=" - + TimestampUtility.toString(timestamp)); + log.warn("No GRS @ tx=" + + TimestampUtility.toString(tx)); // Empty. return namespaces; @@ -2346,6 +2346,7 @@ long tx = timestamp; // use dirty reads unless Journal. if (getIndexManager() instanceof Journal) { + final ITransactionService txs = ((Journal) getIndexManager()) .getLocalTransactionManager().getTransactionService(); @@ -2368,12 +2369,9 @@ * The transaction identifier. */ public void abortTx(final long tx) { - if (getIndexManager() instanceof Journal) { -// if (!TimestampUtility.isReadWriteTx(tx)) { -// // Not a transaction. -// throw new IllegalStateException(); -// } + if (getIndexManager() instanceof Journal) { + final ITransactionService txs = ((Journal) getIndexManager()) .getLocalTransactionManager().getTransactionService(); @@ -2388,4 +2386,22 @@ } +// public void commitTx(final long tx) { +// +// if (getIndexManager() instanceof Journal) { +// +// final ITransactionService txs = ((Journal) getIndexManager()) +// .getLocalTransactionManager().getTransactionService(); +// +// try { +// txs.commit(tx); +// } catch (IOException e) { +// // Note: Local operation. Will not throw IOException. +// throw new RuntimeException(e); +// } +// +// } +// +// } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -146,17 +146,35 @@ * client's response. This code path should be used iff we have already * begun writing the response. Otherwise, an HTTP error status should be * used instead. + * <p> + * This method is invoked as follows: * + * <pre> + * throw launderThrowable(...) + * </pre> + * + * This keeps the compiler happy since it will understand that the caller's + * method always exits with a thrown cause. + * * @param t * The thrown error. * @param os * The stream on which the response will be written. * @param queryStr - * The SPARQL Query -or- SPARQL Update command (if available). + * The SPARQL Query -or- SPARQL Update command (if available) + * -or- a summary of the REST API command -or- an empty string if + * nothing else is more appropriate. * - * @return The laundered exception. + * @return Nothing. The pattern of the returned throwable is used to make + * the compiler happy. * - * @throws Exception + * @throws IOException + * if the cause was an {@link IOException} + * @throws Error + * if the cause was an {@link Error}. 
+ * @throws RuntimeException + * if the cause was a {@link RuntimeException} or anything not + * declared to be thrown by this method. */ protected static RuntimeException launderThrowable(final Throwable t, final HttpServletResponse resp, final String queryStr) @@ -217,7 +235,7 @@ } } if (t instanceof RuntimeException) { - return (RuntimeException) t; + throw (RuntimeException) t; } else if (t instanceof Error) { throw (Error) t; } else if (t instanceof IOException) { @@ -239,10 +257,12 @@ * namespace (or it should be configured for each graph explicitly, or * we should bundle the (namespace,timestamp) together as a single * object). + * + * @see QueryServlet#ATTR_TIMESTAMP; */ protected long getTimestamp(final HttpServletRequest req) { - final String timestamp = req.getParameter("timestamp"); + final String timestamp = req.getParameter(QueryServlet.ATTR_TIMESTAMP); if (timestamp == null) { @@ -342,7 +362,7 @@ protected void reportModifiedCount(final HttpServletResponse resp, final long nmodified, final long elapsed) throws IOException { - final StringWriter w = new StringWriter(); + final StringWriter w = new StringWriter(); final XMLBuilder t = new XMLBuilder(w); @@ -422,40 +442,37 @@ /* * CONNEG for the MIME type. */ - { + final String acceptStr = req.getHeader("Accept"); - final String acceptStr = req.getHeader("Accept"); + final ConnegUtil util = new ConnegUtil(acceptStr); - final ConnegUtil util = new ConnegUtil(acceptStr); + // The best RDFFormat for that Accept header. + RDFFormat format = util.getRDFFormat(); - // The best RDFFormat for that Accept header. - RDFFormat format = util.getRDFFormat(); - - if (format == null) - format = RDFFormat.RDFXML; + if (format == null) + format = RDFFormat.RDFXML; - resp.setStatus(HTTP_OK); + resp.setStatus(HTTP_OK); - resp.setContentType(format.getDefaultMIMEType()); + resp.setContentType(format.getDefaultMIMEType()); - final OutputStream os = resp.getOutputStream(); - try { - final RDFWriter writer = RDFWriterRegistry.getInstance() - .get(format).getWriter(os); - writer.startRDF(); - final Iterator<Statement> itr = g.iterator(); - while (itr.hasNext()) { - final Statement stmt = itr.next(); - writer.handleStatement(stmt); - } - writer.endRDF(); - os.flush(); - } catch (RDFHandlerException e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); - } finally { - os.close(); + final OutputStream os = resp.getOutputStream(); + try { + final RDFWriter writer = RDFWriterRegistry.getInstance() + .get(format).getWriter(os); + writer.startRDF(); + final Iterator<Statement> itr = g.iterator(); + while (itr.hasNext()) { + final Statement stmt = itr.next(); + writer.handleStatement(stmt); } + writer.endRDF(); + os.flush(); + } catch (RDFHandlerException e) { + // log.error(e, e); + throw launderThrowable(e, resp, ""); + } finally { + os.close(); } } @@ -471,34 +488,31 @@ /* * CONNEG for the MIME type. */ - { + final String acceptStr = req.getHeader("Accept"); - final String acceptStr = req.getHeader("Accept"); + final ConnegUtil util = new ConnegUtil(acceptStr); - final ConnegUtil util = new ConnegUtil(acceptStr); + // The best format for that Accept header. + PropertiesFormat format = util.getPropertiesFormat(); - // The best format for that Accept header. 
- PropertiesFormat format = util.getPropertiesFormat(); - - if (format == null) - format = PropertiesFormat.XML; + if (format == null) + format = PropertiesFormat.XML; - resp.setStatus(HTTP_OK); + resp.setStatus(HTTP_OK); - resp.setContentType(format.getDefaultMIMEType()); + resp.setContentType(format.getDefaultMIMEType()); - final OutputStream os = resp.getOutputStream(); - try { - final PropertiesWriter writer = PropertiesWriterRegistry.getInstance() - .get(format).getWriter(os); - writer.write(properties); - os.flush(); - } catch (IOException e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); - } finally { - os.close(); - } + final OutputStream os = resp.getOutputStream(); + try { + final PropertiesWriter writer = PropertiesWriterRegistry + .getInstance().get(format).getWriter(os); + writer.write(properties); + os.flush(); + } catch (IOException e) { + // log.error(e, e); + throw launderThrowable(e, resp, ""); + } finally { + os.close(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -138,10 +138,9 @@ } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/CountersServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -77,35 +77,6 @@ public CountersServlet() { } -// /** -// * Access to the {@link CounterSet} exposed by this service. -// */ -// private final ICounterSetAccess accessor; -// -// /** -// * The service reference iff one one specified to the ctor (may be null). -// */ -// private final IService service; -// -// /** -// * The minimum time before a client can force the re-materialization of the -// * {@link CounterSet}. This is designed to limit the impact of the client on -// * the service. -// * -// * TODO Configuration parameter for {@link #minUpdateLatency} -// */ -// private final long minUpdateLatency = 5000; -// -// /** -// * The last materialized {@link CounterSet}. -// */ -// private volatile CounterSet counterSet = null; -// -// /** -// * The timestamp of the last materialized {@link CounterSet}. -// */ -// private volatile long lastTimestamp = 0L; - /** * Performance counters * <pre> @@ -115,48 +86,9 @@ @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + + try { -// final ByteArrayOutputStream baos = new ByteArrayOutputStream( -// 2 * Bytes.kilobyte32); -// -// final InputStream is; - -// /* -// * If the request uri is one of the pre-declared resources then we send -// * that resource. -// */ -// final DeclaredResource decl = allowedClassPathResources.get(req.uri); -// -// if (decl != null) { -// -// // send that resource. 
-// return sendClasspathResource(decl); -// -// } - - /* - * Materialization the CounterSet iff necessary or stale. - * - * Note: This bit needs to be single threaded to avoid concurrent - * requests causing concurrent materialization of the counter set. - */ -// final ICounterSelector counterSelector; -// synchronized(this) { -// -// final long now = System.currentTimeMillis(); -// -// final long elapsed = now - lastTimestamp; -// -// if (counterSet == null || elapsed > minUpdateLatency/* ms */) { -// -// counterSet = accessor.getCounters(); -// -// } -// -// counterSelector = new CounterSetSelector(counterSet); -// -// } - // TODO Hook this how? (NSS does not define an IService right now) final IService service = null; @@ -255,6 +187,12 @@ if (log.isTraceEnabled()) log.trace("done"); + + } catch (Throwable t) { + + throw BigdataRDFServlet.launderThrowable(t, resp, ""); + + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -131,7 +131,6 @@ */ final PipedOutputStream os = new PipedOutputStream(); final InputStream is = newPipedInputStream(os); - try { // Use this format for the query results. final RDFFormat format = RDFFormat.NTRIPLES; @@ -215,17 +214,10 @@ } - } catch (Throwable t) { + } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, queryStr); + throw BigdataRDFServlet.launderThrowable(t, resp, queryStr); - } - - } catch (Exception ex) { - - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); - } } @@ -382,10 +374,9 @@ } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } @@ -480,8 +471,6 @@ try { - try { - BigdataSailRepositoryConnection conn = null; try { @@ -528,19 +517,20 @@ } - } catch (Throwable t) { + } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, ""); + throw BigdataRDFServlet.launderThrowable(t, resp, "s=" + s + ",p=" + + p + ",o=" + o + ",c=" + c); - } + } - } catch (Exception ex) { +// } catch (Exception ex) { +// +// // Will be rendered as an INTERNAL_ERROR. +// throw new RuntimeException(ex); +// +// } - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); - - } - } // static private transient final Resource[] nullArray = new Resource[]{}; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -369,14 +369,14 @@ os.flush(); } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, "DESCRIBE" // queryStr // TODO Report as "DESCRIBE uri(s)". 
); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -140,7 +140,11 @@ final String namespace = getNamespace(req); final String contentType = req.getContentType(); - if(contentType==null) buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "Content-Type not specified."); + + if (contentType == null) + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "Content-Type not specified."); + if (log.isInfoEnabled()) log.info("Request body: " + contentType); @@ -220,6 +224,7 @@ final AtomicLong nmodified = new AtomicLong(0L); BigdataSailRepositoryConnection conn = null; + boolean success = false; try { conn = getBigdataRDFContext() @@ -256,26 +261,26 @@ reportModifiedCount(resp, nmodified.get(), elapsed); + success = true; + return; - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - } finally { - if (conn != null) + if (conn != null) { + + if (!success) + conn.rollback(); + conn.close(); + + } } - } catch (Exception ex) { + } catch (Throwable t) { - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + throw BigdataRDFServlet.launderThrowable(t, resp, ""); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -45,7 +45,6 @@ import com.bigdata.rdf.properties.PropertiesParserFactory; import com.bigdata.rdf.properties.PropertiesParserRegistry; import com.bigdata.rdf.sail.BigdataSail; -import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.ScaleOutTripleStore; @@ -346,19 +345,23 @@ BigdataSail.Options.NAMESPACE, BigdataSail.Options.DEFAULT_NAMESPACE); - final long timestamp = ITx.UNISOLATED; + { - // resolve the namespace. - final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() - .getResourceLocator().locate(namespace, timestamp); + final long timestamp = ITx.UNISOLATED; + + // resolve the namespace. + final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() + .getResourceLocator().locate(namespace, timestamp); - if (tripleStore != null) { - /* - * Already exists. - */ - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, "EXISTS: " - + namespace); - return; + if (tripleStore != null) { + /* + * Already exists. 
+ */ + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "EXISTS: " + namespace); + return; + } + } try { @@ -397,8 +400,6 @@ } catch (Throwable e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); } @@ -419,54 +420,105 @@ final long timestamp = ITx.UNISOLATED; - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { - /* - * There is no such triple/quad store instance. - */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } - + boolean acquiredConnection = false; try { + + if (getIndexManager() instanceof Journal) { + // acquire permit from Journal. + ((Journal) getIndexManager()).acquireUnisolatedConnection(); + acquiredConnection = true; + } - final BigdataSail sail = new BigdataSail(tripleStore); + final AbstractTripleStore tripleStore = getBigdataRDFContext() + .getTripleStore(namespace, timestamp); - BigdataSailConnection con = null; - - try { - - sail.initialize(); - // This basically puts a lock on the KB instance. - con = sail.getUnisolatedConnection(); - // Destroy the KB instance. - tripleStore.destroy(); - // Commit. - con.commit(); - - } finally { - - if (con != null) - con.close(); - - sail.shutDown(); - + if (tripleStore == null) { + /* + * There is no such triple/quad store instance. + */ + buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); + return; } + // Destroy the KB instance. + tripleStore.destroy(); + + tripleStore.commit(); + buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN, "DELETED: " + namespace); } catch (Throwable e) { - log.error(e, e); - throw launderThrowable(e, resp, ""); + + } finally { + + if (acquiredConnection) { + ((Journal) getIndexManager()).releaseUnisolatedConnection(); + + } + } } + +// private void doDeleteNamespace(final HttpServletRequest req, +// final HttpServletResponse resp) throws IOException { +// +// final String namespace = getNamespace(req); +// +// final long timestamp = ITx.UNISOLATED; +// +// final AbstractTripleStore tripleStore = getBigdataRDFContext() +// .getTripleStore(namespace, timestamp); +// +// if (tripleStore == null) { +// /* +// * There is no such triple/quad store instance. +// */ +// buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); +// return; +// } +// +// try { +// +// final BigdataSail sail = new BigdataSail(tripleStore); +// +// BigdataSailConnection con = null; +// +// try { +// +// sail.initialize(); +// // This basically puts a lock on the KB instance. +// con = sail.getUnisolatedConnection(); +// // Destroy the KB instance. +// tripleStore.destroy(); +// // Commit. +// con.commit(); +// +// } finally { +// +// if (con != null) +// con.close(); +// +// sail.shutDown(); +// +// } +// +// buildResponse(resp, HTTP_OK, MIME_TEXT_PLAIN, "DELETED: " +// + namespace); +// +// } catch (Throwable e) { +// +// log.error(e, e); +// +// throw launderThrowable(e, resp, ""); +// +// } +// +// } /** * Send the configuration properties for the addressed KB instance. @@ -480,21 +532,21 @@ final String namespace = getNamespace(req); - long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. 
- timestamp = getIndexManager().getLastCommitTime(); - - } - final long tx = getBigdataRDFContext().newTx(timestamp); try { final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); + .getTripleStore(namespace, tx); if (tripleStore == null) { /* @@ -523,15 +575,15 @@ private void doDescribeNamespaces(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); - if (timestamp == ITx.READ_COMMITTED) { +// if (timestamp == ITx.READ_COMMITTED) { +// +// // Use the last commit point. +// timestamp = getIndexManager().getLastCommitTime(); +// +// } - // Use the last commit point. - timestamp = getIndexManager().getLastCommitTime(); - - } - final boolean describeEachNamedGraph; { final String s = req.getParameter(DESCRIBE_EACH_NAMED_GRAPH); @@ -565,8 +617,8 @@ final String namespace = getBigdataRDFContext().getConfig().namespace; - describeNamespace(req, g, namespace, describeEachNamedGraph, - timestamp); + describeNamespaceTx(req, g, namespace, describeEachNamedGraph, + tx); } else { @@ -574,12 +626,12 @@ * The set of registered namespaces for KBs. */ final List<String> namespaces = getBigdataRDFContext() - .getNamespaces(timestamp); + .getNamespacesTx(tx); for (String namespace : namespaces) { - describeNamespace(req, g, namespace, - describeEachNamedGraph, timestamp); + describeNamespaceTx(req, g, namespace, + describeEachNamedGraph, tx); } @@ -598,14 +650,14 @@ /** * Describe a namespace into the supplied Graph object. */ - private void describeNamespace(final HttpServletRequest req, + private void describeNamespaceTx(final HttpServletRequest req, final Graph g, final String namespace, - final boolean describeEachNamedGraph, final long timestamp) + final boolean describeEachNamedGraph, final long tx) throws IOException { // Get a view onto that KB instance for that timestamp. final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); + .getTripleStore(namespace, tx); if (tripleStore == null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -133,6 +133,14 @@ */ static final transient String ATTR_UUID = "uuid"; + /** + * The name of the URL query parameter which indicates the timestamp against + * which an operation will be carried out. + * + * @see BigdataRDFServlet#getTimestamp(HttpServletRequest) + */ + static final transient String ATTR_TIMESTAMP = "timestamp"; + // /** // * The name of the request attribute for the {@link AbstractQueryTask}. // */ @@ -244,45 +252,57 @@ */ private void doServiceDescription(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + + /** + * Protect the entire operation with a transaction. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency + * problem with list namespaces and create namespace </a> + */ + final long tx = getBigdataRDFContext().newTx(getTimestamp(req)); - final String namespace = getNamespace(req); + try { + + final AbstractTripleStore tripleStore = getBigdataRDFContext() + .getTripleStore(getNamespace(req), tx); - final long timestamp = getTimestamp(req); + if (tripleStore == null) { + /* + * There is no such triple/quad store instance. + */ + buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); + return; + } - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { + // The serviceURIs for this graph. + final String[] serviceURI = BigdataServlet.getServiceURIs( + getServletContext(), req); + /* - * There is no such triple/quad store instance. + * TODO Resolve the SD class name and ctor via a configuration + * property for extensible descriptions. */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } + final Graph g = new GraphImpl(); + { - // The serviceURIs for this graph. - final String[] serviceURI = BigdataServlet.getServiceURIs( - getServletContext(), req); + final SD sd = new SD(g, tripleStore, serviceURI); - /* - * TODO Resolve the SD class name and ctor via a configuration property - * for extensible descriptions. - */ - final Graph g = new GraphImpl(); - { + final SparqlEndpointConfig config = getBigdataRDFContext() + .getConfig(); - final SD sd = new SD(g, tripleStore, serviceURI); + sd.describeService(true/* describeStatistics */, + config.describeEachNamedGraph); - final SparqlEndpointConfig config = getBigdataRDFContext() - .getConfig(); + } - sd.describeService(true/* describeStatistics */, - config.describeEachNamedGraph); + sendGraph(req, resp, g); + } finally { + + getBigdataRDFContext().abortTx(tx); + } - sendGraph(req, resp, g); - } /** @@ -386,11 +406,11 @@ ft.get(); } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, updateStr); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } @@ -627,11 +647,11 @@ } } catch (Throwable e) { - try { +// try { throw BigdataRDFServlet.launderThrowable(e, resp, queryStr); - } catch (Exception e1) { - throw new RuntimeException(e); - } +// } catch (Exception e1) { +// throw new RuntimeException(e); +// } } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java 2014-06-02 16:41:49 UTC (rev 8435) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/WorkbenchServlet.java 2014-06-02 16:43:56 UTC (rev 8436) @@ -91,7 +91,7 @@ final String namespace = getNamespace(req); - final long timestamp = getTimestamp(req); + final long timestamp = getTimestamp(req); // FIXME Use newTx(). Why does this even look for a KB instance? final AbstractTripleStore tripleStore = getBigdataRDFContext() .getTripleStore(namespace, timestamp); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
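The log message for r8436 above spells out the try/finally discipline wanted for REST API mutations, but the snippet embedded in it shows only the tail of the structure. A minimal sketch of the complete shape, following the pattern applied to InsertServlet in this commit; the method name and the mutation body are placeholders, and only the success flag, the rollback/close handling, and the launderThrowable call are taken from the commit itself:

{{{
// Sketch of the mutation pattern described in the r8436 log message.
// doMutation and its body are hypothetical; the structure mirrors InsertServlet.
private void doMutation(final HttpServletRequest req,
        final HttpServletResponse resp) throws IOException {
    try {
        BigdataSailRepositoryConnection conn = null;
        boolean success = false;
        try {
            conn = getBigdataRDFContext()
                    .getUnisolatedConnection(getNamespace(req));

            // ... apply the mutation against conn ...

            conn.commit();
            success = true;
        } finally {
            if (conn != null) {
                if (!success)
                    conn.rollback(); // undo partial work on any failure path
                conn.close();
            }
        }
    } catch (Throwable t) {
        // Renders an HTTP error or rethrows; keeps the compiler happy.
        throw BigdataRDFServlet.launderThrowable(t, resp, ""/* summary-of-REST_API_CALL */);
    }
}
}}}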
From: <dme...@us...> - 2014-06-02 16:41:57
Revision: 8435
          http://sourceforge.net/p/bigdata/code/8435
Author:   dmekonnen
Date:     2014-06-02 16:41:49 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
Adding 'replication_factor' attribute to support HA1

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb	2014-06-02 05:14:19 UTC (rev 8434)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb	2014-06-02 16:41:49 UTC (rev 8435)
@@ -90,6 +90,9 @@
 # Name of the replication cluster to which this HAJournalServer will belong.
 default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'

+# Set the REPLICATION_FACTOR. 1 = HA1, 3 = HA3, etc
+default['bigdata'][:replication_factor] = 3
+
 # Where to find the Apache River service registrars (can also use multicast).
 default['bigdata'][:river_locator1] = '33.33.33.10'
 default['bigdata'][:river_locator2] = '33.33.33.11'

Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1	(rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1	2014-06-02 16:41:49 UTC (rev 8435)
@@ -0,0 +1,71 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# Vagraant.ha1 - Install the Bigdata High Availability Server with 1 node with a VirtualBox Provider
+#
+# The launch synopsis for this Vagrantfile:
+#
+# % vagrant up
+# % vagrant halt
+# % vagrant up
+#
+# The "halt" and following "up" forces a restart of the services post-installation.
+# This is a temporary requirement until recipes are upated.
+
+Vagrant.require_plugin "vagrant-berkshelf"
+
+Vagrant.configure("2") do |config|
+
+config.vm.provider :virtualbox do |vb|
+  vb.customize ["modifyvm", :id, "--memory", "2048"]
+end
+
+script = <<SCRIPT
+  apt-get update
+  apt-get install -y curl
+  curl -L https://www.opscode.com/chef/install.sh | bash
+  mkdir -p /var/lib/zookeeper
+  echo "33.33.33.10 bigdataA" >> /etc/hosts
+  echo "33.33.33.11 bigdataB" >> /etc/hosts
+  echo "33.33.33.12 bigdataC" >> /etc/hosts
+SCRIPT
+
+$scriptA = "#{script}\n\techo 1 > /var/lib/zookeeper/myid\n"
+config.vm.define :bigdataA do |bigdataA|
+
+  bigdataA.vm.hostname = "bigdataA"
+  bigdataA.vm.box = "precise64"
+
+  bigdataA.berkshelf.enabled = true
+
+  bigdataA.vm.box_url = "http://files.vagrantup.com/precise64.box"
+
+  bigdataA.vm.network :private_network, ip: "33.33.33.10"
+
+  bigdataA.vm.provision :chef_solo do |chef|
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "ha",
+        :replication_factor => 1
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      },
+      :zookeeper => {
+        :zoocfg => { :clientPort=> '2081' }
+      }
+    }
+
+    bigdataA.vm.provision :shell, inline: $scriptA
+
+    chef.run_list = [
+      "recipe[bigdata::high_availability]"
+    ]
+
+  end
+
+end # bigdataA
+
+end
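The new replication_factor attribute only takes effect once a recipe or template actually reads it; that wiring is not part of this commit. A purely hypothetical Chef fragment showing how such a node attribute is typically consumed (the template path, template source, and variable name are invented for illustration):

{{{
# Hypothetical recipe fragment, not from the bigdata cookbook: render the
# replication factor (1 = HA1, 3 = HA3) into an HAJournalServer config file.
replication_factor = node['bigdata'][:replication_factor]

template '/etc/bigdata/HAJournal.config' do
  source 'HAJournal.config.erb'
  variables(replication_factor: replication_factor)
end
}}}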