|
From: Bryan T. <br...@bl...> - 2016-09-20 16:52:17
|
There is a different parameter for the JVM to specify the maximum native
memory allocation. Here is an example from the wiki. You will want to use
values specific to your machine.
# Sample JVM options showing allocation of a 4GB managed object heap
# and allowing a 3GB native heap. Always use the -server mode JVM for
# Blazegraph.
-server -Xmx4G -XX:MaxDirectMemorySize=3000m
Thanks,
Bryan
On Mon, Sep 19, 2016 at 9:57 AM, Eric Scott <eri...@at...> wrote:
> Hi all -
>
> We've been running a copy of the Wikidata stand-alone for several months
> now with relatively few problems, but this weekend we abruptly started
> having memory issues while executing a query against the WD triple store.
> It seems to manifest first as a MemoryManagerClosedException.
>
> See below for a stack trace of one fairly typical error that gets logged.
>
> We've been running with the default RWStore parameters included for the
> Wikidata stand-alone. Upping the heap size from 8G to 12G did not help.
> This is running on a server that has 128GB of memory, with plenty free.
>
> If someone could provide me with guidance, I'd greatly appreciate it.
>
> Cheers,
>
> Eric Scott
>
>
> ERROR: Haltable.java:469: com.bigdata.bop.join.PipelineJoin$JoinTask{
> joinOp=com.bigdata.bop.join.PipelineJoin[2]()[ BOp.bopId=2,
> JoinAnnotations.constraints=null, AST2BOpBase.simpleJoin=true,
> BOp.evaluationContext=ANY, AccessPathJoinAnnotations.
> predicate=com.bigdata.rdf.spo.SPOPredicate[1](s=null, p=null, o=null)[
> IPredicate.relationName=[wdq.spo], IPredicate.timestamp=1474255803369,
> BOp.bopId=1, AST2BOpBase.estimatedCardinality=1009418509,
> AST2BOpBase.originalIndex=SPO, IPredicate.flags=[KEYS,VALS,READONLY,PARALLEL]]]}
> : isFirstCause=true : com.bigdata.rwstore.sector.
> MemoryManagerClosedException
> com.bigdata.rwstore.sector.MemoryManagerClosedException
> at com.bigdata.rwstore.sector.MemoryManager.assertOpen(
> MemoryManager.java:110)
> at com.bigdata.rwstore.sector.MemoryManager.allocate(
> MemoryManager.java:671)
> at com.bigdata.rwstore.sector.AllocationContext.allocate(
> AllocationContext.java:195)
> at com.bigdata.rwstore.sector.AllocationContext.allocate(
> AllocationContext.java:169)
> at com.bigdata.rwstore.sector.AllocationContext.allocate(
> AllocationContext.java:159)
> at com.bigdata.rwstore.sector.AllocationContext.alloc(
> AllocationContext.java:359)
> at com.bigdata.rwstore.PSOutputStream.save(
> PSOutputStream.java:335)
> at com.bigdata.rwstore.PSOutputStream.getAddr(
> PSOutputStream.java:416)
> at com.bigdata.bop.solutions.SolutionSetStream.put(
> SolutionSetStream.java:297)
> at com.bigdata.bop.engine.LocalNativeChunkMessage.<init>
> (LocalNativeChunkMessage.java:213)
> at com.bigdata.bop.engine.LocalNativeChunkMessage.<init>
> (LocalNativeChunkMessage.java:147)
> at com.bigdata.bop.engine.StandaloneChunkHandler.handleChunk(
> StandaloneChunkHandler.java:90)
> at com.bigdata.bop.engine.ChunkedRunningQuery$
> HandleChunkBuffer.outputChunk(ChunkedRunningQuery.java:1699)
> at com.bigdata.bop.engine.ChunkedRunningQuery$HandleChunkBuffer.
> addReorderAllowed(ChunkedRunningQuery.java:1628)
> at com.bigdata.bop.engine.ChunkedRunningQuery$
> HandleChunkBuffer.add(ChunkedRunningQuery.java:1569)
> at com.bigdata.bop.engine.ChunkedRunningQuery$
> HandleChunkBuffer.add(ChunkedRunningQuery.java:1453)
> at com.bigdata.relation.accesspath.UnsyncLocalOutputBuffer.
> handleChunk(UnsyncLocalOutputBuffer.java:59)
> at com.bigdata.relation.accesspath.UnsyncLocalOutputBuffer.
> handleChunk(UnsyncLocalOutputBuffer.java:14)
> at com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuf
> fer.overflow(AbstractUnsynchronizedArrayBuffer.java:287)
> at com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuf
> fer.add2(AbstractUnsynchronizedArrayBuffer.java:215)
> at com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuf
> fer.add(AbstractUnsynchronizedArrayBuffer.java:173)
> at com.bigdata.bop.join.PipelineJoin$JoinTask$
> AccessPathTask.handleJoin2(PipelineJoin.java:1868)
> at com.bigdata.bop.join.PipelineJoin$JoinTask$AccessPathTask.call(
> PipelineJoin.java:1684)
> at com.bigdata.bop.join.PipelineJoin$JoinTask$
> BindingSetConsumerTask.runOneTask(PipelineJoin.java:1086)
> at com.bigdata.bop.join.PipelineJoin$JoinTask$
> BindingSetConsumerTask.call(PipelineJoin.java:995)
> at com.bigdata.bop.join.PipelineJoin$JoinTask.
> consumeSource(PipelineJoin.java:728)
> at com.bigdata.bop.join.PipelineJoin$JoinTask.call(
> PipelineJoin.java:623)
> at com.bigdata.bop.join.PipelineJoin$JoinTask.call(
> PipelineJoin.java:382)
> at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> at com.bigdata.concurrent.FutureTaskMon.run(FutureTaskMon.java:63)
> at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkTask.
> call(ChunkedRunningQuery.java:1346)
> at com.bigdata.bop.engine.ChunkedRunningQuery$
> ChunkTaskWrapper.run(ChunkedRunningQuery.java:926)
> at java.util.concurrent.Executors$RunnableAdapter.
> call(Executors.java:511)
> at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> at com.bigdata.concurrent.FutureTaskMon.run(FutureTaskMon.java:63)
> at com.bigdata.bop.engine.ChunkedRunningQuery$ChunkFutureTask.run(
> ChunkedRunningQuery.java:821)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(
> ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(
> ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
>
>
> ------------------------------------------------------------
> ------------------
>
> _______________________________________________
> Bigdata-developers mailing list
> Big...@li...
> https://lists.sourceforge.net/lists/listinfo/bigdata-developers
>
>
|