
Couchbase - Elasticsearch Java heap memory

We installed a Couchbase instance on an Amazon Web Services server and are running an Elasticsearch instance on the same machine.

The connection between the two was working and replication was running fine, until... out of the blue, we got the following error log from Elasticsearch:

[2013-08-29 21:27:34,947][WARN ][cluster.metadata   ] [01-Thor] failed to dynamically update the mapping in cluster_state from shard 
java.lang.OutOfMemoryError: Java heap space 
    at org.apache.lucene.util.ArrayUtil.grow(ArrayUtil.java:343) 
    at org.elasticsearch.common.io.FastByteArrayOutputStream.write(FastByteArrayOutputStream.java:103) 
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:1848) 
    at org.elasticsearch.common.jackson.core.json.UTF8JsonGenerator.writeString(UTF8JsonGenerator.java:436) 
    at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeString(JsonXContentGenerator.java:84) 
    at org.elasticsearch.common.xcontent.XContentBuilder.field(XContentBuilder.java:314) 
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.doXContentBody(AbstractFieldMapper.java:601) 
    at org.elasticsearch.index.mapper.core.NumberFieldMapper.doXContentBody(NumberFieldMapper.java:286) 
    at org.elasticsearch.index.mapper.core.LongFieldMapper.doXContentBody(LongFieldMapper.java:338) 
    at org.elasticsearch.index.mapper.core.AbstractFieldMapper.toXContent(AbstractFieldMapper.java:595) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:852) 
    at org.elasticsearch.index.mapper.object.ObjectMapper.toXContent(ObjectMapper.java:920) 
    at org.elasticsearch.index.mapper.DocumentMapper.toXContent(DocumentMapper.java:700) 
    at org.elasticsearch.index.mapper.DocumentMapper.refreshSource(DocumentMapper.java:682) 
    at org.elasticsearch.index.mapper.DocumentMapper.<init>(DocumentMapper.java:342) 
    at org.elasticsearch.index.mapper.DocumentMapper$Builder.build(DocumentMapper.java:224) 
    at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:231) 
    at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:380) 
    at org.elasticsearch.index.mapper.MapperService.merge(MapperService.java:190) 
    at org.elasticsearch.cluster.metadata.MetaDataMappingService$2.execute(MetaDataMappingService.java:185) 
    at org.elasticsearch.cluster.service.InternalClusterService$2.run(InternalClusterService.java:229) 
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:95) 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) 
    at java.lang.Thread.run(Unknown Source) 
[2013-08-29 21:27:56,948][WARN ][indices.ttl    ] [01-Thor] failed to execute ttl purge 
java.lang.OutOfMemoryError: Java heap space 
    at org.apache.lucene.util.ByteBlockPool$Allocator.getByteBlock(ByteBlockPool.java:66) 
    at org.apache.lucene.util.ByteBlockPool.nextBuffer(ByteBlockPool.java:202) 
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:319) 
    at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:274) 
    at org.apache.lucene.search.ConstantScoreAutoRewrite$CutOffTermCollector.collect(ConstantScoreAutoRewrite.java:131) 
    at org.apache.lucene.search.TermCollectingRewrite.collectTerms(TermCollectingRewrite.java:79) 
    at org.apache.lucene.search.ConstantScoreAutoRewrite.rewrite(ConstantScoreAutoRewrite.java:95) 
    at org.apache.lucene.search.MultiTermQuery$ConstantScoreAutoRewrite.rewrite(MultiTermQuery.java:220) 
    at org.apache.lucene.search.MultiTermQuery.rewrite(MultiTermQuery.java:288) 
    at org.apache.lucene.search.IndexSearcher.rewrite(IndexSearcher.java:639) 
    at org.apache.lucene.search.IndexSearcher.createNormalizedWeight(IndexSearcher.java:686) 
    at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:309) 
    at org.elasticsearch.indices.ttl.IndicesTTLService.purgeShards(IndicesTTLService.java:186) 
    at org.elasticsearch.indices.ttl.IndicesTTLService.access$000(IndicesTTLService.java:65) 
    at org.elasticsearch.indices.ttl.IndicesTTLService$PurgerThread.run(IndicesTTLService.java:122) 

[2013-08-29 21:29:23,919][WARN ][indices.ttl    ] [01-Thor] failed to execute ttl purge 
java.lang.OutOfMemoryError: Java heap space 

We tried changing some of the memory settings, but we can't seem to get it right.

Has anyone run into the same problem?

Answer


Some troubleshooting tips:

  1. It is generally wise to dedicate an AWS instance to Elasticsearch alone, both for predictable performance and for ease of debugging.

  2. Use the Bigdesk plugin to monitor your memory usage. It will show you where the memory bottleneck is occurring: inside Elasticsearch itself, in the operating system, only during heavy querying and indexing, or somewhere unexpected (see the sketch after this list for a scripted version of the same check).

  3. Elasticsearch's Java heap should be set to roughly 50% of the total memory on your box.

  4. A gist from Shay Banon offers several approaches to dealing with memory issues in Elasticsearch.
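
Bigdesk reads these numbers from Elasticsearch's nodes-stats API, so you can also check them by hand. Below is a minimal sketch (plain Python, standard library only) that pulls the JVM heap figures from each node and compares the configured heap with the machine's physical RAM, i.e. the ~50% guideline from tip 3. The localhost URL, the ?jvm=true flag and the heap_* field names are assumptions based on the 0.90-era API; verify them against your Elasticsearch version before relying on the output.

    import json
    import os
    import urllib.request

    ES_URL = "http://localhost:9200"  # assumption: Elasticsearch listening on the default port

    # On 0.90 the jvm section has to be requested explicitly; newer versions return it by default.
    with urllib.request.urlopen(ES_URL + "/_nodes/stats?jvm=true") as resp:
        stats = json.load(resp)

    # Physical RAM of the box (Linux sysconf names), used for the ~50% sanity check.
    total_ram = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")

    for node_id, node in stats["nodes"].items():
        mem = node.get("jvm", {}).get("mem", {})
        heap_used = mem.get("heap_used_in_bytes", 0)
        # 0.90 reports heap_committed_in_bytes; later versions also expose heap_max_in_bytes.
        heap_max = mem.get("heap_max_in_bytes") or mem.get("heap_committed_in_bytes", 0)
        print("%s: heap %d MB used / %d MB max, box has %d MB RAM" % (
            node.get("name", node_id),
            heap_used // 2**20,
            heap_max // 2**20,
            total_ram // 2**20,
        ))
        if heap_max > 0.6 * total_ram:
            print("  heap is well above ~50% of RAM; consider shrinking it (e.g. via ES_HEAP_SIZE)")

Running this while the Couchbase replication load is active should help show whether the heap is simply too small for the mapping updates and TTL purges seen in the stack traces above, or whether something else on the shared box is consuming the memory.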