I have a simple Elasticsearch setup together with Kibana running in Docker containers. The issue is that even though the containers keep running, I repeatedly lose all my indices and therefore all the data. I am forced to reinsert everything, but after a day it is gone again. I have tried both single-node and multi-node clusters, increasing memory limits, upgrading the images, etc. How can I prevent this? Here is my current docker-compose file:
version: '2.2'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:7.10.0
    depends_on:
      - es01
      - es02
      - es03
    links:
      - "es01:elasticsearch"
    ports:
      - "5601:5601"
    networks:
      - elastic
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - elastic
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
networks:
  elastic:
    driver: bridge
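To check whether the data is really gone and whether the named volumes still exist, I run roughly the following (standard Docker and Elasticsearch commands, shown here only for completeness):

curl -s 'localhost:9200/_cat/indices?v'   # list all indices and their document counts
docker volume ls                          # the named volumes data01..data03 should appear here
docker volume inspect data01              # shows the mountpoint of the volume on the host
docker logs --tail 200 es01               # recent Elasticsearch logs from the first node

Since the volumes use the local driver, I would expect the data under /usr/share/elasticsearch/data to survive container restarts.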
In the logs I have found something like this:
fatal error in thread [main], exiting
java.lang.ExceptionInInitializerError
at org.elasticsearch.node.Node.<init>(Node.java:637)
at org.elasticsearch.node.Node.<init>(Node.java:289)
at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:227)
at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:227)
at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:393)
at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:170)
at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:161)
at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:86)
at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:127)
at org.elasticsearch.cli.Command.main(Command.java:90)
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:126)
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:92)
Caused by: java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "getClassLoader")
at java.base/java.security.AccessControlContext.checkPermission(AccessControlContext.java:472)
at java.base/java.security.AccessController.checkPermission(AccessController.java:1036)
at java.base/java.lang.SecurityManager.checkPermission(SecurityManager.java:408)
at java.base/java.lang.ClassLoader.checkClassLoaderPermission(ClassLoader.java:2068)
at java.base/java.lang.ClassLoader.getParent(ClassLoader.java:1815)
at org.apache.logging.log4j.core.selector.ClassLoaderContextSelector.locateContext(ClassLoaderContextSelector.java:114)
at org.apache.logging.log4j.core.selector.ClassLoaderContextSelector.getContext(ClassLoaderContextSelector.java:70)
at org.apache.logging.log4j.core.selector.ClassLoaderContextSelector.getContext(ClassLoaderContextSelector.java:57)
at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:148)
at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:45)
at org.apache.logging.log4j.LogManager.getContext(LogManager.java:194)
at org.apache.logging.log4j.LogManager.getLogger(LogManager.java:581)
at org.elasticsearch.persistent.PersistentTasksService.<clinit>(PersistentTasksService.java:49)
... 12 more
{"type": "server", "timestamp": "2020-11-20T02:13:00,006Z", "level": "INFO", "component": "o.e.x.m.a.TransportDeleteExpiredDataAction", "cluster.name": "es-docker-cluster", "node.name": "es01", "message": "Deleting expired data", "cluster.uuid": "B76EHJrlTzG80_NuQFjCqQ", "node.id": "oCU5GS2xStSf4q7IPtdcTA" }