
I am getting the error mentioned in the title every time I start my dockerized Elasticsearch container via docker-compose. Funnily enough, although it complains about the lock, Elasticsearch starts anyway. Searching around, I found people saying this happens because some process isn't killed when running docker-compose down and remains as an orphan. However, after docker-compose down I checked and I don't see any container up, and I am starting with docker-compose up -d --remove-orphans, which, as far as I understand, is supposed to not leave an orphan container up and running. (I found a GitHub issue regarding the --remove-orphans flag from around 10 months ago; it is a known issue and was fixed.)
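
For reference, this is the exact stop/start cycle I run from the project directory (just the commands described above, nothing else in between):

C:\Dockers\megalog-try-1>docker-compose down
C:\Dockers\megalog-try-1>docker ps          <-- no containers from this project listed at this point
C:\Dockers\megalog-try-1>docker-compose up -d --remove-orphans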

Here is my entire docker-compose.yml

version: '3.2'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:7.5.2
    volumes:
      - "./kibana.yml:/usr/share/kibana/config/kibana.yml"
    restart: always
    environment:
    - SERVER_NAME=kibana.localhost
    - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    links:
      - elasticsearch
    depends_on:
      - elasticsearch
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.5.2
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=false
      - xpack.watcher.enabled=false
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "./esdata:/usr/share/elasticsearch/data"
    ports:
      - "9200:9200"
  logstash:
    image: docker.elastic.co/logstash/logstash:7.5.2
    volumes:
      - "./logstash.conf:/config-dir/logstash.conf"
    restart: always
    command: logstash -f /config-dir/logstash.conf
    ports:
      - "9600:9600"
      - "7777:7777"
    links:
      - elasticsearch
      - kafka1
      - kafka2
      - kafka3
  kafka1:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    links:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "9092:9092"
    environment:
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: "168"
      KAFKA_LOG_RETENTION_BYTES: "100000000"
      KAFKA_ZOOKEEPER_CONNECT:  zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_CREATE_TOPICS: "log:3:3"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
  kafka2:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    links:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "9093:9092"
    environment:
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: "168"
      KAFKA_LOG_RETENTION_BYTES: "100000000"
      KAFKA_ZOOKEEPER_CONNECT:  zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_CREATE_TOPICS: "log:3:3"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
  kafka3:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    links:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "9094:9092"
    environment:
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: "168"
      KAFKA_LOG_RETENTION_BYTES: "100000000"
      KAFKA_ZOOKEEPER_CONNECT:  zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_CREATE_TOPICS: "log:3:3"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
  zoo1:
    image: confluentinc/cp-zookeeper:latest
    environment:
      MYID: 1
      SERVERS: zoo1,zoo2,zoo3
      ZOOKEEPER_CLIENT_PORT: 2181
    ports:
      - "2181:2181"
  zoo2:
    image: confluentinc/cp-zookeeper:latest
    environment:
      MYID: 2
      SERVERS: zoo1,zoo2,zoo3
      ZOOKEEPER_CLIENT_PORT: 2182
    ports:
      - "2182:2181"
  zoo3:
    image: confluentinc/cp-zookeeper:latest
    environment:
      MYID: 3
      SERVERS: zoo1,zoo2,zoo3
      ZOOKEEPER_CLIENT_PORT: 2183
    ports:
      - "2183:2181"
  filebeat:
    image: docker.elastic.co/beats/filebeat:7.5.2
    volumes:
      - "./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro"
      - "./sample-logs:/sample-logs"
    links:
      - kafka1
      - kafka2
      - kafka3
    depends_on:
      - kafka1
      - kafka2
      - kafka3

Here is the most relevant part of the log:

"o.e.b.ElasticsearchUncaughtExceptionHandler", "cluster.name": "docker-cluster", "node.name": "94d8bc6a1491", "message": "uncaught exception in thread [main]", 
"stacktrace": ["org.elasticsearch.bootstrap.StartupException: org.apache.lucene.store.AlreadyClosedException: Underlying file changed by an external force at 1970-01-01T00:00:00Z, 

(lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/node.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-05T20:57:13.011548Z))",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:163) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:150) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:86) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:125) ~[elasticsearch-cli-7.5.2.jar:7.5.2]",
"at org.elasticsearch.cli.Command.main(Command.java:90) ~[elasticsearch-cli-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:115) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:92) ~[elasticsearch-7.5.2.jar:7.5.2]",
"Caused by: org.apache.lucene.store.AlreadyClosedException: Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/node.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-05T20:57:13.011548Z))",
"at org.apache.lucene.store.NativeFSLockFactory$NativeFSLock.ensureValid(NativeFSLockFactory.java:191) ~[lucene-core-8.3.0.jar:8.3.0 2aa586909b911e66e1d8863aa89f173d69f86cd2 - ishan - 2019-10-25 23:10:03]",
"at org.elasticsearch.env.NodeEnvironment.assertEnvIsLocked(NodeEnvironment.java:1039) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.env.NodeEnvironment.nodeDataPaths(NodeEnvironment.java:789) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.env.NodeEnvironment.assertCanWrite(NodeEnvironment.java:1218) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.env.NodeEnvironment.<init>(NodeEnvironment.java:314) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.node.Node.<init>(Node.java:273) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.node.Node.<init>(Node.java:253) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:241) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:241) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:369) ~[elasticsearch-7.5.2.jar:7.5.2]",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:159) ~[elasticsearch-7.5.2.jar:7.5.2]",
"... 6 more"] }

And here is a Stack Overflow thread basically saying it is an issue with the lock process, but it seems that poster wasn't using Docker: enter link description here

In case it is relevant, my docker-compose version:

C:\Dockers\megalog-try-1>docker-compose version
docker-compose version 1.25.2, build 698e2846
docker-py version: 4.1.0
CPython version: 2.7.16
OpenSSL version: OpenSSL 1.0.2q  20 Nov 2018

*** edit 1

C:\Dockers\megalog-try-1>docker ps
CONTAINER ID        IMAGE                                                 COMMAND                  CREATED             STATUS              PORTS                                                      NAMES
1d7af59ed31a        docker.elastic.co/logstash/logstash:7.5.2             "/usr/local/bin/dock…"   16 hours ago        Up 21 minutes       0.0.0.0:7777->7777/tcp, 5044/tcp, 0.0.0.0:9600->9600/tcp   megalog-try-1_logstash_1
80af72445bfb        confluentinc/cp-kafka:latest                          "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       0.0.0.0:9092->9092/tcp                                     megalog-try-1_kafka1_1
e5d209fe2c42        confluentinc/cp-kafka:latest                          "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       0.0.0.0:9094->9092/tcp                                     megalog-try-1_kafka3_1
c00871259fab        confluentinc/cp-kafka:latest                          "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       0.0.0.0:9093->9092/tcp                                     megalog-try-1_kafka2_1
9f4dccedbe17        docker.elastic.co/kibana/kibana:7.5.2                 "/usr/local/bin/dumb…"   16 hours ago        Up 21 minutes       0.0.0.0:5601->5601/tcp                                     megalog-try-1_kibana_1
d240b6961d78        confluentinc/cp-zookeeper:latest                      "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       2888/tcp, 3888/tcp, 0.0.0.0:2183->2181/tcp                 megalog-try-1_zoo3_1
63bd8906ca83        confluentinc/cp-zookeeper:latest                      "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       2888/tcp, 0.0.0.0:2181->2181/tcp, 3888/tcp                 megalog-try-1_zoo1_1
3218d615cb19        confluentinc/cp-zookeeper:latest                      "/etc/confluent/dock…"   16 hours ago        Up 21 minutes       2888/tcp, 3888/tcp, 0.0.0.0:2182->2181/tcp                 megalog-try-1_zoo2_1
13737ff80b03        docker.elastic.co/elasticsearch/elasticsearch:7.5.2   "/usr/local/bin/dock…"   16 hours ago        Up 21 minutes       0.0.0.0:9200->9200/tcp, 9300/tcp                           megalog-try-1_elasticsearch_1
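
As the output shows, there is only one Elasticsearch container running. As an extra check that no second container from the same image could be holding the data directory, docker ps can also be filtered by image (just a verification command, not part of my normal startup):

C:\Dockers\megalog-try-1>docker ps --filter "ancestor=docker.elastic.co/elasticsearch/elasticsearch:7.5.2"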
  • are you sure there is just one instance of this compose running? Does `docker ps` yield `10` running containers? – ibexit Feb 06 '20 at 17:24
  • I have just pasted the result from docker ps. I see only one elasticsearch container and I keep getting the message. How do you conclude: "docker ps yields 10 running containers?" – Jim C Feb 06 '20 at 21:29
  • 9 if all is ok, 10 if there were a bogus elastic container ;) – ibexit Feb 07 '20 at 00:19
  • Really sorry, but I still don't get your point. I can count 9 containers but only one Elasticsearch. The issue mentioned in this question is regarding an exception raised in the Elasticsearch container. Please, can you clarify your comment? It seems either you didn't catch from my question that it is only about a message in the Elasticsearch container, or you want to point out something I couldn't understand yet – Jim C Feb 07 '20 at 13:06
    Sorry. What I wanted to say is that it looks fine at the Docker level. There is no second container locking the file. That is exactly what I wanted to verify. – ibexit Feb 07 '20 at 13:13
