-1

I'm not an ELK expert. I have a two-node Docker Swarm cluster in which I want to deploy the ELK stack.

This is my docker-compose.yml:

version: '3.4'

services:

  # Single-node Elasticsearch (basic-license image). Config and data are
  # bind-mounted from the host.
  elk:
    image: docker.elastic.co/elasticsearch/elasticsearch-basic:6.2.1
    volumes:
      - ./elk/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
      - ./elk/data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xms256m -Xmx256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 1

  # Logstash: 5000 for direct input, 51415 for the logspout syslog feed.
  logstash:
    image: docker.elastic.co/logstash/logstash:6.2.1
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      - "5000:5000"
      - "51415:51415"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: docker.elastic.co/kibana/kibana:6.2.1
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
    ports:
      - "5601:5601"
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 1

  # Logspout runs once per node (mode: global), reads the Docker socket and
  # forwards every container's logs to the logstash service over syslog.
  logspout:
    image: gliderlabs/logspout:v3.2.4
    volumes:
      - '/var/run/docker.sock:/tmp/docker.sock'
    environment:
      SYSLOG_FORMAT: "rfc3164"
    command: 'syslog://logstash:51415'
    networks:
      - net
    deploy:
      mode: global

  # NOTE(review): image tag 6.2.0 differs from the rest of the stack (6.2.1)
  # — confirm whether this version skew is intentional.
  apm-server:
    image: docker.elastic.co/apm/apm-server:6.2.0
    ports:
      - "8200:8200"
    volumes:
      - ./apmserver/apm-server.yml:/usr/share/apm-server/apm-server.yml
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 1

networks:
  net:

Basically I would like to forward all the docker containers logs to logstash. I'm doing it using logspout. Since in the docker swarm there is only the ELK stack running, the logs that logspout is forwarding to logstash are only the logs of the ELK stack containers.

It works fine for a few hours; after that there is an exception: org.elasticsearch.action.UnavailableShardsException primary shard is not active Timeout

Output of GET _cat/shards?h=index,shard,prirep,state,unassigned.reason:

.kibana                           0 p STARTED    
.triggered_watches                0 p STARTED    
.monitoring-logstash-6-2018.02.16 0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-kibana-6-2018.02.19   0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-es-6-2018.02.18       0 p UNASSIGNED ALLOCATION_FAILED
.watches                          0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-logstash-6-2018.02.20 0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-logstash-6-2018.02.17 0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-es-6-2018.02.17       0 p UNASSIGNED ALLOCATION_FAILED
.watcher-history-7-2018.02.16     0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-kibana-6-2018.02.20   0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-es-6-2018.02.16       0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-logstash-6-2018.02.19 0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-es-6-2018.02.19       0 p UNASSIGNED ALLOCATION_FAILED
logstash-2018.02.16               0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-kibana-6-2018.02.16   0 p STARTED    
.monitoring-logstash-6-2018.02.18 0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-alerts-6              0 p STARTED    
.monitoring-kibana-6-2018.02.18   0 p UNASSIGNED ALLOCATION_FAILED
apm-6.2.0-2018.02.16              0 p STARTED    
.monitoring-kibana-6-2018.02.17   0 p UNASSIGNED ALLOCATION_FAILED
.monitoring-es-6-2018.02.20       0 p UNASSIGNED ALLOCATION_FAILED

Output of GET _template/logstash?pretty

{
  "logstash": {
    "order": 0,
    "index_patterns": [
      "logstash-*"
    ],
    "settings": {
      "index": {
        "number_of_shards": "1",
        "number_of_replicas": "0"
      }
    },
   .....
  ......

Output of GET _cluster/health

{
  "cluster_name": "test-cluster",
  "status": "red",
  "timed_out": false,
  "number_of_nodes": 1,
  "number_of_data_nodes": 1,
  "active_primary_shards": 5,
  "active_shards": 5,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 17,
  "delayed_unassigned_shards": 0,
  "number_of_pending_tasks": 0,
  "number_of_in_flight_fetch": 0,
  "task_max_waiting_in_queue_millis": 0,
  "active_shards_percent_as_number": 22.727272727272727
}

Elasticsearch.yml

---
## Default Elasticsearch configuration from elasticsearch-docker.
## from https://github.com/elastic/elasticsearch-docker/blob/master/build/elasticsearch/elasticsearch.yml
#
cluster.name: "docker-cluster"
# Bind to all interfaces so the container port mapping works.
network.host: 0.0.0.0

# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
# NOTE(review): with discovery.type set to single-node below, zen discovery
# settings like this one are presumably ignored — harmless, but redundant.
discovery.zen.minimum_master_nodes: 1

## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node

What can I do to solve the problem? thank you

Fabry
  • 1,498
  • 4
  • 23
  • 47

2 Answers

0

I have the same issue as you. I think logspout doesn't work because it doesn't stay up. When I run docker ps -a I get this result:

CONTAINER ID        IMAGE                        COMMAND                  CREATED              STATUS                          PORTS                                                        NAMES
420478d86c18        docker_logstash              "/usr/local/bin/dock…"   About a minute ago   Up About a minute               0.0.0.0:5044->5044/tcp, 0.0.0.0:51415->51415/tcp, 9600/tcp   elastic_logstash-dev
c94d0517fa11        docker_kibana                "/bin/bash /usr/loca…"   About a minute ago   Up About a minute               0.0.0.0:5601->5601/tcp                                       elastic-kibana-dev
3ba3bc40677a        docker_elasticsearch         "/usr/local/bin/dock…"   About a minute ago   Up About a minute               0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp               elastic-elasticsearch-dev
35d799e5652b        docker_postgresql            "docker-entrypoint.s…"   About a minute ago   Up About a minute               0.0.0.0:60901->5432/tcp                                      elastic-postgresql-dev
ddee91cfeb68        gliderlabs/logspout:latest   "/bin/logspout syslo…"   About a minute ago   Exited (1) About a minute ago                                                                docker_logspout_1

I guess there is a configuration option to keep this container up so that it keeps forwarding all containers' logs to logstash.

Quentin LOOTS
  • 31
  • 1
  • 4
-1

The problem was that I was using GlusterFS to synchronize the data among all the nodes of the cluster: ELK on Docker Swarm and GlusterFS crash

Fabry
  • 1,498
  • 4
  • 23
  • 47