
The YAML file below (originally provided here) works in Windows and macOS Docker environments. But when I run it in a CentOS environment, mongo-express can't connect to the MongoDB service, and nothing shows up in the browser at localhost:8081. I suspect this is a DNS issue, i.e. the `mongo` service name not being mapped to the respective container IP address. How can I fix this?

# Use root/example as user/password credentials
version: '3.1'

services:

  mongo:
    image: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example

  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
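
For reference, one way to test the DNS hypothesis is to try resolving the service name from another container attached to the same compose network. This is only a sketch: busybox and the network name docker_default are assumptions; check docker network ls for the actual name, which docker-compose derives from the project directory.

# hypothetical check: can the "mongo" service name be resolved on the compose network?
docker run --rm --network docker_default busybox nslookup mongo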

Log from the mongo service:

{"t":{"$date":"2021-01-01T00:15:56.059+00:00"},"s":"I",  "c":"CONTROL",  "id":23285,   "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"W",  "c":"ASIO",     "id":22601,   "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I",  "c":"NETWORK",  "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
killing process with pid: 29
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I",  "c":"CONTROL",  "id":23377,   "ctx":"SignalHandler","msg":"Received signal","attr":{"signal":15,"error":"Terminated"}}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I",  "c":"CONTROL",  "id":23378,   "ctx":"SignalHandler","msg":"Signal was sent by kill(2)","attr":{"pid":83,"uid":999}}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I",  "c":"CONTROL",  "id":23381,   "ctx":"SignalHandler","msg":"will terminate after current cmd ends"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I",  "c":"REPL",     "id":4784900, "ctx":"SignalHandler","msg":"Stepping down the ReplicationCoordinator for shutdown","attr":{"waitTimeMillis":10000}}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"COMMAND",  "id":4784901, "ctx":"SignalHandler","msg":"Shutting down the MirrorMaestro"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"SHARDING", "id":4784902, "ctx":"SignalHandler","msg":"Shutting down the WaitForMajorityService"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"CONTROL",  "id":4784903, "ctx":"SignalHandler","msg":"Shutting down the LogicalSessionCache"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"NETWORK",  "id":20562,   "ctx":"SignalHandler","msg":"Shutdown: going to close listening sockets"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"NETWORK",  "id":23017,   "ctx":"listener","msg":"removing socket file","attr":{"path":"/tmp/mongodb-27017.sock"}}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"NETWORK",  "id":4784905, "ctx":"SignalHandler","msg":"Shutting down the global connection pool"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"STORAGE",  "id":4784906, "ctx":"SignalHandler","msg":"Shutting down the FlowControlTicketholder"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"-",        "id":20520,   "ctx":"SignalHandler","msg":"Stopping further Flow Control ticket acquisitions."}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"STORAGE",  "id":4784908, "ctx":"SignalHandler","msg":"Shutting down the PeriodicThreadToAbortExpiredTransactions"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I",  "c":"STORAGE",  "id":4784934, "ctx":"SignalHandler","msg":"Shutting down the PeriodicThreadToDecreaseSnapshotHistoryCachePressure"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"REPL",     "id":4784909, "ctx":"SignalHandler","msg":"Shutting down the ReplicationCoordinator"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"SHARDING", "id":4784910, "ctx":"SignalHandler","msg":"Shutting down the ShardingInitializationMongoD"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"REPL",     "id":4784911, "ctx":"SignalHandler","msg":"Enqueuing the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"-",        "id":4784912, "ctx":"SignalHandler","msg":"Killing all operations for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"-",        "id":4695300, "ctx":"SignalHandler","msg":"Interrupted all currently running operations","attr":{"opsKilled":3}}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"COMMAND",  "id":4784913, "ctx":"SignalHandler","msg":"Shutting down all open transactions"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"REPL",     "id":4784914, "ctx":"SignalHandler","msg":"Acquiring the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"INDEX",    "id":4784915, "ctx":"SignalHandler","msg":"Shutting down the IndexBuildsCoordinator"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"REPL",     "id":4784916, "ctx":"SignalHandler","msg":"Reacquiring the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"REPL",     "id":4784917, "ctx":"SignalHandler","msg":"Attempting to mark clean shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"NETWORK",  "id":4784918, "ctx":"SignalHandler","msg":"Shutting down the ReplicaSetMonitor"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"SHARDING", "id":4784921, "ctx":"SignalHandler","msg":"Shutting down the MigrationUtilExecutor"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"CONTROL",  "id":4784925, "ctx":"SignalHandler","msg":"Shutting down free monitoring"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"CONTROL",  "id":20609,   "ctx":"SignalHandler","msg":"Shutting down free monitoring"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"FTDC",     "id":4784926, "ctx":"SignalHandler","msg":"Shutting down full-time data capture"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I",  "c":"FTDC",     "id":20626,   "ctx":"SignalHandler","msg":"Shutting down full-time diagnostic data capture"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":4784927, "ctx":"SignalHandler","msg":"Shutting down the HealthLog"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":4784929, "ctx":"SignalHandler","msg":"Acquiring the global lock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":4784930, "ctx":"SignalHandler","msg":"Shutting down the storage engine"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":20282,   "ctx":"SignalHandler","msg":"Deregistering all the collections"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":22261,   "ctx":"SignalHandler","msg":"Timestamp monitor shutting down"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":22317,   "ctx":"SignalHandler","msg":"WiredTigerKVEngine shutting down"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":22318,   "ctx":"SignalHandler","msg":"Shutting down session sweeper thread"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":22319,   "ctx":"SignalHandler","msg":"Finished shutting down session sweeper thread"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I",  "c":"STORAGE",  "id":22320,   "ctx":"SignalHandler","msg":"Shutting down journal flusher thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I",  "c":"STORAGE",  "id":22321,   "ctx":"SignalHandler","msg":"Finished shutting down journal flusher thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I",  "c":"STORAGE",  "id":22322,   "ctx":"SignalHandler","msg":"Shutting down checkpoint thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I",  "c":"STORAGE",  "id":22323,   "ctx":"SignalHandler","msg":"Finished shutting down checkpoint thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I",  "c":"STORAGE",  "id":4795902, "ctx":"SignalHandler","msg":"Closing WiredTiger","attr":{"closeConfig":"leak_memory=true,"}}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I",  "c":"STORAGE",  "id":4795901, "ctx":"SignalHandler","msg":"WiredTiger closed","attr":{"durationMillis":6}}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I",  "c":"STORAGE",  "id":22279,   "ctx":"SignalHandler","msg":"shutdown: removing fs lock..."}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I",  "c":"-",        "id":4784931, "ctx":"SignalHandler","msg":"Dropping the scope cache for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I",  "c":"CONTROL",  "id":20565,   "ctx":"SignalHandler","msg":"Now exiting"}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I",  "c":"CONTROL",  "id":23138,   "ctx":"SignalHandler","msg":"Shutting down","attr":{"exitCode":0}}

 MongoDB init process complete; ready for start up.

{"t":{"$date":"2021-01-01T00:15:57.181+00:00"},"s":"I",  "c":"CONTROL",  "id":23285,   "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
{"t":{"$date":"2021-01-01T00:15:57.185+00:00"},"s":"W",  "c":"ASIO",     "id":22601,   "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2021-01-01T00:15:57.185+00:00"},"s":"I",  "c":"NETWORK",  "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I",  "c":"STORAGE",  "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":1,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"973e354c35f7"}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I",  "c":"CONTROL",  "id":23403,   "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"4.4.2","gitVersion":"15e73dc5738d2278b688f8929aee605fe4279b0e","openSSLVersion":"OpenSSL 1.1.1  11 Sep 2018","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu1804","distarch":"x86_64","target_arch":"x86_64"}}}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I",  "c":"CONTROL",  "id":51765,   "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"18.04"}}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I",  "c":"CONTROL",  "id":21951,   "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"*"},"security":{"authorization":"enabled"}}}}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I",  "c":"STORAGE",  "id":22270,   "ctx":"initandlisten","msg":"Storage engine to use detected by data files","attr":{"dbpath":"/data/db","storageEngine":"wiredTiger"}}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I",  "c":"STORAGE",  "id":22297,   "ctx":"initandlisten","msg":"Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem","tags":["startupWarnings"]}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I",  "c":"STORAGE",  "id":22315,   "ctx":"initandlisten","msg":"Opening WiredTiger","attr":{"config":"create,cache_size=6656M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],"}}
{"t":{"$date":"2021-01-01T00:15:57.861+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460157:861019][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2"}}
{"t":{"$date":"2021-01-01T00:15:57.969+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460157:969647][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.064+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:64534][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Main recovery loop: starting at 1/29568 to 2/256"}}
{"t":{"$date":"2021-01-01T00:15:58.175+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:175353][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.249+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:249021][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.301+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:301066][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global recovery timestamp: (0, 0)"}}
{"t":{"$date":"2021-01-01T00:15:58.301+00:00"},"s":"I",  "c":"STORAGE",  "id":22430,   "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:301126][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global oldest timestamp: (0, 0)"}}
{"t":{"$date":"2021-01-01T00:15:58.317+00:00"},"s":"I",  "c":"STORAGE",  "id":4795906, "ctx":"initandlisten","msg":"WiredTiger opened","attr":{"durationMillis":1128}}
{"t":{"$date":"2021-01-01T00:15:58.317+00:00"},"s":"I",  "c":"RECOVERY", "id":23987,   "ctx":"initandlisten","msg":"WiredTiger recoveryTimestamp","attr":{"recoveryTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2021-01-01T00:15:58.318+00:00"},"s":"I",  "c":"STORAGE",  "id":4366408, "ctx":"initandlisten","msg":"No table logging settings modifications are required for existing WiredTiger tables","attr":{"loggingEnabled":true}}
{"t":{"$date":"2021-01-01T00:15:58.320+00:00"},"s":"I",  "c":"STORAGE",  "id":22262,   "ctx":"initandlisten","msg":"Timestamp monitor starting"}
{"t":{"$date":"2021-01-01T00:15:58.321+00:00"},"s":"I",  "c":"CONTROL",  "id":22161,   "ctx":"initandlisten","msg":"You are running in OpenVZ which can cause issues on versions of RHEL older than RHEL6","tags":["startupWarnings"]}
{"t":{"$date":"2021-01-01T00:15:58.324+00:00"},"s":"I",  "c":"STORAGE",  "id":20536,   "ctx":"initandlisten","msg":"Flow Control is enabled on this deployment"}
{"t":{"$date":"2021-01-01T00:15:58.325+00:00"},"s":"I",  "c":"FTDC",     "id":20625,   "ctx":"initandlisten","msg":"Initializing full-time diagnostic data capture","attr":{"dataDirectory":"/data/db/diagnostic.data"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I",  "c":"NETWORK",  "id":23015,   "ctx":"listener","msg":"Listening on","attr":{"address":"/tmp/mongodb-27017.sock"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I",  "c":"NETWORK",  "id":23015,   "ctx":"listener","msg":"Listening on","attr":{"address":"0.0.0.0"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I",  "c":"NETWORK",  "id":23016,   "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}

Log from the mongo-express service:

Mongo Express server listening at http://0.0.0.0:8081
Server is open to allow connections from anyone (0.0.0.0)
basicAuth credentials are "admin:pass", it is recommended you change this in your config.js!

/node_modules/mongodb/lib/server.js:265
        process.nextTick(function() { throw err; })
                                      ^
Error [MongoError]: failed to connect to server [mongo:27017] on first connect
    at Pool.<anonymous> (/node_modules/mongodb-core/lib/topologies/server.js:326:35)
    at Pool.emit (events.js:314:20)
    at Connection.<anonymous> (/node_modules/mongodb-core/lib/connection/pool.js:270:12)
    at Object.onceWrapper (events.js:421:26)
    at Connection.emit (events.js:314:20)
    at Socket.<anonymous> (/node_modules/mongodb-core/lib/connection/connection.js:175:49)
    at Object.onceWrapper (events.js:421:26)
    at Socket.emit (events.js:314:20)
    at emitErrorNT (internal/streams/destroy.js:92:8)
    at emitErrorAndCloseNT (internal/streams/destroy.js:60:3)
Waiting for mongo:27017...
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan  1 00:17:21 UTC 2021 retrying to connect to mongo:27017 (2/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan  1 00:17:27 UTC 2021 retrying to connect to mongo:27017 (3/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan  1 00:17:33 UTC 2021 retrying to connect to mongo:27017 (4/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan  1 00:17:39 UTC 2021 retrying to connect to mongo:27017 (5/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
  • did you change `username` and `password` from `root` and `example`? – Chandan Jan 03 '21 at 12:00
  • did you launch the `mongo-db` container from the command line? – Chandan Jan 03 '21 at 12:02
  • why do I have to change the username/password? Sure, I would change it for production, but on Windows there was no need to in order to get it running. – Osman Jan 03 '21 at 12:25
  • launching the mongo-db container solely from the command line works, but changing the username/password has no effect – Osman Jan 03 '21 at 12:27
  • check if the ports are not already in use on the host – Chandan Jan 03 '21 at 13:01
  • the ports are not occupied; if that were the case, there would be a corresponding error – Osman Jan 03 '21 at 15:13
  • can you check what your container name is – Chandan Jan 03 '21 at 15:37
  • from other images I know Docker can assign a different IP on startup, so make sure you're using the right IP – ACV Jan 03 '21 at 16:14
  • For example: `sudo docker inspect mongo | grep IPAddress` where mongo is your image name – ACV Jan 03 '21 at 16:16
  • @ACV but as far as I know mongo-express is not addressing the IP address directly. It uses the container name "mongo" by default instead. – Osman Jan 03 '21 at 16:16
  • `sudo docker inspect mongo | grep IPAddress` does output an IP address – Osman Jan 03 '21 at 16:20
  • OK, make sure CentOS doesn't have SELinux enabled. Try disabling it – ACV Jan 03 '21 at 16:40
  • @Osman so use that IP address in your mongo client – ACV Jan 03 '21 at 16:42
  • SELinux is deactivated. Regarding the IP address advice: I don't want to manually set an IP address on each compose up. Furthermore, there will be more than one mongo client; that's why I prefer addressing by container name. – Osman Jan 03 '21 at 16:57
  • It looks like the `/dev/tcp/mongo/27017: Invalid argument` is coming from [here](https://github.com/mongo-express/mongo-express-docker/blob/4b43fe8a1206434cb32a006cd155dd71462f092f/docker-entrypoint.sh#L14) – Shane Bishop Jan 09 '21 at 00:42
  • @Osman if you launch your docker compose file, what does `docker ps --all` output related to the pattern `mongo`? I mean, something like `docker ps --all | grep mongo`. Please, can you try the command? – jccampanero Jan 09 '21 at 18:04
  • @jccampanero since I'm currently trying other Linux distros in order to speed this up.. I will try this when I get back to CentOS – Osman Jan 09 '21 at 22:22
  • It is fine @Osman, please, do not worry. If you can test it, perhaps we can dig further into the problem. – jccampanero Jan 10 '21 at 11:14
  • @jccampanero `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES ac8240e3d104 mongo-express "tini -- /docker-ent…" 6 hours ago Exited (143) 6 hours ago docker_mongo-express_1 af6b1f9de25d mongo "docker-entrypoint.s…" 6 hours ago Exited (0) 6 hours ago docker_mongo_1` – Osman Jan 12 '21 at 19:02

3 Answers


Typically the number of retries for mongo-express is 5; maybe the database takes more time to become connectable in a Docker-on-CentOS environment than on Windows/macOS. You can check whether the database is working by running docker exec -it nameofmongocontainer mongo -uroot -pexample and then something like show dbs; (see the sketch below).
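
For example, a minimal check (the container name docker_mongo_1 is taken from the docker ps output in the comments above; yours may differ):

# open a mongo shell inside the running MongoDB container
docker exec -it docker_mongo_1 mongo -uroot -pexample
# inside the shell, confirm the server responds:
show dbs;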

You mentioned a DNS issue; it happened to me on my Docker-on-Windows environment three times before mongo-express managed to establish a connection with the database, so it is not exclusive to the CentOS Docker environment.

Potential fixes: you can mount a modified entrypoint script into the mongo-express container that increases the number of retries to 20. Here is an example (save the docker-entrypoint.sh script in the same location as your docker-compose file):

#!/bin/bash
set -eo pipefail
# This is a very small modification of the original docker script taken from
# https://github.com/mongo-express/mongo-express-docker/blob/master/docker-entrypoint.sh
# with only change being the modified value of max_tries=20 instead of original 5,
# to give the main mongodb container enough time to start and be connectable to.
# Also, see https://docs.docker.com/compose/startup-order/

# if command does not start with mongo-express, run the command instead of the entrypoint
if [ "${1}" != "mongo-express" ]; then
    exec "$@"
fi

function wait_tcp_port {
    local host="$1" port="$2"
    local max_tries=20 tries=1

    # see http://tldp.org/LDP/abs/html/devref1.html for description of this syntax.
    while ! exec 6<>/dev/tcp/$host/$port && [[ $tries -lt $max_tries ]]; do
        sleep 10s
        tries=$(( tries + 1 ))
        echo "$(date) retrying to connect to $host:$port ($tries/$max_tries)"
    done
    exec 6>&-
}

# if ME_CONFIG_MONGODB_SERVER has a comma in it, we're pointing to a replica set (https://github.com/mongo-express/mongo-express-docker/issues/21)
if [[ "$ME_CONFIG_MONGODB_SERVER" != *,* ]]; then
    # wait for the mongo server to be available
    echo Waiting for ${ME_CONFIG_MONGODB_SERVER}:${ME_CONFIG_MONGODB_PORT:-27017}...
    wait_tcp_port "${ME_CONFIG_MONGODB_SERVER}" "${ME_CONFIG_MONGODB_PORT:-27017}"
fi

# run mongo-express
exec node app

The docker-compose file will then be:

version: '3.1'

services:
  mongo:
    image: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example

  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
    volumes: 
      - ./docker-entrypoint.sh:/docker-entrypoint.sh
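
Note (an assumption about the setup, not part of the original answer): the mounted script must be executable, and on CentOS with SELinux enforcing a bind mount may additionally need a :z label. A minimal sketch:

chmod +x docker-entrypoint.sh   # make the mounted entrypoint executable on the host
# with SELinux enforcing, the volume entry can be labelled like this:
#   - ./docker-entrypoint.sh:/docker-entrypoint.sh:z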

Maybe an issue with CentOS itself? What you can do is run these steps separately, then check the result as shown after the list:

  1. docker network create mongo-test
  2. docker run -d --name mongo -e MONGO_INITDB_ROOT_USERNAME=root -e MONGO_INITDB_ROOT_PASSWORD=example --net mongo-test mongo
  3. docker run -d --name mongo-express -e ME_CONFIG_MONGODB_ADMINUSERNAME=root -e ME_CONFIG_MONGODB_ADMINPASSWORD=example --net mongo-test -p 8081:8081 mongo-express
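
Then a quick way to see whether mongo-express reaches the database this time (mongo-express here is the container name set by --name in step 3):

docker logs -f mongo-express   # follow the mongo-express output and watch for the connection error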

Another potential issue: CentOS may not be allowing containers to publish ports with "-p". Try running a test, docker run --rm --name test --net mongo-test -p 8081:80 httpd, and then check localhost:8081.

Aschay
  • I haven't tested it yet, but I'm not sure if increasing the retry count will change anything, since in total there are already more than 5 retries happening in my case: after the 5 attempts, the process is repeated again and again because of `restart: always` in the compose file (I've just cut off the logs). Even though I doubt this will work, I'll award this answer the bounty for your efforts (otherwise the bounty is gone anyway) – Osman Jan 11 '21 at 08:52
  • as expected, increasing the retry count has no (resolving) effect. Running it the classic Docker way unfortunately doesn't work either; mongo-express exits after a short time. I guess that's not surprising when it's also not working over compose. – Osman Jan 12 '21 at 20:23
  • I don't know if you have tried `--link : mongo` in docker-compose, or even changing the version of mongo-express.. not enough debugging logs – Aschay Jan 12 '21 at 20:37
  • I have checked https://github.com/mongo-express/mongo-express-docker ... they use link in run.sh after building the image, so it's not the problem: `#!/bin/bash set -e docker run -it --rm \ --link web_db_1:mongo \ --name mongo-express-docker \ -p 8081:8081 \ mongo-express-docker "$@"` – Aschay Jan 12 '21 at 20:51
  • I didn't know about '--link', but since it is marked as deprecated I can't rely on that. Actually, I wanted to try another version of mongo-express, but there aren't any provided on Docker Hub. – Osman Jan 12 '21 at 21:28
  • thanks for your advice.. but I tend to give up on this issue. I don't really have to use CentOS; I tried to, but I wasted too many hours on this. I'm going to switch to another hosting service (and maybe to another distro) – Osman Jan 12 '21 at 21:33
  • Sorry about that. There is a workaround for the versions: https://github.com/docker-library/repo-info/blob/master/repos/mongo-express/tag-details.md You should raise the issue on GitHub anyway – Aschay Jan 12 '21 at 21:36

Try this:

# Use root/example as user/password credentials
version: '3.1'

services:

  mongo:
    image: mongo
    container_name: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    ports:
      - 27017:27017

  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
    links:
      - "mongo"
    depends_on:
      - "mongo"
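
A caveat worth adding (my note, not part of the original answer): depends_on only waits for the mongo container to be started, not for mongod to accept connections, and the legacy v3 compose format does not support condition: service_healthy, so mongo-express still relies on restart: always to retry. If you want Docker itself to track readiness, one option is a healthcheck on the mongo service; a sketch, assuming the mongo shell is available in the image (it is in mongo 4.x):

  mongo:
    # ... existing image/environment settings ...
    healthcheck:
      test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"]
      interval: 10s
      timeout: 5s
      retries: 5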

Chandan
  • unfortunately this doesn't work either. Actually, I had already tried this config before I asked this question here (except for the "links:" part) – Osman Jan 03 '21 at 16:14

If you want, please try this (working example):

version: '3'

services:
  mongo:
    image: mongo
    restart: always
    container_name: my_mongo
    ports:
    - "27017:27017"
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    volumes:
    - ./data/db:/data/db
    - ./data/configdb:/data/configdb

  mongo-express:
    image: mongo-express
    restart: always
    container_name: my_mongo_express
    ports:
    - "8081:8081"
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
      ME_CONFIG_BASICAUTH_USERNAME: user
      ME_CONFIG_BASICAUTH_PASSWORD: example
    depends_on:
      - mongo
    
volumes:
  node_modules:

You can also try to expose the Mongo service with a different name or port if you think there is a problem there. In that case, you also have to change the respective variables in the mongo-express service accordingly (ME_CONFIG_MONGODB_SERVER and ME_CONFIG_MONGODB_PORT) - details on the possible configurations here
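
For illustration only (my-mongo is a made-up service name, not something from this compose file), if the mongo service were renamed, the mongo-express environment would need something like:

      ME_CONFIG_MONGODB_SERVER: my-mongo   # must match the mongo service/container hostname
      ME_CONFIG_MONGODB_PORT: "27017"      # only needed if mongod listens on a non-default port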

Either way, is the network involving the two services correctly created?
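
To verify that, a sketch (docker_default is an assumption; docker-compose names the default network after the project directory, so check docker network ls for the real name):

docker network ls                        # list networks; look for the one compose created
docker network inspect docker_default    # both containers should be listed under "Containers"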

nunohpinheiro
  • actually I didn't create any network on Windows/Mac Docker Desktop; AFAIK Docker uses a default network in such cases, so I didn't create a network in the first place. In later attempts I did create networks and added the services to specific network(s); this didn't help either. Currently I'm trying other Linux distros.. but I'll try your example asap. – Osman Jan 09 '21 at 22:20