I’m trying to set up a replica set between 3 MongoDB instances in a Docker environment. All of the MongoDB instances seem to work well: I’m able to connect to each of them individually with Compass. The replica set also seems to work; a primary is elected and nothing in the logs alerts me to a potential problem.
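For context, here is a simplified sketch of my docker-compose.yml (the service names and ports match the replica set config below; volumes, the keyfile/auth configuration, and other details are omitted for brevity):

version: "3.8"
services:
  mongo1:
    image: mongo   # image tag omitted here
    # each node runs with the same replica set name, on its own port
    command: mongod --replSet rs0 --port 30001 --bind_ip_all
    ports:
      - "30001:30001"
  mongo2:
    image: mongo
    command: mongod --replSet rs0 --port 30002 --bind_ip_all
    ports:
      - "30002:30002"
  mongo3:
    image: mongo
    command: mongod --replSet rs0 --port 30003 --bind_ip_all
    ports:
      - "30003:30003"

The service names give the containers the mongo1/mongo2/mongo3 DNS names on the Docker network, and each port is published 1:1 on the host.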
Here is the rs.conf() output:
{
  _id: 'rs0',
  version: 1,
  term: 3,
  members: [
    {
      _id: 0,
      host: 'mongo1:30001',
      arbiterOnly: false,
      buildIndexes: true,
      hidden: false,
      priority: 1,
      tags: {},
      secondaryDelaySecs: Long("0"),
      votes: 1
    },
    {
      _id: 1,
      host: 'mongo2:30002',
      arbiterOnly: false,
      buildIndexes: true,
      hidden: false,
      priority: 1,
      tags: {},
      secondaryDelaySecs: Long("0"),
      votes: 1
    },
    {
      _id: 2,
      host: 'mongo3:30003',
      arbiterOnly: false,
      buildIndexes: true,
      hidden: false,
      priority: 1,
      tags: {},
      secondaryDelaySecs: Long("0"),
      votes: 1
    }
  ],
  protocolVersion: Long("1"),
  writeConcernMajorityJournalDefault: true,
  settings: {
    chainingAllowed: true,
    heartbeatIntervalMillis: 2000,
    heartbeatTimeoutSecs: 10,
    electionTimeoutMillis: 10000,
    catchUpTimeoutMillis: -1,
    catchUpTakeoverDelayMillis: 30000,
    getLastErrorModes: {},
    getLastErrorDefaults: { w: 1, wtimeout: 0 },
    replicaSetId: ObjectId("646dd34139abd7f32bb09ea1")
  }
}
And the rs.status():
{
  set: 'rs0',
  date: 2023-05-24T10:01:40.074Z,
  myState: 2,
  term: Long("3"),
  syncSourceHost: 'mongo2:30002',
  syncSourceId: 1,
  heartbeatIntervalMillis: Long("2000"),
  majorityVoteCount: 2,
  writeMajorityCount: 2,
  votingMembersCount: 3,
  writableVotingMembersCount: 3,
  optimes: {
    lastCommittedOpTime: { ts: Timestamp({ t: 1684922497, i: 1 }), t: Long("3") },
    lastCommittedWallTime: 2023-05-24T10:01:37.139Z,
    readConcernMajorityOpTime: { ts: Timestamp({ t: 1684922497, i: 1 }), t: Long("3") },
    appliedOpTime: { ts: Timestamp({ t: 1684922497, i: 1 }), t: Long("3") },
    durableOpTime: { ts: Timestamp({ t: 1684922497, i: 1 }), t: Long("3") },
    lastAppliedWallTime: 2023-05-24T10:01:37.139Z,
    lastDurableWallTime: 2023-05-24T10:01:37.139Z
  },
  lastStableRecoveryTimestamp: Timestamp({ t: 1684922447, i: 1 }),
  electionParticipantMetrics: {
    votedForCandidate: true,
    electionTerm: Long("3"),
    lastVoteDate: 2023-05-24T09:30:57.058Z,
    electionCandidateMemberId: 1,
    voteReason: '',
    lastAppliedOpTimeAtElection: { ts: Timestamp({ t: 1684920580, i: 1 }), t: Long("2") },
    maxAppliedOpTimeInSet: { ts: Timestamp({ t: 1684920580, i: 1 }), t: Long("2") },
    priorityAtElection: 1,
    newTermStartDate: 2023-05-24T09:30:57.071Z,
    newTermAppliedDate: 2023-05-24T09:30:57.349Z
  },
  members: [
    {
      _id: 0,
      name: 'mongo1:30001',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 1846,
      optime: [Object],
      optimeDurable: [Object],
      optimeDate: 2023-05-24T10:01:37.000Z,
      optimeDurableDate: 2023-05-24T10:01:37.000Z,
      lastAppliedWallTime: 2023-05-24T10:01:37.139Z,
      lastDurableWallTime: 2023-05-24T10:01:37.139Z,
      lastHeartbeat: 2023-05-24T10:01:38.263Z,
      lastHeartbeatRecv: 2023-05-24T10:01:38.266Z,
      pingMs: Long("0"),
      lastHeartbeatMessage: '',
      syncSourceHost: 'mongo2:30002',
      syncSourceId: 1,
      infoMessage: '',
      configVersion: 1,
      configTerm: 3
    },
    {
      _id: 1,
      name: 'mongo2:30002',
      health: 1,
      state: 1,
      stateStr: 'PRIMARY',
      uptime: 1846,
      optime: [Object],
      optimeDurable: [Object],
      optimeDate: 2023-05-24T10:01:37.000Z,
      optimeDurableDate: 2023-05-24T10:01:37.000Z,
      lastAppliedWallTime: 2023-05-24T10:01:37.139Z,
      lastDurableWallTime: 2023-05-24T10:01:37.139Z,
      lastHeartbeat: 2023-05-24T10:01:38.263Z,
      lastHeartbeatRecv: 2023-05-24T10:01:39.779Z,
      pingMs: Long("0"),
      lastHeartbeatMessage: '',
      syncSourceHost: '',
      syncSourceId: -1,
      infoMessage: '',
      electionTime: Timestamp({ t: 1684920657, i: 1 }),
      electionDate: 2023-05-24T09:30:57.000Z,
      configVersion: 1,
      configTerm: 3
    },
    {
      _id: 2,
      name: 'mongo3:30003',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 1849,
      optime: [Object],
      optimeDate: 2023-05-24T10:01:37.000Z,
      lastAppliedWallTime: 2023-05-24T10:01:37.139Z,
      lastDurableWallTime: 2023-05-24T10:01:37.139Z,
      syncSourceHost: 'mongo2:30002',
      syncSourceId: 1,
      infoMessage: '',
      configVersion: 1,
      configTerm: 3,
      self: true,
      lastHeartbeatMessage: ''
    }
  ],
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1684922497, i: 1 }),
    signature: {
      hash: Binary(Buffer.from("71462de63bcd84a20b0fd37f9f8ecbeeeeff4646", "hex"), 0),
      keyId: Long("7236672499625230342")
    }
  },
  operationTime: Timestamp({ t: 1684922497, i: 1 })
}
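For reference, I initiated the replica set from mongosh inside one of the containers with roughly this (the member hosts match the rs.conf() output above):

rs.initiate({
  _id: "rs0",
  members: [
    { _id: 0, host: "mongo1:30001" },
    { _id: 1, host: "mongo2:30002" },
    { _id: 2, host: "mongo3:30003" }
  ]
})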
I have added w=majority to the URI, but nothing changed. I think I have misunderstood how to connect to the replica set via MongoDB Compass. I’m using this URI:
mongodb://******:******@XX.XX.XX.XX:30001/?replicaSet=rs0&directConnection=true&authMechanism=DEFAULT&authSource=admin&w=majority
But since that URI has directConnection=true (which pins Compass to that single node), shouldn’t I instead use a URI that lists all 3 MongoDB instances?
mongodb://******:******@XX.XX.XX.XX:30001,XX.XX.XX.XX:30002,XX.XX.XX.XX:30003/?replicaSet=rs0&authMechanism=DEFAULT&authSource=admin&w=majority
When I try this URI, I get this error:
getaddrinfo ENOTFOUND mongo1
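If I understand this error correctly, once connected the driver discovers the other members under the hostnames stored in the replica set config (mongo1, mongo2, mongo3), and those names only resolve on the Docker network, not from my client machine. Would adding entries like these to the client’s hosts file be the expected workaround (the IP is my server’s public address; this is just a guess on my part)?

# Client hosts file (on Windows: C:\Windows\System32\drivers\etc\hosts)
XX.XX.XX.XX mongo1
XX.XX.XX.XX mongo2
XX.XX.XX.XX mongo3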
Here are the logs:
mongo2 | {"t":{"$date":"2023-05-25T07:25:54.887+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"YY.YY.YY.YY:59230","uuid":"fcdd2403-e65a-4572-a505-90a5865b0650","connectionId":70,"connectionCount":13}}
mongo3 | {"t":{"$date":"2023-05-25T07:25:54.887+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"YY.YY.YY.YY:52958","uuid":"5be994ac-1641-4b4f-a6c2-d172dd90125a","connectionId":38,"connectionCount":9}}
mongo1 | {"t":{"$date":"2023-05-25T07:25:54.886+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"YY.YY.YY.YY:36398","uuid":"3403d40f-9766-4897-80e6-7ee9b10cf35d","connectionId":50,"connectionCount":7}}
mongo2 | {"t":{"$date":"2023-05-25T07:25:54.890+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn70","msg":"client metadata","attr":{"remote":"YY.YY.YY.YY:59230","client":"conn70","doc":{"driver":{"name":"nodejs","version":"5.1.0"},"os":{"type":"Windows_NT","name":"win32","architecture":"x64","version":"10.0.22621"},"platform":"Node.js v16.17.1, LE (unified)|Node.js v16.17.1, LE (unified)","application":{"name":"MongoDB Compass"}}}}
mongo3 | {"t":{"$date":"2023-05-25T07:25:54.890+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn38","msg":"client metadata","attr":{"remote":"YY.YY.YY.YY:52958","client":"conn38","doc":{"driver":{"name":"nodejs","version":"5.1.0"},"os":{"type":"Windows_NT","name":"win32","architecture":"x64","version":"10.0.22621"},"platform":"Node.js v16.17.1, LE (unified)|Node.js v16.17.1, LE (unified)","application":{"name":"MongoDB Compass"}}}}
mongo1 | {"t":{"$date":"2023-05-25T07:25:54.891+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn50","msg":"client metadata","attr":{"remote":"YY.YY.YY.YY:36398","client":"conn50","doc":{"driver":{"name":"nodejs","version":"5.1.0"},"os":{"type":"Windows_NT","name":"win32","architecture":"x64","version":"10.0.22621"},"platform":"Node.js v16.17.1, LE (unified)|Node.js v16.17.1, LE (unified)","application":{"name":"MongoDB Compass"}}}}
mongo2 | {"t":{"$date":"2023-05-25T07:25:54.946+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn70","msg":"Connection ended","attr":{"remote":"YY.YY.YY.YY:59230","uuid":"fcdd2403-e65a-4572-a505-90a5865b0650","connectionId":70,"connectionCount":12}}
mongo1 | {"t":{"$date":"2023-05-25T07:25:54.946+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn50","msg":"Connection ended","attr":{"remote":"YY.YY.YY.YY:36398","uuid":"3403d40f-9766-4897-80e6-7ee9b10cf35d","connectionId":50,"connectionCount":6}}
mongo3 | {"t":{"$date":"2023-05-25T07:25:54.947+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn38","msg":"Connection ended","attr":{"remote":"YY.YY.YY.YY:52958","uuid":"5be994ac-1641-4b4f-a6c2-d172dd90125a","connectionId":38,"connectionCount":8}}
Or maybe I’m missing something in the initial parameters, but I don’t know what. Any help would be appreciated.
Thanks, Mallory LP.