I'm testing the performance of IBM MQ (running the latest version in a local docker container) I use a persistent queue.
On the producer side, I can get higher throughput by running multiple producing applications in parallel.
However, on the consumer side, I cannot increase the throughput by parallelizing consumer processes. On the contrary, the throughput is even worse for multiple consumers than for one single consumer.
What could be the reason for the poor consuming performance?
It shouldn't be a hardware limit: I'm comparing consumption against production on the same machine, and the consumer does nothing except read messages — no other processing.
Does the GET perform the commit for each message? I don't find any explicit commit method in PyMQI though.
put_demo.py
#!/usr/bin/env python3
"""Producer benchmark: put a fixed number of messages on an IBM MQ queue
and report the achieved throughput (messages per second)."""
import pymqi
import sys
import time

queue_manager = 'QM1'
channel = 'DEV.APP.SVRCONN'
host = '127.0.0.1'
port = '1414'
queue_name = 'DEV.QUEUE.1'
message = b'Hello from Python!'
conn_info = '%s(%s)' % (host, port)
nb_messages = 1000

t0 = time.time()
qmgr = pymqi.connect(queue_manager, channel, conn_info)
queue = pymqi.Queue(qmgr, queue_name)
try:
    for i in range(nb_messages):
        try:
            queue.put(message)
        except pymqi.MQMIError as e:
            # A failed put is fatal for the benchmark: abort immediately
            # instead of looping on and reporting a misleading tps figure.
            print(f"Fatal error: {str(e)}")
            sys.exit(1)
finally:
    # Release MQ resources even when a put failed.
    queue.close()
    qmgr.disconnect()
t1 = time.time()
print(f"tps: {nb_messages/(t1-t0):.0f} nb_message_produced: {nb_messages}")
get_demo.py
#!/usr/bin/env python3
"""Consumer benchmark: get a fixed number of messages from an IBM MQ queue
and report the achieved throughput (messages per second).

NOTE(review): without MQGMO_SYNCPOINT each destructive MQGET of a
persistent message is its own implicitly-committed unit of work, which is
what makes per-message consumption slow — see the syncpoint variant below.
"""
import pymqi
import time
import os

queue_manager = 'QM1'
channel = 'DEV.APP.SVRCONN'
host = '127.0.0.1'
port = '1414'
queue_name = 'DEV.QUEUE.1'
conn_info = '%s(%s)' % (host, port)
nb_messages = 1000
nb_messages_consumed = 0

t0 = time.time()
qmgr = pymqi.connect(queue_manager, channel, conn_info)
queue = pymqi.Queue(qmgr, queue_name)
gmo = pymqi.GMO(Options=pymqi.CMQC.MQGMO_WAIT | pymqi.CMQC.MQGMO_FAIL_IF_QUIESCING)
gmo.WaitInterval = 1000  # milliseconds to wait for a message before 2033
try:
    while nb_messages_consumed < nb_messages:
        try:
            msg = queue.get(None, None, gmo)
            nb_messages_consumed += 1
        except pymqi.MQMIError as e:
            if e.reason == pymqi.CMQC.MQRC_NO_MSG_AVAILABLE:  # reason 2033
                # Queue temporarily empty: keep waiting for more messages.
                pass
            else:
                # Any other MQ error (broken connection, auth failure,
                # quiescing, ...) must not be silently swallowed.
                raise
finally:
    # Release MQ resources even when the loop aborts on an error.
    queue.close()
    qmgr.disconnect()
t1 = time.time()
print(f"tps: {nb_messages_consumed/(t1-t0):.0f} nb_messages_consumed: {nb_messages_consumed}")
run results
> for i in {1..10}; do ./put_demo.py & done
tps: 385 nb_message_produced: 1000
tps: 385 nb_message_produced: 1000
tps: 383 nb_message_produced: 1000
tps: 379 nb_message_produced: 1000
tps: 378 nb_message_produced: 1000
tps: 377 nb_message_produced: 1000
tps: 377 nb_message_produced: 1000
tps: 378 nb_message_produced: 1000
tps: 374 nb_message_produced: 1000
tps: 374 nb_message_produced: 1000
> for i in {1..10}; do ./get_demo.py & done
tps: 341 nb_messages_consumed: 1000
tps: 339 nb_messages_consumed: 1000
tps: 95 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
tps: 82 nb_messages_consumed: 1000
get_demo.py updated version using syncpoint and batch commit
#!/usr/bin/env python3
"""Consumer benchmark using MQGMO_SYNCPOINT with batched commits.

Getting under syncpoint and committing every `commit_batch` messages
amortizes the per-unit-of-work log forces across the batch, which is the
usual fix for poor persistent-message consumption throughput.
"""
import pymqi
import time
import os

queue_manager = 'QM1'
channel = 'DEV.APP.SVRCONN'
host = '127.0.0.1'
port = '1414'
queue_name = 'DEV.QUEUE.1'
conn_info = '%s(%s)' % (host, port)
nb_messages = 1000
commit_batch = 10          # messages consumed per committed unit of work
nb_messages_consumed = 0

t0 = time.time()
qmgr = pymqi.connect(queue_manager, channel, conn_info)
queue = pymqi.Queue(qmgr, queue_name)
gmo = pymqi.GMO(Options=pymqi.CMQC.MQGMO_WAIT
                | pymqi.CMQC.MQGMO_FAIL_IF_QUIESCING
                | pymqi.CMQC.MQGMO_SYNCPOINT)
gmo.WaitInterval = 1000  # milliseconds to wait for a message before 2033
try:
    while nb_messages_consumed < nb_messages:
        try:
            msg = queue.get(None, None, gmo)
            nb_messages_consumed += 1
            if nb_messages_consumed % commit_batch == 0:
                qmgr.commit()
        except pymqi.MQMIError as e:
            if e.reason == pymqi.CMQC.MQRC_NO_MSG_AVAILABLE:  # reason 2033
                # Queue temporarily empty: keep waiting for more messages.
                pass
            else:
                # Any other MQ error must not be silently swallowed.
                raise
    # Commit a partial final batch explicitly; otherwise those messages
    # remain in the open unit of work until disconnect. Committing an
    # empty unit of work is harmless.
    qmgr.commit()
finally:
    # Release MQ resources even when the loop aborts on an error.
    queue.close()
    qmgr.disconnect()
t1 = time.time()
print(f"tps: {nb_messages_consumed/(t1-t0):.0f} nb_messages_consumed: {nb_messages_consumed}")
Thanks.