I have multiple background job processors set up using the bull package, as shown below:
import { CronJob } from 'cron';
import Queue from 'bull';
let queue = new Queue('workers', {
  // settings: { lockDuration: 60 * 20000 },
  defaultJobOptions: {
    removeOnComplete: true
  },
});
queue
  .on('waiting', function(jobId) {
    // A Job is waiting to be processed as soon as a worker is idling.
    // workerLogger.info(`Job ${jobId} waiting to be processed `);
  })
  .on('completed', async (job, result) => {
    workerLogger.info(`Job ID: ${job.id}, Result: ${result}`);
    try {
      const jobbed = await queue.getJob(job.id);
      if (jobbed) {
        await jobbed.remove();
        workerLogger.info(`removed completed job ${job.id}`);
      }
    } catch (error) {
      // throwing inside an async event handler only surfaces as an unhandled
      // rejection, so log the failure instead
      workerLogger.error(`failed to remove completed job ${job.id}: ${error}`);
    }
  })
  .on('failed', function(job, err) {
    workerLogger.error('job ' + job.id + ' in queue failed... ' + err);
  })
  .on('error', function(err) {
    workerLogger.error('Queue Error... ' + err);
  })
  .on('stalled', function(job) {
    workerLogger.info(
      `stalled job, restarting it again! ${job.queue.name} ${JSON.stringify(
        job.data,
      )} ${job.id} ${job.name}`,
    );
  });
queue.process('healthCheckPing', concurrency, function(job, done) {
  jobs.healthCheckPing(job.data, done);
});
queue.process('test', concurrency, function(job, done) {
  jobs.test(job.data, done);
});
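For context, workerLogger is a shared application logger, jobs is the processor module shown at the end of this question, and concurrency is a small integer; they come into this worker file roughly like this (paths and the value are illustrative, not my exact ones):

// Illustrative only — actual paths/values differ in my project
import workerLogger from './logger'; // shared logger instance used in the snippets below
import jobs from './jobs';           // the processor functions shown at the end of the question
const concurrency = 1;               // per-processor concurrency passed to queue.process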
I tried running the jobs on a cron schedule using the cron package, but only one job gets processed. Please check the sample below:
const cron = new CronJob({
  cronTime: '* * * * *',
  onTick: function() {
    workerLogger.info('Pushing test to queue...');
    queue.add('test');
    queue.add('healthCheckPing', {
      jobName: 'test',
    });
  },
  start: true,
  timeZone: 'Africa/Lagos',
});
I have tried a lot of things to make it work, but none of them has worked. A few of the attempts are listed below (a rough sketch of how the first two were wired follows the list):
- running await queue.obliterate({ force: true });
- running the worker processor on a separate instance
- adding a delay to the job - queue.add('test', {}, { delay: 500 });
- adding priority to the job - queue.add('test', {}, { priority: 1 });
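For reference, the first two attempts were wired roughly like this (sketch only; the exact placement in my code differs slightly):

// 1) Clearing out the queue once on startup, before the cron starts
await queue.obliterate({ force: true });

// 2) Registering the processors in a separate worker instance, while the
//    web/cron instance only calls queue.add(...)
// worker.js (run as its own pm2 process) — illustrative
queue.process('test', concurrency, (job, done) => jobs.test(job.data, done));
queue.process('healthCheckPing', concurrency, (job, done) =>
  jobs.healthCheckPing(job.data, done),
);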
Out of the two jobs above (test, healthCheckPing), only one ever gets processed: either the job named test is processed while healthCheckPing is not, or healthCheckPing is processed while test is not.
These are the job processor functions:
import axios from 'axios';

const jobs = {};
jobs.test = (_, done) => {
  try {
    workerLogger.error('test');
    done(false, 'ok');
  } catch (e) {
    done(e);
  }
};
jobs.healthCheckPing = async ({ jobName }, done) => {
  try {
    workerLogger.info('health check pinger');
    if (!jobName) throw new Error('uuid not passed');
    // jobName is the slug for the monitor;
    // the ping key distinguishes staging and production monitors
    const pingKey =
      !process.env.NODE_ENV || process.env.NODE_ENV !== 'production'
        ? process.env.STAGING_HEALTH_CHECK_KEY
        : process.env.PROD_HEALTH_CHECK_KEY;
    const url = `https://hc-ping.com/${pingKey}/${jobName}`;
    await axios.get(url);
    done(false, `pinged ${jobName}!`);
  } catch (error) {
    done(error);
  }
};
export default jobs;
This code works perfectly on my local machine; the issue only occurs in production. The Node server is run using pm2 (cluster mode, instances = 1).
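For completeness, the pm2 setup is roughly equivalent to the ecosystem config below (the file name, app name, and script path are illustrative, not my exact values):

// ecosystem.config.js — illustrative
module.exports = {
  apps: [
    {
      name: 'app',                // illustrative app name
      script: './dist/server.js', // illustrative entry point
      exec_mode: 'cluster',       // pm2 cluster mode
      instances: 1,               // single instance, as described above
    },
  ],
};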