#!/usr/bin/env python
import json

import gearman
from fabric.api import env, run, settings

#
# Run the ssh task via fabric
#
def exe_job(worker, job):
    d = json.loads(job.data)
    env.host_string = d['host']
    cmd = d['command']
    retries = int(d['retries'])
    output = ""
    # Run the fabric command up to `retries` times; warn_only=True
    # keeps fabric from aborting the worker on a non-zero exit code.
    while retries > 0:
        with settings(warn_only=True):
            result = run(cmd)
        output += str(result)
        if result.failed:
            if retries == 1:
                # Final attempt failed: mark the job as failed.
                job.send_fail()
                break
            # Otherwise fall through and try again.
        else:
            # Command succeeded; stop retrying.
            break
        retries -= 1
    return output

#
# Main function
#
def main():
    gm_worker = gearman.GearmanWorker(['localhost:4730'])
    gm_worker.register_task('exe_job', exe_job)
    gm_worker.work()

if __name__ == '__main__':
    main()
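
For context, this is roughly how the client submits the job (a sketch of typical python-gearman usage; the host, command, and retry values are placeholders, not my real setup):

import json
import gearman

client = gearman.GearmanClient(['localhost:4730'])
request = client.submit_job('exe_job', json.dumps({
    'host': 'deploy@web01',  # placeholder fabric host string
    'command': 'uptime',     # placeholder command
    'retries': '3',
}))
# submit_job() blocks until the job completes or fails by default.
print(request.state)   # e.g. JOB_FAILED when the worker calls send_fail()
print(request.result)  # empty on failure, so the captured output is lost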
In my code, I am retrying a gearman job (which runs a fabric command) up to the number of retries specified by the user, capturing and appending the output of every attempt. If the job still fails on the last retry, I want to return the accumulated output to the client.

What happens instead is that the job.send_fail() call simply ends the job: execution never reaches the return output statement, so the output of the failed command never makes it back to the client.

Is there a better way to fail a job and still return data to the client on exit/failure, without the worker dying?
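
One workaround I can think of is to never call send_fail() at all: always complete the job, and encode success/failure plus the collected output in the returned JSON so the client can decide. A minimal sketch, reusing the imports from the worker script above (the 'status'/'output' payload keys are my own convention, not part of gearman):

# Sketch: always complete the job; report failure inside the payload.
# The 'status'/'output' keys are an assumed convention between my
# client and worker, not part of the gearman protocol.
def exe_job(worker, job):
    d = json.loads(job.data)
    env.host_string = d['host']
    cmd = d['command']
    retries = int(d['retries'])
    output = ""
    failed = True
    while retries > 0:
        with settings(warn_only=True):
            result = run(cmd)
        output += str(result)
        if not result.failed:
            failed = False
            break
        retries -= 1
    # No send_fail() here, so execution always reaches the return and
    # the client always receives the accumulated output.
    return json.dumps({'status': 'failed' if failed else 'ok',
                       'output': output})

The downside is that the job never registers as failed on the gearman server, so anything watching job state would not see the failure. Hence my question about whether there is a proper way to both fail the job and hand the output back.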