0

I am working on a project of information extraction from images with OpenVino and Python3. Because of the extension of the project, it was decided that our face detection module would work on an independent process with the support of a multiprocessing module.

Unfortunately, I am not managing to make the face detection work within this framework, because the infer() method from IeCore seems to block (there are no primary implementation problems because I can make the mentioned method works properly when using treading module instead of the process).

So, any help on how to work around this issue is welcome.

Bellow, the main elements of the code that I am trying to run with the multiprocessing module.

import os
import cv2
import time
import numpy as np
from pathlib import Path
from multiprocessing import Process

from modules.face.abstract.abcopenvino import ABCOpenVino
from modules.utils.blob2frame import blob2frame

from openvino.inference_engine import IECore, Blob, TensorDesc

YAXIS = 0
XAXIS = 1

class FaceDetection(ABCOpenVino):

    def __init__(self, frame_queue, face_queue):
        super().__init__()

        if not hasattr(self, 'confidence'):
            raise ValueError('Class attribute confidence must be defined')

        self._smoothed = np.zeros((4, )).astype('float32')

        self.frame_queue = frame_queue
        self.face_queue = face_queue
        
        ie_core_handle = IECore()

        self._network = ie_core_handle.read_network(self.file_xml, self.file_bin)

        key = next(iter(self._network.input_info))

        self.tensor_description = self._network.input_info[key].tensor_desc

        self._executable_network = ie_core_handle.load_network(self._network, device_name='CPU', num_requests=1)

        self.inference_request = self._executable_network.requests[0]

        self.input_blob_name = next(iter(self.inference_request.input_blobs))
        self.output_blob_name = next(iter(self.inference_request.output_blobs))

        self.p = Process(target=self._process_exec, args=())
        self.p.name = '{}_{}'.format(__name__, 0)
        self.p.daemon = True
        self.p.start()
        time.sleep(0.01)

        # With treading it works
        #import threading
        #t = threading.Thread(target=self._process_exec, args=())
        #t.name = '{}_{}'.format(__name__, 0)
        #t.daemon = True
        #t.start()
        #time.sleep(0.01)
 
    def _process_exec(self):
        self.myprint("Running! pid: {}".format(os.getpid()))

        while True:
            if self._stop:
                self.myprint('Stopping! pid: {}'.format(os.getpid()))
                time.sleep(0.05)
                break

            elem = self.frame_queue.get()

            if elem:
                frame, = elem
                face_bbox_lst = self._network_infer(frame)
                if face_bbox_lst:
                    elem = (frame, face_bbox_lst, )
                    self.face_queue.put(elem)

        self.myprint('Done')
        return

    def _network_infer(self, frame):
        N, C, H, W = self.tensor_description.dims
        hwc = cv2.resize(frame, (H, W,)).astype('float32')
        chw = hwc.transpose((2, 0, 1))
        input_blob = Blob(self.tensor_description, chw)
        
        self.inference_request.set_blob(blob_name=self.input_blob_name, blob=input_blob)       

        self.inference_request.infer()

        output = self.inference_request.output_blobs[self.output_blob_name].buffer
        return output

    def terminate(self):
        self._stop = True
vasadia
  • 366
  • 5
  • 8
Randerson
  • 775
  • 1
  • 5
  • 19

1 Answers1

0
self.frame_queue = frame_queue
self.face_queue = face_queue

Must be multiprocessing.Queue not queue.Queue. Btw I prefer mp.Pool().imap for such tasks - no queues and no race

marc_s
  • 732,580
  • 175
  • 1,330
  • 1,459
eri
  • 3,133
  • 1
  • 23
  • 35