I would like to run a continuous stream with the PiCamera on the Raspberry Pi 3 and do other computations in parallel with this stream. All I need from that stream (process) is the object it detected. I will post the code I have so far below. It never enters the computation() function; it just starts the camera, detects objects there, and stays in that process.
I've tried using Python's multiprocessing module, but it doesn't seem to work.
from multiprocessing import Process

import cv2
import numpy as np
from picamera import PiCamera
from picamera.array import PiRGBArray

# IM_WIDTH, IM_HEIGHT, camera_type, the TensorFlow session `sess`, the detection
# tensors, `category_index` and `vis_util` are set up earlier in the full script.


def startRecord():
    frame_rate_calc = 1
    freq = cv2.getTickFrequency()
    font = cv2.FONT_HERSHEY_SIMPLEX

    camera = PiCamera()
    camera.resolution = (IM_WIDTH, IM_HEIGHT)
    camera.framerate = 10
    camera.vflip = True
    rawCapture = PiRGBArray(camera, size=(IM_WIDTH, IM_HEIGHT))
    rawCapture.truncate(0)

    for frame1 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        object_detected = "none"
        t1 = cv2.getTickCount()

        # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        frame = np.copy(frame1.array)
        frame.setflags(write=1)
        frame_expanded = np.expand_dims(frame, axis=0)

        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: frame_expanded})

        # Draw the results of the detection (aka 'visualize the results')
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.40)

        if classes[0][0] == 1 and scores[0][0] > 0.98:
            object_detected = "circle"
        elif classes[0][0] == 2 and scores[0][0] > 0.98:
            object_detected = "donnut"
        elif classes[0][0] == 3 and scores[0][0] > 0.98:
            object_detected = "square"
        elif classes[0][0] == 4 and scores[0][0] > 0.98:
            object_detected = "alphabot"

        cv2.putText(frame, "FPS: {0:.2f}".format(frame_rate_calc), (30, 50), font, 1, (255, 255, 0), 2, cv2.LINE_AA)

        # All the results have been drawn on the frame, so it's time to display it.
        cv2.imshow('Object detector', frame)

        t2 = cv2.getTickCount()
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1

        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break

        rawCapture.truncate(0)

    camera.close()


def computation():
    print("OUTSIDE OF CAPTURE")
    print(object_detected)


### Picamera ###
if camera_type == 'picamera':
    # Initialize Picamera and grab reference to the raw capture
    p1 = Process(target=startRecord())
    p2 = Process(target=computation())
    p1.start()
    p2.start()
    p1.join()
    p2.join()
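
To make it clearer what I am aiming for, here is a minimal, self-contained sketch of the producer/consumer structure I have in mind. It is purely illustrative: the real detection loop is replaced by a hypothetical detect_once() stub, and the two processes hand the latest label over through a multiprocessing.Queue.

from multiprocessing import Process, Queue
import random
import time


def detect_once():
    # Stand-in for the real per-frame detection; just returns a random label.
    return random.choice(["none", "circle", "donnut", "square", "alphabot"])


def start_record(queue):
    # Producer: pretends to be the camera loop and pushes every detection.
    for _ in range(20):
        queue.put(detect_once())
        time.sleep(0.1)
    queue.put(None)  # sentinel so the consumer knows the stream ended


def computation(queue):
    # Consumer: runs in its own process and reacts to whatever was detected.
    while True:
        label = queue.get()
        if label is None:
            break
        print("detected:", label)


if __name__ == '__main__':
    q = Queue()
    p1 = Process(target=start_record, args=(q,))
    p2 = Process(target=computation, args=(q,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

Is this roughly the right way to structure it, or is there a better mechanism for handing the detected object from the camera process to the computation process?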