I am trying to plan an approach for counting vehicles and pedestrians in a video. Here are the basic steps I have in mind:
- Use background subtraction to separate moving objects from the background.
- Use cv2.SimpleBlobDetector to detect blobs in the foreground mask produced by the background-subtraction step and return their keypoints.
- Track all blobs across frames using those keypoints (not yet implemented in the example; a rough sketch of what I have in mind is at the end of this post).
The question: can this approach be applied to both pedestrians and vehicles, and if so, how can one tell the different kinds of blobs apart?
I am wondering whether blob size could be used to distinguish pedestrians (small blobs) from vehicles (larger blobs). However, I am not sure how to handle the case of a vehicle that is far from the camera and therefore appears small.
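Here is roughly what I am considering for the size-based split. The classify_keypoints helper, the 20-pixel base threshold, and the linear scaling of that threshold with the keypoint's y coordinate (as a crude perspective correction) are all guesses of my own, not something I have validated:

def classify_keypoints(keypoints, frame_height, base_threshold=20.0):
    """Split blob keypoints into pedestrians and vehicles by blob size (sketch).

    base_threshold is a guessed diameter in pixels separating the two classes
    near the bottom of the frame; it is scaled down for keypoints higher up in
    the image as a very rough perspective correction.
    """
    pedestrians, vehicles = [], []
    for kp in keypoints:
        _, y = kp.pt
        # Objects near the top of the frame are further away, so shrink the
        # size threshold proportionally.
        scale = max(y / float(frame_height), 0.1)
        if kp.size < base_threshold * scale:
            pedestrians.append(kp)
        else:
            vehicles.append(kp)
    return pedestrians, vehicles

Even with the scaling, I suspect this would still confuse a distant vehicle with a nearby pedestrian, which is exactly the case I am unsure about. Here is the code I have so far for the first two steps: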
import numpy as np
import cv2
cap = cv2.VideoCapture('video.avi')
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
# OpenCV 2.x exposes BackgroundSubtractorMOG directly; in OpenCV 3+ the MOG
# subtractor lives in the opencv-contrib bgsegm module.
ver = cv2.__version__.split('.')
if int(ver[0]) < 3:
    fgbg = cv2.BackgroundSubtractorMOG(500, 6, 0.9, 1)
else:
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(500, 6, 0.9, 1)
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 10
params.maxThreshold = 200
# Filter by Area.
params.filterByArea = True
params.minArea = 400
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
# Create a detector with the parameters (the factory function was renamed in OpenCV 3)
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)
while True:
    ret, frame = cap.read()
    if not ret:
        # End of the video (or a read error): stop instead of passing None to apply()
        break
    fgmask = fgbg.apply(frame)
    # Morphological opening to remove small noise specks from the foreground mask
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    # Detect blobs in the foreground mask.
    keypoints = detector.detect(fgmask)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size.
    im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    print(keypoints)
    cv2.imshow('frame', im_with_keypoints)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc key quits
        break
cap.release()
cv2.destroyAllWindows()
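For the tracking step I have not written anything yet, but this is the direction I am leaning toward: greedily match each new keypoint to the nearest tracked centroid from the previous frame. The Track class and the 50-pixel matching radius below are placeholders of my own, not tested code:

import math

class Track(object):
    """Minimal track: an id plus the last known centroid (sketch only)."""
    def __init__(self, track_id, pt):
        self.id = track_id
        self.pt = pt

def update_tracks(tracks, keypoints, next_id, max_dist=50.0):
    """Greedy nearest-neighbour association of blob keypoints to existing tracks.

    max_dist is a guessed gating radius in pixels; unmatched keypoints start new
    tracks. A real implementation would also age out lost tracks and handle
    merges/splits when blobs touch.
    """
    unmatched = list(keypoints)
    for track in tracks:
        if not unmatched:
            break
        # Closest detection to this track's last known position.
        best = min(unmatched, key=lambda kp: math.hypot(kp.pt[0] - track.pt[0],
                                                        kp.pt[1] - track.pt[1]))
        if math.hypot(best.pt[0] - track.pt[0], best.pt[1] - track.pt[1]) <= max_dist:
            track.pt = best.pt
            unmatched.remove(best)
    for kp in unmatched:
        tracks.append(Track(next_id, kp.pt))
        next_id += 1
    return tracks, next_id

The per-class counts would then come from counting track ids that cross a counting line, but whether the same tracking works equally well for pedestrians and vehicles is part of what I am asking.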