Two cameras face opposite directions, and I have two synchronously recorded videos from them. I already track people and assign each a unique ID (this works for a single video). The problem is the hand-off: when a person leaves the first camera's view through its trigger zone (the red square where people appear/disappear) and enters the second camera's view through its own trigger zone, I need to predict the coordinates where that person will appear in the other camera — in either direction — and feed those coordinates, together with the existing ID, to the tracker.
P.S. There is also a case where a person starts walking from the blind spot between the two cameras.
Code:
import numpy as np
import matplotlib.pyplot as plt
import cv2
from numpy.lib.type_check import imag
import math
# --- Tracker configuration and mutable state (shared by the main loop) ---
minimum = 6000  # minimum contour area (px^2) for a blob to count as a person
frame1 = None  # reference frame for the 'ABS' frame-differencing method
cap = cv2.VideoCapture("/home/timur/Downloads/2.mp4")  # first camera's recording
#cap2 = cv2.VideoCapture("/home/timur/Downloads/2.mp4")
method = 'ABS'  # active segmentation method: 'ABS', 'MOG2', or 'KNN' (keyboard-switchable)
mog = cv2.createBackgroundSubtractorMOG2()
knn = cv2.createBackgroundSubtractorKNN()
distances = []  # debug log of association distances (grows every frame)
detectedCount = 0  # detection counter used to gate the ID-bootstrap phase
cpoints_prev_frame = []  # blob centroids from the previous frame
tracking_objects = {}  # active tracks: {id: (cx, cy) last known centroid}
track_id = 0  # next unassigned track ID
# Main loop: segment moving people, track the largest blob's centroid across
# frames with greedy nearest-neighbour ID association, and display the result.
while True:
    # Grab the next frame and bail out cleanly at end-of-stream. The ret
    # check MUST come first: cap.read() returns (False, None) when the video
    # is exhausted, and flipping/slicing None raises a TypeError.
    ret, frame = cap.read()
    if not ret:
        print("END")
        break
    vid = frame  # annotate the raw frame (the old cv2.flip result was dead code)
    # Hand-tuned hand-off zone (x 191..980, y 630..690) — the "red square"
    # where people appear/disappear near the camera boundary.
    triggerArea = frame[630:690, 191:980]

    cpoints_cur_frame = []  # centroids detected in this frame

    # --- Foreground segmentation (method switchable at runtime, see keys below) ---
    if method == 'MOG2':
        bgs = mog.apply(vid)
    elif method == 'KNN':
        bgs = knn.apply(vid)
    elif method == 'ABS':
        # Frame differencing against a reference frame.
        # NOTE(review): frame1 is captured once and never refreshed, so every
        # frame is diffed against the very first one — fine only if the
        # background is truly static; confirm or refresh periodically.
        frame = cv2.GaussianBlur(vid, (7, 7), 0)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame1 is None:
            frame1 = frame
            continue
        framedelta = cv2.absdiff(frame1, frame)
        retval, bgs = cv2.threshold(framedelta.copy(), 35, 255, cv2.THRESH_BINARY)

    mask = np.zeros_like(frame)
    contours, _ = cv2.findContours(bgs, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
    # Largest contour first, so the `break` below keeps only the biggest blob.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    for cnt in contours:
        if cv2.contourArea(cnt) < minimum:
            continue  # too small to be a person
        (x, y, w, h) = cv2.boundingRect(cnt)
        cx = (2 * x + w) // 2  # bounding-box centre
        cy = (2 * y + h) // 2
        cpoints_cur_frame.append((cx, cy))
        cv2.rectangle(vid, (x, y), (x + w, y + h), (0, 255, 10), 1)
        # FIX: thickness was packed inside the colour tuple (0,255,0,2);
        # it is a separate putText argument.
        cv2.putText(vid, f'{method}', (20, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
        cv2.putText(vid, 'Timurchik', (20, 40), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
        cv2.drawContours(mask, cnt, -1, 255, 3)
        break  # single-target assumption: keep only the largest mover

    # FIX: count processed frames, not raw contours, so the bootstrap branch
    # below genuinely means "the first couple of frames".
    detectedCount += 1
    if detectedCount <= 2:
        # Bootstrap: pair points across the first two frames to seed track IDs.
        for pt in cpoints_cur_frame:
            for pt2 in cpoints_prev_frame:
                if math.hypot(pt2[0] - pt[0], pt2[1] - pt[1]) < 130:
                    tracking_objects[track_id] = pt
                    track_id += 1
    else:
        # Steady state: greedily re-associate each known object with a current
        # centroid within 110 px; objects with no match are dropped.
        tracking_objects_copy = tracking_objects.copy()
        center_points_cur_frame_copy = cpoints_cur_frame.copy()
        for object_id, pt2 in tracking_objects_copy.items():
            object_exists = False
            for pt in center_points_cur_frame_copy:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                if distance < 110:
                    tracking_objects[object_id] = pt
                    object_exists = True
                    if pt in cpoints_cur_frame:
                        cpoints_cur_frame.remove(pt)  # claimed — not eligible for a new ID
                    continue  # keep scanning: a later, closer point may overwrite
            if not object_exists:
                tracking_objects.pop(object_id)
        # Any unclaimed centroid becomes a brand-new track.
        for pt in cpoints_cur_frame:
            tracking_objects[track_id] = pt
            track_id += 1

    # --- Visualisation ---
    for object_id, pt in tracking_objects.items():
        cv2.circle(vid, pt, 5, (0, 0, 255), -1)
        cv2.putText(vid, str(object_id), (pt[0], pt[1] - 7), 0, 1, (0, 0, 255), 2)
    cv2.imshow('triggerArea', triggerArea)
    cv2.imshow('frame', vid)

    cpoints_prev_frame = cpoints_cur_frame.copy()

    # Keyboard controls: Q quits, M/K/A select the segmentation method.
    key = cv2.waitKey(1)
    if key in (ord('q'), ord('Q')):
        break
    elif key in (ord('M'), ord('m')):
        method = 'MOG2'
    elif key in (ord('K'), ord('k')):
        method = 'KNN'
    elif key in (ord('A'), ord('a')):
        method = 'ABS'
# Release the capture device and close all OpenCV display windows.
cap.release()
cv2.destroyAllWindows()