I am using YOLOv8 and OpenCV for object detection on a CCTV camera, passing its RTSP URL as the video path for real-time detection. I have drawn two horizontal lines with OpenCV, and the idea is that if an object is detected below the first line it is in the yellow zone, and if it is detected below the second line it is in the red zone. I have added that condition, but it always reports that the object is in the yellow zone, even when the detected object is below the second line.
Please check the code below:
import cv2
import torch
import numpy as np
from ultralytics import YOLO

if torch.cuda.is_available():
    device = torch.device('cuda')
    print('Using device:', torch.cuda.get_device_name(torch.cuda.current_device()))
else:
    device = torch.device('cpu')
    print('Using device:', device)

video_path = "rtsp://192.168.1.83/live/0/MAIN"
cap = cv2.VideoCapture(video_path)
model = YOLO('yolov8n.pt')

x_line1 = 200  # Horizontal line 1 (y coordinate)
x_line2 = 500  # Horizontal line 2 (y coordinate)
y_line = 350   # Vertical line (x coordinate)
frame_count = 0
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).cuda()

while cap.isOpened():
    # Read a frame from the video
    success, frame = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))
    if success:
        # Resize the frame and draw the zone lines
        resized_frame = cv2.resize(frame, (1280, 720), interpolation=cv2.INTER_LINEAR)
        cv2.line(resized_frame, (0, x_line1), (width, x_line1), (255, 0, 0), 10)
        cv2.line(resized_frame, (0, x_line2), (width, x_line2), (255, 0, 0), 10)
        cv2.line(resized_frame, (y_line, 0), (y_line, height), (255, 0, 0), 10)  # Vertical line

        # Run YOLOv8 inference on the cropped region (below line 1, right of the vertical line)
        cropped_frame = resized_frame[x_line1:, y_line:]
        results = model(cropped_frame, conf=0.1)

        # Combine the original frame with the annotated detections
        annotated_cropped_frame = results[0].plot()
        annotated_frame = resized_frame.copy()
        annotated_frame[x_line1:, y_line:] = annotated_cropped_frame

        # Display the annotated frame
        cv2.imshow("YOLOv8 Inference", annotated_frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
        num_objects = len(results[0].boxes)
        # If there are 9 or more detections, save the frame and print the count
        if num_objects >= 9:
            cv2.imwrite("frame_{}.jpg".format(frame_count), annotated_frame)
            print("There are {} people in the frame".format(num_objects))
            # Classify each detection by its top y coordinate relative to the two lines
            boxes = results[0].boxes.data
            for box in boxes:
                if box[1] > x_line2:
                    print("Object is in red zone")
                elif box[1] > x_line1:
                    print("Object is in yellow zone")
            frame_count += 1
    else:
        # Break the loop if the end of the video is reached
        break

# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
I was trying this:
boxes = results[0].boxes.data
for box in boxes:
    if box[1] > x_line2:
        print("Object is in red zone")
    elif box[1] > x_line1:
        print("Object is in yellow zone")
But it only ever printed that the object was in the yellow zone, no matter where the object actually was.
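To make the intent clearer, this is a minimal standalone sketch of the zone logic I am trying to get, with made-up y values (y_top stands for the top y coordinate of a detection, in the same pixel coordinates as the two lines):

x_line1 = 200  # first horizontal line (y coordinate)
x_line2 = 500  # second horizontal line (y coordinate)

def zone_for(y_top):
    # A detection below the second line is red, below the first line is yellow
    if y_top > x_line2:
        return "red zone"
    elif y_top > x_line1:
        return "yellow zone"
    return "above both lines"

# Made-up test values: 600 should come out as red zone
for y_top in (150, 300, 600):
    print(y_top, "->", zone_for(y_top))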