I have tried many solutions, but none of them work. Below is my minimal code for producing the result (an RTSP stream). It runs without errors, but I cannot open the RTSP stream.
My second question: how do I set a stream path (name) such as rtsp://192.168.1.158:8554/test?
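As far as I understand, a path like /test normally comes from a mount point on an RTSP server (for example gst-rtsp-server), and this is roughly what I mean; a minimal sketch, assuming the gst-rtsp-server Python (GObject-introspection) bindings are installed, with a purely illustrative launch string rather than my real pipeline:

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib

Gst.init(None)

server = GstRtspServer.RTSPServer()
server.set_service("8554")                      # port part of the URL

factory = GstRtspServer.RTSPMediaFactory()
# The launch description must end in a payloader named pay0
factory.set_launch('( videotestsrc is-live=true ! x264enc tune=zerolatency '
                   '! rtph264pay name=pay0 pt=96 )')
factory.set_shared(True)

# "/test" is the path component of rtsp://<ip>:8554/test
server.get_mount_points().add_factory("/test", factory)
server.attach(None)

GLib.MainLoop().run()

I do not see how to combine something like this with my cv2.VideoWriter pipeline below.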
This is what the code does:
1. Get the camera's frame format from the Basler camera
2. Create the video capture
3. Get the current IP address
4. Configure the pipe_out string for RTSP streaming
5. Create the OpenCV writer
6. Read frames from the camera and run YOLO detection (not shown here)
7. Show the result on the local machine
8. Write the frame to the RTSP output
VLC cannot connect to the RTSP stream. When I try to open it on the local machine like this:
gst-launch-1.0 rtspsrc location=rtsp://localhost:8554 latency=100 ! \
    queue ! rtph264depay ! h264parse ! avdec_h264 ! \
    videoconvert ! videoscale ! video/x-raw,width=640,height=480 ! \
    xvimagesink
it gives me the following error:
Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Progress: (open) Opening Stream
Progress: (connect) Connecting to rtsp://localhost:8554
ERROR: from element /GstPipeline:pipeline0/GstRTSPSrc:rtspsrc0: Could not open resource for reading and writing.
Additional debug info:
gstrtspsrc.c(7469): gst_rtspsrc_retrieve_sdp (): /GstPipeline:pipeline0/GstRTSPSrc:rtspsrc0:
Failed to connect. (Generic error)
ERROR: pipeline doesn't want to preroll.
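For reference, a quick way to check whether anything is listening on the port at all (a diagnostic sketch only; I assume rtspsrc needs a TCP connection on 8554 for the RTSP handshake, so a refused connection would match the "Failed to connect" message):

import socket

# Diagnostic only: check whether anything accepts TCP connections on 8554
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
    probe.settimeout(2)
    result = probe.connect_ex(("127.0.0.1", 8554))
    print("port 8554 is open" if result == 0 else f"connect failed (errno {result})")

My full code: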
import subprocess as sb
import shlex as sh
import cv2
import socket
import time
def get_frame_format():
    # Read fps and frame size from the v4l2-ctl output for /dev/video0
    command = 'v4l2-ctl -d /dev/video0 --list-formats-ext'
    args = sh.split(command)
    p = sb.Popen(args, stdout=sb.PIPE, stderr=sb.STDOUT)
    res = p.communicate()[0].split()
    fps = float(res[-2].decode('utf-8')[1:])
    size = list(map(int, res[19].decode('utf-8').split('x')))
    width = size[0]
    height = size[1]
    return fps, width, height
fps, width, height = get_frame_format()
print (f"fps: {fps}, {type(fps)}\n, size: {width} {height}")
window_title = "USB Camera"
camera_id = "/dev/video0"
cam = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
# Get the current IP address for RTSP (the UDP socket is only used to
# discover the outgoing interface; nothing is actually sent)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
s.close()
# appsrc -> convert -> H.264 encode -> RTP payload -> UDP to 192.168.1.158:8554
pipe_out = ('appsrc is-live=True ! queue ! videoconvert ! video/x-raw, format=RGBA ! nvvidconv '
            '! omxh264enc insert-sps-pps=true ! video/x-h264, stream-format=byte-stream ! h264parse '
            '! rtph264pay name=pay0 pt=96 config-interval=1 ! udpsink port=8554 host=192.168.1.158')
rtsp_out = cv2.VideoWriter(pipe_out, fourcc=0, apiPreference=cv2.CAP_GSTREAMER, fps=fps,
                           frameSize=(1980, 1080), isColor=True)
time.sleep(2)
if not rtsp_out.isOpened():
    print("Writer failed")
    exit()
print('Writer opened')
if cam.isOpened():
    try:
        window_handle = cv2.namedWindow(window_title, cv2.WINDOW_KEEPRATIO)
        while True:
            ok, frame = cam.read()
            # Here the frame is processed by the YOLO network (not shown)
            # Show the result on the local machine
            if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) >= 0:
                frame_out = cv2.resize(frame, (1980, 1080))
                cv2.imshow(window_title, frame_out)
                # Stream the result frame out through the GStreamer pipeline
                rtsp_out.write(frame_out)
            else:
                break
            key = cv2.waitKey(1) & 0xFF
            if key == 27 or key == ord("q"):
                break
    finally:
        rtsp_out.release()
        cam.release()
        cv2.destroyAllWindows()
else:
    print("Camera doesn't open")
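One more thing that might be relevant: as far as I know, cv2.VideoWriter only interprets a GStreamer pipeline string when OpenCV was built with GStreamer support; a small sketch to check this, using the standard cv2.getBuildInformation() call:

import cv2

# Print the GStreamer line from the OpenCV build configuration;
# it should report something like "GStreamer: YES (1.16.x)"
info = cv2.getBuildInformation()
print([line.strip() for line in info.splitlines() if "GStreamer" in line])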