
My goal is to get a 1280x1280 frame from nvarguscamerasrc. The problem is that nvarguscamerasrc scales the 3840x2160 sensor frame down to 1280x720 (keeping the 16:9 aspect ratio) instead of filling the requested 1280x1280 output, so the bottom of the frame is always black.

JetsonCamera.py

import threading
from queue import Queue

import cv2


def gstreamer_pipeline(
    # Issue: the sensor formats used by the Raspberry Pi 4B and the NVIDIA Jetson Nano B01 differ.
    # On the Raspberry Pi 4B, this command
    # $ libcamera-still --width 1280 --height 1280 --mode 1280:1280
    # uses the 2328x1748 sensor format.
    # However, v4l2-ctl --list-formats-ext does not list such a format.
    capture_width=1920,
    capture_height=1080,
    display_width=640,
    display_height=360,
    framerate=21,
    flip_method=0,
):
    return (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        )
    )
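
One way to avoid the black bars might be to keep a 16:9 capture resolution and let nvvidconv crop a centered square before scaling it to 1280x1280. The sketch below is not from the original post and is untested; it assumes that nvvidconv on this L4T release supports the left/right/top/bottom crop properties (check gst-inspect-1.0 nvvidconv), and gstreamer_square_pipeline is a hypothetical variant of the function above.

def gstreamer_square_pipeline(
    capture_width=3840,
    capture_height=2160,
    display_size=1280,
    framerate=21,
    flip_method=0,
):
    # Crop a centered capture_height x capture_height square out of the 16:9 frame,
    # then scale that square to display_size x display_size, so no letterbox padding
    # should be added.
    left = (capture_width - capture_height) // 2
    right = left + capture_height
    return (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d left=%d right=%d top=0 bottom=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            capture_width,
            capture_height,
            framerate,
            flip_method,
            left,
            right,
            capture_height,
            display_size,
            display_size,
        )
    )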

class Camera(object):
    frame_reader = None
    cap = None
    previewer = None

    def __init__(self, width=640, height=360):
        self.open_camera(width, height)

    def open_camera(self, width=640, height=360):
        self.cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0, display_width=width, display_height=height), cv2.CAP_GSTREAMER)
        if not self.cap.isOpened():
            raise RuntimeError("Failed to open camera!")
        if self.frame_reader is None:
            self.frame_reader = FrameReader(self.cap, "")
            self.frame_reader.daemon = True
            self.frame_reader.start()

    def getFrame(self, timeout=None):
        return self.frame_reader.getFrame(timeout)

class FrameReader(threading.Thread):
    queues = []
    _running = True
    camera = None
    def __init__(self, camera, name):
        threading.Thread.__init__(self)
        self.name = name
        self.camera = camera
 
    def run(self):
        while self._running:
            _, frame = self.camera.read()
            while self.queues:
                queue = self.queues.pop()
                queue.put(frame)
    
    def addQueue(self, queue):
        self.queues.append(queue)

    def getFrame(self, timeout=None):
        queue = Queue(1)
        self.addQueue(queue)
        return queue.get(timeout=timeout)

    def stop(self):
        self._running = False

main.py

import cv2

from JetsonCamera import Camera

exit_ = False

if __name__ == "__main__":
    camera = Camera(width=1280, height=1280)

    while not exit_:
        frame = camera.getFrame(2000)  # note: Queue.get interprets this timeout in seconds
        cv2.imshow("Test", frame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            exit_ = True

(Screenshot: the bottom of the frame is black.)
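
As a hypothetical fallback (not from the original post), the letterboxing could also be undone after capture: slice the valid 1280x720 region off the top of the 1280x1280 frame, take a centered square, and resize it back to 1280x1280 with OpenCV. square_from_letterboxed below is an illustrative helper, assuming the image content sits in the top 720 rows as described above.

import cv2

def square_from_letterboxed(frame, content_height=720, out_size=1280):
    # Drop the black rows below the real image content.
    content = frame[:content_height, :]
    h, w = content.shape[:2]
    # Take a centered h x h square from the 16:9 content, then scale it up.
    x0 = (w - h) // 2
    square = content[:, x0:x0 + h]
    return cv2.resize(square, (out_size, out_size), interpolation=cv2.INTER_LINEAR)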

Jason Rich Darmawan
  • what you're taking issue with is the "letterboxing". do you want to _stretch_ the frame, i.e. _not_ maintain its sample aspect ratio, i.e. make rectangular pixels? I'm sure gstreamer can give you a result that doesn't apply letterboxing but simply does the rescale, stretching the data to fit. – Christoph Rackwitz Feb 04 '23 at 12:05
  • @ChristophRackwitz I want to mimic `libcamera-still -t 5000 --width 1280 --height 1280 --mode 1280:1280` which uses `2328x1748` sensor format. So the frame generated by `Raspberry Pi 4B with ArduCAM IMX519` and `NVIDIA Jetson Nano B01 with ArduCAM IMX519` stays the same. – Jason Rich Darmawan Feb 04 '23 at 14:54
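
If the intent is to mimic libcamera-still's --mode selection, as the comment above describes, nvarguscamerasrc exposes a sensor-mode property on recent L4T releases (gst-inspect-1.0 nvarguscamerasrc shows whether it is present, and the plugin prints the modes the driver advertises when the pipeline starts). The helper below is a hypothetical sketch, not from the original post, that pins the pipeline above to one of those modes.

def gstreamer_pipeline_with_mode(sensor_mode=0, **kwargs):
    # sensor_mode is the index of one of the modes the camera driver advertises;
    # check the mode list nvarguscamerasrc prints at startup.
    return gstreamer_pipeline(**kwargs).replace(
        "nvarguscamerasrc !",
        "nvarguscamerasrc sensor-mode=%d !" % sensor_mode,
    )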

0 Answers