
I'm trying to save video files from an Azure Kinect (RGB, RGB+Skeleton, Skeleton, Depth, IR) and a FLIR Lepton 3.5 (Thermal) simultaneously.

I can save all the other streams (RGB, RGB+Skeleton, Skeleton, Thermal, Depth), but I can't save the IR video.

The IR frame is a (height, width) array, so I convert it to 3 channels in order to use VideoWriter's .write(). When I convert it like this:

IR_image = IR_image.astype(np.uint8)
IR_image = cv2.cvtColor(IR_image, cv2.COLOR_GRAY2BGR)

The saved video comes out broken, like this:

[screenshot of the broken IR video]
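
As the comments below point out, `astype(np.uint8)` does not rescale a 16-bit image; it keeps only the low byte of each pixel, so the values wrap around every 256 counts, which produces exactly this kind of banding. A quick demonstration:

import numpy as np

a = np.array([255, 256, 4096, 65535], dtype=np.uint16)
print(a.astype(np.uint8))   # [255   0   0 255] -- only the low byte survives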

Here's my full code for saving the videos:

import cv2
import sys
import numpy as np
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
from PyQt5.QtCore import QCoreApplication
import pykinect_azure as pykinect
import qimage2ndarray
from datetime import datetime

RGB_IMAGE_PATH      = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\RGB\\'
IR_IMAGE_PATH       = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\IR\\'
DEPTH_IMAGE_PATH    = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\Depth\\'
THERMAL_IMAGE_PATH  = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\Thermal\\'
RGB_SKELETON_PATH  = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\RGB_Skeleton\\'
SKELETON_DATA_PATH  = 'C:\\ti\\mmwave_industrial_toolbox_4_12_1\\tools\\Visualizer\\datas\\Skeleton\\'

def cv2_to_qimage(cv_img):
    # Copy a BGR OpenCV image into a 4-channel buffer and wrap it as Format_RGB32
    height, width, _ = cv_img.shape
    bgra = np.zeros([height, width, 4], dtype=np.uint8)
    bgra[:, :, 0:3] = cv_img
    return QtGui.QImage(bgra.data, width, height, QtGui.QImage.Format_RGB32)

# ------------------ Azure Kinect Setting ------------------ #
# Initialize the library, if the library is not found, add the library path as argument
pykinect.initialize_libraries(track_body=True)

# Modify camera configuration
device_config = pykinect.default_configuration
device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_1080P
device_config.depth_mode = pykinect.K4A_DEPTH_MODE_WFOV_2X2BINNED
# ------------------ Azure Kinect Setting ------------------ #

class ShowVideo(QtCore.QObject):
    VideoSignal_RGB     = QtCore.pyqtSignal(QtGui.QImage)
    VideoSignal_Depth   = QtCore.pyqtSignal(QtGui.QImage)
    VideoSignal_IR      = QtCore.pyqtSignal(QtGui.QImage)
    VideoSignal_THERMAL = QtCore.pyqtSignal(QtGui.QImage)


    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)
        self.device = pykinect.start_device(config=device_config)   # Start device
        self.bodyTracker = pykinect.start_body_tracker()            # Start body tracker
        self.thermal_vid = cv2.VideoCapture(0, cv2.CAP_DSHOW)   # FLIR Lepton capture
        self.thermal_vid.set(cv2.CAP_PROP_CONVERT_RGB, 0)       # deliver raw frames, no RGB conversion
        self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        self.list_save = []
        self.record_flag = False
        self.quit_flag = False
        self.image_count = 0
        self.break_flag = 1
        self.iswrite = True
    
    @QtCore.pyqtSlot()
    def startVideo(self):
        # video save 
        run_video = True
        rgb_out = cv2.VideoWriter(RGB_IMAGE_PATH + 'S01P01R01A01_RGB.mp4', cv2.VideoWriter_fourcc('m','p','4','v'), 8, (1920, 1080), True)
        depth_out = cv2.VideoWriter(DEPTH_IMAGE_PATH + 'S01P01R01A01_DEPTH.mp4', cv2.VideoWriter_fourcc('m','p','4','v'), 8, (512, 512), True)
        ir_out = cv2.VideoWriter(IR_IMAGE_PATH + 'S01P01R01A01_IR.mp4', cv2.VideoWriter_fourcc('m','p','4','v'), 8, (512, 512), True)
        thermal_out = cv2.VideoWriter(THERMAL_IMAGE_PATH + 'S01P01R01A01_THERMAL8.mp4', cv2.VideoWriter_fourcc('m','p','4','v'), 8, (512, 512), True)
        rgb_skeleton_out = cv2.VideoWriter(RGB_SKELETON_PATH + 'S01P01R01A01_RGBSKELETON.mp4', cv2.VideoWriter_fourcc('m','p','4','v'), 8, (1032, 580), True)
        
        while run_video:
            self.datetime = str(datetime.now())
            self.capture = self.device.update()
            self.body_frame = self.bodyTracker.update()
            self.point_bodies = self.body_frame.get_body()
            ret, RGB_image      = self.capture.get_color_image()
            ret, Depth_image    = self.capture.get_colored_depth_image()
            ret, IR_image       = self.capture.get_ir_image()
            ret, Thermal_image  = self.thermal_vid.read()

            if self.record_flag == True : 
                rgb_out.write(RGB_image)
            elif self.record_flag == False:
                self.image_count = 0
                
            # Draw the skeletons into the color image
            RGB_skeleton = self.body_frame.draw_bodies(RGB_image, pykinect.K4A_CALIBRATION_TYPE_COLOR)
            RGB_skeleton = cv2.resize(RGB_skeleton, dsize=(1032, 580), interpolation=cv2.INTER_LINEAR)

            # Thermal processing: resize, stretch to the full 16-bit range,
            # keep the high byte, then apply a colour map
            Thermal_image = cv2.resize(Thermal_image, dsize=(512, 512), interpolation=cv2.INTER_LINEAR)
            Thermal_image = cv2.normalize(Thermal_image, None, 0, 65535, cv2.NORM_MINMAX, cv2.CV_16U)
            np.right_shift(Thermal_image, 8, Thermal_image)
            Thermal_image = Thermal_image.astype(np.uint8)
            Thermal_image = cv2.cvtColor(Thermal_image, cv2.COLOR_GRAY2BGR)
            Thermal_image = cv2.applyColorMap(Thermal_image, cv2.COLORMAP_INFERNO)
   
            # IR processing: the plain uint8 cast keeps only the low byte of the
            # 16-bit IR frame, which is what corrupts the saved video
            IR_image = IR_image.astype(np.uint8)
            IR_image = cv2.cvtColor(IR_image, cv2.COLOR_GRAY2BGR)

            if self.record_flag == True : 
                self.save_npy(self.datetime)
                depth_out.write(Depth_image)
                ir_out.write(IR_image)
                thermal_out.write(Thermal_image)
                rgb_skeleton_out.write(RGB_skeleton)
                self.iswrite = True

            if self.quit_flag and self.iswrite:
                np.save(SKELETON_DATA_PATH + str(datetime.now().strftime('%Y-%m-%d %H%M%S')) + '.npy', np.array(self.list_save))
                depth_out.release()
                ir_out.release()
                thermal_out.release()
                rgb_skeleton_out.release()
                rgb_out.release()
                break   # stop capturing once the files are finalized
                
            RGB_skeleton = cv2_to_qimage(RGB_skeleton)
            Depth_image  = qimage2ndarray.array2qimage(Depth_image, normalize=False)
            IR_image     = qimage2ndarray.array2qimage(IR_image, normalize=False)
            Thermal_image= qimage2ndarray.array2qimage(Thermal_image, normalize=False)

            qt_image_RGB = QtGui.QImage(RGB_skeleton)
            qt_image_Depth = QtGui.QImage(Depth_image)
            qt_image_IR = QtGui.QImage(IR_image)
            qt_image_Thermal = QtGui.QImage(Thermal_image)

            self.VideoSignal_RGB.emit(qt_image_RGB)
            self.VideoSignal_Depth.emit(qt_image_Depth)
            self.VideoSignal_IR.emit(qt_image_IR)
            self.VideoSignal_THERMAL.emit(qt_image_Thermal)

            loop = QtCore.QEventLoop()
            QtCore.QTimer.singleShot(25, loop.quit) #25 ms
            loop.exec_()
            

    def record_button(self):
        if self.record_flag == False : 
            self.record_flag = True
            push_button1.setText('Finish')

        elif self.record_flag == True : 
            self.record_flag = False
            push_button1.setText('Record')
            # np.save(SKELETON_DATA_PATH + str(datetime.now().strftime('%Y-%m-%d %H%M%S')) + '.npy', np.array(self.list_save))
            self.image_count = 0
            self.list_save = []
            
    def quit_button(self):
        if self.quit_flag == False :
            self.quit_flag = True
        elif self.quit_flag == True :
            self.image_count = 0
            self.list_save = []
            self.quit_flag = False

class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.init_image = cv2.imread('init_Image.jpg')
        self.init_image = cv2.cvtColor(self.init_image, cv2.COLOR_BGR2RGB)
        self.h, self.w, self.c = self.init_image.shape
        self.image = QtGui.QImage(self.init_image.data, self.w, self.h, self.w*self.c, QtGui.QImage.Format_RGB888)
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Test')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("Viewer Dropped frame!")

        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()


if __name__ == '__main__':

    app = QtWidgets.QApplication(sys.argv)

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer_RGB    = ImageViewer()
    image_viewer_Depth  = ImageViewer()
    image_viewer_IR     = ImageViewer()
    image_viewer_THERMAL= ImageViewer()

    vid.VideoSignal_RGB.connect(image_viewer_RGB.setImage)
    vid.VideoSignal_Depth.connect(image_viewer_Depth.setImage)
    vid.VideoSignal_IR.connect(image_viewer_IR.setImage)
    vid.VideoSignal_THERMAL.connect(image_viewer_THERMAL.setImage)

    push_button1 = QtWidgets.QPushButton('Record')
    push_button1.clicked.connect(vid.record_button)
    quit_button = QtWidgets.QPushButton('Record end')
    quit_button.clicked.connect(vid.quit_button)
    
    vertical_layout = QtWidgets.QVBoxLayout()
    horizontal_layout = QtWidgets.QHBoxLayout()

    vertical_layout.addWidget(image_viewer_RGB)
    horizontal_layout.addWidget(image_viewer_Depth)
    horizontal_layout.addWidget(image_viewer_IR)
    horizontal_layout.addWidget(image_viewer_THERMAL)

    vertical_layout.addLayout(horizontal_layout)
    vertical_layout.addWidget(push_button1)
    vertical_layout.addWidget(quit_button)


    # gridlay = QtWidgets.QGridLayout()
    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.show()
    vid.startVideo()   # called directly, so the loop runs on the calling (main) thread, not on `thread`
    
    sys.exit(app.exec_())
I also tried shifting and normalizing the IR image, but that doesn't work well either:

# IR processing
np.left_shift(IR_image, 4, IR_image)
IR_image = IR_image.astype(np.uint8)
IR_image = cv2.normalize(IR_image, None, 0, 65535, cv2.NORM_MINMAX, cv2.CV_16U)
IR_image = cv2.cvtColor(IR_image, cv2.COLOR_GRAY2BGR)
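
Note that in this attempt `cv2.normalize` runs after the `astype(np.uint8)` cast, so the upper bits are already lost before normalization. A minimal sketch of the reverse order, normalizing the raw 16-bit frame straight down to 8 bits (assuming `IR_image` is the uint16 array returned by `get_ir_image()`):

# stretch the raw 16-bit IR frame to 0..255 and emit it as uint8
IR_8u = cv2.normalize(IR_image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
IR_image = cv2.cvtColor(IR_8u, cv2.COLOR_GRAY2BGR)
ir_out.write(IR_image)

Per-frame min/max normalization can make brightness flicker between frames; dividing by a fixed constant instead keeps levels stable at the cost of some contrast.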

  • What is the bit depth of your `IR_image`? E.g. if it is `uint16` you should divide it by 256 before converting it to `uint8`. – Markus Mar 13 '23 at 14:00
  • Thanks for the answer. The bit depth of the Azure Kinect IR camera is 16 bit, so I divided by 16 or 256 before converting to uint8, and then I could save the IR video. But because of the division, the video quality is not so good. – JDK Mar 14 '23 at 04:07
  • This is why you normally convert 16-bit IR images to some colour map. – Markus Mar 14 '23 at 07:35
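
Following the last comment, here is a minimal sketch of mapping the 16-bit IR frame through a colour map instead of a plain grey-scale conversion (assuming `IR_image` is uint16; `COLORMAP_INFERNO` just mirrors the thermal branch above):

# scale the 16-bit IR frame down to 8 bits, then map it through a colour map
IR_8u = cv2.normalize(IR_image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
IR_color = cv2.applyColorMap(IR_8u, cv2.COLORMAP_INFERNO)   # 8-bit BGR output
ir_out.write(IR_color)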
