I am working on a project that requires me to extract an RGB image and the corresponding depth map from a Kinect v2. Since the RGB image and the depth map are obtained from different sensors, they are not registered to each other and have different resolutions.
Currently I am trying to modify the example given in the official pylibfreenect2 documentation.
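For context, the loop below assumes roughly the standard setup from the pylibfreenect2 multi-frame listener example (device, listener, Registration object, and the 512x424 undistorted/registered Frame buffers); the skip_frame value is just a placeholder I chose:

    import sys
    import numpy as np
    import cv2
    from pylibfreenect2 import Freenect2, SyncMultiFrameListener
    from pylibfreenect2 import FrameType, Registration, Frame
    from pylibfreenect2 import CpuPacketPipeline

    pipeline = CpuPacketPipeline()

    enable_rgb = True
    enable_depth = True

    fn = Freenect2()
    if fn.enumerateDevices() == 0:
        print("No device connected!")
        sys.exit(1)
    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)

    # Registration must be created after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    # both buffers are 512x424 with 4 bytes per pixel
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    i = 0            # frame counter
    j = 0            # index of the next saved file
    skip_frame = 10  # save every 10th frame (my own value)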
    while True:
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
            if i % skip_frame == 0:
                # full-resolution color frame: (1080, 1920, 4)
                np.save('/home/ashu/DDP/Kinect/RGB_images/rgb_{}'.format(j), color.asarray())
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            # map the color frame onto the 512x424 depth frame
            registration.apply(color, depth, undistorted, registered)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_depth:
            cv2.imshow("ir", ir.asarray() / 65535.)
            cv2.imshow("depth", depth.asarray() / 4500.)
            cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)
            if i % skip_frame == 0:
                # raw depth frame: (424, 512), float32, in mm
                np.save('/home/ashu/DDP/Kinect/Depth_images/depth_{}'.format(j), depth.asarray())
        if enable_rgb:
            cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        if enable_rgb and enable_depth:
            cv2.imshow("registered", registered.asarray(np.uint8))
            # getPointXYZ expects a single pixel position: (undistorted, row, col)
            xx = registration.getPointXYZ(undistorted, 212, 256)
            if i % skip_frame == 0:
                np.save('/home/ashu/DDP/Kinect/registered/reg_{}'.format(j), registered.asarray(np.uint8))
                j += 1

        listener.release(frames)
        i += 1

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()

    sys.exit(0)
As can be seen in the code, I am trying to save the RGB image and the depth map as .npy files every skip_frame frames, but since the two arrays have different dimensions (1920x1080 for color, 512x424 for depth), they are of no use to me as a pair. In short, my objective is to extract an RGB image and a depth image of the same dimensions, (424, 512).
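From reading the docs, my understanding is that registration.apply() already produces both modalities at the depth resolution: registered holds the color pixels resampled into the 512x424 depth frame (BGRX, uint8) and undistorted holds the lens-corrected depth map (float32, in mm). So I am considering saving those two inside the loop instead of the raw color and depth frames, roughly like this. Is that the right approach?

    # inside the loop, after registration.apply(color, depth, undistorted, registered)
    if i % skip_frame == 0:
        rgb_aligned = registered.asarray(np.uint8)        # (424, 512, 4), BGRX
        depth_aligned = undistorted.asarray(np.float32)   # (424, 512), depth in mm
        np.save('/home/ashu/DDP/Kinect/RGB_images/rgb_{}'.format(j), rgb_aligned)
        np.save('/home/ashu/DDP/Kinect/Depth_images/depth_{}'.format(j), depth_aligned)
        j += 1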