
I am working on a project that uses a Kinect for obstacle avoidance. I am using Visual Studio (C++), the OpenCV library, and the Code Laboratories drivers (CL NUI Platform). I want to convert the depth image to real-world dimensions.

Thanks in advance. Here is my code:

#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
#include <CLNUIDevice.h>
using namespace std;
void displayKinectImage();
int _tmain(int argc, _TCHAR* argv[])
{
displayKinectImage();
}

void displayKinectImage() {
PDWORD rgb32_data = (PDWORD)malloc(640 * 480 * 4);
PDWORD depth32_data = (PDWORD)malloc(640 * 480 * 4);

CLNUICamera cam = CreateNUICamera(GetNUIDeviceSerial(0));
CLNUIMotor motor = CreateNUIMotor(GetNUIDeviceSerial(0));

StartNUICamera(cam);

cvNamedWindow("Image", CV_WINDOW_AUTOSIZE);
cvNamedWindow("Depth", CV_WINDOW_AUTOSIZE);
cvNamedWindow("Grey", CV_WINDOW_AUTOSIZE);

// Create the image headers once; they point directly at the raw Kinect buffers.
IplImage *rgb32 = cvCreateImageHeader(cvSize(640, 480), IPL_DEPTH_8U, 4);
IplImage *depth32 = cvCreateImageHeader(cvSize(640, 480), IPL_DEPTH_8U, 4);
IplImage *grey = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);

cvSetData(rgb32, rgb32_data, rgb32->widthStep);
cvSetData(depth32, depth32_data, depth32->widthStep);

do {
    // Grab the latest colour and depth frames into the raw buffers.
    GetNUICameraColorFrameRGB32(cam, rgb32_data);
    GetNUICameraDepthFrameRGB32(cam, depth32_data);

    // Collapse the 4-channel depth visualisation to a single-channel grey image
    cvCvtColor(depth32, grey, CV_RGBA2GRAY);

    cvShowImage("Image", rgb32);
    cvShowImage("Grey", grey);
    cvShowImage("Depth", depth32);

    cvWaitKey(1);

} while (!GetAsyncKeyState(0x50)); // loop until the 'P' key is pressed

cvReleaseImageHeader(&rgb32);
cvReleaseImageHeader(&depth32);
cvReleaseImage(&grey);

StopNUICamera(cam);

free(rgb32_data);
free(depth32_data);

}

1 Answer


You can get the 3D point corresponding to pixel (j, i), given its depth value depth in millimetres, with

constexpr int WIDTH = 640;
constexpr int HEIGHT = 480;
constexpr float DEPTH_TO_M = 0.001f;
constexpr float SCL = 1.0f / 520.0f;

float z = static_cast<float>(depth) * DEPTH_TO_M;
float x = static_cast<float>(j - WIDTH/2) * z * SCL;
float y = static_cast<float>(i - HEIGHT/2) * z * SCL;

The constants DEPTH_TO_M and SCL are approximations (SCL is roughly the inverse of the Kinect's focal length in pixels). If you want accurate results you need to calibrate your camera, but that is a scientific problem which is out of scope for Stack Overflow.
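If you want this for every pixel (e.g. to build a point cloud for obstacle avoidance), the same formula can be applied across the whole 640x480 frame. The sketch below is only illustrative: it assumes you have already extracted one depth value per pixel, in millimetres, into a plain buffer (the RGB32 depth frame your loop displays is a visualisation, so for real measurements you will need the driver's raw depth output and its documented unit conversion), and the Point3f struct and depthFrameToPoints name are made up for the example.

#include <vector>

struct Point3f { float x, y, z; };

// Back-project an entire 640x480 depth frame into 3D points (camera coordinates).
// depth_mm is assumed to hold one depth value per pixel, in millimetres;
// pixels with depth 0 are treated as invalid and skipped.
std::vector<Point3f> depthFrameToPoints(const unsigned short* depth_mm)
{
    constexpr int WIDTH = 640;
    constexpr int HEIGHT = 480;
    constexpr float DEPTH_TO_M = 0.001f;  // millimetres -> metres
    constexpr float SCL = 1.0f / 520.0f;  // ~ 1 / focal length in pixels

    std::vector<Point3f> points;
    points.reserve(WIDTH * HEIGHT);

    for (int i = 0; i < HEIGHT; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            const unsigned short depth = depth_mm[i * WIDTH + j];
            if (depth == 0)
                continue;  // no depth reading at this pixel

            const float z = static_cast<float>(depth) * DEPTH_TO_M;
            const float x = static_cast<float>(j - WIDTH / 2) * z * SCL;
            const float y = static_cast<float>(i - HEIGHT / 2) * z * SCL;
            points.push_back({x, y, z});
        }
    }
    return points;
}

Swapping the fixed WIDTH/2, HEIGHT/2 and SCL for calibrated intrinsics (cx, cy, fx, fy) is the usual next step when you need metric accuracy.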

Danvil