
I'm using two Point Grey Chameleon3 mono cameras set up as Master/Slave and synchronized so they work as a stereo camera. I then followed the OpenCV tutorial to calibrate the cameras. I collected 70 samples for the calibration and got the calibration results, so I have access to all the camera parameters, both intrinsic and extrinsic.

So I took the left and right images, undistorted them using the camera matrices and distortion coefficients, and rectified them into epipolar form using the translation and rotation matrices. Then I used the calib3d module's StereoSGBM class to create a disparity map. Here is my code:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <string>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{

    String imageName_left( "/home/OpenCVCode/left_7m.pgm" );   // default path
    if( argc > 1 )
    {
        imageName_left = argv[1];
    }

    Mat image_left = imread( imageName_left, IMREAD_COLOR );   // Read the file
    if( image_left.empty() )                                   // Check for invalid input
    {
        cout << "Could not open or find the left image" << std::endl;
        return -1;
    }

    String imageName_right( "/home/OpenCVCode/right_7m.pgm" ); // default path
    if( argc > 2 )                                             // bug fix: was argc > 1 with argv[1]
    {
        imageName_right = argv[2];                             // the right image needs its own argument
    }

    Mat image_right = imread( imageName_right, IMREAD_COLOR ); // Read the file
    if( image_right.empty() )                                  // Check for invalid input
    {
        cout << "Could not open or find the right image" << std::endl;
        return -1;
    }
    cv::Size imageSize = image_left.size();

    Mat R1 = Mat1d(3, 3);
    Mat R2 = Mat1d(3, 3);
    Mat P1 = Mat1d(3, 4);
    Mat P2 = Mat1d(3, 4);
    Mat Q  = Mat1d(4, 4);
    Rect validRoi[2];

    double R_data[] = {0.9997286422545486, 0.000835337139108146, -0.023279692174526096,
           0.0008925647184101439, 0.9998880281815514, -0.00244797956076857,
           0.02327756826002481, 0.0024680939144444804, 0.9997259941245548};
    double T_data[] = {-0.13808630092854335, -0.0016795993989732654, -0.005964101318274714};

    cv::Mat R(3, 3, CV_64F, R_data);
    cv::Mat T(3, 1, CV_64F, T_data);

    Mat Camera_Matrix_Left = (Mat1d(3, 3) << 446.441726, 0, 266.768096, 0, 448.805499, 186.652251, 0, 0, 1);
    Mat Distortion_Coefficients_Left = (Mat1d(1, 5) << -0.174502, 0.079787, 0.001010, 0.000899, 0);
    Mat Camera_Matrix_Right = (Mat1d(3, 3) << 440.825465, 0, 277.733688, 0, 442.324307, 198.863384, 0, 0, 1);
    Mat Distortion_Coefficients_Right = (Mat1d(1, 5) << -0.226564, 0.290791, 0.000979, 0.003149, 0);

    // Compute the rectification transforms from the calibrated extrinsics R and T
    stereoRectify(Camera_Matrix_Left, Distortion_Coefficients_Left,
                  Camera_Matrix_Right, Distortion_Coefficients_Right,
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  0, 1, imageSize, &validRoi[0], &validRoi[1]);
    FileStorage fs1("4m2.yml", FileStorage::WRITE);
    fs1 << "R1" << R1;
    fs1 << "R2" << R2;
    fs1 << "P1" << P1;
    fs1 << "P2" << P2;
    fs1 << "Q" << Q;
    fs1.release();



    // Rectification maps for the left camera (x and y)
    Mat map1x, map1y;
    // Rectification maps for the right camera (x and y)
    Mat map2x, map2y;

    cv::initUndistortRectifyMap(Camera_Matrix_Left, Distortion_Coefficients_Left, R1, P1, image_left.size(), CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(Camera_Matrix_Right, Distortion_Coefficients_Right, R2, P2, image_right.size(), CV_32FC1, map2x, map2y);

    cv::Mat tmp1, tmp2;
    cv::remap(image_left, tmp1, map1x, map1y, INTER_LINEAR);
    cv::remap(image_right, tmp2, map2x, map2y, INTER_LINEAR);
    namedWindow( "Display window1", WINDOW_AUTOSIZE ); // Window for the rectified left image
    imshow( "Display window1", tmp1 );                 // was tmp2: both windows showed the right image
    namedWindow( "Display window2", WINDOW_AUTOSIZE ); // Window for the rectified right image
    imshow( "Display window2", tmp2 );

    // Disparity map
    cv::Mat pair;
    pair.create(imageSize.height, imageSize.width * 2, CV_8UC3);
    // StereoSGBM parameters: minDisparity=-64, numDisparities=128, blockSize=11,
    // P1=100, P2=1000, disp12MaxDiff=32, preFilterCap=0, uniquenessRatio=15,
    // speckleWindowSize=1000, speckleRange=16
    cv::Ptr<cv::StereoSGBM> stereo = cv::StereoSGBM::create(
        -64, 128, 11, 100, 1000, 32, 0, 15, 1000, 16, cv::StereoSGBM::MODE_HH);

    cv::Mat img1 = cv::imread(imageName_left, IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread(imageName_right, IMREAD_GRAYSCALE);
    cv::Mat img1r, img2r, disp, vdisp;
    cv::remap(img1, img1r, map1x, map1y, cv::INTER_LINEAR);
    cv::remap(img2, img2r, map2x, map2y, cv::INTER_LINEAR);
    stereo->compute(img1r, img2r, disp);   // disp is CV_16S, fixed-point with 4 fractional bits
    cv::normalize(disp, vdisp, 0, 255, cv::NORM_MINMAX, CV_8U); // was 0..256; CV_8U tops out at 255
    cv::imshow("Vertical disparity", vdisp);

    cv::Mat part = pair.colRange(0, imageSize.width);
    cvtColor(img1r, part, cv::COLOR_GRAY2BGR);
    part = pair.colRange(imageSize.width, imageSize.width * 2);
    cvtColor(img2r, part, cv::COLOR_GRAY2BGR);
    for (int j = 0; j < imageSize.height; j += 16)
        cv::line(pair, cv::Point(0, j), cv::Point(imageSize.width * 2, j),
                 cv::Scalar(0, 255, 0)); // horizontal lines to check the rectification
    cv::imshow("Vertical rectified", pair);


    waitKey(); // Wait for a keystroke in the window
    return 0;
}

Here are links to the left raw image and the right raw image.

And here are the rectified images and the disparity map.

I have the rectified images and the disparity map. Now I want to compute the real distance from the camera to the chessboard, in metres. How can this be done? I know the ground-truth distance from the camera to that object, and I would like to see how accurate my camera is. Any help? Thanks

Bob9710

1 Answer


Since you did all the hard work and got your disparity map, calculating depth from here is the easy part. To calculate the depth of each pixel, you need three things:
1- The disparity value of the pixel
2- The distance between your cameras (the baseline)
3- The focal length (if for some reason your cameras have different focal lengths, you can use the average)

Depth = (focal length * distance between cameras) / (disparity value)

Apply this equation to each pixel of your disparity map. If the units of both your focal length and disparity value are pixels, they cancel out and you are left with the unit of the distance between cameras (cm, mm, ...).

//  assuming dmap is a single-channel 8-bit disparity map Mat
//  f: focal length (pixels), b: distance between cameras
cv::Mat depthImage(dmap.size(), CV_32FC1);
for (int row = 0; row < dmap.rows; ++row)
{
    for (int col = 0; col < dmap.cols; ++col)
    {
        int disparityValue = (int) dmap.at<uchar>(row, col);
        if (disparityValue <= 0) // skip invalid pixels to avoid division by zero
        {
            depthImage.at<float>(row, col) = 0.0f;
            continue;
        }
        depthImage.at<float>(row, col) = (f * b) / disparityValue;
    }
}

Or you can just use the OpenCV function available for this, cv::reprojectImageTo3D with the Q matrix from stereoRectify; it is probably better to do so.

unlut
  • Thanks. I searched on this topic and found out that the formula Depth = (focal length * distance between cameras) / (disparity value) can be applied only if both cameras are perfectly parallel, so I don't know how accurate this can be. If not, is the reprojectImageTo3D OpenCV function the better solution? Also, I would like to know the depth not of the whole image but only of the chessboard (i.e. of certain points). So how do I get the depth of only the chessboard object in the image? – Bob9710 Sep 06 '18 at 04:51
  • About your first question: that is exactly why you perform rectification; if you had two perfectly horizontally aligned cameras you would not need it. Second question: as I said, if you know where the chessboard is in your image, you can apply this equation to the pixels belonging to the chessboard. – unlut Sep 06 '18 at 05:50
  • OK, got it. But then the maths doesn't work. I get a disparity value of 72, and from the Q matrix I have a focal length of 450, so assume both values are in pixels. Here is the confusing part: from the Q matrix the baseline is -25.5, yet in reality the distance between the cameras is 13 cm, and the real measured distance to the object is 7.9 m. The maths is not working. Can you please tell me what is wrong? Thanks – Bob9710 Sep 06 '18 at 08:09
  • You obtain the distance between cameras from the [T matrix](https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereorectify) (translation between camera frames). According to your values, depth should be measured as ~85 cm. Let's try that: disparity value: 72 pixels, focal length: 450 pixels, baseline: sqrt(0.13^2 + 0.0016^2 + 0.006^2) ~= 0.13 m. Z = (450 * 0.13) / 72 ~= 80 cm. Is this value correct for you? If not, can you give an example of a depth value measured by the code and the actual real depth value? – unlut Sep 06 '18 at 08:27
  • The T matrix obtained from the calibration is {-0.13808630092854335, -0.0016795993989732654, -0.005964101318274714}. The real value I measured with a laser is 7.9 m. I did not use any code for that, just a simple laser measurement. – Bob9710 Sep 06 '18 at 08:34
  • Also, the disparity values vary a lot depending on which stereo matching method I use, BM or SGBM. – Bob9710 Sep 06 '18 at 08:46
  • Can I ask some questions about your calibration process? 1- Did you calibrate your cameras individually or together? 2- Did you perform any scaling on your images during the calibration process? 3- Is the 13 cm baseline consistent with real-world measurements? – unlut Sep 06 '18 at 08:55
  • 1. Together, as Master/Slave. 2. No, no scaling. 3. Yes, the 13.2 cm baseline is correct with real-world measurement. And the distance from the chessboard to the camera was obtained by real measurement with a laser, no code. The 7.m distance from the chessboard to the camera is the ground truth. – Bob9710 Sep 06 '18 at 09:06
  • Sorry, that was a typo; the real distance measured with the laser is 7.9 m. And the center of the chessboard is at Point(260, 191). – Bob9710 Sep 06 '18 at 09:17
  • Did you check the reprojection error of the calibration, and how much is it? Also, can you try estimating the depth of points on a closer chessboard? It would help to understand whether the source of the problem is your intrinsic parameters, the stereo matching algorithm, or both. – unlut Sep 06 '18 at 11:55
  • Yes, it's 0.21, so that should be OK. I have a closer one at 4.6 m; still not good. And there is a big difference between SGBM and BM. When I tweak the SGBM parameters it gets better, but it is still too far from the real depth values. – Bob9710 Sep 07 '18 at 06:16
  • Can you please share your left and right images? I would like to test them myself. – unlut Sep 07 '18 at 10:42
  • Yes, I edited them into the question. Can you open them, or should I share them with you? – Bob9710 Sep 10 '18 at 02:57
  • I added a link to the left and right images. Please have a look. Thanks – Bob9710 Sep 10 '18 at 03:12