
I am resurrecting some old code of mine that worked under OpenCV 2.4.1 and let me train my own HoG (Histogram of Oriented Gradients) descriptors for OpenCV's SVM. I am now porting that code to OpenCV 3.4.1 and have noticed that quite a bit of the API has changed.
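
For example, as far as I can tell, the old CvSVM/CvSVMParams setup maps onto the new cv::ml::SVM interface roughly like this (a from-memory sketch with dummy data, not my actual code):

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main() {
        // Dummy training set just to illustrate the call shapes:
        // 10 samples of 756 features, first 5 labelled +1, the rest -1.
        Mat trainData(10, 756, CV_32FC1);
        randu(trainData, 0.0, 1.0);
        Mat labels(10, 1, CV_32S, Scalar(-1));
        labels.rowRange(0, 5) = Scalar(1);

        // 2.4.x style (roughly what the old code looked like):
        //   CvSVMParams params;
        //   params.svm_type    = CvSVM::C_SVC;
        //   params.kernel_type = CvSVM::LINEAR;
        //   params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.01);
        //   CvSVM svm;
        //   svm.train(trainData, labels, Mat(), Mat(), params);
        //   svm.save("trainedSVM.xml");

        // 3.4.x style (what the code below was ported to):
        Ptr<ml::SVM> svm = ml::SVM::create();
        svm->setType(ml::SVM::C_SVC);
        svm->setKernel(ml::SVM::LINEAR);
        svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 0.01));
        svm->train(trainData, ml::ROW_SAMPLE, labels);
        svm->save("sketchSVM.xml");
        return 0;
    }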

The port compiles and runs; however, when I try to test my training cycle by calling "predict" to determine whether there is a person in my test image, the application dies with "SIGFPE: Arithmetic exception".

I have made sure the cv::Mat fed to the predictor matches the shape of the Mats stored in my "trainedSVM.xml" data. What could be wrong, and how can I fix it?
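
To be concrete about the shape check I mentioned: I compare the length of my test descriptor row against the feature count the loaded model reports, along the lines of this sketch (using the fm and svm variables from the test loop below; getVarCount() is, as I understand it, the 3.x accessor for the trained feature count):

    // fm is the 1 x N CV_32FC1 row of HOG values for the test image,
    // svm is the loaded model; these checks pass for me.
    CV_Assert(fm.type() == CV_32FC1);
    CV_Assert(fm.rows == 1);
    CV_Assert(fm.cols == svm->getVarCount());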

I test my trained SVM like this:

    // Load trained SVM xml data
    Ptr<SVM> svm = SVM::create();
    svm->load("trainedSVM.xml");

    // Counters for positive / negative classifications
    int nnn = 0, ppp = 0;

    // Get a directory iterator
    QDir dir;
    if ( type == 1 ) {
        qDebug() << "\n\nTesting images in Test Directory!";
        dir = QDir("test");
    }
    else if ( type == 2 ) {
        qDebug() << "\n\nTesting images in Pos Directory!";
        dir = QDir("pos");
    }
    else {
        qDebug() << "\n\nTesting images in Neg Directory!";
        dir = QDir("neg");
    }
    QFileInfoList fileList = dir.entryInfoList(QDir::AllEntries | QDir::NoDotAndDotDot);

    // Loop for the number of images that we have
    foreach(QFileInfo files, fileList){

        qDebug() << "Reading file: " << files.filePath();

        // Read image file
        Mat img, img_gray;
        img = imread(files.filePath().toStdString().c_str());

        qDebug() << "Converting color now...";

        // Gray
        cvtColor(img, img_gray, COLOR_RGB2GRAY);

        // Show image
        imshow("Gray Image", img_gray);
        //waitKey(10000);

        qDebug() << "Done converting color now...";

        // Extract HogFeature
        HOGDescriptor d( Size(32,16), Size(8,8), Size(4,4), Size(4,4), 9);
        vector<float> descriptorsValues;
        vector<Point> locations;
        d.compute( img_gray, descriptorsValues, Size(0,0), Size(0,0), locations);
        // Vector to Mat
        Mat fm = Mat(1, descriptorsValues.size(), CV_32FC1, descriptorsValues.data()).clone();

        qDebug() << "Test Descriptor Vector..." << fm.rows << fm.cols;

        // Classification whether data is positive or negative
        Mat result;   // output
        svm->predict(fm,result);
 
        ...

    }

    if ( type == 1 || type == 2 ) {
        qDebug() << "positive/negative = " << ppp << "/" << nnn << " Percent Correct: " << float(ppp)/float(fileList.count())*100.0f << "%";
    }
    else {
        qDebug() << "positive/negative = " << ppp << "/" << nnn << " Percent Correct: " << float(nnn)/float(fileList.count())*100.0f << "%";
    }
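
For reference, my understanding of the HOG parameters above is that one 32x16 window produces ((32-8)/4 + 1) * ((16-8)/4 + 1) = 21 blocks, each holding (8/4) * (8/4) = 4 cells of 9 bins, i.e. 756 values per window; since I call compute() on the whole grayscale image rather than a single window, descriptorsValues ends up being a multiple of that. A quick way to print both numbers (sketch, using the descriptorsValues vector from the loop above):

    HOGDescriptor d(Size(32,16), Size(8,8), Size(4,4), Size(4,4), 9);
    qDebug() << "Per-window descriptor size:" << (int)d.getDescriptorSize();        // 756
    qDebug() << "Values computed for this image:" << (int)descriptorsValues.size();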

I train the OpenCV SVM like this:

/// Train the SVM
void HoGTrainer::trainOpenCvSVM() {

    // Read Hog feature from XML file
    qDebug() << "Reading XML for pos/neg...";

    // Create xml to read
    qDebug() << "Reading XML file for positive features...";
    FileStorage read_PositiveXml("Positive.xml", FileStorage::READ);
    qDebug() << "Reading XML file for negative features...";
    FileStorage read_NegativeXml("Negative.xml", FileStorage::READ);

    // Positive Mat
    Mat pMat;
    read_PositiveXml["Descriptor_of_images"] >> pMat;
    // Read Row, Cols
    int pRow,pCol;
    pRow = pMat.rows; pCol = pMat.cols;

    // Negative Mat
    Mat nMat;
    read_NegativeXml["Descriptor_of_images"] >> nMat;
    // Read Row, Cols
    int nRow,nCol;
    nRow = nMat.rows; nCol = nMat.cols;

    // Rows, Cols printf
    fprintf(stderr,"   pRow=%d pCol=%d, nRow=%d nCol=%d\n", pRow, pCol, nRow, nCol );
    read_PositiveXml.release();
    read_NegativeXml.release();

    // Make training data for SVM
    qDebug() << "Making training data for SVM...";

    // Descriptor data set
    Mat PN_Descriptor_mtx( pRow + nRow, pCol, CV_32FC1 ); // pCol and nCol are the descriptor length, so the two values must be equal
    memcpy(PN_Descriptor_mtx.data, pMat.data, sizeof(float) * pMat.cols * pMat.rows );
    int startP = sizeof(float) * pMat.cols * pMat.rows;
    memcpy(&(PN_Descriptor_mtx.data[ startP ]), nMat.data, sizeof(float) * nMat.cols * nMat.rows );

    // Data labeling
    Mat labels( pRow + nRow, 1, CV_32S, Scalar(-1.0) );
    labels.rowRange( 0, pRow ) = Scalar( 1.0 );

    // Set svm parameters
    qDebug() << "SVM Parameter setting...";
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::LINEAR);
    svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 0.01));

    // Training
    qDebug() << "Training SVM Now...";
    svm->train(PN_Descriptor_mtx, ml::ROW_SAMPLE, labels);

    // Trained data save
    qDebug() << "Saving trained data...";
    svm->save( "trainedSVM.xml" );

}
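
In case the raw memcpy concatenation looks suspicious: my understanding is that it simply stacks the positive descriptor rows on top of the negative ones, which I believe could also be written with cv::vconcat like this (untested sketch, assuming pMat and nMat are continuous CV_32FC1 with the same column count):

    // Stack positive rows on top of negative rows, then label them +1 / -1.
    Mat PN_Descriptor_mtx;
    vconcat(pMat, nMat, PN_Descriptor_mtx);              // (pRow + nRow) x pCol, CV_32FC1

    Mat labels(PN_Descriptor_mtx.rows, 1, CV_32S, Scalar(-1));
    labels.rowRange(0, pMat.rows) = Scalar(1);           // first pRow rows are the positives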

I create a "Positive" and "Negative" XML descriptor set for my directories containing positive and negative image samples:

/// Give this a path to the images to describe with the HoG algorithm
Mat HoGDescriptorExtractor::createXmlDescriptor(QString path, QString posOrNeg, int numberImagesToTrain) {

     // The vectors to store the descriptors
     vector< vector <float> > v_descriptorsValues;
     vector< vector <Point> > v_locations;

     // Get a directory iterator
     QDir dir(path);
     QFileInfoList fileList = dir.entryInfoList(QDir::AllEntries | QDir::NoDotAndDotDot);
     int count = 0;

     qDebug() << "Looking in directory: " << path;

     // Loop for the number of images that we have
     foreach(QFileInfo files, fileList){

         if (count >= numberImagesToTrain) {
             break;
         };

         // Get the image file
         QString imageName = files.filePath();
         qDebug() << "Image is: " << imageName;
         if ( imageName.contains("png") ) {

             //qDebug() << "Opening file: " << imageName;

             // Read image file
             Mat img, img_gray;
             img = imread(imageName.toStdString().c_str());

             // Ensure 64x128
             resize(img, img, Size(64,128) ); //Size(64,48) ); //Size(32*2,16*2)); //Size(80,72) );

             // Grayscale
             cvtColor(img, img_gray, COLOR_RGB2GRAY);

             // Extract features
             HOGDescriptor d( Size(32,16), Size(8,8), Size(4,4), Size(4,4), 9);
             vector< float> descriptorsValues;
             vector< Point> locations;
             d.compute( img_gray, descriptorsValues, Size(0,0), Size(0,0), locations);

             // Vector to Mat
             Mat fm = Mat(1, descriptorsValues.size(), CV_32FC1, descriptorsValues.data());

             qDebug() << "Train Descriptor Vector..." << fm.rows << fm.cols;

             // Store the descriptors
             v_descriptorsValues.push_back( descriptorsValues );
             v_locations.push_back( locations );

             // Show image
             imshow("Original image", img);
             waitKey(10);

             // Increment counter
             count++;

         }

         qDebug() << count << fileList.size();

     }

     // Destroy the original image window
     destroyWindow("Original image");

     qDebug() << "Created this many desctipors: " << v_descriptorsValues.size();

     // Figure out the name
     QString saveXmlName;
     if ( posOrNeg == "pos" ) {
         saveXmlName = "Positive.xml";
     }
     else {
         saveXmlName = "Negative.xml";
     }

     // Save to xml
     FileStorage hogXml(saveXmlName.toStdString().c_str(), FileStorage::WRITE);

     // 2d vector to Mat
     int row = v_descriptorsValues.size(), col=v_descriptorsValues[0].size();
     fprintf(stderr,"row=%d, col=%d\n", row, col);
     Mat result(row,col,CV_32F);

     qDebug() << "Mem copy prior to saving  to XML...";

     // Save Mat to XML
     for( int i=0; i < row; ++i ) {
        memcpy( &(result.data[col * i * sizeof(float) ]) ,v_descriptorsValues[i].data(),col*sizeof(float));
     }

     qDebug() << "Writing XML now...";

     // Write xml

     if ( posOrNeg == "pos" ) {
         saveXmlName = "Positive";
     }
     else {
         saveXmlName = "Negative";
     }
     write(hogXml, "Descriptor_of_images",  result);

     qDebug() << "Done writing XML...";

     // Release it
     hogXml.release();

     // Return the Mat
     return result;

}
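
For what it's worth, I believe the per-row memcpy in the function above is equivalent to copying each descriptor vector into the corresponding row of the output Mat, e.g. (untested sketch):

    // Same effect as the memcpy loop: copy descriptor i into row i of 'result'.
    for (int i = 0; i < row; ++i) {
        Mat(v_descriptorsValues[i]).reshape(1, 1).copyTo(result.row(i));
    }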

The "trainedSVM.xml" data looks like this:

<?xml version="1.0"?>
<opencv_storage>
<my_svm type_id="opencv-ml-svm">
  <svm_type>C_SVC</svm_type>
  <kernel><type>LINEAR</type></kernel>
  <C>1.</C>
  <term_criteria><epsilon>2.2204460492503131e-16</epsilon>
    <iterations>10000</iterations></term_criteria>
  <var_all>197316</var_all>
  <var_count>197316</var_count>
  <class_count>2</class_count>
  <class_labels type_id="opencv-matrix">
    <rows>1</rows>
    <cols>2</cols>
    <dt>i</dt>
    <data>
      -1 1</data></class_labels>
  <sv_total>1</sv_total>
  <support_vectors>
    <_>
      -3.88036860e-05 2.05393124e-04 1.22791782e-04 -5.50679579e-05
      -3.21937609e-04 3.46977649e-05 -2.07526100e-04 -1.91126266e-04
      -3.10885953e-05 1.90627572e-04 3.08653078e-04 8.38103952e-05
      -8.98162471e-05 -2.25773605e-04 3.44226355e-05 -2.37034255e-04
      -1.99947317e-05 2.15609951e-04 -1.37146817e-05 3.16219724e-04
      -3.26509580e-05 1.89740647e-04 -3.89780209e-04 -2.88823299e-04