
I'm trying to implement a neural network with OpenCV 3.1.0. When I call predict, the output vector is filled with -1.#QNAN values. What am I doing wrong?

// train
Ptr<ANN_MLP> ann = ml::ANN_MLP::create();
// layer sizes: input = one neuron per attribute, one hidden layer, output = one neuron per class
Mat layers(1, 3, CV_32F);
layers.at<float>(0) = features.cols;
layers.at<float>(1) = nlayers;
layers.at<float>(2) = numLabels;
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM);
ann->setLayerSizes(layers);
// one-hot encode the labels as the response matrix
Mat trainClasses;
trainClasses.create(features.rows, numLabels, CV_32F);
for (int i = 0; i < trainClasses.rows; i++)
{
    for (int k = 0; k < trainClasses.cols; k++)
    {
        if (k == labels[i])
            trainClasses.at<float>(i, k) = 1;
        else
            trainClasses.at<float>(i, k) = 0;
    }
}
// uniform sample weights
Mat weights(1, features.rows, CV_32F, Scalar::all(1));
Ptr<TrainData> tdata = TrainData::create(features, ROW_SAMPLE,
    trainClasses, Mat(), Mat(), weights, Mat());
ann->train(tdata);

// predict
Mat output(1, numLabels, CV_32F);
ann->predict(test_data, output);
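
For reference, one common way to turn the raw MLP responses into a class label is to take the column with the largest value. This is only a minimal sketch; it assumes output ends up holding a single row of numLabels responses for one test sample:

// pick the class with the strongest response (sketch; assumes one test sample)
Point maxLoc;
minMaxLoc(output.row(0), nullptr, nullptr, nullptr, &maxLoc);
int predictedLabel = maxLoc.x;   // column index == class index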
moshe

1 Answer


I am new to neural nets, but I think your input layer should have as many neurons as there are attributes (features) per sample.

Suppose you have 5 training images of size 10*10 and feed in the raw pixels: each image is one sample with 10*10 = 100 attributes, so the input layer needs 100 neurons (one per pixel), regardless of how many images you train on.
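
For example, a minimal sketch of that idea (it assumes each 10*10 image is flattened into one row of a CV_32F matrix named features, reuses ann, nlayers and numLabels from the question, and uses a made-up file name):

// flatten each training image into one row of attributes (hypothetical file name)
Mat img = imread("digit0.png", IMREAD_GRAYSCALE);
Mat row;
img.convertTo(row, CV_32F);
features.push_back(row.reshape(1, 1));   // 1 x (10*10) = 1 x 100 row per sample

// input layer = one neuron per attribute, output layer = one neuron per class
Mat_<int> layerSizes(1, 3);
layerSizes(0, 0) = features.cols;        // 100, not 5*100
layerSizes(0, 1) = nlayers;
layerSizes(0, 2) = numLabels;
ann->setLayerSizes(layerSizes);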

Aditya Narkar