
My CSV dataset is structured as follows:

N,P,K,temperature,humidity,ph,rainfall,label
90,42,43,20.87974371,82.00274423,6.502985292000001,202.9355362,rice

The goal is to recommend the optimal crop given the features. My pipeline is as follows:

private static void dataLoad2() {
        try(RecordReader myReader = new CSVRecordReader(1,',')){
            myReader.initialize(

                    //reading file
                    new FileSplit(
                            //setting dir
                            new File("dir/to/cropSet.csv")));
            
            Schema dbSchema = new Schema.Builder()
                    .addColumnInteger("N")
                    .addColumnInteger("P")
                    .addColumnInteger("K")
                    .addColumnDouble("temperature")
                    .addColumnDouble("humidity")
                    .addColumnDouble("ph")
                    .addColumnDouble("rainfall")
                    .addColumnCategorical("label", "rice", "maize", "chickpea", "kidneybeans", "pigeonpeas","mothbeans", "mungbean", "blackgram", "lentil", "pomegranate","banana", "mango", "grapes", "watermelon", "muskmelon", "apple","orange", "papaya", "coconut", "cotton", "jute", "coffee")
                    .build();


            DataAnalysis analysis = AnalyzeLocal.analyze(dbSchema, myReader);

            TransformProcess transformer = new TransformProcess.Builder(dbSchema)
                    .convertToInteger("N" ).normalize("N",Normalize.MinMax,analysis)
                    .convertToInteger("P"  ).normalize("N",Normalize.MinMax,analysis)
                    .convertToInteger("K"  ).normalize("K", Normalize.MinMax,analysis )
                    .removeColumns("temperature"  )
                    .removeColumns("humidity"  )
                    .removeColumns("ph"  )
                    .removeColumns("rainfall"  )
                    .categoricalToInteger("label")
                    .build();

            Schema transSchema = transformer.getFinalSchema();

            RecordReader recordReader2 = new CSVRecordReader(1, ',');
            

            TransformProcessRecordReader tprr = new TransformProcessRecordReader(recordReader2,transformer);


            tprr.initialize(
                    //reading file
                    new FileSplit(
                            //setting dir
                            new File("/Users/mac/Desktop/RTS_ML/src/main/resources/cropSet.csv")));



            RecordReaderDataSetIterator builderObj = new RecordReaderDataSetIterator.Builder(tprr, 100)
                    .classification(transSchema.getIndexOfColumn("label"), 23)
                    .build();


            DataSet mutiData = builderObj.next();
            


            //Normalizing the data
            DataNormalization normalizerObj = new NormalizerStandardize();

            //Fit dataset to normalizer
            normalizerObj.fit(mutiData);

            //Perform normalization
            normalizerObj.transform(mutiData);

            //Splitting the dataset
            SplitTestAndTrain testAndTrain = mutiData.splitTestAndTrain(75);

            //train data
            DataSet train = testAndTrain.getTrain();

            //test data
            DataSet test = testAndTrain.getTest();

            cropNetwork( train,test,transSchema);

        }catch (FrameFilter.Exception e){
            out.println("Error: " + e.getLocalizedMessage());

        } catch (IOException | InterruptedException e) {
            throw new RuntimeException(e);
        }
        
    }

And the model:

private static void cropNetwork(DataSet training, DataSet testing, Schema nwdbSchema){
    int outputNum = 23;
    int numOfRows =training.numExamples();
    int numOfColumns = nwdbSchema.numColumns();


    MultiLayerConfiguration nnConfig = new NeuralNetConfiguration.Builder()
            .seed(0xC0FFEE)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .updater(new Adam.Builder().learningRate(0.005).build())
            .l2(0.000316)
            .list()

            .layer(0,new DenseLayer.Builder().nIn( numOfRows * numOfColumns).nOut(24).activation(Activation.RELU)
                    .weightInit(WeightInit.XAVIER).build())
            .layer(1,new DenseLayer.Builder().nIn(numOfRows * numOfColumns).nOut(numOfRows * numOfColumns).activation(Activation.RELU)
                    .weightInit(WeightInit.XAVIER).build())
            .layer(2,new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nIn(numOfRows * numOfColumns).nOut(23)
                    .activation(Activation.SOFTMAX).weightInit(WeightInit.XAVIER).build())
            .setInputType(InputType.feedForward(numOfRows * numOfColumns))
            .build();







    // Initializing model
    MultiLayerNetwork model2 = new MultiLayerNetwork(nnConfig);
    model2.init();

    //nnModel.setListeners( new ScoreIterationListener(100));

    // Training model
    model2.fit(training);

    try (INDArray modeOutput = model2.output(training.getFeatures(),false)) {
        Evaluation evalModel = new Evaluation(23);
        evalModel.eval(testing.getLabels(),modeOutput);
        out.println(modeOutput);
    }

}

This returns:

Exception in thread "main" org.deeplearning4j.exception.DL4JInvalidInputException: Input size (3 columns; shape = [75, 3]) is invalid: does not match layer input size (layer # inputs = 300) (layer name: layer0, layer index: 0, layer type: DenseLayer) at org.deeplearning4j.nn.layers.BaseLayer.preOutputWithPreNorm(BaseLayer.java:317)

I know it definitely has to do with my network configuration, but part of me feels like it's the pipeline. What am I doing wrong?

1 Answer


For InputType.feedForward, the value should just be the number of columns (features) going into the input layer.

That then sets up the rest of the layers to have the appropriate number of inputs.

Each row is considered an example. You don't need to tell the InputType the number of rows up front; that is always expected to vary by default.
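A minimal sketch of what that looks like for this dataset, assuming the only feature columns left after the TransformProcess are N, P and K (so each example has 3 input values), and with every nIn simply omitted so setInputType can fill it in:

int numFeatures = transSchema.numColumns() - 1;   // feature columns only, label excluded
int numClasses = 23;

MultiLayerConfiguration nnConfig = new NeuralNetConfiguration.Builder()
        .seed(0xC0FFEE)
        .weightInit(WeightInit.XAVIER)
        .updater(new Adam.Builder().learningRate(0.005).build())
        .l2(0.000316)
        .list()
        // no nIn anywhere; setInputType computes it for each layer
        .layer(new DenseLayer.Builder().nOut(24).activation(Activation.RELU).build())
        .layer(new DenseLayer.Builder().nOut(24).activation(Activation.RELU).build())
        .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nOut(numClasses).activation(Activation.SOFTMAX).build())
        // the per-example input width: number of feature columns, never rows * columns
        .setInputType(InputType.feedForward(numFeatures))
        .build();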

Adam Gibson
  • so then how do I specify the nIn because it can't be left blank – Sams Got the blues Jun 05 '22 at 03:10
  • I made them both (num of rows -1 ) which resulted in Exception in thread "main" java.lang.IllegalArgumentException: Unable to evaluate. Predictions and labels arrays are not same shape. Predictions shape: [75, 23], Labels shape: [25, 23] at org.nd4j.evaluation.classification.Evaluation.eval(Evaluation.java:399) – Sams Got the blues Jun 05 '22 at 03:16
  • I assumed it might be an issue with the way I implemented the pipeline. When I tried to use categoricalToOneHot on the label column, the RecordReaderDataSetIterator returns 'Unknown column: "label"' – Sams Got the blues Jun 05 '22 at 04:11
  • Wow okay, perfect opportunity to really understand the training process. The issue was with the data split, upon changing the split to 50/50 it matched. I keep forgetting that this isn't ML lol. Thanks! – Sams Got the blues Jun 05 '22 at 04:38
  • I managed to get the output, but how can I test the model's performance on a sample input, e.g. {N=5,P=10....}? – Sams Got the blues Jun 05 '22 at 05:13 (one way to do this is sketched after these comments)
  • You don't need to specify the nIn. setInputType takes care of that. Don't add any other concepts to this. The only thing setInputType does is set the nIn for you. For feed-forward layers that's just the number of columns. – Adam Gibson Jun 05 '22 at 22:32
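For the comment above about scoring a single hand-built sample, a sketch of one way to do it, reusing the model2 and normalizerObj variables from the question's pipeline (and assuming the sample gets the same preprocessing as the training data):

// Build a 1-row feature matrix (N, P, K), normalize it the same way as the training data,
// then ask the network for class probabilities.
INDArray sample = Nd4j.create(new double[][]{{90, 42, 43}});
normalizerObj.transform(sample);                         // reuse the fitted NormalizerStandardize
INDArray probabilities = model2.output(sample, false);   // softmax over the 23 crop classes
int predictedIndex = probabilities.argMax(1).getInt(0);  // index of the most likely crop
out.println("Predicted label index: " + predictedIndex);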