0

Hello everyone. This is the code of the iRPROP+ algorithm for my MLP. When I try to train my network, the standard deviation decreases for 1500 epochs (very slowly: from ~0.5 to 0.4732), but then it suddenly starts to increase. Can someone tell me what I did wrong?

 /// <summary>
 /// One iRPROP+ training step: back-propagates the error through the network
 /// (filling each neuron's Delta), then applies the resilient-propagation
 /// step-size rule to every bias and weight.
 /// Assumes a forward pass has already set n.Output and that CurrentError /
 /// LastError hold the errors of this and the previous epoch — TODO confirm
 /// against the caller.
 /// </summary>
 public void RPROP()
    {
        // a: step-size growth factor, b: shrink factor, nMax/nMin: bounds on
        // the per-weight step size (the standard Rprop constants 1.2 / 0.5).
        double a = 1.2, b = 0.5, nMax = 50, nMin = 0.000001;

        // --- Backward pass: compute each neuron's local gradient (Delta) ---
        for (int l = Network.Length - 1; l > 0; l--)
        {
            for (int i = 0; i < Network[l].getSize(); i++)
            {
                Neuron n = Network[l].Neurons[i];
                if (l == Network.Length - 1)
                {
                    // Output layer: derivative of the squared error w.r.t. the net input.
                    n.Delta = (n.Output - DesiredOutput[i]) * ActFunc.calcDeprivateFunction(n.Output);
                }
                else
                {
                    // Hidden layer: weighted sum of the next layer's deltas.
                    double sum = 0;
                    for (int k = 0; k < Network[l + 1].getSize(); k++)
                    {
                        sum += Network[l + 1].Neurons[k].getWeight(i) * Network[l + 1].Neurons[k].Delta;
                    }
                    n.Delta = sum * ActFunc.calcDeprivateFunction(n.Output);
                }
            }
        }

        // --- iRPROP+ weight/bias update ---
        for (int l = 1; l < Network.Length; l++)
        {
            for (int i = 0; i < Network[l].getSize(); i++)
            {
                Neuron n = Network[l].Neurons[i];
                if ((n.PrevDelta * n.Delta) > 0)
                {
                    // Gradient kept its sign: accelerate (grow the step, capped at nMax)
                    // and step against the gradient.
                    n.N = Math.Min(a * n.PrevN, nMax);
                    n.Bias -= n.N * Math.Sign(n.Delta);
                    for (int j = 0; j < Network[l - 1].getSize(); j++)
                    {
                        n.setWeight(j, n.getWeight(j) - n.N * Math.Sign(n.Delta));
                    }
                    n.PrevDelta = n.Delta;
                }
                else if ((n.PrevDelta * n.Delta) < 0)
                {
                    // Sign flip: the last step jumped over a minimum. Shrink the
                    // step and, only if the overall error grew, revert the previous
                    // update (the "+" part of iRPROP+).
                    n.N = Math.Max(b * n.PrevN, nMin);
                    if (this.CurrentError > this.LastError)
                    {
                        n.Bias += n.PrevN * Math.Sign(n.PrevDelta);
                        for (int j = 0; j < Network[l - 1].getSize(); j++)
                        {
                            n.setWeight(j, n.getWeight(j) + n.PrevN * Math.Sign(n.PrevDelta));
                        }
                    }
                    // BUG FIX: the original set n.Delta = 0 here, which is a no-op —
                    // Delta is overwritten by the next backward pass, so PrevDelta
                    // kept its stale sign and kept re-triggering this reversion
                    // branch. iRPROP+ requires the STORED previous gradient to be
                    // zeroed so the next epoch takes the "== 0" branch below.
                    n.PrevDelta = 0;
                }
                else if ((n.PrevDelta * n.Delta) == 0)
                {
                    // No usable sign information (first epoch, or right after a
                    // reversion): step with the current size, leaving it unchanged.
                    n.Bias -= n.N * Math.Sign(n.Delta);
                    for (int j = 0; j < Network[l - 1].getSize(); j++)
                    {
                        n.setWeight(j, n.getWeight(j) - n.N * Math.Sign(n.Delta));
                    }
                    n.PrevDelta = n.Delta;
                }
                // Remember this epoch's step size for the next update.
                n.PrevN = n.N;
            }
        }
    }

1 Answer

0

At first glance: you calculate the error for a single training element and immediately update the network on it. Try running over the full training set without changing the weights, just accumulating the deltas; after that, update the weights once, store the previous deltas, and start over.
Also, there is no proper update for the neuron threshold.

Vaszil
  • 121
  • 4