Hello dear users of neurolab, I want to change the activation function of the hidden-layer nodes to ReLU while keeping a linear activation function in the output nodes.
import numpy as np
import neurolab as nl

# Training samples: 5 random points in [-1, 1]^2.
# Renamed from `input` to avoid shadowing the Python builtin.
samples = np.random.uniform(-1, 1, (5, 2))
# Target: the sum of the two input coordinates, shaped (5, 1) as
# neurolab expects one row per sample.
target = (samples[:, 0] + samples[:, 1]).reshape(5, 1)

# Feed-forward net: two inputs each ranging over [-1, 1], one hidden
# layer of 4 neurons, one output neuron.  No `transf` argument is given,
# so neurolab uses its default transfer function for every layer
# (TanSig — confirm against the installed neurolab version).
net = nl.net.newff([[-1, 1]] * 2, [4, 1])
# What I tried (this fails: `neurolab.trans` has no attribute `PoseLin`)
import numpy as np
import neurolab as nl

# Training samples: 5 random points in [-1, 1]^2; `samples` instead of
# `input` to avoid shadowing the Python builtin.
samples = np.random.uniform(-1, 1, (5, 2))
# Target: the sum of the two input coordinates, shaped (5, 1).
target = (samples[:, 0] + samples[:, 1]).reshape(5, 1)

# neurolab ships no ReLU class and no `PoseLin` class — the original
# call raised AttributeError.  SatLinPrm(k, out_min, out_max) computes
# clip(k * x, out_min, out_max), so with k=1, out_min=0 and a very large
# out_max it behaves like ReLU over any practical activation range.
relu = nl.trans.SatLinPrm(1, 0, 1e6)

# One transfer function per layer: ReLU-like for the hidden layer,
# PureLin (identity) for the output layer, as asked.
net = nl.net.newff([[-1, 1]] * 2, [4, 1], [relu, nl.trans.PureLin()])