Please find working code below. If for some reason it doesn't work on your machine, check which version of mxnet you have; I am running it on a Mac with mxnet 0.10.1.
Since you said you want to keep the code as close to the example as possible, I have set the attribute values back to the initial ones. Feel free to change them if you need to. For example, a momentum of 0.5 seems too low: values of 0.9 and higher are typical. Likewise, a learning rate of 0.5 is too big: learning rates are usually no higher than 0.1.
library('mxnet')

# Train an MLP on a matrix whose last column is the label and store it
# in the global Models list under the given name.
neural.train = function(model, XY)
{
  XY <- as.matrix(XY)
  X <- XY[, -ncol(XY)]       # features: all columns except the last
  Y <- XY[, ncol(XY)]        # label: the last column
  Y <- ifelse(Y > 0, 1, 0)   # binarize the label for two-class softmax
  Models[[model]] <<- mx.mlp(X, Y,
    hidden_node = c(30, 30, 30),  # three hidden layers of 30 units each
    activation = "tanh",
    momentum = 0.5,               # 0.9 and above is the more usual choice
    learning.rate = 0.5,          # rates above 0.1 are rarely used
    out_activation = "softmax",
    num.round = 100,
    out_node = 2,                 # two output classes
    array.batch.size = 100,
    dropout = 0,
    array.layout = "rowmajor")    # samples in rows, features in columns
}
# Return class probabilities for new data from a stored model.
neural.predict = function(model, X)
{
  if (is.vector(X)) X <- t(X)   # a single sample becomes a 1-row matrix
  return(predict(Models[[model]], X, array.layout = "rowmajor"))
}
# Save all trained models to disk.
neural.save = function(name)
{
  save(Models, file = name)
}
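# (Assumption, not part of the original:) a matching loader for models
# saved by neural.save(). Note that mxnet model objects hold external
# pointers, so a plain save()/load() round trip is only reliable within
# the same R session; for persistence across sessions the package
# provides mx.serialize() / mx.unserialize().
neural.load = function(name)
{
  load(name, envir = .GlobalEnv)   # restores the Models list
}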
# Reset the global model store and seed R's RNG.
neural.init = function()
{
  set.seed(365)
  Models <<- vector("list")
}
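# Note: set.seed() only seeds R's own RNG, which drives the rnorm() and
# sample() calls below. mxnet draws its initial weights from a separate
# RNG; if you also need the training itself to be reproducible, call
# mx.set.seed() as well.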
# Training data: two features plus a random 0/1 label in the last column.
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
Var3 <- sample(c(0, 1), replace = TRUE, size = 100)
training.data <- matrix(c(Var1, Var2, Var3), nrow = 100, ncol = 3)

# Test data: features only, drawn from the same distributions.
Var4 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var5 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test.data <- matrix(c(Var4, Var5), nrow = 100, ncol = 2)
neural.init()
neural.train("mx_mlp_model", training.data)
neural.predict("mx_mlp_model", test.data)
After executing this, I get the following output:
> neural.predict("mx_mlp_model", test.data)
[,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,21] [,22] [,23] [,24] [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36] [,37] [,38] [,39]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48] [,49] [,50] [,51] [,52] [,53] [,54] [,55] [,56] [,57] [,58]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,59] [,60] [,61] [,62] [,63] [,64] [,65] [,66] [,67] [,68] [,69] [,70] [,71] [,72] [,73] [,74] [,75] [,76] [,77]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,78] [,79] [,80] [,81] [,82] [,83] [,84] [,85] [,86] [,87] [,88] [,89] [,90] [,91] [,92] [,93] [,94] [,95] [,96]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,97] [,98] [,99] [,100]
[1,] 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53
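Since out_activation = "softmax", the matrix above holds class probabilities, with classes in rows and samples in columns. (Because the Var3 labels are random, there is nothing for the network to learn, so near-constant probabilities are expected here.) If you need hard 0/1 labels instead, one way to get them from the result above (a small sketch):

pred <- neural.predict("mx_mlp_model", test.data)
pred.labels <- max.col(t(pred)) - 1   # most probable class per sample, shifted to 0/1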
Hope it helps.