I am trying to train an ActionValueNetwork
on a simple XOR function, but the results look random.
""" Reinforcement Learning to learn xor function
"""
# generic import
import numpy as np
import random
# pybrain import
from pybrain.rl.explorers import EpsilonGreedyExplorer
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.valuebased import ActionValueNetwork, NFQ
# The parameters of your algorithm
av_network = ActionValueNetwork(2, 2) # 2 dimensions in input, 2 actions possible (1 or 0)
learner = NFQ()
learner._setExplorer(EpsilonGreedyExplorer(0.0)) # No exploration
agent = LearningAgent(av_network, learner)
# The training
for _ in xrange(1,25): # we iterate 25 times
for x in xrange(1,4): # batch of 4 questions.
listxor = random.choice([[0, 0],[0, 1], [1, 0], [1, 1]])
resultxor = listxor[0]^listxor[1] # xor operation
agent.integrateObservation(listxor)
action = agent.getAction()
reward = 1 - 2*abs(resultxor - float(action[0])) # 1 if correct, -1 otherwise
print "xor(",listxor,") = ", resultxor, " || action = " , action[0], "reward = ", reward
agent.giveReward(reward)
agent.learn()
# Test
print "test : "
print "[0, 0] ", learner.module.getMaxAction([0, 0])
print "[0, 1] ", learner.module.getMaxAction([0, 1])
print "[1, 0] ", learner.module.getMaxAction([1, 0])
print "[1, 1] ", learner.module.getMaxAction([1, 1])
I know it's not the object-oriented way of PyBrain (task, env, etc.), but I must do it this way.
I had good results with ActionValueTable
and Q, but I want to use the weight of each dimension.
Can someone explain where I am wrong? It seems like the network doesn't learn anything.
Thanks!