Here's my replay/train function implementation. In DDQN mode, model2 lags behind model by one batch of updates during replay/training: model2's weights are refreshed from model at the start of each replay call, and model is then trained on the minibatch. Setting self.ddqn = False turns it into a normal DQN. Is this correctly implemented? I am using this paper as a reference:
http://papers.nips.cc/paper/3964-double-q-learning.pdf
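For context, these are the targets I understand the two modes should compute (standard notation, not taken from my code: θ is the online network model, θ⁻ is the lagged copy model2):

$$y_{\mathrm{DQN}} = r + \gamma \max_{a'} Q(s', a'; \theta)$$
$$y_{\mathrm{DDQN}} = r + \gamma \, Q\big(s', \arg\max_{a'} Q(s', a'; \theta);\ \theta^-\big)$$

i.e. in DDQN the online network selects the next action and the lagged network evaluates it.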
DDQN Code
import random

import numpy as np
import torch

def replay(self, batch_size):
    if self.ddqn:
        self.model2.load_state_dict(self.model.state_dict())  # copies model weights to model2
    minibatch = random.sample(self.memory, batch_size)
    for state, action, reward, next_state, done in minibatch:
        state = torch.Tensor(state)
        next_state = torch.Tensor(next_state)
        if self.cuda:
            state = state.cuda()
            next_state = next_state.cuda()
        Q_current = self.model(state)
        # Detach so gradients flow only through Q_current, not the target.
        Q_target = Q_current.clone().detach()  # TODO: test copy.deepcopy() and Tensor.copy_()
        # (1 - done) zeroes the bootstrap term for terminal transitions.
        Q_next = (1 - done) * self.model(next_state).cpu().detach().numpy()
        next_action = np.argmax(Q_next)  # action selected by the online network
        if self.ddqn:
            # DDQN: the lagged network evaluates the action the online network selected.
            Q_next = (1 - done) * self.model2(next_state).cpu().detach().numpy()
        # Soft, tabular-style move toward the TD target; note self.alpha scales
        # the TD error on top of the optimizer's own learning rate.
        q_sa = Q_current[action].item()
        Q_target[action] = q_sa + self.alpha * (reward + self.gamma * Q_next[next_action] - q_sa)
        self.optim.zero_grad()
        loss = self.loss(Q_current, Q_target)
        loss.backward()
        self.optim.step()
    if self.epsilon > self.epsilon_min:
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
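One thing I was unsure about is the Q_target = Q_current.clone() line: if the copy stays attached to the autograd graph, the loss also backpropagates through the target. A quick standalone check (plain PyTorch, independent of my agent class) showing why the .detach() is needed:

import torch

q = torch.randn(4, requires_grad=True)  # stand-in for Q_current

target = q.clone()
print(target.requires_grad)   # True: gradients would flow into the target

target = q.clone().detach()
print(target.requires_grad)   # False: safe to use as a fixed regression target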