I have modified the CartPole environment from OpenAI Gym so that it starts in the inverted (hanging-down) position and has to learn the swing-up. I run it on Google Colab because I thought it would be way faster than my laptop, but it's super slow: about 40 seconds per episode, roughly the same as on my laptop. I even tried to adapt it for Google's TPUs, but nothing changed. As far as I can tell, the main time consumers are .fit() and .predict(). I use .predict() here:
```python
def get_qs(self, state):
    # Query the main network for the Q values of a single state
    return self.model.predict(
        np.array(state).reshape(-1, *state.shape),
        workers=8, use_multiprocessing=True
    )[0]
```
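One thing I suspect: the Keras docs say .predict() is built for large batches and recommend calling the model directly for small inputs that fit in one batch (and as far as I can tell, workers/use_multiprocessing only apply to generator inputs anyway, not to NumPy arrays). A minimal sketch of that variant, assuming the same single-state use as above (the tf.convert_to_tensor step and the float32 dtype are my assumption):

```python
import numpy as np
import tensorflow as tf

def get_qs(self, state):
    # model(x) skips predict()'s per-call batching/callback machinery;
    # training=False keeps layers such as BatchNormalization in inference mode
    x = tf.convert_to_tensor(np.array(state).reshape(-1, *state.shape), dtype=tf.float32)
    return self.model(x, training=False)[0].numpy()
```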
and here, in the training function, I also use .fit():
```python
def train(self, terminal_state, step):
    """For training, it is always worth taking a larger set of data to prevent overfitting."""
    # Note: no @tf.function decorator here - Model.predict() and Model.fit()
    # manage their own graphs and cannot be called inside a tf.function.

    # Start training only once enough samples are in the replay memory
    if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
        return

    # Get a minibatch of random samples from the memory replay table
    minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)

    # Get current states from minibatch, then query NN model for Q values
    current_states = np.array([transition[0] for transition in minibatch])
    current_qs_list = self.model.predict(current_states)

    # Get future states from minibatch, then query NN model for Q values
    # When using a target network, query it; otherwise the main network should be queried
    new_current_states = np.array([transition[3] for transition in minibatch])
    future_qs_list = self.target_model.predict(new_current_states, workers=8, use_multiprocessing=True)

    X = []
    y = []

    # Now we need to enumerate the transitions in the minibatch
    for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):

        # If not a terminal state, get new Q from future states, otherwise set it to the reward
        # almost like with Q-learning, but we use just part of the equation here
        if not done:
            max_future_q = np.max(future_qs_list[index])
            new_q = reward + DISCOUNT * max_future_q
        else:
            new_q = reward

        # Update Q value for the given state
        current_qs = current_qs_list[index]
        current_qs[action] = new_q

        # And append to our training data
        X.append(current_state)
        y.append(current_qs)

    # Fit on all samples as one batch, log only on terminal state
    self.model.fit(np.array(X), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0,
                   shuffle=False, callbacks=[self.tensorboard] if terminal_state else None,
                   workers=8, use_multiprocessing=True)

    # Update target network counter every episode
    if terminal_state:
        self.target_update_counter += 1

    # If counter reaches set value, update target network with weights of main network
    if self.target_update_counter > UPDATE_TARGET_EVERY:
        self.target_model.set_weights(self.model.get_weights())
        self.target_update_counter = 0
```
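For the .fit() side: since I fit on exactly one batch anyway, maybe train_on_batch is the better endpoint, as it performs a single gradient step without fit()'s epoch and callback machinery. A minimal sketch of what I mean (whether it actually helps here is my assumption, and the TensorBoard logging would then have to be done manually):

```python
# Hypothetical replacement for the model.fit(...) call above:
# one gradient update on the minibatch, no epoch loop, no callbacks
loss = self.model.train_on_batch(np.array(X), np.array(y))
```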
Can anybody help me speed things up?