I'm training an RNN to classify three different classes. Because the accuracy on class 2 is much higher than on the other two, I tried a staged approach: first train the RNN on the other two classes only, save the model, load it again, freeze the lower layers, and then include class 2 in the training data again. However, even after training for another 4 epochs, and even when I freeze all layers, the accuracy on class 2 stays very high and the accuracy on the other classes drops again.
The model looks like this:
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout, BatchNormalization

model = Sequential()
model.add(LSTM(64, return_sequences=True,
               input_shape=(data.X_train.shape[1],
                            data.X_train.shape[2])))
model.add(Dropout(0.2))
model.add(LSTM(128, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(128, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(64, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(32, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(16, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(8, return_sequences=True))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(LSTM(4))
model.add(Dense(data.y_test.shape[1], activation='softmax'))
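For reference, the first training stage on the two remaining classes plus the save step look roughly like this; the class-filtered arrays and the hyperparameters below are placeholders, not my exact values:

# First stage (sketch): fit only on the samples belonging to the two
# non-class-2 classes, then save the model for later fine-tuning.
# X_train_2cls / y_train_2cls are placeholder names for the filtered data.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train_2cls, y_train_2cls,
          epochs=10, batch_size=32)
model.save(model_path)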
Loading and freezing the first four layers:
from keras.optimizers import Adam

# 'ad' is the optimizer instance; its definition is not included in the
# snippet, shown here as a plain Adam optimizer for completeness
ad = Adam()

model = load_model(model_path)
for layer in model.layers[:4]:
    layer.trainable = False
model.compile(loss='categorical_crossentropy',
              optimizer=ad,
              metrics=['accuracy', 'categorical_accuracy'])
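After that, fine-tuning with class 2 included is just a regular fit call on the full three-class data, roughly like this (the data.y_train attribute and the batch size are assumptions for the sketch):

# Fine-tuning stage (sketch): train the partially frozen, recompiled model
# on the full training set that contains class 2 again.
model.fit(data.X_train, data.y_train,
          epochs=4, batch_size=32)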