I'm trying to build a GAN-like model, but I can't figure out how to set `trainable = False` for the shared sub-model in just one of the outer models. It seems every model that uses the sub-model is affected.
Code:
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense
print(tf.__version__)
def build_submodel():
    """Create the shared sub-model: a single Dense(5) layer on a 3-feature input."""
    features = Input(shape=(3,))
    projected = Dense(5)(features)
    return Model(inputs=features, outputs=projected)
def build_model_A(sub=None):
    """Build model A: the sub-model followed by a Dense(7) head.

    Parameters
    ----------
    sub : keras Model, optional
        The sub-model to embed.  Defaults to the module-level ``submodel``
        global (kept for backward compatibility); passing it explicitly
        removes the hidden dependency on definition order — the original
        version would raise ``NameError`` if called before ``submodel``
        was assigned at module level.
    """
    if sub is None:
        sub = submodel  # fall back to the module-level global
    inp = tf.keras.Input(shape=(3,))
    x = sub(inp)
    x = Dense(7)(x)
    return Model(inputs=inp, outputs=x)
def build_model_B(sub=None):
    """Build model B: a Dense(3) stem feeding into the sub-model.

    Parameters
    ----------
    sub : keras Model, optional
        The sub-model to embed.  Defaults to the module-level ``submodel``
        global (kept for backward compatibility); passing it explicitly
        removes the hidden dependency on definition order — the original
        version would raise ``NameError`` if called before ``submodel``
        was assigned at module level.
    """
    if sub is None:
        sub = submodel  # fall back to the module-level global
    inp = tf.keras.Input(shape=(11,))
    x = Dense(3)(inp)
    x = sub(x)
    return Model(inputs=inp, outputs=x)
# Build the shared sub-model first.  Both outer models wrap this SAME
# object, so they share its weights and also its `trainable` flag.
submodel = build_submodel()

model_A = build_model_A()
model_A.compile("adam", "mse")
model_A.summary()

# `trainable` lives on the shared layer/model object, not on the outer
# model that contains it — flipping it here is visible from every model
# that embeds `submodel`.  NOTE(review): in tf.keras, compile() is
# documented to snapshot the trainable state, so model_A would keep
# training the sub-model until re-compiled, even though its summary()
# (recomputed live) already reports the params as non-trainable — confirm
# against the Keras transfer-learning guide for this TF version.
submodel.trainable = False
# same result with freezing the individual layers:
# for layer in submodel.layers:
#     layer.trainable = False

model_B = build_model_B()
model_B.compile("adam", "mse")
model_B.summary()

# Printed again to show the shared flag changed: model_A now reports
# 20 non-trainable params (see pasted output below).
model_A.summary()
Output:
Model: "model_10"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_11 (InputLayer) [(None, 3)] 0
_________________________________________________________________
model_9 (Model) (None, 5) 20
_________________________________________________________________
dense_10 (Dense) (None, 7) 42
=================================================================
Total params: 62
Trainable params: 62
Non-trainable params: 0
_________________________________________________________________
Model: "model_11"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_12 (InputLayer) [(None, 11)] 0
_________________________________________________________________
dense_11 (Dense) (None, 3) 36
_________________________________________________________________
model_9 (Model) (None, 5) 20
=================================================================
Total params: 56
Trainable params: 36
Non-trainable params: 20
_________________________________________________________________
Model: "model_10"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_11 (InputLayer) [(None, 3)] 0
_________________________________________________________________
model_9 (Model) (None, 5) 20
_________________________________________________________________
dense_10 (Dense) (None, 7) 42
=================================================================
Total params: 62
Trainable params: 42
Non-trainable params: 20
_________________________________________________________________
At first, model_A has no non-trainable weights, but after building model_B, model_A reports some non-trainable weights.
Also, the summary does not show which layers are non-trainable, just total non-trainable parameter count. Is there a better way to inspect which layers are frozen in a model?