I use the following example to make my question clear:
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D

class Encoder(K.layers.Layer):
    def __init__(self, filters):
        super(Encoder, self).__init__()
        self.conv1 = Conv2D(filters=filters[0], kernel_size=3, strides=1, activation='relu', padding='same')
        self.conv2 = Conv2D(filters=filters[1], kernel_size=3, strides=1, activation='relu', padding='same')
        self.conv3 = Conv2D(filters=filters[2], kernel_size=3, strides=1, activation='relu', padding='same')
        self.pool = MaxPooling2D((2, 2), padding='same')

    def call(self, input_features):
        x = self.conv1(input_features)
        #print("Ex1", x.shape)
        x = self.pool(x)
        #print("Ex2", x.shape)
        x = self.conv2(x)
        x = self.pool(x)
        x = self.conv3(x)
        x = self.pool(x)
        return x
class Decoder(K.layers.Layer):
    def __init__(self, filters):
        super(Decoder, self).__init__()
        self.conv1 = Conv2D(filters=filters[2], kernel_size=3, strides=1, activation='relu', padding='same')
        self.conv2 = Conv2D(filters=filters[1], kernel_size=3, strides=1, activation='relu', padding='same')
        # 'valid' padding trims the 16x16 map to 14x14 so the last upsample restores 28x28
        self.conv3 = Conv2D(filters=filters[0], kernel_size=3, strides=1, activation='relu', padding='valid')
        self.conv4 = Conv2D(1, 3, 1, activation='sigmoid', padding='same')
        self.upsample = UpSampling2D((2, 2))

    def call(self, encoded):
        x = self.conv1(encoded)
        #print("dx1", x.shape)
        x = self.upsample(x)
        #print("dx2", x.shape)
        x = self.conv2(x)
        x = self.upsample(x)
        x = self.conv3(x)
        x = self.upsample(x)
        return self.conv4(x)
class Autoencoder(K.Model):
    def __init__(self, filters):
        super(Autoencoder, self).__init__()
        self.loss = []
        self.encoder = Encoder(filters)
        self.decoder = Decoder(filters)

    def call(self, input_features):
        #print(input_features.shape)
        encoded = self.encoder(input_features)
        #print(encoded.shape)
        reconstructed = self.decoder(encoded)
        #print(reconstructed.shape)
        return reconstructed
max_epochs = 5
model = Autoencoder(filters)
model.compile(loss='binary_crossentropy', optimizer='adam')
loss = model.fit(x_train_noisy,
                 x_train,
                 validation_data=(x_test_noisy, x_test),
                 epochs=max_epochs,
                 batch_size=batch_size)
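(For reference, filters, batch_size, and the noisy/clean image arrays are defined earlier in my script. A minimal sketch of that setup follows; filters = [32, 32, 16] is consistent with the parameter counts in the summary further down, while the noise level and batch size here are only illustrative.)

import numpy as np
import tensorflow as tf

(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = x_train[..., np.newaxis]   # -> (60000, 28, 28, 1)
x_test = x_test[..., np.newaxis]     # -> (10000, 28, 28, 1)

noise_level = 0.5                    # illustrative value
x_train_noisy = np.clip(x_train + noise_level * np.random.normal(size=x_train.shape), 0.0, 1.0)
x_test_noisy = np.clip(x_test + noise_level * np.random.normal(size=x_test.shape), 0.0, 1.0)

filters = [32, 32, 16]               # consistent with the 14,192 / 16,497 params below
batch_size = 128                     # assumed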
As you can see, the model is built by subclassing K.layers.Layer and K.Model rather than with the Functional API. If I now print the architecture with model.summary(), I get:
Model: "autoencoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
encoder (Encoder) multiple 14192
decoder (Decoder) multiple 16497
=================================================================
Total params: 30,689
Trainable params: 30,689
Non-trainable params: 0
The problem is that the summary collapses each subclassed layer into a single row whose output shape is just reported as "multiple". I would like a more detailed description of the encoder and the decoder, ideally listing every Conv2D, pooling, and upsampling layer with its output shape and parameter count. Any ideas?
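To make the goal concrete: the closest thing I know of is wrapping each subclassed layer in a Functional graph so that its internal layers become visible to summary(), roughly as sketched below (expose is my own helper, not a Keras API, and the shapes assume 28x28x1 MNIST inputs). I am asking whether there is a more direct or idiomatic way to get this level of detail from model.summary() itself.

def expose(layer, input_shape):
    # Wrap a subclassed layer in a Functional graph; calling layer.call
    # on a symbolic Input traces the internal layers individually, so
    # summary() can list each of them with concrete output shapes.
    inputs = K.Input(shape=input_shape)
    return K.Model(inputs=inputs, outputs=layer.call(inputs))

expose(model.encoder, (28, 28, 1)).summary()   # per-layer view of the encoder
expose(model.decoder, (4, 4, 16)).summary()    # (4, 4, 16) is the encoded shape for 28x28x1 inputs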