I'm confused about why a Sequential model (model = keras.Sequential()) is being nested inside "Model" (Keras's functional API) on the last line, and why that wrapped Model is what gets returned. Why not just build a plain Sequential model, using something like model.add(InputLayer(input_shape=(n,), name='Input_Layer')) as the first layer? Is there something I'm missing as to why this nesting is required?
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D,Conv2DTranspose
from keras.models import Sequential, Model
from keras.optimizers import Adam
def build_generator(self):
    """Build the conditional-GAN generator.

    A Sequential stack maps a concatenated (noise, condition) vector of
    length 2 * self.latent_dim up to an image tensor. The stack is then
    wrapped in a functional ``Model`` so the noise and condition tensors
    remain two separate named inputs at the API boundary — callers pass
    ``[noise, cond]`` as distinct tensors, which a plain Sequential model
    (single input) cannot express.

    Returns:
        A ``keras.Model`` with inputs ``[noise, cond_input]`` and a single
        image output in [-1, 1] (tanh).
    """
    # Was `keras.Sequential()` — `keras` itself is never imported here;
    # only `Sequential` is (from keras.models), so use it directly.
    model = Sequential()

    # Project the 2*latent_dim vector and reshape it to a 4x3x256 map.
    # (Original used `layers.Dense` / `layers.Reshape`, but no `layers`
    # namespace is imported — the names are imported directly.)
    model.add(Dense(256 * 4 * 3, activation="relu",
                    input_dim=self.latent_dim * 2))
    model.add(Reshape((4, 3, 256)))

    # NOTE(review): the original had `model.add(Conv2DTranspose())` with
    # no arguments, which raises a TypeError (`filters` and `kernel_size`
    # are required). Removed: the UpSampling2D + Conv2D pairs below
    # already perform the upsampling path.

    # Five upsampling blocks: each doubles the spatial size, then
    # refines with a same-padded 3x3 conv + BN + ReLU.
    for filters in (256, 128, 64, 32, 16):
        model.add(UpSampling2D())
        model.add(Conv2D(filters, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

    # Final projection to the image channel count, squashed to [-1, 1].
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    # Functional wrapper: keep noise and condition as separate inputs,
    # concatenate them, and feed the Sequential stack as one big layer.
    noise = Input(shape=(self.latent_dim,))
    cond_input = Input(shape=(self.latent_dim,))
    model_input = Concatenate()([noise, cond_input])
    img = model(model_input)
    return Model(inputs=[noise, cond_input], outputs=img)
# Shared Adam optimizer; positionally lr=0.0002, beta_1=0.5 — the usual
# DCGAN-paper settings for the old keras.optimizers.Adam signature.
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
# NOTE(review): build_discriminator is not defined in this snippet —
# presumably a function/method defined elsewhere; confirm it takes no
# arguments when called at module level like this.
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])