I want to create an autoencoder and optimize its hyperparameters using Keras Tuner. I want the encoder part to contain between 1 and 3 layers, and each layer to contain fewer neurons than the previous layer (but more than the latent dimensionality, which is also a hyperparameter). Is my code below correct? Or should I use conditional_scope?
input_dim = 25
def model_builder(hp):
latent_dim = hp.Int(name = 'latent_dim', min_value = 3, max_value = 8, step = 1)
ae = tf.keras.Sequential()
ae.add(tf.keras.layers.Dense(units = hp.Int(name = 'n_units_1', min_value = latent_dim, max_value = input_dim, step = 2), activation = act))
n_layers = hp.Int(name = 'n_layers', min_value = 1, max_value = 3, step = 1)
if n_layers >= 2:
ae.add(tf.keras.layers.Dense(units = hp.Int(name = 'n_units_2', min_value = latent_dim, max_value = hp.get('n_units_1'), step = 2), activation = act))
if n_layers >= 3:
ae.add(tf.keras.layers.Dense(units = hp.Int(name = 'n_units_3', min_value = latent_dim, max_value = hp.get('n_units_2'), step = 2), activation = act))
ae.add(tf.keras.layers.Dense(units = latent_dim))
if n_layers >= 3:
ae.add(tf.keras.layers.Dense(units = hp.get('n_units_3'), activation = act))
if n_layers >= 2:
ae.add(tf.keras.layers.Dense(units = hp.get('n_units_2'), activation = act))
ae.add(tf.keras.layers.Dense(units = hp.get('n_units_1'), activation = act))
ae.add(tf.keras.layers.Dense(units = input_dim, activation = act))
lr = hp.Float(name = 'lr', min_value = 0.001, max_value = 0.01, sampling = 'log')
optimizer = tf.keras.optimizers.Adam(learning_rate = lr)
ae.compile(optimizer = optimizer, loss = tf.keras.losses.MeanSquaredError())
return ae