I have this function that implements a neural network:
# imports assumed at the top of the module
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model

def create(self, size_1l, size_2l, size_3l=0, size_4l=0):
    """
    Build the denoising autoencoder from the given layer sizes:
    size_1l is the input image size, size_2l the first hidden layer;
    the third and fourth layers are omitted when set to 0.
    """
    input_layer = Input(shape=(size_1l, size_1l, 1))
    # encoder
    x = Conv2D(size_2l, (3, 3), activation='relu', padding='same', use_bias=True)(input_layer)
    if size_3l > 0:
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(size_3l, (3, 3), activation='relu', padding='same', use_bias=True)(x)
    if size_4l > 0:
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(size_4l, (3, 3), activation='relu', padding='same', use_bias=True)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)  # encoded layer
    # decoder
    if size_4l > 0:
        x = Conv2D(size_4l, (3, 3), activation='relu', padding='same', use_bias=True)(x)
        x = UpSampling2D((2, 2))(x)
    if size_3l > 0:
        x = Conv2D(size_3l, (3, 3), activation='relu', padding='same', use_bias=True)(x)
        x = UpSampling2D((2, 2))(x)
    x = Conv2D(size_2l, (3, 3), activation='relu', padding='same', use_bias=True)(x)
    x = UpSampling2D((2, 2))(x)
    output_layer = Conv2D(1, (3, 3), activation='tanh', padding='same', use_bias=True)(x)
    # building
    self.autoencoder = Model(input_layer, output_layer)
    self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
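For context, I build the model roughly like this (the class name and layer sizes here are only illustrative placeholders, not my exact values):

# illustrative usage; DenoisingAutoencoder stands in for the actual class
dae = DenoisingAutoencoder()
dae.create(size_1l=28, size_2l=32, size_3l=16, size_4l=0)  # e.g. 28x28 grayscale inputs
dae.autoencoder.summary()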
And this is the code I use for training:
def train(self, training_data, target_data, training_test_data, target_test_data, epochs=100, batch_size=32, verbose=0):
    history = self.autoencoder.fit(training_data, target_data,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   verbose=verbose,  # was hard-coded to 1, ignoring the parameter
                                   validation_data=(training_test_data, target_test_data))
    return history
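I call it roughly like this (the array names are placeholders for my actual noisy/clean image arrays):

# illustrative call; x_train_noisy / x_train etc. stand in for my real data
dae.train(x_train_noisy, x_train,    # noisy inputs, clean targets
          x_test_noisy, x_test,      # validation pair
          epochs=100, batch_size=32, verbose=1)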
The problem is that the accuracy decreases steadily during training, as you can see below:

Do you have any suggestions? What is happening? Why does the accuracy not increase during training?
Thanks