I have trained an image similarity network. The network is designed to distinguish between similar/dissimilar pairs of images, where a pair consists of a camera image and its corresponding sketch image.
The test dataset contains 4 image directories (camera_positive, sketch_positive, camera_negative, sketch_negative).
I am facing a problem while evaluating the performance of the network on this test dataset.
As the test dataset is too large to fit into memory, I decided to use the Keras ImageDataGenerator.
I implemented the following code. Each directory contains 20 images (for a small demonstration), so there are 80 images in total, which form 20 positive and 20 negative (camera, sketch) pairs, i.e. 40 predictions.
Since ImageDataGenerator offers the option to save the images it yields, I used the "save_to_dir" parameter, as can be seen in the code below, to verify that everything works correctly.
As each directory contains 20 images, I expected that after running the predictions the generator would save the same 20 images to each of the specified directories.
After running the code, it generates 31 images in each folder instead of 20!
I played around with different step sizes, but none of them gives the correct result.
What is wrong with this code? Please suggest!
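For reference, this is the relationship I am relying on between steps, batch_size, and the number of images per directory (a small illustration only, using the numbers from the script below; it is not part of the actual script):

# Illustration of the expected bookkeeping, not part of the evaluation script.
# Each directory holds 20 images and each generator yields batch_size images
# per call, so I expect steps * batch_size to equal the images per directory.
batch_size = 1
images_per_directory = 20
steps = images_per_directory // batch_size
print(steps)  # -> 20, the value passed to predict_generator below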
import os
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
batch_size = 1
image_size = 224
class_mode = None
"""
c_pos/neg: camera positive/neg image
s_pos/neg: sketch positive/neg image
"""
c_pos = r"testing\c_pos"
c_neg = r"testing\c_neg"
s_pos = r"testing\s_pos"
s_neg = r"testing\s_neg"
datagen_constructor = ImageDataGenerator()
def initialize_generator(generator, c_pos, s_pos, c_neg, s_neg):
    # One flow_from_directory stream per folder; shuffle is disabled
    # so the camera/sketch images stay aligned across the four streams.
    camera_pos = generator.flow_from_directory(
        c_pos,
        target_size=(image_size, image_size),
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=class_mode,
        shuffle=False,
        seed=7,
        save_to_dir='results/c_pos',
        save_format='jpeg',
        save_prefix='CPOS'
    )
    sketch_pos = generator.flow_from_directory(
        s_pos,
        target_size=(image_size, image_size),
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=class_mode,
        shuffle=False,
        seed=7,
        save_to_dir='results/s_pos',
        save_format='jpeg',
        save_prefix='SPOS'
    )
    camera_neg = generator.flow_from_directory(
        c_neg,
        target_size=(image_size, image_size),
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=class_mode,
        shuffle=False,
        seed=7,
        save_to_dir='results/c_neg',
        save_format='jpeg',
        save_prefix='CNEG'
    )
    sketch_neg = generator.flow_from_directory(
        s_neg,
        target_size=(image_size, image_size),
        color_mode="rgb",
        batch_size=batch_size,
        class_mode=class_mode,
        shuffle=False,
        seed=7,
        save_to_dir='results/s_neg',
        save_format='jpeg',
        save_prefix='SNEG'
    )

    while True:
        # Draw one batch from each stream and build the two model inputs:
        # camera = [positive camera batch, negative camera batch],
        # sketch = [positive sketch batch, negative sketch batch].
        camerapos = np.expand_dims(camera_pos.next(), axis=0)
        sketchpos = np.expand_dims(sketch_pos.next(), axis=0)
        cameraneg = np.expand_dims(camera_neg.next(), axis=0)
        sketchneg = np.expand_dims(sketch_neg.next(), axis=0)

        camera = np.concatenate((camerapos[0], cameraneg[0]))
        sketch = np.concatenate((sketchpos[0], sketchneg[0]))

        camera = np.asarray(list(camera), dtype=np.float32)
        sketch = np.asarray(list(sketch), dtype=np.float32)

        yield [camera, sketch]
test_datagen = initialize_generator(datagen_constructor, c_pos, s_pos, c_neg, s_neg)
# Load pre-trained model
model = load_model("model.h")
# Evaluating network performance on test dataset
predict = model.predict_generator(test_datagen, steps=20)
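This is how the number of saved images per folder can be checked afterwards (a small sanity check, using the same results/* paths as in the code above):

import os

# Count the files that save_to_dir produced in each output folder.
for folder in ["results/c_pos", "results/s_pos", "results/c_neg", "results/s_neg"]:
    print(folder, ":", len(os.listdir(folder)), "images saved")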