I have trained the VGG16 net using Keras on my own dataset, which has 10 classes, so I modified the final activation layer to output 10 classes.
Here is the code
TRAIN_DIR = "D:\\Dataset\\training"
VALIDATION_DIR = "D:\\Dataset\\validation"
part 2
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300
BATCH_SIZE = 16
part 3
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(TRAIN_DIR,
                                                    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=True,  # shuffle so each batch contains a random mix of classes
                                                    class_mode="categorical")
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
                                                              batch_size=BATCH_SIZE,
                                                              shuffle=True,
                                                              class_mode="categorical")
part 4
training_samples = 1097
validation_samples = 272
total_steps = training_samples // BATCH_SIZE
loading the VGG16
#VGG16 network with pretrained weights is used
from keras.applications import vgg16
model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, 3), pooling="max")
for layer in model.layers[:-5]:
    layer.trainable = False
for layer in model.layers:
    print(layer, layer.trainable)
part 5
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model, Sequential
# Although this part can also be done with the functional API, I find the Sequential approach more intuitive for a simple model like this
transfer_model = Sequential()
for layer in model.layers:
    transfer_model.add(layer)
transfer_model.add(Dense(512, activation="relu"))
transfer_model.add(Dropout(0.5))
transfer_model.add(Dense(10, activation="softmax"))
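For reference, a rough functional API equivalent of the same classification head would look something like the sketch below (the Sequential version above is what I actually trained; functional_model is just my name for the alternative):

from keras.layers import Dense, Dropout
from keras.models import Model

# model.output is already a 512-dim vector because pooling="max" was used when loading VGG16
x = Dense(512, activation="relu")(model.output)
x = Dropout(0.5)(x)
predictions = Dense(10, activation="softmax")(x)
functional_model = Model(inputs=model.input, outputs=predictions)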
part 6
# Adam optimizer and learning rate 0.0001
from keras import optimizers
adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
transfer_model.compile(loss="categorical_crossentropy",
                       optimizer=adam,
                       metrics=["accuracy"])
finally the training
model_history = transfer_model.fit_generator(train_generator,
                                             steps_per_epoch=training_samples // BATCH_SIZE,
                                             epochs=25,
                                             validation_data=validation_generator,
                                             validation_steps=validation_samples // BATCH_SIZE)
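If it helps, the training and validation loss curves can be inspected from model_history, e.g. with a quick plotting sketch like this (assuming matplotlib is available):

import matplotlib.pyplot as plt

# model_history.history holds the per-epoch metrics recorded by fit_generator
plt.plot(model_history.history["loss"], label="training loss")
plt.plot(model_history.history["val_loss"], label="validation loss")
plt.xlabel("epoch")
plt.legend()
plt.show()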
part 7, using some random images from the internet to predict
test_path = "D:\\Dataset\\predict\\"
test_datagen = ImageDataGenerator(rescale=1./255,
                                  rotation_range=40,
                                  width_shift_range=0.2,
                                  height_shift_range=0.2,
                                  shear_range=0.2,
                                  zoom_range=0.2,
                                  horizontal_flip=True,
                                  fill_mode='nearest')
test_generator = test_datagen.flow_from_directory(test_path,
                                                  target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
                                                  batch_size=50,
                                                  class_mode="categorical")
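As far as I understand, for prediction it is usually better to skip the augmentation and set shuffle=False so the outputs line up with the generator's filenames; a variant like the one below is probably closer to what I should use (pred_datagen and pred_generator are just my names for it):

# Inference-only variant: no augmentation, fixed file order (shuffle=False)
pred_datagen = ImageDataGenerator(rescale=1./255)
pred_generator = pred_datagen.flow_from_directory(test_path,
                                                  target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
                                                  batch_size=50,
                                                  shuffle=False,
                                                  class_mode="categorical")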
In this part I am trying to predict, but I only get arrays of numbers instead of the actual prediction results, which I want to see as images.
pred = transfer_model.predict_generator(test_generator, steps=1)  # transfer_model is the network with the 10-class softmax head
print(pred)
The result is just a matrix of numbers, but I want to see the predictions as actual images with their predicted classes, and I cannot figure out how to do that.
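From what I have read, these numbers are the per-class probabilities, so I assume I need something like the sketch below (using train_generator.class_indices to recover the folder names and matplotlib to display the images), but I am not sure this is the right way to do it:

import numpy as np
import matplotlib.pyplot as plt

# Invert the generator's class mapping: index -> class (folder) name
index_to_class = {v: k for k, v in train_generator.class_indices.items()}

images, _ = next(test_generator)            # one batch of preprocessed images
probs = transfer_model.predict(images)      # shape (batch_size, 10), one probability per class
labels = [index_to_class[i] for i in np.argmax(probs, axis=1)]

for img, label in zip(images, labels):
    plt.imshow(img)                         # images are already rescaled to [0, 1]
    plt.title(label)
    plt.axis("off")
    plt.show()

If there is a more standard way to do this with Keras generators, that is what I am looking for.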