I modified a dog vs. cat binary classification model so that it predicts each class independently, by using a sigmoid activation in the output layer, but it is not giving the results I expected.
I created a single test image that contains both a dog and a cat.
Expected result: Dog: 70% or higher, Cat: 70% or higher
Actual result: Dog: 70%, Cat: 25%
Why is it not predicting each class independently with a high score?
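To make my expectation concrete, here is a toy numpy sketch (not my actual model; the logit values are made up) of what I understand independent sigmoid outputs to be: each class is scored on its own, so both scores can be above 70% at the same time.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# hypothetical raw outputs (logits) for [dog, cat] on an image containing both
logits = np.array([1.5, 1.2])
scores = sigmoid(logits)   # applied element-wise, no normalisation across classes
print(scores)              # -> roughly [0.82, 0.77]; the sum is greater than 1

My actual model and training code are below.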
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.utils.np_utils import to_categorical
from keras import optimizers

# CNN: three conv/pool blocks, then a dense layer and a two-unit sigmoid output
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D((2, 2)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2, 2)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2, 2)))
classifier.add(Flatten())
classifier.add(Dense(100, activation='sigmoid'))
classifier.add(Dense(2, activation='sigmoid'))  # sigmoid instead of softmax: one output unit per class
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator

# Training images are rescaled and augmented; test images are only rescaled
trainingDataOptions = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
testingDataOptions = ImageDataGenerator(rescale=1./255)
trainingData = trainingDataOptions.flow_from_directory('dataset/training', target_size=(64, 64), batch_size=32)
testingData = testingDataOptions.flow_from_directory('dataset/testing', target_size=(64, 64), batch_size=32)

classifier.fit_generator(trainingData, samples_per_epoch=1757, nb_epoch=10, validation_data=testingData, nb_val_samples=308)
classifier.save('model.h5')
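The saved model can be reloaded later with Keras' load_model (a minimal sketch, assuming the same Keras version that was used for training):

from keras.models import load_model

# restores the architecture, weights and compile settings from the HDF5 file
classifier = load_model('model.h5')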
# Prediction on a sample image that contains both a dog and a cat
from keras.preprocessing import image

test_image = image.load_img('samples/319b5fa.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)   # add the batch dimension

res = classifier.predict(test_image)
label_map = trainingData.class_indices            # maps class name to output index
print(res)

for label, idx in label_map.items():
    score = res[0][idx] * 100
    print(label, "====>", "{0:.0f}".format(score), '%')
I did not use softmax in the output layer, so why does the sum of the individual predictions never exceed 100%? The model keeps the combined class scores at or below 1.0, which I thought was the behaviour of softmax, where the probability mass is distributed across the classes.
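For comparison, this is my understanding of what softmax would do with the same toy logits as in the sketch above: it normalises across the classes, so the scores always sum to 1 (again a toy sketch, not output from my model).

import numpy as np

logits = np.array([1.5, 1.2])   # same made-up [dog, cat] logits as above
exp_logits = np.exp(logits)
softmax_scores = exp_logits / exp_logits.sum()   # normalised across classes
print(softmax_scores)        # -> roughly [0.57, 0.43]
print(softmax_scores.sum())  # -> 1.0, by construction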