
I'm trying to do image classification with Keras EfficientNet, following this example: https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/. I have two folders named 1 and 0, each containing 62 images, so the classes are balanced. In the sklearn classification report, an F1 score is only produced for class 0; the F1 score for class 1 is always 0, i.e. the model classifies everything as 0. How can I fix this? I would appreciate any help.

UPDATE: I also tried with 500 images per class, and the results for class 1 are still 0.

UPDATE

#----TF
import tensorflow as tf
from keras.utils import to_categorical
from tensorflow.python.data import AUTOTUNE
from keras.models import load_model
from keras import layers
from keras.applications import EfficientNetB0
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import SGD
from keras.applications import imagenet_utils
#---SK
from sklearn.metrics import classification_report
#
import imutils
from imutils import paths
import random
import cv2
import os
import numpy as np
import pandas as pd
import shutil
import matplotlib.pyplot as plt

main_path = 'C:\\Users\\A\\Desktop\\data\\'
all_images = list(paths.list_images(main_path))

random_images = random.choices(all_images, k=3)

for i in random_images:
    random_image = cv2.imread(i)  # OpenCV loads images in BGR order, which is what cv2.imshow expects
    random_image = imutils.resize(random_image, height=400)
    cv2.imshow("example", random_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
random.shuffle(all_images)

i = int(len(all_images)*0.8)  # 80% training / 20% test
trainData = all_images[:i]
testData = all_images[i:]

# validation data
i = int(len(trainData)*0.10)  # 10% of the training data for validation
validData = trainData[:i]
trainData = trainData[i:]

train_path = main_path+'training'
test_path = main_path+'test'
valid_path = main_path+'valid'

datasets = [("training", trainData, train_path ), ("validation", validData, valid_path),  ("testing", testData, test_path)]

for (dtype, imagepaths, out_path) in datasets:
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    for inputpath in imagepaths:
        filename = inputpath.split(os.path.sep)[-1]
        label = inputpath.split(os.path.sep)[-2]

        labelPath = os.path.sep.join([out_path, label])

        if not os.path.exists(labelPath):
            os.makedirs(labelPath)  # create the class sub-folder if it does not exist yet

        # copy the image into its label folder
        p = os.path.sep.join([labelPath, filename])
        shutil.copy2(inputpath, p)

# 3 - build the tf.data pipeline
def load_images(imagePath):
    # --images
    image = tf.io.read_file(imagePath)
    image = tf.io.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, (150, 150)) / 255.0
    # --labels: the class name is the parent folder ("0" or "1")
    label = tf.strings.split(imagePath, os.path.sep)[-2]
    label = tf.strings.to_number(label, tf.int32)
    print(label)  # inside a mapped function this prints a symbolic tensor, not a value
    return (image, label)


def augment(image, label):
    # data augmentation
    image = tf.image.random_flip_up_down(image)
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, 0.2)
    return (image, label)


trainPaths = list(paths.list_images(train_path))
valPaths = list(paths.list_images(valid_path))
testPaths = list(paths.list_images(test_path))
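
Since the problem is that class 1 is never predicted, a quick sanity check at this point is to confirm that both classes actually survive the random 80/10/10 split. This is a minimal sketch that is not part of the original script; the split names are just for printing, and labels are read from the parent folder exactly as in load_images above.

    from collections import Counter

    # count how many images of each class ended up in every split
    for name, split in [("train", trainPaths), ("valid", valPaths), ("test", testPaths)]:
        counts = Counter(p.split(os.path.sep)[-2] for p in split)
        print(name, dict(counts))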

trainDS = tf.data.Dataset.from_tensor_slices(trainPaths)
trainDS = (trainDS.shuffle(len(trainPaths)).map(load_images, num_parallel_calls=AUTOTUNE).map(augment, num_parallel_calls=AUTOTUNE).cache().batch(16).prefetch(AUTOTUNE))

# --validation
valDS = tf.data.Dataset.from_tensor_slices(valPaths)
valDS = (valDS.map(load_images, num_parallel_calls=AUTOTUNE).cache().batch(16).prefetch(AUTOTUNE))

# --test
testDS = tf.data.Dataset.from_tensor_slices(testPaths)
testDS = (testDS.map(load_images, num_parallel_calls=AUTOTUNE).cache().batch(16).prefetch(AUTOTUNE))



NUM_CLASSES = 2
IMG_SIZE = 150
size = (IMG_SIZE, IMG_SIZE, 3)

inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))

model = EfficientNetB0(include_top=False, input_shape=size)


flat1 = layers.Flatten()(model.layers[-1].output)
class1 = layers.Dense(1024, activation='relu')(flat1)
output = layers.Dense(1, activation='sigmoid')(class1) 

model = tf.keras.Model(inputs=model.inputs, outputs=output)

model.summary()


# model = EfficientNetB0(include_top=False, input_shape=size)
# for layer in model.layers:
#     layer.trainable = False

opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

early_s = EarlyStopping(monitor="val_loss", patience = 10, restore_best_weights=True)
save_b = ModelCheckpoint(filepath ="C:\\Users\\A\\Desktop\\Gun\\", monitor="val_loss", verbose = 1 )
callbacks = [early_s, save_b]


hist = model.fit(x = trainDS, validation_data=valDS, epochs= 50, callbacks=callbacks, verbose=1)

plt.figure()
plt.plot(hist.history["loss"], label="train_loss")
plt.plot(hist.history["val_loss"], label="val_loss")
plt.plot(hist.history["accuracy"], label="train_acc")
plt.plot(hist.history["val_accuracy"], label="val_acc")
plt.title("training loss and accuracy")
plt.xlabel("Epoch #")
plt.ylabel("loss/accuracy")
plt.legend(loc="lower left")
plt.show()

# labels of the test set
test_paths = list(paths.list_images(test_path))
testlabels = [int(p.split(os.path.sep)[-2]) for p in test_paths]
testlabels = to_categorical(testlabels)


predictions = model.predict(testDS)

print(classification_report(testlabels.argmax(axis=1), predictions.argmax(axis=1), target_names=["0", "1"]))

UPDATE: classification report output (screenshot of the sklearn classification report).
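
For reference, a small diagnostic sketch of what the report above is comparing: if the last layer really is the single sigmoid unit defined earlier, predictions from model.predict(testDS) has shape (N, 1), so argmax(axis=1) can only ever return 0; thresholding the raw probability (0.5 is an assumed cut-off) shows the distribution the model actually produces.

    import numpy as np

    # shape check: a single sigmoid unit gives (num_test_samples, 1)
    print(predictions.shape)

    # argmax over a single column is always 0, which is what the report sees
    print(np.unique(predictions.argmax(axis=1), return_counts=True))

    # thresholding the sigmoid probability instead (0.5 is an assumed cut-off)
    # shows which class the probabilities actually favour
    pred_classes = (predictions.ravel() > 0.5).astype("int32")
    print(np.unique(pred_classes, return_counts=True))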

Laura
  • 56
  • 5
  • can u update here with latest code – Amin S Feb 02 '23 at 20:20
  • yes I updated @Amin S . I found this question https://stackoverflow.com/questions/66022943/how-to-fix-this-classification-report-warning I'm reading the accepted answer, but I still haven't fixed it. – Laura Feb 03 '23 at 14:35
  • maybe u should print out all of your variables to find out which one is not working properly – Amin S Feb 03 '23 at 16:23
  • UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples, now it gives this error @Amin S I updated the code – Laura Feb 06 '23 at 10:05
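
For context on the warning in the last comment, here is a minimal, self-contained sketch with made-up labels that reproduces it: sklearn flags precision and F1 as undefined for any class that never occurs among the predictions, which is exactly the "everything predicted as 0" situation.

    from sklearn.metrics import classification_report

    # toy data: the true labels contain both classes, every prediction is class 0
    y_true = [0, 1, 0, 1, 0, 1]
    y_pred = [0, 0, 0, 0, 0, 0]

    # precision and F1 for class "1" are undefined (no predicted samples);
    # zero_division=0 makes the 0.0 explicit and silences UndefinedMetricWarning
    print(classification_report(y_true, y_pred, target_names=["0", "1"], zero_division=0))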

0 Answers