
I used Dice loss and binary_crossentropy. Whenever I train my model it reports very high training and validation accuracy, but it always produces blank images. My masks are black-and-white binary images where 0 corresponds to black and 1 corresponds to white. In my output image almost all pixels have value 0. Please tell me where I am going wrong.

import os
import cv2
import numpy as np
import tensorflow as tf

# Yields batches of 16 grayscale image/mask pairs read from Google Drive.
def train_generator():
    while True:
        for start in range(0, len(os.listdir('/gdrive/My Drive/Train/img/images/')), 16):
            x_batch = np.empty((16,256,512,1),dtype=np.float32)
            y_batch = np.empty((16,256,512,1),dtype=np.float32)
            end = min(start + 16, len(os.listdir('/gdrive/My Drive/Train/img/images/')))
            ids_train_batch_images =os.listdir('/gdrive/My Drive/Train/img/images/')[start:end]
            ids_train_batch_mask =os.listdir('/gdrive/My Drive/Train/msk/mask/')[start:end]
            for i,id in enumerate(ids_train_batch_images):
                x_sample = cv2.imread('/gdrive/My Drive/Train/img/images/'+ids_train_batch_images[i])
                y_sample = cv2.imread('/gdrive/My Drive/Train/msk/mask/'+ids_train_batch_mask[i])
                x_sample=cv2.resize(x_sample,(512,256),interpolation = cv2.INTER_AREA)
                y_sample=cv2.resize(y_sample,(512,256),interpolation = cv2.INTER_AREA)
                x_sample=x_sample[:,:,0]
                y_sample=y_sample[:,:,0]
                x_sample=np.expand_dims(x_sample,axis=-1)
                y_sample=np.expand_dims(y_sample,axis=-1)
                x_batch[i]=x_sample
                y_batch[i]=y_sample.astype(np.bool)
            x_batch = np.array(x_batch, np.float32)/255.0
            y_batch = np.array(y_batch, np.bool)
            yield x_batch, y_batch

# Same as train_generator, but reads from the Validation directories.
def val_generator():
    while True:
        for start in range(0, len(os.listdir('/gdrive/My Drive/Validation/img/images/')), 16):
            x_batch = np.empty((16,256,512,1),dtype=np.float32)
            y_batch = np.empty((16,256,512,1),dtype=np.float32)
            end = min(start + 16, len(os.listdir('/gdrive/My Drive/Validation/img/images/')))
            ids_train_batch_images =os.listdir('/gdrive/My Drive/Validation/img/images/')[start:end]
            ids_train_batch_mask =os.listdir('/gdrive/My Drive/Validation/msk/mask/')[start:end]
            for i,id in enumerate(ids_train_batch_images):
                x_sample = cv2.imread('/gdrive/My Drive/Validation/img/images/'+ids_train_batch_images[i])
                y_sample = cv2.imread('/gdrive/My Drive/Validation/msk/mask/'+ids_train_batch_mask[i])
                x_sample=cv2.resize(x_sample,(512,256),interpolation = cv2.INTER_AREA)
                y_sample=cv2.resize(y_sample,(512,256),interpolation = cv2.INTER_AREA)
                x_sample=x_sample[:,:,0]
                y_sample=y_sample[:,:,0]
                x_sample=np.expand_dims(x_sample,axis=-1)
                y_sample=np.expand_dims(y_sample,axis=-1)
                x_batch[i]=x_sample
                y_batch[i]=y_sample.astype(np.bool)
            x_batch = np.array(x_batch, np.float32)/255.0
            y_batch = np.array(y_batch, np.bool)
            yield x_batch, y_batch

train_gen=train_generator()
val_gen=val_generator()
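
A quick sanity check on the generators, assuming matplotlib is available in the Colab session, is to pull one batch and confirm that the masks actually contain foreground pixels (and how sparse they are):

import matplotlib.pyplot as plt

x_check, y_check = next(train_gen)
print(x_check.shape, y_check.shape)                        # expected: (16, 256, 512, 1) each
print('foreground fraction:', y_check.astype(np.float32).mean())
plt.subplot(1, 2, 1); plt.imshow(x_check[0, :, :, 0], cmap='gray')
plt.subplot(1, 2, 2); plt.imshow(y_check[0, :, :, 0], cmap='gray')
plt.show()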

def unet():
    inputs = tf.keras.layers.Input((256,512,1))
    s = inputs
    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(s)
    c1 = tf.keras.layers.Dropout(0.3)(c1)
    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c1)
    p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1)

    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal', padding='same')(p1)
    c2 = tf.keras.layers.Dropout(0.3)(c2)
    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c2)
    p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2)

    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(p2)
    c3 = tf.keras.layers.Dropout(0.3)(c3)
    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c3)
    p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3)

    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(p3)
    c4 = tf.keras.layers.Dropout(0.3)(c4)
    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c4)
    p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4)

    c6 = tf.keras.layers.Conv2D(256, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(p4)
    c6 = tf.keras.layers.Dropout(0.3)(c6)
    c6 = tf.keras.layers.Conv2D(256, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c6)
    p6 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c6)

    # c6 = tf.keras.layers.Conv2D(1024, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(p5)
    # c6 = tf.keras.layers.Dropout(0.1)(c6)
    # c6 = tf.keras.layers.Conv2D(1024, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c6)
    # p6 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c6)

    c7 = tf.keras.layers.Conv2D(512, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(p6)
    c7 = tf.keras.layers.Dropout(0.3)(c7)
    c7 = tf.keras.layers.Conv2D(512, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c7)

    # u8 = tf.keras.layers.Conv2DTranspose(1024, (2, 2), strides=(2, 2), padding='same')(c7)
    # u8 = tf.keras.layers.concatenate([u8, c6])
    # c8 = tf.keras.layers.Conv2D(1024, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u8)
    # c8 = tf.keras.layers.Dropout(0.1)(c8)
    # c8 = tf.keras.layers.Conv2D(1024, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c8)

    u9 = tf.keras.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(c7)
    u9 = tf.keras.layers.concatenate([u9, c6])
    c9 = tf.keras.layers.Conv2D(256, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u9)
    c9 = tf.keras.layers.Dropout(0.3)(c9)
    c9 = tf.keras.layers.Conv2D(256, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal', padding='same')(c9)

    u10 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c9)
    u10 = tf.keras.layers.concatenate([u10, c4])
    c10 = tf.keras.layers.Conv2D(128, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u10)
    c10 = tf.keras.layers.Dropout(0.3)(c10)
    c10 = tf.keras.layers.Conv2D(128, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c10)

    u11 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c10)
    u11 = tf.keras.layers.concatenate([u11, c3], axis=3)
    c11 = tf.keras.layers.Conv2D(64, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u11)
    c11 = tf.keras.layers.Dropout(0.3)(c11)
    c11 = tf.keras.layers.Conv2D(64, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c11)

    u12 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c11)
    u12 = tf.keras.layers.concatenate([u12, c2], axis=3)
    c12 = tf.keras.layers.Conv2D(32, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u12)
    c12 = tf.keras.layers.Dropout(0.3)(c12)
    c12 = tf.keras.layers.Conv2D(32, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c12)

    u13 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c12)
    u13 = tf.keras.layers.concatenate([u13, c1], axis=3)
    c13 = tf.keras.layers.Conv2D(16, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(u13)
    c13 = tf.keras.layers.Dropout(0.3)(c13)
    c13 = tf.keras.layers.Conv2D(16, (3, 3), activation=tf.keras.activations.elu, kernel_initializer='he_normal',padding='same')(c13)

    outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c13)

    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    return model
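
The summary below presumably comes from building the model and printing its summary:

unet().summary()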

Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_4 (InputLayer)            [(None, 256, 512, 1) 0                                            
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 256, 512, 16) 160         input_4[0][0]                    
__________________________________________________________________________________________________
dropout_33 (Dropout)            (None, 256, 512, 16) 0           conv2d_69[0][0]                  
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 256, 512, 16) 2320        dropout_33[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_15 (MaxPooling2D) (None, 128, 256, 16) 0           conv2d_70[0][0]                  
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 128, 256, 32) 4640        max_pooling2d_15[0][0]           
__________________________________________________________________________________________________
dropout_34 (Dropout)            (None, 128, 256, 32) 0           conv2d_71[0][0]                  
__________________________________________________________________________________________________
conv2d_72 (Conv2D)              (None, 128, 256, 32) 9248        dropout_34[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_16 (MaxPooling2D) (None, 64, 128, 32)  0           conv2d_72[0][0]                  
__________________________________________________________________________________________________
conv2d_73 (Conv2D)              (None, 64, 128, 64)  18496       max_pooling2d_16[0][0]           
__________________________________________________________________________________________________
dropout_35 (Dropout)            (None, 64, 128, 64)  0           conv2d_73[0][0]                  
__________________________________________________________________________________________________
conv2d_74 (Conv2D)              (None, 64, 128, 64)  36928       dropout_35[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_17 (MaxPooling2D) (None, 32, 64, 64)   0           conv2d_74[0][0]                  
__________________________________________________________________________________________________
conv2d_75 (Conv2D)              (None, 32, 64, 128)  73856       max_pooling2d_17[0][0]           
__________________________________________________________________________________________________
dropout_36 (Dropout)            (None, 32, 64, 128)  0           conv2d_75[0][0]                  
__________________________________________________________________________________________________
conv2d_76 (Conv2D)              (None, 32, 64, 128)  147584      dropout_36[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_18 (MaxPooling2D) (None, 16, 32, 128)  0           conv2d_76[0][0]                  
__________________________________________________________________________________________________
conv2d_77 (Conv2D)              (None, 16, 32, 256)  295168      max_pooling2d_18[0][0]           
__________________________________________________________________________________________________
dropout_37 (Dropout)            (None, 16, 32, 256)  0           conv2d_77[0][0]                  
__________________________________________________________________________________________________
conv2d_78 (Conv2D)              (None, 16, 32, 256)  590080      dropout_37[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_19 (MaxPooling2D) (None, 8, 16, 256)   0           conv2d_78[0][0]                  
__________________________________________________________________________________________________
conv2d_79 (Conv2D)              (None, 8, 16, 512)   1180160     max_pooling2d_19[0][0]           
__________________________________________________________________________________________________
dropout_38 (Dropout)            (None, 8, 16, 512)   0           conv2d_79[0][0]                  
__________________________________________________________________________________________________
conv2d_80 (Conv2D)              (None, 8, 16, 512)   2359808     dropout_38[0][0]                 
__________________________________________________________________________________________________
conv2d_transpose_15 (Conv2DTran (None, 16, 32, 256)  524544      conv2d_80[0][0]                  
__________________________________________________________________________________________________
concatenate_15 (Concatenate)    (None, 16, 32, 512)  0           conv2d_transpose_15[0][0]        
                                                                 conv2d_78[0][0]                  
__________________________________________________________________________________________________
conv2d_81 (Conv2D)              (None, 16, 32, 256)  1179904     concatenate_15[0][0]             
__________________________________________________________________________________________________
dropout_39 (Dropout)            (None, 16, 32, 256)  0           conv2d_81[0][0]                  
__________________________________________________________________________________________________
conv2d_82 (Conv2D)              (None, 16, 32, 256)  590080      dropout_39[0][0]                 
__________________________________________________________________________________________________
conv2d_transpose_16 (Conv2DTran (None, 32, 64, 128)  131200      conv2d_82[0][0]                  
__________________________________________________________________________________________________
concatenate_16 (Concatenate)    (None, 32, 64, 256)  0           conv2d_transpose_16[0][0]        
                                                                 conv2d_76[0][0]                  
__________________________________________________________________________________________________
conv2d_83 (Conv2D)              (None, 32, 64, 128)  295040      concatenate_16[0][0]             
__________________________________________________________________________________________________
dropout_40 (Dropout)            (None, 32, 64, 128)  0           conv2d_83[0][0]                  
__________________________________________________________________________________________________
conv2d_84 (Conv2D)              (None, 32, 64, 128)  147584      dropout_40[0][0]                 
__________________________________________________________________________________________________
conv2d_transpose_17 (Conv2DTran (None, 64, 128, 64)  32832       conv2d_84[0][0]                  
__________________________________________________________________________________________________
concatenate_17 (Concatenate)    (None, 64, 128, 128) 0           conv2d_transpose_17[0][0]        
                                                                 conv2d_74[0][0]                  
__________________________________________________________________________________________________
conv2d_85 (Conv2D)              (None, 64, 128, 64)  73792       concatenate_17[0][0]             
__________________________________________________________________________________________________
dropout_41 (Dropout)            (None, 64, 128, 64)  0           conv2d_85[0][0]                  
__________________________________________________________________________________________________
conv2d_86 (Conv2D)              (None, 64, 128, 64)  36928       dropout_41[0][0]                 
__________________________________________________________________________________________________
conv2d_transpose_18 (Conv2DTran (None, 128, 256, 32) 8224        conv2d_86[0][0]                  
__________________________________________________________________________________________________
concatenate_18 (Concatenate)    (None, 128, 256, 64) 0           conv2d_transpose_18[0][0]        
                                                                 conv2d_72[0][0]                  
__________________________________________________________________________________________________
conv2d_87 (Conv2D)              (None, 128, 256, 32) 18464       concatenate_18[0][0]             
__________________________________________________________________________________________________
dropout_42 (Dropout)            (None, 128, 256, 32) 0           conv2d_87[0][0]                  
__________________________________________________________________________________________________
conv2d_88 (Conv2D)              (None, 128, 256, 32) 9248        dropout_42[0][0]                 
__________________________________________________________________________________________________
conv2d_transpose_19 (Conv2DTran (None, 256, 512, 16) 2064        conv2d_88[0][0]                  
__________________________________________________________________________________________________
concatenate_19 (Concatenate)    (None, 256, 512, 32) 0           conv2d_transpose_19[0][0]        
                                                                 conv2d_70[0][0]                  
__________________________________________________________________________________________________
conv2d_89 (Conv2D)              (None, 256, 512, 16) 4624        concatenate_19[0][0]             
__________________________________________________________________________________________________
dropout_43 (Dropout)            (None, 256, 512, 16) 0           conv2d_89[0][0]                  
__________________________________________________________________________________________________
conv2d_90 (Conv2D)              (None, 256, 512, 16) 2320        dropout_43[0][0]                 
__________________________________________________________________________________________________
conv2d_91 (Conv2D)              (None, 256, 512, 1)  17          conv2d_90[0][0]                  
==================================================================================================
Total params: 7,775,313
Trainable params: 7,775,313
Non-trainable params: 0
_________________________________________________________

from tensorflow.keras import backend as K
def dice_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return 1-dice_coef(y_true, y_pred)
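
Since the question mentions training with both Dice loss and binary_crossentropy, one common way to combine the two is simply to add them; a minimal sketch, assuming the tf.keras backend K imported above and that y_true can be cast to float32:

def bce_dice_loss(y_true, y_pred):
    # Combined loss (sketch): binary cross-entropy plus Dice loss.
    y_true = K.cast(y_true, 'float32')
    bce = K.mean(K.binary_crossentropy(y_true, y_pred))
    return bce + dice_coef_loss(y_true, y_pred)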

from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
NO_OF_TRAINING_IMAGES = len(os.listdir('/gdrive/My Drive/Train/img/images/'))
NO_OF_VAL_IMAGES = len(os.listdir('/gdrive/My Drive/Validation/img/images/'))
NO_OF_EPOCHS = 1
BATCH_SIZE = 32
filepath="weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
m = unet()
opt = Adam(lr=1E-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
m.compile(optimizer=opt,loss=dice_coef_loss, metrics=[dice_coef])
checkpoint = ModelCheckpoint(filepath, monitor=dice_coef_loss, 
                             verbose=1, save_best_only=True, mode='min')
earlystopping = EarlyStopping(monitor = dice_coef_loss, verbose = 1,
                              min_delta = 0.01, patience = 1, mode ='min')
callbacks_list = [checkpoint,earlystopping]
results = m.fit_generator(train_gen, epochs=NO_OF_EPOCHS, 
                          steps_per_epoch = (NO_OF_TRAINING_IMAGES//BATCH_SIZE),
                          validation_data=val_gen, 
                          validation_steps=(NO_OF_VAL_IMAGES//BATCH_SIZE), 
                          use_multiprocessing=False,
                          workers=1)
m.save('Model.h5')
418/418 [==============================] - 9828s 24s/step - loss: 0.0700 - dice_coef: 0.9300 - val_loss: 0.0299 - val_dice_coef: 0.9701

but when I take the output, everything is just blank. I am scaling up the output by multiplying it by 255 before visualizing, and batch normalization is also off.

1 Answer


Your output is likely normalized to a small range of values (roughly 0-20), so you need to scale it up to the 0-255 range before visualizing it. Also, make sure batch normalization is turned off by running the model in inference mode. Supposing out is your output from the model:

import matplotlib.pyplot as plt

img1 = out[0, :, :, 0]               # first element of the batch; Keras output is channels-last
img1 = (img1 * 255).astype('uint8')  # scale the 0-1 sigmoid output up to the 0-255 range
plt.imshow(img1, cmap='gray')
plt.show()
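
For a strictly binary mask it can also help to threshold the sigmoid probabilities before scaling, since predictions well below 1.0 will still look dark after multiplying by 255. A minimal sketch, reusing m and val_gen from the question:

x_val, y_val = next(val_gen)
out = m.predict(x_val)                                 # sigmoid outputs in [0, 1], shape (16, 256, 512, 1)
mask = (out[0, :, :, 0] > 0.5).astype('uint8') * 255   # threshold at 0.5, then scale to 0-255
plt.imshow(mask, cmap='gray')
plt.show()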
ychnh