
I'm using a CNN based on the U-Net architecture, with one input and two outputs: (b (array), C13 (image)). I would like to apply early stopping on my validation data using a different loss function (Dice_loss) than the one used on the training set (Shape_loss). I thought of using K.in_train_phase, but it doesn't seem to be the correct way. Can you help me? This is my model:

def dice_loss(y_true, y_pred):
    # Debug trace: shows the tensors the loss receives.
    print("[dice_loss] y_pred=", y_pred, "y_true=", y_true)
    y_true = tf.cast(y_true, tf.float32)
    numerator = 2 * tf.reduce_sum(y_true * y_pred)
    denominator = tf.reduce_sum(y_true + y_pred)
    return 1 - numerator / denominator

def Euclidean_loss(y_true, y_pred):
    # Euclidean (L2) distance on the shape parameters.
    Shape_loss = K.sqrt(K.sum(K.square(y_pred[0] - y_true[0]), axis=-1))
    # Dice loss on the predicted segmentation.
    Dice_loss = dice_loss(y_true[0], y_pred[1])
    # Intended switch: Shape_loss in the training phase, Dice_loss otherwise.
    return K.in_train_phase(Shape_loss, Dice_loss)
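
For context, my understanding is that K.in_train_phase(x, alt) selects x in the training phase and alt otherwise; a minimal standalone check of that behaviour, forcing the phase with the explicit training flag:

import tensorflow as tf
from tensorflow.keras import backend as K

a = tf.constant(1.0)
b = tf.constant(2.0)

# training=True/False forces the learning phase explicitly.
print(K.in_train_phase(a, b, training=True))   # -> 1.0 (training branch)
print(K.in_train_phase(a, b, training=False))  # -> 2.0 (inference branch)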




def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # first layer
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
               kernel_initializer="he_normal", padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # second layer
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
               kernel_initializer="he_normal", padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x


def deconv2d_block(input_tensor, n_filters, kernel_size=2, batchnorm=True):
    x = Conv2DTranspose(filters=n_filters, kernel_size=(kernel_size, kernel_size),
                        kernel_initializer="he_normal", padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x



def get_Spnet(pretrained_weights=None, input_size=(400, 400, 1), Wt=None,
              mu=None, shape_parameter=True):

    input_im = Input(input_size)

    # Encoder
    c1 = conv2d_block(input_im, 64)
    c1_2 = conv2d_block(c1, 64)
    p1 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(c1_2)

    c2 = conv2d_block(p1, 128)
    c2_2 = conv2d_block(c2, 128)
    p2 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(c2_2)

    c3 = conv2d_block(p2, 256)
    c3_2 = conv2d_block(c3, 256)
    p3 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(c3_2)

    c4 = conv2d_block(p3, 512)
    c4_2 = conv2d_block(c4, 512)

    # Bottleneck
    c5 = conv2d_block(c4_2, 1024)
    c5_2 = conv2d_block(c5, 1024)

    # Decoder with skip connections
    c6 = conv2d_block(c5_2, 512)
    c6_2 = conv2d_block(c6, 512)
    c6_2 = concatenate([c6_2, c4_2])

    c7 = deconv2d_block(c6_2, 512)
    c7 = UpSampling2D(size=(2, 2))(c7)
    c8 = conv2d_block(c7, 256)
    c8_2 = conv2d_block(c8, 256)
    c8_2 = concatenate([c8_2, c3_2])

    c9 = deconv2d_block(c8_2, 256)
    c9 = UpSampling2D(size=(2, 2))(c9)
    c10 = conv2d_block(c9, 128)
    c10_2 = conv2d_block(c10, 128)
    c10_2 = concatenate([c10_2, c2_2])

    c11 = deconv2d_block(c10_2, 128)
    c11 = UpSampling2D(size=(2, 2))(c11)
    c12 = conv2d_block(c11, 64)
    c12_2 = conv2d_block(c12, 64)
    c12_2 = concatenate([c12_2, c1_2])

    # Segmentation output (image)
    c13 = Conv2D(1, kernel_size=(3, 3), padding="same")(c12_2)

    if shape_parameter:
        # Project the flattened map onto Wt to get the shape parameters b.
        r1 = Flatten()(c13)
        b = Dot(axes=(1, 2))([r1, Wt])
        r2 = b
    else:
        r2 = c13

    model = Model(inputs=[input_im], outputs=[r2, c13])
    model.outputs[0]._uses_learning_phase = True

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
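
For reference, I know Keras can also attach a separate loss to each output of a multi-output model, matched positionally to outputs=[r2, c13]. A minimal sketch of that option (shape_only_loss is a hypothetical helper, and pairing it with r2 while c13 gets dice_loss is my assumption about the intended split):

def shape_only_loss(y_true, y_pred):
    # Hypothetical helper: L2 distance on the shape parameter vector.
    return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1))

# One loss per output: r2 (shape parameters) -> shape_only_loss,
# c13 (segmentation image) -> dice_loss.
model.compile(optimizer="adam", loss=[shape_only_loss, dice_loss])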

and this is my early stopping setup:

tf.keras.backend.set_floatx('float32')

# Early stopping on the validation loss (monitor='val_loss' rather than
# 'loss', since the stopping criterion should come from the validation set).
earlystopping = EarlyStopping(monitor='val_loss', verbose=0, min_delta=0.001,
                              patience=100, mode='min', restore_best_weights=True)

# Training data generator
train_gen = dgen.SpnetDataGenerator(20, training_folder, IMAGES_RESOLUTION,
                                    b_tr[0].shape[0], seed=None)
valid_gen = dgen.datagenerator(20, val_dir, IMAGES_RESOLUTION, max_rot=None,
                               flip_h=False, flip_v=False, seed=None,
                               huge_ram=False, eq_hist=False)

# Model definition
model = _SPNET_.get_Spnet(None, (IMAGES_RESOLUTION[0], IMAGES_RESOLUTION[1], 1),
                          np.expand_dims(Wt.astype(np.float32), axis=0),
                          sdf_mean.astype(np.float32))

# Loss function definition
Euclidean_loss = _SPNET_.Euclidean_loss
optimizer_fn = tf.keras.optimizers.Adam(learning_rate=0.00001)
model.compile(loss=Euclidean_loss, optimizer=optimizer_fn)

history = model.fit(
    train_gen,
    epochs=Eps,
    verbose=1,
    callbacks=[earlystopping],
    validation_data=valid_gen,
    steps_per_epoch=30,
    validation_steps=5)
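
One alternative I've considered is leaving the training loss alone and driving early stopping from a custom callback that evaluates dice_loss on the validation generator at the end of each epoch. A rough sketch (DiceEarlyStopping is just a name I made up, and I'm assuming valid_gen yields (x, [b, mask]) batches):

import numpy as np
import tensorflow as tf

class DiceEarlyStopping(tf.keras.callbacks.Callback):
    # Early stopping driven by Dice loss measured on validation batches.
    def __init__(self, val_gen, val_steps, patience=100, min_delta=0.001):
        super().__init__()
        self.val_gen = val_gen
        self.val_steps = val_steps
        self.patience = patience
        self.min_delta = min_delta
        self.best = np.inf
        self.wait = 0
        self.best_weights = None

    def on_epoch_end(self, epoch, logs=None):
        losses = []
        for _ in range(self.val_steps):
            x, y = next(self.val_gen)            # assumes y = [b, mask]
            _, pred_img = self.model.predict(x, verbose=0)
            losses.append(float(dice_loss(y[1], pred_img)))
        current = np.mean(losses)
        if current < self.best - self.min_delta:
            self.best, self.wait = current, 0
            self.best_weights = self.model.get_weights()
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.model.stop_training = True
                if self.best_weights is not None:
                    self.model.set_weights(self.best_weights)

It would replace the stock EarlyStopping in the callbacks list, e.g. callbacks=[DiceEarlyStopping(valid_gen, 5)].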

