I'm trying to define the neural network model as a function and then use it inside a for-loop. However, I'm getting different results depending on whether I build the model through the function or directly inside the loop, as shown below:
The first case:
def model_1():
    """Build and compile a fresh, untrained binary classifier.

    Every call returns a brand-new keras.Sequential model with freshly
    initialized weights: a 5-input tanh hidden layer of 4 units, a 30%
    dropout layer, and a 2-unit sigmoid output, compiled with binary
    cross-entropy loss and the Adam optimizer.
    """
    net = keras.Sequential([
        Dense(4, input_dim=5, activation='tanh'),
        Dropout(0.3),
        Dense(2, activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
and then, in the cross-validation loop:
# Cross-validation, case 1: for each fold, split the data, then call
# model_1() to obtain a brand-new compiled model and train it on the
# fold's training split while validating on the held-out split.
# NOTE(review): the trained model itself is discarded here — only the
# fit() History object is kept.
for train_index_1, test_index_1 in kfold_1.split(x1, y1):
    X_train_1, X_test_1 = x1[train_index_1], x1[test_index_1]
    y_train_1, y_test_1 = y1[train_index_1], y1[test_index_1]
    history_1 = model_1().fit(
        X_train_1,
        y_train_1,
        validation_data=(X_test_1, y_test_1),
        batch_size=64,
        verbose=0,
        epochs=500,
    )
The second case:
# Cross-validation, case 2: identical folds, but the model is
# constructed and compiled inline inside the loop body instead of
# through a factory function. A fresh model (new random weights) is
# still created on every iteration, exactly as in case 1.
for train_index_1, test_index_1 in kfold_1.split(x1, y1):
    X_train_1, X_test_1 = x1[train_index_1], x1[test_index_1]
    y_train_1, y_test_1 = y1[train_index_1], y1[test_index_1]
    model_1 = keras.Sequential([
        Dense(4, input_dim=5, activation='tanh'),
        Dropout(0.3),
        Dense(2, activation='sigmoid'),
    ])
    model_1.compile(loss='binary_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])
    history_1 = model_1.fit(
        X_train_1,
        y_train_1,
        validation_data=(X_test_1, y_test_1),
        batch_size=64,
        verbose=0,
        epochs=500,
    )
Is there a problem with either of these? What is the actual difference between the two approaches, and why do they produce different results?