The problem I am facing is that I performed a grid search using an imblearn Pipeline together with sklearn's GridSearchCV (since I am dealing with an extremely unbalanced dataset), but when I try to save the model I get the error 'TypeError: can't pickle _thread.RLock objects'. The statements that I am using to save the model are:
Case-1:
import pickle

# Open the file with a context manager so the handle is always closed,
# even if pickling raises partway through (the bare open() left the file
# descriptor leaked on failure).
# NOTE(review): grid_result holds a fitted Keras estimator; pickling the
# whole search object is what raises the _thread.RLock TypeError — the
# underlying Keras model generally needs tf.keras's own model.save().
with open(model_filename, 'wb') as f:
    pickle.dump(grid_result, f)
Case-2:
# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
# in 0.23 — import the standalone joblib package instead.
import joblib

# Persist only the best fitted pipeline from the search.
# NOTE(review): this still fails while the pipeline contains a live Keras
# model (unpicklable RLock inside); save the Keras step with model.save()
# and joblib-dump the remaining (picklable) pipeline steps separately.
joblib.dump(grid_result.best_estimator_, 'GS_obj.pkl')
1) For a Binary Classification problem, I have defined model architecture as given below
Build Function to create model, required by KerasClassifier
def create_model(optimizer_val='RMSprop', hidden_layer_size=16, activation_fn='relu',
                 dropout_rate=0.1,
                 regularization_fn=tf.keras.regularizers.l1(0.001),
                 kernel_initializer_fn=tf.keras.initializers.glorot_uniform,
                 bias_initializer_fn=tf.keras.initializers.zeros):
    """Build and compile a 10-hidden-layer MLP for binary classification.

    Used as the ``build_fn`` of a KerasClassifier; every argument is
    tunable through the pipeline's parameter grid. ``D`` (the input
    dimensionality) must be defined at module level before this is called.

    Args:
        optimizer_val: optimizer name or instance for ``model.compile``.
        hidden_layer_size: units in every hidden Dense layer.
        activation_fn: activation for every hidden Dense layer.
        dropout_rate: dropout rate after each hidden BatchNormalization.
        regularization_fn: kernel regularizer applied to every Dense layer.
        kernel_initializer_fn: kernel initializer for every Dense layer.
        bias_initializer_fn: bias initializer for every Dense layer.

    Returns:
        A compiled ``tf.keras.Model`` with one sigmoid output unit and
        ``binary_crossentropy`` loss.
    """
    # Shared keyword arguments for every Dense layer.
    dense_kwargs = dict(
        kernel_regularizer=regularization_fn,
        kernel_initializer=kernel_initializer_fn,
        bias_initializer=bias_initializer_fn,
    )

    # The Input layer must be the FIRST layer of a Sequential model; the
    # fixed 0.2 dropout is then applied directly to the input features.
    layers = [
        tf.keras.layers.Input(shape=(D,)),
        tf.keras.layers.Dropout(0.2),
    ]

    # Ten identical hidden blocks: Dense -> BatchNorm -> Dropout.
    # hidden_layer_size and activation_fn are honoured in every hidden
    # layer so the values supplied by the parameter grid actually apply
    # (previously units were hardcoded to 1024 and the activation was
    # hardcoded to 'relu' on most layers).
    for _ in range(10):
        layers.append(tf.keras.layers.Dense(units=hidden_layer_size,
                                            activation=activation_fn,
                                            **dense_kwargs))
        layers.append(tf.keras.layers.BatchNormalization())
        layers.append(tf.keras.layers.Dropout(dropout_rate))

    # Single sigmoid unit for binary classification.
    layers.append(tf.keras.layers.Dense(units=1, activation='sigmoid',
                                        **dense_kwargs))

    model = tf.keras.models.Sequential(layers)
    model.compile(optimizer=optimizer_val,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
#Create the model with the wrapper
# Wrap the Keras build function so it behaves like an sklearn estimator.
# batch_size and epochs are deliberately omitted here — they are supplied
# through the parameter grid instead.
model = tf.keras.wrappers.scikit_learn.KerasClassifier(
    build_fn=create_model,
    verbose=2,
)
2)Built the parameter search grid as given below
Initialize the parameter grid
# Parameter grid for GridSearchCV. Every key is prefixed with the pipeline
# step name 'NN_clf' plus a double underscore, so the values are routed to
# the KerasClassifier step (and from there into create_model's arguments).
nn_param_grid = {
    'NN_clf__epochs': [500],
    'NN_clf__batch_size': [32],
    'NN_clf__optimizer_val': ['Adam', 'SGD'],
    'NN_clf__hidden_layer_size': [1024],
    'NN_clf__activation_fn': ['relu'],
    'NN_clf__dropout_rate': [0.5],
    # NOTE(review): 'L1L2' relies on Keras resolving the regularizer by
    # class name — confirm it deserializes; the canonical identifier is
    # 'l1_l2'.
    'NN_clf__regularization_fn': ['L1L2'],
    'NN_clf__kernel_initializer_fn': ['glorot_normal', 'glorot_uniform'],
    # Use the string identifier 'zeros' rather than the class object
    # tf.keras.initializers.zeros: Keras accepts either, but the string is
    # consistent with the other entries and is trivially picklable, which
    # matters when persisting the fitted search object.
    'NN_clf__bias_initializer_fn': ['zeros'],
}
3) Instantiated a pipeline and performed GridSearch as given below
Instantiate the SMOTE object
# SMOTE oversampler: synthesizes minority-class samples so each CV training
# fold is balanced; random_state fixes the resampling for reproducibility.
smote_obj=SMOTE(sampling_strategy='minority',random_state=42,n_jobs=-1)
Instantiate the Scaler Object
# Standardize features to zero mean / unit variance before resampling and
# before the neural network sees them.
scaler_obj=StandardScaler()
Instantiate the Pipeline
# Pipeline order matters: scale first, then oversample in the scaled feature
# space, then classify. The step name 'NN_clf' must match the 'NN_clf__'
# prefix used by the keys of the parameter grid.
steps = [('standardize', scaler_obj),('oversample', smote_obj),('NN_clf', model)]
pipe_clf = Pipeline(steps)
# Bare expression — in a notebook this just displays the pipeline's repr.
pipe_clf
Perform GridSearchCV
# Exhaustive search over nn_param_grid with 10-fold CV, selecting by
# precision. NOTE(review): n_jobs=-1 makes joblib pickle the pipeline to ship
# it to worker processes — with a live Keras estimator inside, this pickling
# is a likely source of the "can't pickle _thread.RLock objects" failure;
# confirm behaviour with n_jobs=1.
grid = GridSearchCV(estimator=pipe_clf, param_grid=nn_param_grid, verbose=2, cv=10,scoring='precision',return_train_score=False,n_jobs=-1)
# fit() returns the fitted GridSearchCV itself, so grid_result is grid.
grid_result = grid.fit(X_train, y_train)
Please advise how I can save the model: using the statements below, I am unable to save it and I get the error 'TypeError: can't pickle _thread.RLock objects'.
Case-1:
import pickle

# Use a context manager so the file is closed even if pickling fails
# partway through (the bare open() leaked the descriptor on error).
# NOTE(review): the RLock TypeError comes from pickling the Keras model
# inside grid_result — save that step with tf.keras model.save() instead.
with open(model_filename, 'wb') as f:
    pickle.dump(grid_result, f)
Case-2:
# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
# in 0.23 — import the standalone joblib package instead.
import joblib

# Persist the best fitted pipeline found by the search.
# NOTE(review): still fails while the pipeline holds a live Keras model;
# save the Keras step via model.save() and dump the rest separately.
joblib.dump(grid_result.best_estimator_, 'GS_obj.pkl')
Thanks
Surajit