Why does classification performance degrade when I shuffle the test dataset?
For reproducibility, I created an imbalanced dataset:
import pandas as pd
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, StackingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.metrics import confusion_matrix

n = 1
centers = [[0.0, -5, 2.5], [0, 0, 2.5], [0, 5, 2.5]]
cluster_std = [1.0, 1.0, 1.0]
X, y = make_blobs(n_samples=[250, 24500, 250], centers=centers,
                  cluster_std=cluster_std, n_features=len(cluster_std),
                  random_state=n)
dataset_x = pd.DataFrame({'var1': X[:, 0], 'var2': X[:, 1], 'var3': X[:, 2]})
dataset_y = pd.DataFrame({'target': y})
simulated_blob_dataset = pd.concat([dataset_x, dataset_y], axis=1)
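A quick check confirms the imbalance (each minority class is about 1% of the data):

print(simulated_blob_dataset['target'].value_counts())  # 1: 24500, 0: 250, 2: 250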
I split the dataset into training and testing:
training_data, testing_data = data_split(raw_data=simulated_blob_dataset,
                                         target_variable_name="target", test_size=0.2)
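data_split is a custom helper that is not shown here; a minimal sketch of what it is assumed to do, namely a stratified wrapper around scikit-learn's train_test_split (the seed is my guess, not from the original):

from sklearn.model_selection import train_test_split

def data_split(raw_data, target_variable_name, test_size):
    # assumed implementation: stratify on the target so both splits keep the class ratios
    train_df, test_df = train_test_split(raw_data, test_size=test_size,
                                         stratify=raw_data[target_variable_name],
                                         random_state=1)  # seed assumed, not from the original
    return train_df.reset_index(drop=True), test_df.reset_index(drop=True)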
I created the base models:
def base_models():
    models = dict()
    models['rf'] = RandomForestClassifier(n_jobs=-1)
    models['gbm'] = GradientBoostingClassifier()
    models['dt'] = DecisionTreeClassifier()
    models['svc'] = SVC()
    models['knn'] = KNeighborsClassifier(n_jobs=-1)
    models['nb'] = GaussianNB()
    models['SE_rf'] = stack_ensemble(RandomForestClassifier(n_jobs=-1))
    models['SE_gbm'] = stack_ensemble(GradientBoostingClassifier())
    models['SE_dt'] = stack_ensemble(DecisionTreeClassifier())
    models['SE_svc'] = stack_ensemble(SVC())
    models['SE_knn'] = stack_ensemble(KNeighborsClassifier(n_jobs=-1))
    models['SE_nb'] = stack_ensemble(GaussianNB())
    return models
# evaluate a given model using repeated stratified cross-validation
def evaluate_model(model, X, y):
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    scores = cross_val_score(model, X, y, scoring='balanced_accuracy',
                             cv=cv, n_jobs=-1, error_score='raise')
    return scores
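evaluate_model is not called in the runs below; a hedged usage sketch, assuming it is meant to score each model on the training split:

# hypothetical usage: cross-validate every model on the training data
X, y = training_data[['var1', 'var2', 'var3']].values, training_data['target'].values
for name, model in base_models().items():
    scores = evaluate_model(model, X, y)
    print('%s: %.3f (%.3f)' % (name, scores.mean(), scores.std()))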
The stacking ensembles all share the same base models and differ only in the meta-learner, so one factory builds them all:

def stack_ensemble(final_estimator):
    # define the base models
    level0 = [
        ('rf', RandomForestClassifier(n_jobs=-1)),
        ('gbm', GradientBoostingClassifier()),
        ('dt', DecisionTreeClassifier()),
        ('svc', SVC()),
        ('knn', KNeighborsClassifier(n_jobs=-1)),
        ('nb', GaussianNB()),
    ]
    # define the stacking ensemble around the given meta-learner
    model = StackingClassifier(estimators=level0, final_estimator=final_estimator,
                               cv=10)  # use n_jobs=-1 for all cores
    return model
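With cv=10, StackingClassifier fits each base model with 10-fold internal cross-validation and trains the meta-learner on the out-of-fold predictions. As the comment notes, the constructor also accepts n_jobs; a variant of the construction line that parallelizes those internal fits:

model = StackingClassifier(estimators=level0, final_estimator=final_estimator,
                           cv=10, n_jobs=-1)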
First I ran it the normal way (the rows of the test dataset are not reshuffled):
X, y = training_data[['var1', 'var2', 'var3']].values, training_data['target'].values
models = base_models()
for name, model in models.items():
    print(name)
    clf = model.fit(X, y.ravel())
    y_pred = clf.predict(testing_data[['var1', 'var2', 'var3']].values)
    cnf_matrix = confusion_matrix(testing_data['target'].values, y_pred)
    print(cnf_matrix)
The results are good:
However, when I reshuffled the rows of the test dataset to check the robustness of the models (the same data, just presented in a different order):
X, y = training_data[['var1', 'var2', 'var3']].values, training_data['target'].values
# shuffle the rows of the test features (without replacement) before predicting
shuffled_test_x = testing_data[['var1', 'var2', 'var3']].sample(frac=1, replace=False,
                                                                random_state=1).reset_index(drop=True)
models = base_models()
for name, model in models.items():
    print(name)
    clf = model.fit(X, y.ravel())
    y_pred = clf.predict(shuffled_test_x.values)
    cnf_matrix = confusion_matrix(testing_data['target'].values, y_pred)
    print(cnf_matrix)
The results degraded: