I'm using both text and numeric data as input in a classification task, and I am interested in seeing the text of the incorrectly predicted instances, to check whether there is a pattern in what is misclassified — is there a way of doing so?
X1_train/X1_test constitute the text data, whereas X2_train/X2_test consist of the numeric data
# 5-fold stratified cross-validation over the combined text + numeric inputs.
n_folds = 5
skf = StratifiedKFold(n_splits=n_folds, shuffle=True)
cv_score = []  # per-fold validation accuracy

for i, (train, test) in enumerate(skf.split(X1_train, y_train)):
    # Fresh model each fold so no weights leak between folds.
    model_2 = create_model()
    print("Running Fold", i + 1, "/", n_folds)
    model_2.fit([X1_train[train], X2_train[train]], y_train[train],
                epochs=5, batch_size=64)
    # evaluate() returns [loss, metric]; index 1 is the accuracy metric.
    result = model_2.evaluate([X1_train[test], X2_train[test]], y_train[test])
    cv_score.append(result[1])
    print(cv_score)

print("\nMean accuracy of the crossvalidation: {}".format(np.mean(cv_score)))

# Predict on the held-out test set. predict() returns probabilities,
# so convert them to hard class labels first:
#   - binary head (sigmoid, shape (n, 1)): threshold at 0.5
#   - multiclass head (softmax, shape (n, k)): argmax over classes
pred = model_2.predict([X1_test, X2_test])
if pred.shape[-1] == 1:
    pred_labels = (pred.ravel() > 0.5).astype(int)
else:
    pred_labels = np.argmax(pred, axis=1)

# Boolean mask of the wrongly classified test instances.
# NOTE(review): assumes y_test holds integer class labels (not one-hot) —
# if it is one-hot encoded, apply np.argmax(y_test, axis=1) first.
true_labels = np.asarray(y_test).ravel()
wrong = pred_labels != true_labels

# Pull out the text of the misclassified instances to look for patterns.
# Assumes X1_test is indexable by a boolean array (e.g. a NumPy array);
# if X1_test holds tokenized/padded sequences rather than readable text,
# index the raw-text array you vectorized from instead.
misclassified_text = np.asarray(X1_test)[wrong]
for text, true_lbl, pred_lbl in zip(misclassified_text,
                                    true_labels[wrong],
                                    pred_labels[wrong]):
    print("true={}, pred={}: {}".format(true_lbl, pred_lbl, text))