I'm trying to use the train_test_split
function by providing the labels array that is a 2-d array for stratifying, with only 0 or 1 values (i.e. [0,0], [0,1], [1,0] or [1,1] are the four possible labels). I cannot rename labels (e.g. to 1,2,3,4 for instance) for code compatibility reasons.
Hereafter the code of the function where train_test_split
is used
def preprocess_csv_deceptive_opinion(path, prediction, dataset_recreations):
"""
:param path: path of the original csv file
:param prediction: it can be 'deceptive', 'polarity' or both on the basis of what you want to predict
:param dataset_recreations: it is the number of random seeds used
:return:
"""
read_dataset = pd.read_csv(path) # Dataset columns: deceptive,hotel,polarity,source,text
# print('average sentence length: ', df.text.str.split().str.len().mean())
# print('stdev sentence length: ', df.text.str.split().str.len().std())
read_dataset = read_dataset.drop(['hotel', 'source'], axis=1)
if prediction == 'multi':
# 00 = truthful positive, 01 = truthful negative, 10 = deceptive positive, 11 = deceptive negative
read_dataset['deceptive'] = (read_dataset['deceptive'] == 'deceptive').astype(int)
read_dataset['polarity'] = (read_dataset['polarity'] == 'negative').astype(int)
elif prediction == 'deceptive':
read_dataset = read_dataset.drop(['polarity'], axis=1) # 0 = truthful, 1 = deceptive
read_dataset['deceptive'] = (read_dataset['deceptive'] == 'deceptive').astype(int)
elif prediction == 'polarity':
read_dataset = read_dataset.drop(['deceptive'], axis=1)
read_dataset['polarity'] = (read_dataset['polarity'] == 'negative').astype(int) # 0 = positive, 1 = negative
else:
sys.exit('Label not valid!')
cols = read_dataset.columns
if prediction == 'multi':
label_cols = list(cols[:2])
elif prediction == 'deceptive' or prediction == 'polarity':
label_cols = list(cols[:1])
else:
sys.exit('"label_cols" variable not initialised!')
num_labels = len(label_cols)
print('Label columns: ', label_cols)
read_dataset['one_hot_labels'] = list(read_dataset[label_cols].to_numpy())
# def get_scalar(x):
# if (x == [0, 0]).all():
# return 0 # truthful positive
# elif (x == [0, 1]).all():
# return 1 # truthful negative
# elif (x == [1, 0]).all():
# return 2 # deceptive positive
# elif (x == [1, 1]).all():
# return 3 # deceptive negative
# if prediction == 'multi':
# read_dataset['one_hot_labels'] = list(map(get_scalar, read_dataset['one_hot_labels']))
read_dataset = read_dataset.drop(['deceptive', 'polarity'], axis=1)
# read_dataset = read_dataset.drop(['deceptive', 'polarity'], axis=1)
# Resulting dataset: x | text | one_hot_labels where:
# x is deceptive or polarity or [deceptive, polarity] where get_scalar function is applied to the latest alternative
# one_hot_labels is [deceptive] or [polarity] or [deceptive, polarity]
training_sizes = [0.5, 0.6, 0.7, 0.8, 0.9]
training_data = {}
validation_data = {}
for size in training_sizes:
training_data[size] = {}
validation_data[size] = {}
for i in range(dataset_recreations):
print(type(read_dataset.one_hot_labels))
training, validation = train_test_split(read_dataset, train_size=size, shuffle=True, random_state=i, stratify=read_dataset['one_hot_labels'].to_numpy())
training_data[size][i] = training
validation_data[size][i] = validation
return [read_dataset, num_labels, training_data, validation_data]
But I receive the following:
File "/home/[...]/main.py", line 1409, in preprocess_csv_deceptive_opinion
training, validation = train_test_split(read_dataset, train_size=size, shuffle=True, random_state=i, > stratify=read_dataset['one_hot_labels'].to_numpy())
File "/home/[...]/.conda/envs/RC_37/lib/python3.7/site-packages/sklearn/model_selection/_split.py", line 2197, in train_test_split
train, test = next(cv.split(X=arrays[0], y=stratify))
File "/home/[...]/.conda/envs/RC_37/lib/python3.7/site-packages/sklearn/model_selection/_split.py", line 1793, in split
y = check_array(y, ensure_2d=False, dtype=None)
File "/home/[...]/.conda/envs/RC_37/lib/python3.7/site-packages/sklearn/utils/validation.py", line 63, in inner_f
return f(*args, **kwargs)
File "/home/[...]/.conda/envs/RC_37/lib/python3.7/site-packages/sklearn/utils/validation.py", line 664, in check_array
allow_nan=force_all_finite == 'allow-nan')
File "/home/[...]/.conda/envs/RC_37/lib/python3.7/site-packages/sklearn/utils/validation.py", line 110, in _assert_all_finite
if _object_dtype_isnan(X).any():
AttributeError: 'bool' object has no attribute 'any'
The dataset used is available here.
What is the problem and how can I solve it?