I have recently been running an experiment in which a neural network, written in Python (in the IDLE IDE) using Keras, analyses the GTZAN dataset of songs. I am varying the layers in order to see whether this has any impact on performance. I am basing my experiment on a particular article detailing the basis of this project:
On the advice of another developer on Stack Overflow, I am using the scikit-learn module to split the data.
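For context, this is how I understand StratifiedShuffleSplit to behave, shown on toy data (the variable names here are my own, not from the article):

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

# Toy data: 10 samples of 2 features each, two balanced classes
X_toy = np.arange(20).reshape(10, 2)
y_toy = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
for train_idx, test_idx in sss.split(X_toy, y_toy):
    # split() yields 1-D arrays of row indices, not the data itself
    print(train_idx.shape, test_idx.shape)  # (8,) (2,)
    X_tr, X_te = X_toy[train_idx], X_toy[test_idx]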
My code is shown here:
import librosa
import librosa.feature
import librosa.display
import glob
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
def display_mfcc(song):
    y, _ = librosa.load(song)
    mfcc = librosa.feature.mfcc(y)

    plt.figure(figsize=(10, 4))
    librosa.display.specshow(mfcc, x_axis='time', y_axis='mel')
    plt.colorbar()
    plt.title(song)
    plt.tight_layout()
    plt.show()
def extract_features_song(f):
    y, _ = librosa.load(f)

    # Get MFCCs, normalise to [-1, 1], and keep a fixed-length vector
    mfcc = librosa.feature.mfcc(y)
    mfcc /= np.amax(np.absolute(mfcc))
    return np.ndarray.flatten(mfcc)[:25000]
def generate_features_and_labels():
    all_features = []
    all_labels = []

    genres = ['blues', 'classical', 'country', 'disco', 'hiphop',
              'jazz', 'metal', 'pop', 'reggae', 'rock']
    for genre in genres:
        sound_files = glob.glob('genres/' + genre + '/*.au')
        print('Processing %d songs in %s genre...' %
              (len(sound_files), genre))
        for f in sound_files:
            features = extract_features_song(f)
            all_features.append(features)
            all_labels.append(genre)

    # Convert the genre names to one-hot label vectors
    label_uniq_ids, label_row_ids = np.unique(all_labels,
                                              return_inverse=True)
    label_row_ids = label_row_ids.astype(np.int32, copy=False)
    onehot_labels = to_categorical(label_row_ids,
                                   len(label_uniq_ids))
    return np.stack(all_features), onehot_labels
features, labels = generate_features_and_labels()
print(np.shape(features))
print(np.shape(labels))
training_split = 0.8
x = features
y = labels
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.20,
                             random_state=37)
for train_index, test_index in sss.split(features, labels):
    x_train, x_test = features[train_index], features[test_index]
    y_train, y_test = labels[train_index], labels[test_index]
    print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
train_input = train_index[:,:-10:]
train_labels = train_index[:,-10:]
test_input = test_index[:,:-10:]
test_labels = test_index[:,-10:]
print(np.shape(train_input))
print(np.shape(train_labels))
model = Sequential([
    Dense(100, input_dim=np.shape(train_input)[1]),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())

model.fit(train_input, train_labels, epochs=10, batch_size=32,
          validation_split=0.2)

loss, acc = model.evaluate(test_input, test_labels, batch_size=32)
print('Done!')
print('Loss: %.4f, accuracy: %.4f' % (loss, acc))
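Once the script runs, my plan for varying the layers is roughly the helper below. This is a sketch of my own, not code from the article; build_model and hidden_sizes are names I made up for the experiment:

def build_model(input_dim, hidden_sizes):
    # hidden_sizes is my experimental knob, e.g. [100] for the
    # baseline above, or [200, 100] for a deeper network
    model = Sequential()
    for i, size in enumerate(hidden_sizes):
        if i == 0:
            model.add(Dense(size, input_dim=input_dim))
        else:
            model.add(Dense(size))
        model.add(Activation('relu'))
    model.add(Dense(10))             # one output per GTZAN genre
    model.add(Activation('softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model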
When I ran the program, Python began to print the expected output:
Processing 100 songs in blues genre...
Processing 100 songs in classical genre...
Processing 100 songs in country genre...
Processing 100 songs in disco genre...
Processing 100 songs in hiphop genre...
Processing 100 songs in jazz genre...
Processing 100 songs in metal genre...
Processing 100 songs in pop genre...
Processing 100 songs in reggae genre...
Processing 100 songs in rock genre...
(1000, 25000)
(1000, 10)
(800, 25000) (200, 25000) (800, 10) (200, 10)
But this was interrupted by an error message:
Traceback (most recent call last):
File "/Users/surengrigorian/Documents/Stage1.py", line 74, in <module>
train_input = train_index[:,:-10:]
IndexError: too many indices for array
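I can reproduce the same error in isolation with a 1-D array of indices (a toy example of my own):

import numpy as np

idx = np.arange(800)  # 1-D, like the index arrays from sss.split()
idx[:, :-10]          # IndexError: too many indices for array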
Thank you for any assistance concerning this issue.