First of all, I apologize in advance: I'm not experienced at asking questions here, so this might be a little messy.
I'm using the following repo from GitHub: https://github.com/gcunhase/wav2midi2wav
The Melodia step keeps failing with the following error:
Loading audio...
Extracting melody f0 with MELODIA...
Converting Hz to MIDI notes...
Traceback (most recent call last):
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 210, in <module>
    audio_to_midi_melodia(args.infile, args.outfile, args.bpm,
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 178, in audio_to_midi_melodia
    notes = midi_to_notes(midi_pitch, fs, hop, smooth, minduration)
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 109, in midi_to_notes
    if p_prev > 0:
TypeError: '>' not supported between instances of 'NoneType' and 'int'
This happens when I run the following command:
python3 audio_to_midi_melodia/audio_to_midi_melodia.py Documents/fairuz.wav bpm 146 --smooth 0.25 --minduration 0.1 --jams
And this is the Python script for the Melodia step (audio_to_midi_melodia.py):
# CREATED: 11/9/15 3:57 PM by Justin Salamon <justin.salamon@nyu.edu>

import librosa
import vamp
import argparse
import os
import numpy as np
from midiutil.MidiFile import MIDIFile
from scipy.signal import medfilt
import jams
import __init__

'''
Extract the melody from an audio file and convert it to MIDI.

The script extracts the melody from an audio file using the Melodia algorithm,
and then segments the continuous pitch sequence into a series of quantized
notes, and exports to MIDI using the provided BPM. If the --jams option is
specified the script will also save the output as a JAMS file. Note that the
JAMS file uses the original note onset/offset times estimated by the algorithm
and ignores the provided BPM value.

Note: Melodia can work pretty well and is the result of several years of
research. The note segmentation/quantization code was hacked in about 30
minutes. Proceed at your own risk... :)

usage: audio_to_midi_melodia.py [-h] [--smooth SMOOTH]
                                [--minduration MINDURATION] [--jams]
                                infile outfile bpm

Examples:
python audio_to_midi_melodia.py --smooth 0.25 --minduration 0.1 --jams
                                ~/song.wav ~/song.mid 60
'''


def save_jams(jamsfile, notes, track_duration, orig_filename):

    # Construct a new JAMS object and annotation records
    jam = jams.JAMS()

    # Store the track duration
    jam.file_metadata.duration = track_duration
    jam.file_metadata.title = orig_filename

    midi_an = jams.Annotation(namespace='pitch_midi',
                              duration=track_duration)
    midi_an.annotation_metadata = \
        jams.AnnotationMetadata(
            data_source='audio_to_midi_melodia.py v%s' % __init__.__version__,
            annotation_tools='audio_to_midi_melodia.py (https://github.com/'
                             'justinsalamon/audio_to_midi_melodia)')

    # Add midi notes to the annotation record.
    for n in notes:
        midi_an.append(time=n[0], duration=n[1], value=n[2], confidence=0)

    # Store the new annotation in the jam
    jam.annotations.append(midi_an)

    # Save to disk
    jam.save(jamsfile)


def save_midi(outfile, notes, tempo):

    track = 0
    time = 0
    midifile = MIDIFile(1)

    # Add track name and tempo.
    midifile.addTrackName(track, time, "MIDI TRACK")
    midifile.addTempo(track, time, tempo)

    channel = 0
    volume = 100

    for note in notes:
        onset = note[0] * (tempo/60.)
        duration = note[1] * (tempo/60.)
        # duration = 1
        pitch = note[2]
        midifile.addNote(track, channel, pitch, onset, duration, volume)

    # And write it to disk.
    binfile = open(outfile, 'wb')
    midifile.writeFile(binfile)
    binfile.close()


def midi_to_notes(midi, fs, hop, smooth, minduration):

    # smooth midi pitch sequence first
    if (smooth > 0):
        filter_duration = smooth  # in seconds
        filter_size = int(filter_duration * fs / float(hop))
        if filter_size % 2 == 0:
            filter_size += 1
        midi_filt = medfilt(midi, filter_size)
    else:
        midi_filt = midi
    # print(len(midi), len(midi_filt))

    notes = []
    p_prev = None
    duration = 0
    onset = 0
    for n, p in enumerate(midi_filt):
        if p == p_prev:
            duration += 1
        else:
            # treat 0 as silence
            if p_prev > 0:
                # add note
                duration_sec = duration * hop / float(fs)
                # only add notes that are long enough
                if duration_sec >= minduration:
                    onset_sec = onset * hop / float(fs)
                    notes.append((onset_sec, duration_sec, p_prev))

            # start new note
            onset = n
            duration = 1
            p_prev = p

    # add last note
    if p_prev > 0:
        # add note
        duration_sec = duration * hop / float(fs)
        onset_sec = onset * hop / float(fs)
        notes.append((onset_sec, duration_sec, p_prev))

    return notes


def hz2midi(hz):

    # convert from Hz to midi note
    hz_nonneg = hz.copy()
    idx = hz_nonneg <= 0
    hz_nonneg[idx] = 1
    midi = 69 + 12*np.log2(hz_nonneg/440.)
    midi[idx] = 0

    # round
    midi = np.round(midi)

    return midi


def audio_to_midi_melodia(infile, outfile, bpm, smooth=0.25, minduration=0.1,
                          savejams=False):

    # define analysis parameters
    fs = 44100
    hop = 128

    # load audio using librosa
    print("Loading audio...")
    data, sr = librosa.load(infile, sr=fs, mono=True)

    # extract melody using melodia vamp plugin
    print("Extracting melody f0 with MELODIA...")
    melody = vamp.collect(data, sr, "mtg-melodia:melodia",
                          parameters={"voicing": 0.2})

    # hop = melody['vector'][0]
    pitch = melody['vector'][1]

    # impute missing 0's to compensate for starting timestamp
    pitch = np.insert(pitch, 0, [0]*8)

    # debug
    # np.asarray(pitch).dump('f0.npy')
    # print(len(pitch))

    # convert f0 to midi notes
    print("Converting Hz to MIDI notes...")
    midi_pitch = hz2midi(pitch)

    # segment sequence into individual midi notes
    notes = midi_to_notes(midi_pitch, fs, hop, smooth, minduration)

    # save note sequence to a midi file
    print("Saving MIDI to disk...")
    save_midi(outfile, notes, bpm)

    if savejams:
        print("Saving JAMS to disk...")
        jamsfile = outfile.replace(".mid", ".jams")
        track_duration = len(data) / float(fs)
        save_jams(jamsfile, notes, track_duration, os.path.basename(infile))

    print("Conversion complete.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", default="assets/10.wav", help="Path to input audio file.")
    parser.add_argument("outfile", default="assets/10.mid", help="Path for saving output MIDI file.")
    parser.add_argument("bpm", type=int, default=146, help="Tempo of the track in BPM.")
    parser.add_argument("--smooth", type=float, default=0.25,
                        help="Smooth the pitch sequence with a median filter "
                             "of the provided duration (in seconds).")
    parser.add_argument("--minduration", type=float, default=0.1,
                        help="Minimum allowed duration for note (in seconds). "
                             "Shorter notes will be removed.")
    parser.add_argument("--jams", action="store_const", const=True,
                        default=False, help="Also save output in JAMS format.")

    args = parser.parse_args()

    audio_to_midi_melodia(args.infile, args.outfile, args.bpm,
                          smooth=args.smooth, minduration=args.minduration,
                          savejams=args.jams)
I tried changing the initialization of p_prev from None to 0.
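For context, here is the minimal check I ran to convince myself of what was going on before touching the script (none of this comes from the repo, it just mimics the p_prev comparison in midi_to_notes()):

# Python 3 no longer allows ordering comparisons between None and int,
# which is exactly the comparison midi_to_notes() does on its first iteration.
p_prev = None
try:
    p_prev > 0
except TypeError as err:
    print(err)  # '>' not supported between instances of 'NoneType' and 'int'

# With the change I made (initialize p_prev to 0 instead of None) the
# comparison is int > int; 0 is already treated as silence by the code.
p_prev = 0
print(p_prev > 0)  # False, so no bogus note is added for the first frame

With that change the segmentation step runs, but the script then fails at the MIDI-writing step with a completely different error: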
Loading audio...
Extracting melody f0 with MELODIA...
Converting Hz to MIDI notes...
Saving MIDI to disk...
Traceback (most recent call last):
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 210, in <module>
    audio_to_midi_melodia(args.infile, args.outfile, args.bpm,
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 182, in audio_to_midi_melodia
    save_midi(outfile, notes, bpm)
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 83, in save_midi
    midifile.writeFile(binfile)
  File "/home/hadoop/.local/lib/python3.8/site-packages/midiutil/MidiFile.py", line 1637, in writeFile
    self.close()
  File "/home/hadoop/.local/lib/python3.8/site-packages/midiutil/MidiFile.py", line 1699, in close
    self.tracks[i].writeMIDIStream()
  File "/home/hadoop/.local/lib/python3.8/site-packages/midiutil/MidiFile.py", line 835, in writeMIDIStream
    self.writeEventsToStream()
  File "/home/hadoop/.local/lib/python3.8/site-packages/midiutil/MidiFile.py", line 852, in writeEventsToStream
    self.MIDIdata += event.serialize(previous_event_tick)
  File "/home/hadoop/.local/lib/python3.8/site-packages/midiutil/MidiFile.py", line 124, in serialize
    midibytes += struct.pack('>B', self.pitch)
struct.error: required argument is not an integer
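From the last frame of that traceback (struct.pack('>B', self.pitch)), my guess is that the pitch values reaching midiutil are numpy floats, since hz2midi() returns np.round(midi) without casting. Would casting to a plain int right before addNote() be the correct fix? This is the sketch I have in mind (save_midi_intcast is just my name for a modified copy of save_midi(), and the smoke-test notes at the bottom are made up):

from midiutil.MidiFile import MIDIFile

def save_midi_intcast(outfile, notes, tempo):
    # Same as the script's save_midi(), except the pitch is cast to a plain
    # Python int before it reaches midiutil, because struct.pack('>B', ...)
    # rejects float pitch values.
    track = 0
    time = 0
    midifile = MIDIFile(1)
    midifile.addTrackName(track, time, "MIDI TRACK")
    midifile.addTempo(track, time, tempo)
    channel = 0
    volume = 100
    for note in notes:
        onset = note[0] * (tempo / 60.)
        duration = note[1] * (tempo / 60.)
        pitch = int(note[2])   # <-- the only change: numpy float -> int
        midifile.addNote(track, channel, pitch, onset, duration, volume)
    with open(outfile, 'wb') as binfile:
        midifile.writeFile(binfile)

# tiny smoke test with hand-made notes: (onset_sec, duration_sec, midi_pitch)
save_midi_intcast("test.mid", [(0.0, 0.5, 69.0), (0.5, 0.5, 72.0)], 146)

Alternatively I suppose hz2midi() could return np.round(midi).astype(int) so the notes carry integer pitches from the start, but I'm not sure whether that has side effects elsewhere (for example in the JAMS output).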
I suspect the code is simply outdated; as far as I can tell, its creator, Justin Salamon, last updated it around 2015.
Update: after changing p_prev = 0 and adding the line suggested in the first comment, I get the following error:
Loading audio...
Extracting melody f0 with MELODIA...
Converting Hz to MIDI notes...
Saving MIDI to disk...
Saving JAMS to disk...
Traceback (most recent call last):
  File "/home/hadoop/.local/lib/python3.8/site-packages/jams/core.py", line 142, in _open
    with open_map[ext](name_or_fdesc, mode=mode) as fdesc:
KeyError: ''

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 211, in <module>
    audio_to_midi_melodia(args.infile, args.outfile, args.bpm,
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 189, in audio_to_midi_melodia
    save_jams(jamsfile, notes, track_duration, os.path.basename(infile))
  File "audio_to_midi_melodia/audio_to_midi_melodia.py", line 58, in save_jams
    jam.save(jamsfile)
  File "/home/hadoop/.local/lib/python3.8/site-packages/jams/core.py", line 1778, in save
    with _open(path_or_file, mode='w', fmt=fmt) as fdesc:
  File "/usr/lib/python3.8/contextlib.py", line 113, in __enter__
    return next(self.gen)
  File "/home/hadoop/.local/lib/python3.8/site-packages/jams/core.py", line 146, in _open
    raise ParameterError('Unknown JAMS extension '
jams.exceptions.ParameterError: Unknown JAMS extension format: ""
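One thing I noticed while re-reading this last traceback: the script's positional arguments are infile outfile bpm, but my command above never passes an output path, so I believe argparse ends up treating the literal word bpm as the outfile. If that's right, the JAMS filename computed in audio_to_midi_melodia() has no extension at all, which would explain the "Unknown JAMS extension" message. A quick check (the parsed values below are only my assumption about what argparse does with my command):

import os

# What I believe argparse parses from:
#   python3 ... Documents/fairuz.wav bpm 146 --smooth 0.25 --minduration 0.1 --jams
infile = "Documents/fairuz.wav"
outfile = "bpm"    # the literal word "bpm", not a .mid path
bpm = 146

# This is how the script derives the JAMS path:
jamsfile = outfile.replace(".mid", ".jams")
print(jamsfile)                             # "bpm" -- nothing to replace
print(repr(os.path.splitext(jamsfile)[1]))  # '' -- so jams cannot pick a format

So is the real problem just my command, i.e. should it be python3 audio_to_midi_melodia/audio_to_midi_melodia.py Documents/fairuz.wav Documents/fairuz.mid 146 --smooth 0.25 --minduration 0.1 --jams, or is there something else wrong with the JAMS saving?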