I want to write a very basic application that passes audio from the microphone to the speakers. This is very simple with PyAudio, as described at https://people.csail.mit.edu/hubert/pyaudio/ .
    import time
    import pyaudio

    def passthrough():
        WIDTH = 2
        CHANNELS = 1
        RATE = 44100

        p = pyaudio.PyAudio()

        def callback(in_data, frame_count, time_info, status):
            # hand the microphone data straight back to the output
            return (in_data, pyaudio.paContinue)

        stream = p.open(format=p.get_format_from_width(WIDTH),
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        output=True,
                        stream_callback=callback)

        stream.start_stream()
        while stream.is_active():
            time.sleep(0.1)

        stream.stop_stream()
        stream.close()
        p.terminate()
But now I want to mix a wave file into this stream when an event occurs, and that is where I am stuck. Playing a wave file on its own seems easy, too.
    import time
    import wave
    import pyaudio

    def play_wave(wav_file):
        wf = wave.open(wav_file, 'rb')
        sample_width = wf.getsampwidth()
        channels = wf.getnchannels()
        rate = wf.getframerate()
        second = sample_width * channels * rate  # bytes per second of audio

        def callback(in_data, frame_count, time_info, status):
            data = wf.readframes(frame_count)
            # returning fewer frames than requested ends the stream
            return (data, pyaudio.paContinue)

        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(sample_width),
                        channels=channels,
                        rate=rate,
                        output=True,
                        stream_callback=callback)

        stream.start_stream()
        while stream.is_active():
            time.sleep(0.1)

        stream.stop_stream()
        stream.close()
        wf.close()
        p.terminate()
At this point, I have two problems:

- How do I mix the wave output into the continuous passthrough stream?
- How can I trigger that mixing on an event basis? (A rough, untested sketch of what I was considering is at the bottom of this question.)
Hope someone can light up the dark basement I am in right now.
EDIT: Assume the wave file has the same number of channels and the same sample rate as the stream, so no conversion is necessary.
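
For what it's worth, here is the rough direction I was considering for both points, untested and quite possibly wrong: keep the single passthrough stream and, inside its callback, add the wave file's samples onto the microphone samples while a threading.Event is set. The names (`trigger_wave`, `mix_request`, `ding.wav`) are just placeholders I made up, it relies on the assumption in the EDIT above (same rate and channel count), and it pulls in numpy for the sample maths.

    import threading
    import time
    import wave

    import numpy as np
    import pyaudio

    WIDTH = 2
    CHANNELS = 1
    RATE = 44100

    mix_request = threading.Event()   # set from anywhere to start mixing
    wav_frames = b''                  # raw frames of the wave file
    wav_pos = 0

    def trigger_wave(wav_file):
        # load the whole file and ask the callback to start mixing it in
        global wav_frames, wav_pos
        wf = wave.open(wav_file, 'rb')
        wav_frames = wf.readframes(wf.getnframes())
        wf.close()
        wav_pos = 0
        mix_request.set()

    def callback(in_data, frame_count, time_info, status):
        # note: PyAudio runs this callback in its own thread
        global wav_pos
        if not mix_request.is_set():
            return (in_data, pyaudio.paContinue)
        mic = np.frombuffer(in_data, dtype=np.int16)
        nbytes = frame_count * WIDTH * CHANNELS
        chunk = wav_frames[wav_pos:wav_pos + nbytes]
        wav_pos += nbytes
        if wav_pos >= len(wav_frames):
            mix_request.clear()       # file finished, back to plain passthrough
        wav = np.frombuffer(chunk, dtype=np.int16)
        mixed = mic.copy()
        mixed[:len(wav)] = np.clip(mic[:len(wav)].astype(np.int32) + wav,
                                   -32768, 32767).astype(np.int16)
        return (mixed.tobytes(), pyaudio.paContinue)

    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(WIDTH),
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    output=True,
                    stream_callback=callback)
    stream.start_stream()

    trigger_wave('ding.wav')          # 'ding.wav' is just an example file name

    while stream.is_active():
        time.sleep(0.1)

Is mixing by plain sample addition (with clipping) inside the callback a sane way to do this, or does PyAudio offer something better suited?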