Alright, basically I'm working on a simple video player, and I'll probably be asking another question about lagging video / syncing to audio later, but for now I'm having a problem with audio. What I've managed to do so far is go through all of the audio frames of a video, add them to a vector buffer, and then play the audio from that buffer using OpenAL.
This is inefficient and memory-hogging, so I need to be able to stream it using what I guess is called a rotating buffer. I've run into problems, one being that there's not a lot of information on streaming with OpenAL, let alone the proper way to decode audio with FFmpeg and pipe it to OpenAL. I'm even less comfortable using a vector for my buffer because I honestly have no idea how vectors work in C++, but I somehow managed to pull something out of my head to make it work.
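From the bits of OpenAL documentation and examples I've been able to find, the rotating-buffer approach seems to revolve around queueing several small buffers on a source and recycling them as they finish playing, roughly with these calls (this is just my understanding, not working code; source, data, and size here are placeholders for whatever I'd actually have):

ALint processed;
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed); // how many queued buffers have finished playing
ALuint buffer;
alSourceUnqueueBuffers(source, 1, &buffer);             // pull a finished buffer off the source's queue
alBufferData(buffer, format, data, size, freq);         // refill it with freshly decoded samples
alSourceQueueBuffers(source, 1, &buffer);               // push it back onto the end of the queue

I just don't know how to wire that into my decoding.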
Currently I have a Video class that looks like this:
class Video
{
public:
    Video(string MOV);
    ~Video();
    bool HasError();
    string GetError();
    void UpdateVideo();
    void RenderToQuad(float Width, float Height);
    void CleanTexture();

private:
    string FileName;
    bool Error;
    int videoStream, audioStream, FrameFinished, ErrorLevel;
    AVPacket packet;
    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx, *aCodecCtx;
    AVCodec *pCodec, *aCodec;
    AVFrame *pFrame, *pFrameRGB, *aFrame;
    GLuint VideoTexture;
    struct SwsContext *swsContext;
    ALint state;
    ALuint bufferID, sourceID;
    ALenum format;
    ALsizei freq;
    vector<uint8_t> bufferData;
};
The bottom private variables are the relevant ones. Currently I'm decoding audio in the class constructor to an AVFrame and adding the data to bufferData like so:
av_init_packet(&packet);
alGenBuffers(1, &bufferID);
alGenSources(1, &sourceID);
alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);

int GotFrame = 0;
freq = aCodecCtx->sample_rate;
if (aCodecCtx->channels == 1)
    format = AL_FORMAT_MONO16;
else
    format = AL_FORMAT_STEREO16;

// Decode every audio packet in the file up front and append the raw samples
// to bufferData (this assumes packed 16-bit samples).
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
    if (packet.stream_index == audioStream)
    {
        avcodec_decode_audio4(aCodecCtx, aFrame, &GotFrame, &packet);
        if (GotFrame)
            bufferData.insert(bufferData.end(), aFrame->data[0], aFrame->data[0] + aFrame->linesize[0]);
    }
    av_free_packet(&packet); // free every packet, not just the audio ones
}
av_seek_frame(pFormatCtx, audioStream, 0, AVSEEK_FLAG_BACKWARD);

// Hand the whole thing to OpenAL as one giant buffer.
alBufferData(bufferID, format, &bufferData[0], static_cast<ALsizei>(bufferData.size()), freq);
alSourcei(sourceID, AL_BUFFER, bufferID);
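For a streaming version, I'm guessing the single bufferID would have to become a small pool of buffers that get filled and queued up front, something like this (NUM_AUDIO_BUFFERS and audioBuffers are just names I made up, and I have no idea how many buffers or how much data per buffer is sensible):

static const int NUM_AUDIO_BUFFERS = 3;  // arbitrary guess
ALuint audioBuffers[NUM_AUDIO_BUFFERS];  // would replace the single bufferID member

// In the constructor: generate the pool, fill each buffer with one chunk of
// decoded audio, then queue them all on the source so it has something to
// play while the rest gets decoded later.
alGenBuffers(NUM_AUDIO_BUFFERS, audioBuffers);
// ... alBufferData(audioBuffers[i], format, <decoded samples>, <size>, freq); for each i ...
alSourceQueueBuffers(sourceID, NUM_AUDIO_BUFFERS, audioBuffers);
alSourcePlay(sourceID);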
UpdateVideo() is where I'm decoding the video stream into an OpenGL texture, so it seems like the natural place to decode the audio and stream it as well:
void Video::UpdateVideo()
{
    alGetSourcei(sourceID, AL_SOURCE_STATE, &state);
    if (state != AL_PLAYING)
        alSourcePlay(sourceID);

    if (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if (packet.stream_index == videoStream)
        {
            avcodec_decode_video2(pCodecCtx, pFrame, &FrameFinished, &packet);
            if (FrameFinished)
            {
                sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
            }
        }
        else if (packet.stream_index == audioStream)
        {
            /*
            avcodec_decode_audio4(aCodecCtx, aFrame, &FrameFinished, &packet);
            if (FrameFinished)
            {
                //Update Audio and rotate buffers here!
            }
            */
        }
        av_free_packet(&packet); // free the packet whichever stream it came from

        glGenTextures(1, &VideoTexture);
        glBindTexture(GL_TEXTURE_2D, VideoTexture);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
    }
    else
    {
        av_seek_frame(pFormatCtx, videoStream, 0, AVSEEK_FLAG_BACKWARD);
    }
}
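Here's roughly what I imagine the commented-out audio branch should call instead, pieced together from the calls above. UpdateAudio() is just a helper name I made up (it would need declaring in the class), using av_samples_get_buffer_size() for the buffer size is a guess on my part, and it still assumes packed 16-bit samples like the constructor does; none of this is tested:

void Video::UpdateAudio()
{
    avcodec_decode_audio4(aCodecCtx, aFrame, &FrameFinished, &packet);
    if (!FrameFinished)
        return;

    // Ask OpenAL how many of the queued buffers it has finished playing.
    ALint processed = 0;
    alGetSourcei(sourceID, AL_BUFFERS_PROCESSED, &processed);

    if (processed > 0)
    {
        // Take one finished buffer off the queue, refill it with the
        // samples we just decoded, and put it back at the end of the queue.
        ALuint buffer = 0;
        alSourceUnqueueBuffers(sourceID, 1, &buffer);

        int dataSize = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
                                                  aFrame->nb_samples,
                                                  aCodecCtx->sample_fmt, 1);
        alBufferData(buffer, format, aFrame->data[0], dataSize, freq);
        alSourceQueueBuffers(sourceID, 1, &buffer);
    }

    // If decoding fell behind and the source ran out of queued data,
    // kick it back into the playing state.
    alGetSourcei(sourceID, AL_SOURCE_STATE, &state);
    if (state != AL_PLAYING)
        alSourcePlay(sourceID);
}

Even if that's roughly the right shape, I don't know what to do with a decoded frame when no buffer has been processed yet (it just gets dropped here), or how much audio should go into each queued buffer.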
So I guess the big question is: is that the right approach, and how do I actually do it? I've got no clue. Any help is appreciated, thank you!