I'm trying to use the FFmpeg API to decode into a buffer defined by the client program, following the tips in this question, but using the new send/receive decoding pattern instead of the now-deprecated avcodec_decode_video2 function.
If my input file is in an I-frame-only format, everything works great. I've tested with a .mov file encoded with v210 (uncompressed).
However, if the input is a long-GoP format (I'm trying with H.264 high profile 4:2:2 in an mp4 file) I get the following pleasingly psychedelic/impressionistic result:
There's clearly something motion-vectory going on here!
And if I let FFmpeg manage its own buffers for the H.264 input by not overriding AVCodecContext::get_buffer2, I can copy from the resulting frame to my desired destination buffer and get good results.
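For reference, the copy that works is essentially this (a sketch, assuming the same IMG accessors used in getVideoBuffer below; it's what the commented-out packFrame() call in decode() stands in for):

// Sketch of the copy-out path that works. Uses av_image_copy from
// <libavutil/imgutils.h>; IMG accessors as used in getVideoBuffer below.
IMG* img = _imgs[_toggle];
uint8_t* dst[AV_NUM_DATA_POINTERS] = { nullptr };
int dstStride[AV_NUM_DATA_POINTERS] = { 0 };
for (int chan = 0; chan < 3; ++chan) {
    dst[chan]       = static_cast<uint8_t*>(img->begin(chan));
    dstStride[chan] = rv::unsigned_cast<int>(img->stride(chan));
}
::av_image_copy(dst, dstStride,
                const_cast<const uint8_t**>(_frame->data), _frame->linesize,
                static_cast<AVPixelFormat>(_frame->format),
                _frame->width, _frame->height);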
Here's my decoder method. _frame and _codecCtx are object members of type AVFrame* and AVCodecContext* respectively; they get alloc'd and init'd in the constructor.
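For completeness, the wiring in the constructor looks roughly like this (a sketch, not my exact code; 'codec' stands in for the AVCodec* obtained from avcodec_find_decoder, and the captureless lambda is the trampoline into the member override shown further down):

// Illustrative sketch of the constructor wiring (error checks omitted):
_codecCtx = ::avcodec_alloc_context3(codec);
_frame    = ::av_frame_alloc();
_codecCtx->opaque = this; // let the static hook recover the Player instance
_codecCtx->get_buffer2 = [](AVCodecContext* ctx, AVFrame* frm, int flags) {
    // forward to the member override below (which currently ignores 'flags')
    return static_cast<Player*>(ctx->opaque)->getVideoBuffer(ctx, frm);
};
_err = ::avcodec_open2(_codecCtx, codec, nullptr);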
virtual const DecodeResult decode(const rv::sz_t toggle) override {
    _toggle = toggle & 1;
    using Flags_e = DecodeResultFlags_e;
    DecodeResult ans(Flags_e::kNoResult);
    AVPacket pkt; // holds compressed data
    ::av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    int ret;
    // read the compressed frame to decode
    _err = av_read_frame(_fmtCtx, &pkt);
    if (_err < 0) {
        if (_err == AVERROR_EOF) {
            ans.set(Flags_e::kEndOfFile);
            _err = 0; // we can safely ignore EOF errors
            return ans;
        } else {
            baleOnFail(__PRETTY_FUNCTION__);
        }
    }
    // send (compressed) packets to the decoder until it produces an uncompressed frame
    do {
        // sender
        _err = ::avcodec_send_packet(_codecCtx, &pkt);
        if (_err < 0) {
            if (_err == AVERROR_EOF) {
                _err = 0; // EOFs are ok
                ans.set(Flags_e::kEndOfFile);
                break;
            } else {
                baleOnFail(__PRETTY_FUNCTION__);
            }
        }
        // receiver
        ret = ::avcodec_receive_frame(_codecCtx, _frame);
        if (ret == AVERROR(EAGAIN)) {
            continue;
        } else if (ret == AVERROR_EOF) {
            ans.set(Flags_e::kEndOfFile);
            break;
        } else if (ret < 0) {
            _err = ret;
            baleOnFail(__PRETTY_FUNCTION__);
        } else {
            ans.set(Flags_e::kGotFrame);
        }
        av_packet_unref(&pkt);
    } while (!ans.test(Flags_e::kGotFrame));
    //packFrame(); <-- used to copy to client image
    return ans;
}
And here's my override for get_buffer2:
int getVideoBuffer(struct AVCodecContext* ctx, AVFrame* frm) {
    // ensure frame pointers are all null
    if (frm->data[0] || frm->data[1] || frm->data[2] || frm->data[3]) {
        ::strncpy(_errMsg, "non-null frame data pointer detected.", AV_ERROR_MAX_STRING_SIZE);
        return -1;
    }
    // get format descriptor, ensure it's valid.
    const AVPixFmtDescriptor* desc = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(frm->format));
    if (!desc) {
        ::strncpy(_errMsg, "Pixel format descriptor not available.", AV_ERROR_MAX_STRING_SIZE);
        return AVERROR(EINVAL);
    }
    // for video, extended_data must point to the same place as data.
    frm->extended_data = frm->data;
    // set the data pointers to point at the image data.
    int chan = 0;
    IMG* img = _imgs[_toggle];
    // initialize active channels
    for (; chan < 3; ++chan) {
        frm->buf[chan] = av_buffer_create(
            static_cast<uint8_t*>(img->begin(chan)),
            rv::unsigned_cast<int>(img->size(chan)),
            Player::freeBufferCallback, // callback does nothing
            reinterpret_cast<void*>(this),
            0 // i.e. AV_BUFFER_FLAG_READONLY is not set
        );
        frm->linesize[chan] = rv::unsigned_cast<int>(img->stride(chan));
        frm->data[chan] = frm->buf[chan]->data;
    }
    // zero out inactive channels
    for (; chan < AV_NUM_DATA_POINTERS; ++chan) {
        frm->data[chan] = NULL;
        frm->linesize[chan] = 0;
    }
    return 0;
}
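As the comment says, freeBufferCallback does nothing, because the client-side IMG objects own the memory. It's essentially:

// no-op: the IMG objects own the memory, so there's nothing to release
// when the AVBufferRef's refcount hits zero
void Player::freeBufferCallback(void* /*opaque*/, uint8_t* /*data*/) {}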
I can reason that the codec needs to keep reference frames in memory, so I'm not really surprised that this isn't working, but I've not been able to figure out how to have it deliver clean decoded frames to client memory. I thought that AVFrame::key_frame would have been a clue, but, after observing its behaviour in gdb, it doesn't provide a useful trigger for when to allocate the AVFrame::buf entries from the buffer pool and when they can be initialized to point at client memory.
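One idea I've considered but not yet tried: the real get_buffer2 callback receives an int flags argument (my member override above hides it), and AV_GET_BUFFER_FLAG_REF is documented to mean the decoder may keep the frame as a reference. So perhaps reference frames should go to FFmpeg's default allocator and only non-reference frames should alias client memory. An untested sketch (wrapClientMemory is a hypothetical refactor of the body above):

// Untested idea: hand reference frames back to FFmpeg's own pool and
// only alias client memory for frames the decoder won't keep.
int getVideoBuffer(struct AVCodecContext* ctx, AVFrame* frm, int flags) {
    if (flags & AV_GET_BUFFER_FLAG_REF) {
        // decoder will hold on to this frame: let FFmpeg allocate it
        return ::avcodec_default_get_buffer2(ctx, frm, flags);
    }
    // otherwise wrap client memory, as in the body above
    return wrapClientMemory(ctx, frm); // hypothetical
}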
Grateful for any help!