I have written custom code to concat multiple mpeg-ts files into an mp4 video file. I've used as reference the remuxing code sample.
I'm having an issue where the final output cannot be fast-forwarded or rewound: after seeking, the video loses its picture and keeps displaying the same frame until the end. If I play it from the beginning without seeking, it plays fine.
I compared using ffprobe and a hex tool my custom code remuxer results to that of using the following terminal command:
ffmpeg -i "concat:input1.ts|input2.ts|input3.ts" -c copy output.mp4
To my surprise, the two files are almost identical, but I noticed that my output is missing the stss box in the MP4 header. According to the MP4 specification, this "sync sample" table records which samples are key (intra) frames, and players rely on it to seek. I'm wondering if I'm missing something in my code. Please find below how I'm currently doing things.
int remuxVideos() {
// Other code removed....
//
// Appends every finished .ts segment in sourceDir into a single MP4 file.
// Relies on member state: files, sourceDir, mpFormatCtx, pPacket, error,
// and nextPts/nextDts (running timestamp offsets across segments).
for (auto file : files) {
// Process only .ts segments, skipping the one still being recorded to.
if (FcStringUtils::endsWith(file.c_str(), ".ts") &&
FC_TIMELAPSE_ACTIVE_RECORDING_FILENAME != file) {
FcVideoStream videoStream;
error = videoStream.openStream(sourceDir + "/" + file);
if (ERROR_NO_ERROR != error) {
break;
}
// If the format context is not yet open, we open it lazily using the
// first segment's stream settings (all segments are assumed identical).
if (!mpFormatCtx) {
error = openFormatContext(sourceDir + "/app.mp4", videoStream.getStream());
if (ERROR_NO_ERROR != error) {
break;
}
}
// Read video stream frames and mux them back into output.
// pts/dts/duration retain the LAST successfully read packet's original
// (pre-offset) timestamps so the offsets can be advanced after the loop.
int64_t pts = 0;
int64_t dts = 0;
int64_t duration = 0;
int ret = 0;
while (1) {
ret = videoStream.readFrame(pPacket);
if (0 > ret) {
// Any error we are technically EOF or just an error.
break;
}
// NOTE(review): AV_NOPTS_VALUE is the sentinel for pts/dts, but an
// unknown duration is reported as 0, not AV_NOPTS_VALUE — the
// duration half of this check likely never fires. TODO confirm.
if (pPacket->duration == AV_NOPTS_VALUE || pPacket->dts == AV_NOPTS_VALUE || pPacket->pts == AV_NOPTS_VALUE) {
LOGE("Invalid packet time");
continue;
}
pPacket->stream_index = 0;
pPacket->pos = -1;
// pPacket->flags |= AV_PKT_FLAG_KEY; // << Does not make a difference
// pts and dts should increase monotonically pts should be >= dts
// Shift this segment's timestamps by the running offsets so the
// concatenated timeline stays monotonic across segments.
pts = pPacket->pts;
pPacket->pts += nextPts;
dts = pPacket->dts;
pPacket->dts += nextDts;
duration = pPacket->duration;
// NOTE(review): the timestamps are still in the INPUT (MPEG-TS)
// stream's time_base at this point. If the output stream's time_base
// chosen by the mp4 muxer differs, av_packet_rescale_ts() is needed
// before writing — a time_base mismatch corrupts the mp4 index
// tables used for seeking. TODO confirm against FFmpeg's remux.c.
// Write packet to encoder.
ret = av_interleaved_write_frame(mpFormatCtx, pPacket);
if (0 > ret) {
LOGE("Failed to write frame! ret=%d %s", ret, av_err2str(ret));
break;
}
// Release the packet before reading the next one. NOTE(review): the
// break above on write failure actually skips this unref; the final
// av_packet_free() below still drops the last reference.
av_packet_unref(pPacket);
}
// Update last dts and pts.
// New offset = old offset + last original timestamp + its duration,
// i.e. the next segment starts right where this one ended.
nextDts = nextDts + dts + duration;
nextPts = nextPts + pts + duration;
videoStream.closeStream();
}
}
// Finalize the MP4 only on success — the trailer write is what emits the
// moov box (stts/stsc/stss sample tables), so skipping it on error leaves
// an unplayable file. Then close the output I/O context.
if (ERROR_NO_ERROR == error) {
av_write_trailer(mpFormatCtx);
// close output
avio_closep(&mpFormatCtx->pb);
}
av_packet_free(&pPacket);
if (mpFormatCtx) {
mpVideoStream = nullptr;
avformat_free_context(mpFormatCtx);
}
int openFormatContext(const std::string &output, AVStream *pSourceStream) {
int ret = avformat_alloc_output_context2(&mpFormatCtx,
nullptr,
nullptr,
output.c_str());
if (!mpFormatCtx) {
LOGE("Unable to output codec: %s", av_err2str(ret));
return ret;
}
mpFormatCtx->interrupt_callback.callback = ffmpeg_interrupt_cb;
mpFormatCtx->interrupt_callback.opaque = this;
/*
* since all input files are supposed to be identical (framerate, dimension, color format, ...)
* we can safely set output codec values from first input file
*/
mpVideoStream = avformat_new_stream(mpFormatCtx, nullptr);
ret = avcodec_parameters_copy(mpVideoStream->codecpar, pSourceStream->codecpar);
if (0 > ret) {
LOGE("Failed to copy codec parameters");
return ret;
}
mpVideoStream->codecpar->codec_tag = 0;
av_dump_format(mpFormatCtx, 0, output.c_str(), 1);
ret = avio_open(&mpFormatCtx->pb, output.c_str(), AVIO_FLAG_WRITE);
if (0 > ret) {
LOGE("Error occurred when opening output file: %s", av_err2str(ret));
return ret;
}
ret = avformat_write_header(mpFormatCtx, nullptr);
if (0 > ret) {
LOGE("Error occurred when opening output file: %s", av_err2str(ret));
return ret;
}
return 0;
}