
I have been studying dranger's FFmpeg tutorial, which explains how to sync audio and video once you have frames displaying and audio playing, which is where I'm at.

Unfortunately, the tutorial is out of date (Stephen Dranger explained that to me himself), and it also uses SDL, which I'm not using - this is for a BlackBerry 10 application.

I just cannot make the video frames display at the correct speed (they play far too fast), and I have been trying for over a week now - seriously!

I have three threads running: one reads from the stream into audio and video packet queues, and the other two decode and play the audio and video.

If somebody could explain what's happening after scanning my relevant code, you would be a lifesaver.

The delay (what I pass to usleep(testDelay)) seems to keep going up (incrementing) with every frame, which doesn't seem right to me.
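For reference, my packet_queue_init / packet_queue_put / packet_queue_get helpers aren't shown below; they are the tutorial's queue ported from SDL to Qt primitives, so this is a minimal sketch of what I'm assuming matters about them (my real code may differ slightly):

    typedef struct PacketQueue {
        AVPacketList *first_pkt, *last_pkt;
        int nb_packets;
        int size;
        QMutex *mutex;
        QWaitCondition *cond;
    } PacketQueue;

    void packet_queue_init(PacketQueue *q) {
        memset(q, 0, sizeof(PacketQueue));
        q->mutex = new QMutex();
        q->cond = new QWaitCondition();
    }

    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
        // the queue must own the packet data, since the caller reuses its AVPacket
        if(av_dup_packet(pkt) < 0)
            return -1;
        AVPacketList *pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
        if(!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;

        q->mutex->lock();
        if(!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size;
        q->cond->wakeOne();          // wake a blocked packet_queue_get()
        q->mutex->unlock();
        return 0;
    }

    int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
        int ret;
        q->mutex->lock();
        for(;;) {
            AVPacketList *pkt1 = q->first_pkt;
            if(pkt1) {
                q->first_pkt = pkt1->next;
                if(!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
                q->size -= pkt1->pkt.size;
                *pkt = pkt1->pkt;
                av_free(pkt1);
                ret = 1;
                break;
            } else if(!block) {
                ret = 0;
                break;
            } else {
                q->cond->wait(q->mutex); // releases the mutex while waiting
            }
        }
        q->mutex->unlock();
        return ret;
    }

Here is my stream-reading thread: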

    count = 1;
    MyApp* inst = worker->app;
    qDebug() << "\n start loadstream";
    w = new QWaitCondition();
    w2 = new QWaitCondition();
    context = avformat_alloc_context();
    inst->threadStarted = true;
    cout << "start of decoding thread";
    cout.flush();


    av_register_all();
    avcodec_register_all();
    avformat_network_init();
    av_log_set_callback(&log_callback);
    AVInputFormat   *pFormat;
    //const char      device[]     = "/dev/video0";
    const char      formatName[] = "mp4";
    cout << "2start of decoding thread";
    cout.flush();



    if (!(pFormat = av_find_input_format(formatName))) {
        printf("can't find input format %s\n", formatName);
        return NULL;
    }
    //open rtsp
    if(avformat_open_input(&context, inst->capturedUrl.data(), pFormat, NULL) != 0){
        cout << "error opening input in decoding thread: " << inst->capturedUrl.data();
        cout.flush();
        return NULL;
    }

    cout << "3start of decoding thread";
    cout.flush();
    // av_dump_format(context, 0, inst->capturedUrl.data(), 0);
    /*   if(avformat_find_stream_info(context,NULL) < 0){
        return EXIT_FAILURE;
    }
     */
    //search video stream
    for(int i =0;i<context->nb_streams;i++){
        if(context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            inst->video_stream_index = i;
    }
    cout << "3z start of decoding thread";
    cout.flush();
    AVFormatContext* oc = avformat_alloc_context();
    av_read_play(context);//play RTSP
    AVDictionary *optionsDict = NULL;
    ccontext = context->streams[inst->video_stream_index]->codec;

    inst->audioc = context->streams[1]->codec; // assumes the audio stream is index 1

    cout << "4start of decoding thread";
    cout.flush();
    codec = avcodec_find_decoder(ccontext->codec_id);
    ccontext->pix_fmt = PIX_FMT_YUV420P;

    AVCodec* audio_codec = avcodec_find_decoder(inst->audioc->codec_id);
    inst->packet = new AVPacket();
    if (!audio_codec) {
        cout << "audio codec not found\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(inst->audioc, audio_codec, NULL) < 0) {
        cout << "could not open codec\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(ccontext, codec, &optionsDict) < 0) exit(1);

    cout << "5start of decoding thread";
    cout.flush();
    inst->pic = avcodec_alloc_frame();

    av_init_packet(inst->packet);

    // decode until the first complete video frame so its dimensions can be read below
    while(av_read_frame(context, inst->packet) >= 0 && inst->keepGoing)
    {
        if(inst->packet->stream_index == 0){ // packet is video

            int check = 0;
            int result = avcodec_decode_video2(ccontext, inst->pic, &check, inst->packet);

            if(check)
                break;
        }
    }



    inst->originalVideoWidth = inst->pic->width;
    inst->originalVideoHeight = inst->pic->height;
    float aspect = (float)inst->originalVideoHeight / (float)inst->originalVideoWidth;
    inst->newVideoWidth = inst->originalVideoWidth;
    int newHeight = (int)(inst->newVideoWidth * aspect);
    inst->newVideoHeight = newHeight;//(int)inst->originalVideoHeight / inst->originalVideoWidth * inst->newVideoWidth;// = new height
    int size = avpicture_get_size(PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);
    uint8_t* picture_buf = (uint8_t*)(av_malloc(size));
    avpicture_fill((AVPicture *) inst->pic, picture_buf, PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);

    picrgb = avcodec_alloc_frame();
    int size2 = avpicture_get_size(PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);
    uint8_t* picture_buf2 = (uint8_t*)(av_malloc(size2));
    avpicture_fill((AVPicture *) picrgb, picture_buf2, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);



    if(ccontext->pix_fmt != PIX_FMT_YUV420P)
    {
        std::cout << "fmt != 420!!!: " << ccontext->pix_fmt << std::endl;//
        // return (EXIT_SUCCESS);//-1;

    }


    if (inst->createForeignWindow(inst->myForeignWindow->windowGroup(),
            "HelloForeignWindowAppIDqq", 0,
            0, inst->newVideoWidth,
            inst->newVideoHeight)) {

    } else {
        qDebug() << "The ForeginWindow was not properly initialized";
    }




    inst->keepGoing = true;

    inst->img_convert_ctx = sws_getContext(inst->originalVideoWidth, inst->originalVideoHeight, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight,
            PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    is = (VideoState*)av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;

    is->audioStream = 1;
    is->audio_st = context->streams[1];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    is->videoStream = 0;
    is->video_st = context->streams[0];

    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3;

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    //av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = pFormat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    is->pictq_mutex = new QMutex();
    is->pictq_cond  = new QWaitCondition();

    is->subpq_mutex = new QMutex();
    is->subpq_cond  = new QWaitCondition();

    is->video_current_pts_time = av_gettime();


    packet_queue_init(&audioq);

    packet_queue_init(&videoq);
    is->audioq = audioq;
    is->videoq = videoq;
    AVPacket* packet2  = new AVPacket();

    ccontext->get_buffer = our_get_buffer;
    ccontext->release_buffer = our_release_buffer;


    av_init_packet(packet2);
    while(inst->keepGoing)
    {


        if(av_read_frame(context,packet2) < 0 && keepGoing)
        {
            printf("bufferframe Could not read a frame from stream.\n");
            fflush( stdout );


        }else {



            if(packet2->stream_index == 0) {
                packet_queue_put(&videoq, packet2);
            } else if(packet2->stream_index == 1) {
                packet_queue_put(&audioq, packet2);
            } else {
                av_free_packet(packet2);
            }


            if(!videoThreadStarted)
            {
                videoThreadStarted = true;
                QThread* thread = new QThread;
                videoThread = new VideoStreamWorker(this);

                // Give QThread ownership of Worker Object
                videoThread->moveToThread(thread);
                connect(videoThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));
                QObject::connect(videoThread, SIGNAL(refreshNeeded()), this, SLOT(refreshNeededSlot()));
                connect(thread, SIGNAL(started()), videoThread, SLOT(doWork()));
                connect(videoThread, SIGNAL(finished()), thread, SLOT(quit()));
                connect(videoThread, SIGNAL(finished()), videoThread, SLOT(deleteLater()));
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

            if(!audioThreadStarted)
            {
                audioThreadStarted = true;
                QThread* thread = new QThread;
                AudioStreamWorker* audioThread = new AudioStreamWorker(this);

                // Give QThread ownership of Worker Object
                audioThread->moveToThread(thread);

                // Connect audioThread error signal to this errorHandler SLOT.
                connect(audioThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));

                // Connects the thread's started() signal to the doWork() slot in audioThread, causing it to start.
                connect(thread, SIGNAL(started()), audioThread, SLOT(doWork()));
                connect(audioThread, SIGNAL(finished()), thread, SLOT(quit()));
                connect(audioThread, SIGNAL(finished()), audioThread, SLOT(deleteLater()));

                // Make sure the thread object is deleted after execution has finished.
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

        }

    } //finished main loop

    return NULL;
}

int MyApp::video_thread() {
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;

    double pts;
    pic = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        pts = 0;

        global_video_pkt_pts2 = packet->pts;
        // Decode video frame
        len1 =  avcodec_decode_video2(ccontext, pic, &frameFinished, packet);
        if(packet->dts == AV_NOPTS_VALUE
                && pic->opaque && *(uint64_t*)pic->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pic->opaque;
        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?

        if(frameFinished) {
            pts = synchronize_video(is, pic, pts);
            actualPts = pts;
            refreshSlot();
        }
        av_free_packet(packet);
    }
    av_free(pic);
    return 0;
}
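synchronize_video isn't shown above; mine is essentially the tutorial's version, which keeps a running video clock (a sketch, assuming the VideoState fields used elsewhere in this post):

    double MyApp::synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
        double frame_delay;

        if(pts != 0) {
            /* if we have a pts, set the video clock to it */
            is->video_clock = pts;
        } else {
            /* if we aren't given a pts, use the current video clock */
            pts = is->video_clock;
        }
        /* update the video clock to when the next frame should show */
        frame_delay = av_q2d(is->video_st->codec->time_base);
        /* if we are repeating a frame, adjust the clock accordingly */
        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
        is->video_clock += frame_delay;
        return pts;
    }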


int MyApp::audio_thread() {
    AVPacket pkt1, *packet = &pkt1;
    ALuint source;
    ALenum format = 0;
    //   ALuint frequency;
    ALenum alError;
    ALint val2;
    ALuint buffers[NUM_BUFFERS];
    int dataSize;


    ALCcontext *aContext;
    ALCdevice *device;
    if (!alutInit(NULL, NULL)) {
        fprintf(stderr, "init alut error\n");
    }
    device = alcOpenDevice(NULL);
    if (device == NULL) {
        fprintf(stderr, "device error\n");
    }

    // Create a context and check it before making it current
    aContext = alcCreateContext(device, NULL);
    if(!aContext) {
        printf("Could not create the OpenAL context!\n");
        return 0;
    }
    alcMakeContextCurrent(aContext);

    alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);

    if(alGetError() != AL_NO_ERROR) {
        cout << "error setting up the OpenAL listener";
        cout.flush();
        return 0;
    }
    alGenBuffers(NUM_BUFFERS, buffers);
    alGenSources(1, &source);
    if(alGetError() != AL_NO_ERROR) {
        cout << "after Could not create buffers or the source.\n";
        cout.flush(  );
        return 0;
    }

    int i;
    double pts;
    int n;


    for(i = 0; i < NUM_BUFFERS; i++)
    {
        if(packet_queue_get(&audioq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        cout << "streamindex=audio \n";
        cout.flush(  );
        //printf("before decode  audio\n");
        //fflush( stdout );
        AVFrame *decodedFrame = NULL;
        int gotFrame = 0;

        if(!(decodedFrame = avcodec_alloc_frame())) {
            cout << "Ran out of memory, stopping the stream...\n";
            cout.flush();
            return -2;
        }

        int  len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
        if(len < 0) {
            cout << "Error while decoding.\n";
            cout.flush();
            is->audio_pkt_size = 0;
            return -3;
        }
        is->audio_pkt_data += len;
        is->audio_pkt_size -= len;

        pts = is->audio_clock;
        // *pts_ptr = pts;
        n = 2 * is->audio_st->codec->channels;
        is->audio_clock += (double)packet->size/
                (double)(n * is->audio_st->codec->sample_rate);
        if(gotFrame) {
            cout << "got audio frame.\n";
            cout.flush(  );
            // We have a buffer ready, send it
            dataSize = av_samples_get_buffer_size(NULL, audioc->channels,
                    decodedFrame->nb_samples, audioc->sample_fmt, 1);

            if(!format) {
                if(audioc->sample_fmt == AV_SAMPLE_FMT_U8 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_U8P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO8;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO8;
                    }
                } else if(audioc->sample_fmt == AV_SAMPLE_FMT_S16 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_S16P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO16;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO16;
                    }
                }

                if(!format) {
                    cout << "OpenAL can't open this format of sound.\n";
                    cout.flush(  );

                    return -4;
                }
            }
            printf("albufferdata audio b4.\n");
            fflush( stdout );
            alBufferData(buffers[i], format, *decodedFrame->data, dataSize, decodedFrame->sample_rate);
            cout << "after albufferdata all buffers \n";
            cout.flush(  );
            av_free_packet(packet);
            //=av_free(packet);
            av_free(decodedFrame);

            if((alError = alGetError()) != AL_NO_ERROR) {
                printf("Error while buffering.\n");

                printAlError(alError);
                return -6;
            }
        }
    }


    cout << "before quoe buffers \n";
    cout.flush();
    alSourceQueueBuffers(source, NUM_BUFFERS, buffers);
    cout << "before play.\n";
    cout.flush();
    alSourcePlay(source);
    cout << "after play.\n";
    cout.flush();
    if((alError = alGetError()) != AL_NO_ERROR) {
        cout << "error strating stream.\n";
        cout.flush();
        printAlError(alError);
        return 0;
    }


    // AVPacket *pkt = &is->audio_pkt;

    while(keepGoing)
    {
        while(packet_queue_get(&audioq, packet, 1)  >= 0) {

            do {
                alGetSourcei(source, AL_BUFFERS_PROCESSED, &val2);
                usleep(SLEEP_BUFFERING);
            } while(val2 <= 0);
            if(alGetError() != AL_NO_ERROR)
            {
                fprintf(stderr, "Error gettingsource :(\n");
                return 1;
            }

            while(val2--)
            {



                ALuint buffer;
                alSourceUnqueueBuffers(source, 1, &buffer);
                if(alGetError() != AL_NO_ERROR)
                {
                    fprintf(stderr, "Error unqueue buffers :(\n");
                    //  return 1;
                }
                AVFrame *decodedFrame = NULL;
                int gotFrame = 0;

                if(!(decodedFrame = avcodec_alloc_frame())) {
                    cout << "Ran out of memory, stopping the stream...\n";
                    cout.flush();
                    return -2;
                }

                int  len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
                if(len < 0) {
                    cout << "Error while decoding.\n";
                    cout.flush(  );
                    is->audio_pkt_size = 0;
                    return -3;
                }

                is->audio_pkt_data += len;
                is->audio_pkt_size -= len;


                if(gotFrame) {
                    pts = is->audio_clock;
                    len = synchronize_audio(is, (int16_t *)is->audio_buf,
                            packet->size, pts);
                    is->audio_buf_size = packet->size;
                    pts = is->audio_clock;
                    // *pts_ptr = pts;
                    n = 2 * is->audio_st->codec->channels;
                    is->audio_clock += (double)packet->size /
                            (double)(n * is->audio_st->codec->sample_rate);
                    if(packet->pts != AV_NOPTS_VALUE) {
                        is->audio_clock = av_q2d(is->audio_st->time_base)*packet->pts;
                    }
                    len = av_samples_get_buffer_size(NULL, audioc->channels,
                            decodedFrame->nb_samples, audioc->sample_fmt, 1);
                    alBufferData(buffer, format, *decodedFrame->data, len, decodedFrame->sample_rate);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error buffering :(\n");
                        return 1;
                    }
                    alSourceQueueBuffers(source, 1, &buffer);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error queueing buffers :(\n");
                        return 1;
                    }
                }
            }

            alGetSourcei(source, AL_SOURCE_STATE, &val2);
            if(val2 != AL_PLAYING)
                alSourcePlay(source);

        }


    }
    qDebug() << "end audiothread";
    return 1;
}
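synchronize_audio isn't shown either; assuming audio is the master clock (my DEFAULT_AV_SYNC_TYPE), the tutorial's version just returns the buffer size unchanged, so mine is roughly:

    int MyApp::synchronize_audio(VideoState *is, int16_t *samples, int samples_size, double pts) {
        /* with audio as the master clock we never add or drop samples here;
           the tutorial only resizes the buffer when audio follows another clock */
        if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
            return samples_size;
        }
        return samples_size; /* (tutorial's sample add/drop logic omitted) */
    }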

void MyApp::refreshSlot()
{


        printf("got frame %d, %d\n", pic->width, ccontext->width);
        fflush( stdout );

        sws_scale(img_convert_ctx, (const uint8_t **)pic->data, pic->linesize,
                0, originalVideoHeight, &picrgb->data[0], &picrgb->linesize[0]);

        printf("rescaled frame %d, %d\n", newVideoWidth, newVideoHeight);
        fflush( stdout );
        //av_free_packet(packet);
        //av_init_packet(packet);

        qDebug() << "waking audio as video finished";
        ////mutex.unlock();
        //mutex2.lock();
        doingVideoFrame = false;
        //doingAudioFrame = false;
        ////mutex2.unlock();


        //mutex2.unlock();
        //w2->wakeAll();
        //w->wakeAll();
        qDebug() << "now woke audio";

        //pic = picrgb;
        uint8_t *srcy = picrgb->data[0];
        uint8_t *srcu = picrgb->data[1];
        uint8_t *srcv = picrgb->data[2];
        printf("got src yuv frame %d\n", &srcy);
        fflush( stdout );
        unsigned char *ptr = NULL;
        screen_get_buffer_property_pv(mScreenPixelBuffer, SCREEN_PROPERTY_POINTER, (void**) &ptr);
        unsigned char *y = ptr;
        unsigned char *u = y + (newVideoHeight * mStride) ;
        unsigned char *v = u + (newVideoHeight * mStride) / 4;
        int i = 0;
        printf("got buffer  picrgbwidth= %d \n", newVideoWidth);
        fflush( stdout );
        for ( i = 0; i < newVideoHeight; i++)
        {
            int doff = i * mStride;
            int soff = i * picrgb->linesize[0];
            memcpy(&y[doff], &srcy[soff], newVideoWidth);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[1];
            memcpy(&u[doff], &srcu[soff], newVideoWidth / 2);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[2];
            memcpy(&v[doff], &srcv[soff], newVideoWidth / 2);
        }
        printf("before posttoscreen \n");
        fflush( stdout );

        video_refresh_timer();
        qDebug() << "end refreshslot";

}

void  MyApp::refreshNeededSlot2()
    {
        printf("blitting to buffer");
        fflush(stdout);

        screen_buffer_t screen_buffer;
        screen_get_window_property_pv(mScreenWindow, SCREEN_PROPERTY_RENDER_BUFFERS, (void**) &screen_buffer);
        int attribs[] = { SCREEN_BLIT_SOURCE_WIDTH, newVideoWidth, SCREEN_BLIT_SOURCE_HEIGHT, newVideoHeight, SCREEN_BLIT_END };
        int res2 = screen_blit(mScreenCtx, screen_buffer, mScreenPixelBuffer, attribs);
        printf("dirty rectangles");
        fflush(stdout);
        int dirty_rects[] = { 0, 0, newVideoWidth, newVideoHeight };
        screen_post_window(mScreenWindow, screen_buffer, 1, dirty_rects, 0);
        printf("done screneposdtwindow");
        fflush(stdout);

    }

void MyApp::video_refresh_timer() {
    testDelay = 0;
    //  VideoState *is = ( VideoState* )userdata;
    VideoPicture *vp;
    //double pts = 0    ;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(false) // was: is->pictq_size == 0 (picture queue not used here)
        {
            testDelay = 1;
            schedule_refresh(is, 1);
        } else {
            // vp = &is->pictq[is->pictq_rindex];

            delay = actualPts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = actualPts;

            is->video_current_pts = actualPts;
            is->video_current_pts_time = av_gettime();
            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = actualPts - ref_clock;

            /* Skip or repeat the frame. Take delay into account.
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;
                }
            }
            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            testDelay = (int)(actual_delay * 1000 + 0.5);
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
            /* show the picture! */
            //video_display(is);


            // SDL_CondSignal(is->pictq_cond);
            // SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        testDelay = 100;
        schedule_refresh(is, 100);

    }
}
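get_audio_clock is the tutorial's as well: the running audio clock minus however much decoded audio is still sitting unplayed in the buffer (a sketch, using the same VideoState fields as above):

    double MyApp::get_audio_clock(VideoState *is) {
        double pts = is->audio_clock;   /* maintained in the audio thread */
        int hw_buf_size = is->audio_buf_size - is->audio_buf_index;
        int bytes_per_sec = 0;
        int n = is->audio_st->codec->channels * 2; /* 2 bytes per S16 sample */
        if(is->audio_st) {
            bytes_per_sec = is->audio_st->codec->sample_rate * n;
        }
        if(bytes_per_sec) {
            pts -= (double)hw_buf_size / bytes_per_sec;
        }
        return pts;
    }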

void MyApp::schedule_refresh(VideoState *is, int delay) {
    qDebug() << "start schedule refresh timer" << delay;
    // instead of SDL_AddTimer, the caller has already stored the delay in
    // testDelay; wake the main thread, which sleeps and then refreshes
    typeOfEvent = FF_REFRESH_EVENT2;
    w->wakeAll();
}
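(I'm aware a one-shot Qt timer might be closer to what SDL_AddTimer does in the tutorial; something like the following is an alternative I haven't tried - schedule_refresh_qt is a hypothetical name:)

    // hypothetical alternative: let the Qt event loop handle the delay
    // instead of waking the main thread, which then calls usleep() itself
    void MyApp::schedule_refresh_qt(int delay_ms) {
        QTimer::singleShot(delay_ms, this, SLOT(refreshNeededSlot2()));
    }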

I am currently waiting on data in a loop in the main thread in the following way:

QMutex mutex;
    mutex.lock();
    while(keepGoing)
    {



        qDebug() << "MAINTHREAD" << testDelay;


        w->wait(&mutex);
        mutex.unlock();
        qDebug() << "MAINTHREAD past wait";

        if(!keepGoing)
        {
            break;
        }
        if(testDelay > 0 && typeOfEvent == FF_REFRESH_EVENT2)
        {
            usleep(testDelay);
            refreshNeededSlot2();
        }
        else   if(testDelay > 0 && typeOfEvent == FF_QUIT_EVENT2)
        {
            keepGoing = false;
            exit(0);
            break;
            // usleep(testDelay);
            // refreshNeededSlot2();
        }
        qDebug() << "MAINTHREADend";
        mutex.lock();

    }
    mutex.unlock();
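One thing I've noticed while writing this up: testDelay is computed in milliseconds (actual_delay * 1000 + 0.5), but usleep() takes microseconds, so the sleep above would be roughly a thousand times too short. If that's relevant, the call would need a conversion, something like:

    // usleep() expects microseconds; testDelay holds milliseconds
    usleep(testDelay * 1000);

Could that be why the frames race ahead while the computed delay keeps growing?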

Please let me know if I need to provide any more relevant code. I'm sorry my code is untidy - I'm still learning C++ and, as previously mentioned, have been modifying this code for over a week now.

I've just added a sample of the output I'm seeing from the print-outs I send to the console. I can't get my head around it (it's almost too complicated for my level of expertise), but when you can see the frames being played and hear the audio, it's very difficult to give up, especially when it took me a couple of weeks to get to this stage.

Please give me a hand if you spot the problem.

MAINTHREAD past wait pts after syncvideo= 1073394046 got frame 640, 640 start video_refresh_timer actualpts = 1.66833 frame lastpts = 1.63497 start schedule refresh timer need to delay for 123

pts after syncvideo= 1073429033 got frame 640, 640 MAINTHREAD loop delay before refresh = 123 start video_refresh_timer actualpts = 1.7017 frame lastpts = 1.66833 start schedule refresh timer need to delay for 115

MAINTHREAD past wait pts after syncvideo= 1073464021 got frame 640, 640 start video_refresh_timer actualpts = 1.73507 frame lastpts = 1.7017 start schedule refresh timer need to delay for 140

MAINTHREAD loop delay before refresh = 140 pts after syncvideo= 1073499008 got frame 640, 640 start video_refresh_timer actualpts = 1.76843 frame lastpts = 1.73507 start schedule refresh timer need to delay for 163

MAINTHREAD past wait pts after syncvideo= 1073533996 got frame 640, 640 start video_refresh_timer actualpts = 1.8018 frame lastpts = 1.76843 start schedule refresh timer need to delay for 188

MAINTHREAD loop delay before refresh = 188 pts after syncvideo= 1073568983 got frame 640, 640 start video_refresh_timer actualpts = 1.83517 frame lastpts = 1.8018 start schedule refresh timer need to delay for 246

MAINTHREAD past wait pts after syncvideo= 1073603971 got frame 640, 640 start video_refresh_timer actualpts = 1.86853 frame lastpts = 1.83517 start schedule refresh timer need to delay for 299

MAINTHREAD loop delay before refresh = 299 pts after syncvideo= 1073638958 got frame 640, 640 start video_refresh_timer actualpts = 1.9019 frame lastpts = 1.86853 start schedule refresh timer need to delay for 358

MAINTHREAD past wait pts after syncvideo= 1073673946 got frame 640, 640 start video_refresh_timer actualpts = 1.93527 frame lastpts = 1.9019 start schedule refresh timer need to delay for 416

MAINTHREAD loop delay before refresh = 416 pts after syncvideo= 1073708933 got frame 640, 640 start video_refresh_timer actualpts = 1.96863 frame lastpts = 1.93527 start schedule refresh timer need to delay for 474

MAINTHREAD past wait pts after syncvideo= 1073742872 got frame 640, 640 MAINTHREAD loop delay before refresh = 474 start video_refresh_timer actualpts = 2.002 frame lastpts = 1.96863 start schedule refresh timer need to delay for 518

MAINTHREAD past wait pts after syncvideo= 1073760366 got frame 640, 640 start video_refresh_timer actualpts = 2.03537 frame lastpts = 2.002 start schedule refresh timer need to delay for 575

  • there may be some other tutorials you could look at http://trac.ffmpeg.org/wiki/Using%20libav* (ffmpeg itself has some vsync option IIRC that might be interesting) – rogerdpack Aug 22 '13 at 17:27
  • I'm currently trying to solve the exact same problem. I'm sad to see 3 years later we haven't found a solution. I'll let you know if I find something. – leetNightshade Mar 19 '16 at 13:01
  • I was creating a queue of frames to process just like you, like in the tutorial, however it seems like all of the queued frames were still being referenced and updated by FFmpeg. Either you have to find a way to get FFmpeg to stop updating those frames, dereference them or what have you, or only decode one video packet at a time when you're ready to draw it. I did the latter: I only decode a packet when it's ready to draw. I had one thread running av_read_frame, queuing the packets to separate audio and video threads. – leetNightshade Mar 20 '16 at 19:36
