
I am writing an OpenCV application where FPS is very important. How do I time the processing time of the main loop to get the current and average FPS? That way, I'll know how fast my application can run. By the way, I'm reading frames with imread off an SSD, so the processor is the bottleneck here!

yash101

1 Answer


you can do something like this. regarding fps, I find that instead of deriving it from the average frame duration, it's slightly better to actually count frames in one-second segments and average those counts. you can control the stability of the averaging (i.e. the interval window) by changing the weights in `_avgfps=0.7*_avgfps+0.3*_fps1sec;`. for example, `_avgfps=0.9*_avgfps+0.1*_fps1sec;` will converge to the actual fps more slowly but will be more resistant to temporary fluctuations, because the contribution of each one-second count decays by the first factor every second. the two weights must sum to exactly 1.
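To see the effect of the weights, here is a small standalone simulation (my own illustration, not part of the answer's timing code below): both update rules are fed a constant count of 30 frames per second, starting from 0. After 10 seconds the 0.7/0.3 rule has reached about 29.1 fps while the 0.9/0.1 rule is still around 19.5, showing the slower convergence.

#include <stdio.h>

int main()
{
    double fast = 0, slow = 0;           // two smoothed estimates, both starting at 0
    for (int sec = 1; sec <= 10; sec++)  // one update per simulated second
    {
        double fps1sec = 30;             // pretend we counted 30 frames this second
        fast = 0.7 * fast + 0.3 * fps1sec;
        slow = 0.9 * slow + 0.1 * fps1sec;
        printf("after %2ds: 0.7/0.3 -> %5.2f, 0.9/0.1 -> %5.2f\n", sec, fast, slow);
    }
    return 0;
}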

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv/cv.h>

#include <sys/timeb.h>
using namespace cv;


#if defined(_MSC_VER) || defined(WIN32)  || defined(_WIN32) || defined(__WIN32__) \
    || defined(WIN64)    || defined(_WIN64) || defined(__WIN64__)
// On Windows (MSVC), CLOCKS_PER_SEC is 1000 and clock() measures wall-clock
// time since process start, so this already returns elapsed milliseconds.
int CLOCK()
{
    return clock();
}
#endif

#if defined(unix)        || defined(__unix)      || defined(__unix__) \
    || defined(linux)       || defined(__linux)     || defined(__linux__) \
    || defined(sun)         || defined(__sun) \
    || defined(BSD)         || defined(__OpenBSD__) || defined(__NetBSD__) \
    || defined(__FreeBSD__) || defined __DragonFly__ \
    || defined(sgi)         || defined(__sgi) \
    || defined(__MACOSX__)  || defined(__APPLE__) \
    || defined(__CYGWIN__) 
// Wall-clock milliseconds from the monotonic clock (not CPU time).
int CLOCK()
{
    struct timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t);
    return (int)(t.tv_sec * 1000 + t.tv_nsec / 1000000);
}
#endif

double _avgdur=0;    // smoothed per-frame processing time (ms)
int _fpsstart=0;     // start of the current one-second counting window
double _avgfps=0;    // smoothed frames-per-second estimate
double _fps1sec=0;   // frames counted so far in the current window

// Exponential moving average of the frame duration: each new sample
// contributes 2%, so old samples decay by a factor of 0.98 per frame.
double avgdur(double newdur)
{
    _avgdur=0.98*_avgdur+0.02*newdur;
    return _avgdur;
}

// Count frames in one-second segments; once a second has elapsed, fold
// the count into the smoothed fps estimate and reset the counter.
double avgfps()
{
    if(CLOCK()-_fpsstart>1000)
    {
        _fpsstart=CLOCK();
        _avgfps=0.7*_avgfps+0.3*_fps1sec;
        _fps1sec=0;
    }

    _fps1sec++;
    return _avgfps;
}

void process(Mat& frame)
{
    imshow("frame",frame);
}

int main(int argc, char** argv)
{
    int frameno=0;
    cv::Mat frame;
    cv::VideoCapture cap(0);
    for(;;)
    {
        cap>>frame;

        // Time only the processing of this frame, not the capture.
        int start=CLOCK();

        if(frame.data)
            process(frame);

        double dur = CLOCK()-start;
        printf("avg time per frame %f ms. fps %f. frameno = %d\n",
               avgdur(dur), avgfps(), frameno++);
        if(waitKey(1)==27)   // Esc quits
            break;
    }
    return 0;
}
Zaw Lin
  • note that this is measuring cpu time only, not walltime (so io/sleeps and such won't show up there). – berak Feb 21 '14 at 07:51
  • on windows, I put a Sleep(1000) inside process() and the output is as expected (wall time). on linux it uses a different function, which should also be correct. – Zaw Lin Feb 21 '14 at 08:12
  • the code has been tested with many ip cameras and gave the correct fps as set in the ip camera settings, so for all practical purposes it reports the correct (wall time) fps. – Zaw Lin Feb 21 '14 at 08:23
  • for a more official argument: the behaviour of `clock()` differs between windows and linux. http://msdn.microsoft.com/en-us/library/4e2ess30.aspx – Zaw Lin Feb 21 '14 at 08:25
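
As the comments note, the platform-dependent behaviour of clock() is exactly why the #ifdef blocks exist. A minimal alternative sketch (my addition, assuming a C++11 compiler, not part of the original answer): std::chrono::steady_clock gives monotonic wall-clock time on every platform and could stand in for the CLOCK() helper above without any platform checks.

#include <chrono>

// Monotonic wall-clock milliseconds; a portable stand-in for CLOCK().
long long CLOCK()
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

If you swap this in, you would also want to widen _fpsstart to long long so the assignment in avgfps() doesn't truncate.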