I was writing some code and noticed that when I use double, the loop takes noticeably longer than when I use uint64_t. Here is the code I was using:

Double:

#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <inttypes.h>

int main () {
    double sum = 0;
    double add = 1;

    // Start measuring time
    struct timeval begin, end;
    gettimeofday(&begin, 0);
    
    int iterations = 1000*1000*1000;
    for (int i=0; i<iterations; i++) {
        sum += add;
    }
    
    // Stop measuring time and calculate the elapsed time
    gettimeofday(&end, 0);
    uint64_t seconds = end.tv_sec - begin.tv_sec;
    uint64_t microseconds = end.tv_usec - begin.tv_usec;
    uint64_t elapsed = (seconds * 1000000) + microseconds;
    
    printf("Result: %.20f\n", sum);
    
    printf("Time measured: %lu microseconds.\n", elapsed);
    
    return 0;
}

uint64_t:

#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <inttypes.h>

int main () {
    uint64_t sum = 0;
    uint64_t add = 1;

    // Start measuring time
    struct timeval begin, end;
    gettimeofday(&begin, 0);
    
    int iterations = 1000*1000*1000;
    for (int i=0; i<iterations; i++) {
        sum += add;
    }
    
    // Stop measuring time and calculate the elapsed time
    gettimeofday(&end, 0);
    uint64_t seconds = end.tv_sec - begin.tv_sec;
    uint64_t microseconds = end.tv_usec - begin.tv_usec;
    uint64_t elapsed = (seconds * 1000000) + microseconds;
    
    printf("Result: %lu\n", sum);
    
    printf("Time measured: %lu microseconds.\n", elapsed);
    
    return 0;
}

Here are the results:

Double: 
Result: 1000000000.00000000000000000000
Time measured: 4669474 microseconds.

uint64_t:
Result: 1000000000
Time measured: 1888623 microseconds.

Why does this happen?

  • `uint64_t` and `double` are different data types. Arithmetic operations with them work differently. Why shouldn't they have different cost? – John Bollinger Mar 22 '23 at 20:54
  • A guess is that you haven't enabled compiler optimizations. Otherwise I would expect the integer version to take zero time. The compiler can tell the result of adding 1 a billion times - without running any code. – BoP Mar 22 '23 at 20:55
  • Integer arithmetic is simpler and faster than floating point. This result should not be surprising. – Barmar Mar 22 '23 at 20:57
  • As @BoP said, any decent compiler will throw out the loop and just generate the code for `sum = iterations;` – Barmar Mar 22 '23 at 21:00
  • I have a more interesting result for you here: https://godbolt.org/z/Wc1a93Woh – 0___________ Mar 22 '23 at 21:05
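
To make the optimization point from the comments concrete, here is a minimal sketch of the integer version with one change that is not in the original code: `sum` is declared `volatile`, which forces the compiler to perform a real load, add, and store on every iteration, so the loop can no longer be collapsed into a single `sum = iterations;` store when optimizations such as `-O2` are enabled.

#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <inttypes.h>

int main () {
    // volatile keeps the loop from being folded away at -O2
    volatile uint64_t sum = 0;
    uint64_t add = 1;

    // Start measuring time
    struct timeval begin, end;
    gettimeofday(&begin, 0);

    int iterations = 1000*1000*1000;
    for (int i=0; i<iterations; i++) {
        sum += add;
    }

    // Stop measuring time and calculate the elapsed time
    gettimeofday(&end, 0);
    uint64_t seconds = end.tv_sec - begin.tv_sec;
    uint64_t microseconds = end.tv_usec - begin.tv_usec;
    uint64_t elapsed = (seconds * 1000000) + microseconds;

    printf("Result: %" PRIu64 "\n", (uint64_t)sum);
    printf("Time measured: %" PRIu64 " microseconds.\n", elapsed);

    return 0;
}

Compiling this with and without -O2 (e.g. gcc -O2) and comparing against the original integer version should show whether the compiler was folding the loop away; if the builds above were unoptimized, both loops really execute one addition per iteration and the remaining gap is the cost of a floating-point add versus an integer add, as the comments point out.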

0 Answers