I want to compare the performance of Unix domain sockets between two processes with that of another IPC.
I have a basic program that creates a socket pair and then calls fork. It then measures the round-trip time (RTT) to send 8192 bytes to the other process and back, using buffer contents that are distinct for each iteration.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
int main(int argc, char **argv) {
int i, pid, sockpair[2];
char buf[8192];
struct timespec tp1, tp2;
assert(argc == 2);
// Create a socket pair using Unix domain sockets with reliable,
// in-order data transmission.
socketpair(AF_UNIX, SOCK_STREAM, 0, sockpair);
// We then fork to create a child process and then start the benchmark.
pid = fork();
if (pid == 0) { // This is the child process.
for (i = 0; i < atoi(argv[1]); i++) {
assert(recv(sockpair[1], buf, sizeof(buf), 0) > 0);
assert(send(sockpair[1], buf, sizeof(buf), 0) > 0);
}
} else { // This is the parent process.
for (i = 0; i < atoi(argv[1]); i++) {
memset(buf, i, sizeof(buf));
buf[sizeof(buf) - 1] = '\0';
assert(clock_gettime(CLOCK_REALTIME, &tp1) == 0);
assert(send(sockpair[0], buf, sizeof(buf), 0) > 0);
assert(recv(sockpair[0], buf, sizeof(buf), 0) > 0);
assert(clock_gettime(CLOCK_REALTIME, &tp2) == 0);
printf("%lu ns\n", tp2.tv_nsec - tp1.tv_nsec);
}
}
return 0;
}
However, I noticed that for each repeated test the elapsed time for the first run (i = 0) is always an outlier:
79306 ns
18649 ns
19910 ns
19601 ns
...
I wonder if the kernel has to do some one-time setup on the first call to send() — for example, lazily allocating the 8192 bytes of kernel buffer space needed to hold the data between the calls to send() and recv()?