I am using Boost to serialize a struct and then send it over TCP to another application (both running on the same machine, for testing purposes).
This was all running well, and the total time to pack, send, and unpack was around 10 ms. Now, however, it has suddenly jumped to 30 ms.
Am I measuring the latency correctly? If so, what could be causing this slowdown, and how can I get the speed back up?
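In case the timestamping itself is part of the problem: would a monotonic clock be more reliable here? A minimal sketch of what I have in mind (monotonic_ns is just a hypothetical helper name), assuming std::chrono::steady_clock (CLOCK_MONOTONIC on Linux) shares its epoch across processes on the same machine, so values taken in the two applications are comparable:

#include <chrono>
#include <cstdint>

// Hypothetical helper: nanoseconds since the steady_clock epoch.
// Assumes the clock is system-wide, so a timestamp taken in the sender
// can be subtracted from one taken in the receiver on the same machine.
inline std::int64_t monotonic_ns()
{
    using namespace std::chrono;
    return duration_cast<nanoseconds>(
               steady_clock::now().time_since_epoch()).count();
}

For now, the code below stamps the frame with boost::posix_time wall-clock milliseconds on both sides.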
struct:
#include <vector>
#include <boost/serialization/vector.hpp> // needed to serialize std::vector

struct frame
{
    long milliseconds;
    std::vector<float> buff;

    template <typename Archive>
    void serialize(Archive& ar, const unsigned int version)
    {
        ar & milliseconds;
        ar & buff;
    }
};
Sending application:
namespace pt = boost::posix_time;

frame data;

static auto const flags = boost::archive::no_header | boost::archive::no_tracking;

boost::asio::io_service ios;
boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::tcp::v4(), 4444);
boost::asio::ip::tcp::acceptor acceptor(ios, endpoint);
boost::asio::ip::tcp::iostream stream;

// The program blocks here until the client connects.
acceptor.accept(*stream.rdbuf());

// Get the current wall-clock time in milliseconds, for the latency check.
pt::ptime current_date_microseconds = pt::microsec_clock::local_time();
long milliseconds = current_date_microseconds.time_of_day().total_milliseconds();

// Fill the vector with dummy data.
std::vector<float> temp(100, 0.0f);
data.buff = temp;

// Stamp the frame with the send time.
data.milliseconds = milliseconds;

// Serialize the frame and send it over the TCP stream.
boost::archive::binary_oarchive archive(stream, flags);
archive << data;
Receiving application:
namespace pt = boost::posix_time;
static auto const flags = boost::archive::no_header | boost::archive::no_tracking;

frame data_in;
std::string ip = "127.0.0.1";
boost::asio::ip::tcp::iostream stream(ip, "4444");
if (!stream)
    throw std::runtime_error("can't connect");

// Deserialize the frame from the TCP stream.
boost::archive::binary_iarchive archive(stream, flags);
archive >> data_in;

// Take the receive time and compute the elapsed milliseconds.
pt::ptime current_date_microseconds = pt::microsec_clock::local_time();
long milliseconds = current_date_microseconds.time_of_day().total_milliseconds();
long timeElapsed = milliseconds - data_in.milliseconds;
std::cout << "tcp took: " << timeElapsed << " ms\n";