The MVCE below, simplified from a real codebase, reproduces the issue.
The server repeatedly sends a "burst" of 5 UDP frames, each filled with 150 bytes of value 0xA5, with little or no delay between frames, then pauses for 1 second.
The client uses the Boost.Asio async_receive_from() function in parallel with a 1-second timer. The client works reasonably well, except when the delay between the UDP frames is "too" small: the correct size (here 150 bytes) is reported, but the buffer/vector does not appear to be updated.
- 5 x 150-byte UDP frames does not seem like much traffic.
- Wireshark DOES see the complete and correct frames being sent.
- If I use a synchronous Boost.Asio receive_from() instead, I see no issues (a minimal version is sketched right after this list).
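For reference, the synchronous variant that works looks roughly like this (a minimal sketch of my test, assuming the same port 1620 and 1500-byte buffer; note it blocks forever when nothing arrives, which is exactly why the async version needs the timer):
#include <iostream>
#include <vector>
#include <boost/asio.hpp>
using namespace boost::asio;
int main()
{
    io_service io;
    ip::udp::socket socket(io, ip::udp::endpoint(ip::udp::v4(), 1620));
    while (true)
    {
        std::vector<unsigned char> vec(1500);
        ip::udp::endpoint from;
        // Blocking call: returns only once a whole datagram has been received
        size_t n = socket.receive_from(buffer(vec), from);
        // Verify the payload the same way as in the async client
        bool ok = true;
        for (size_t i = 0; i < n; ++i)
            if (0xA5 != vec[i]) { ok = false; break; }
        std::cout << "Received " << n << " bytes, " << (ok ? "OK" : "BAD") << std::endl;
    }
    return 0;
}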
I have tried maybe half a dozen times to dive into Boost.Asio without much success in finding a single source of truth or rationale. Similar posts on SO show very different code, which makes it difficult to transpose them to the present case.
Here is the client code (client_with_timer.cc):
#include <iostream>
#include <vector>
#include <string>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace boost::asio;
void asyncReadHandler( const boost::system::error_code& error, std::size_t bytesTransferred );
void timeoutHandler( const boost::system::error_code& error, bool* ptime_out );
// Globals shared between main() and the completion handlers
size_t ReceivedDataSize;
std::string ReadError;
int main(int argc, char * argv[])
{
    io_service io;
    ip::udp::socket socket(io, ip::udp::endpoint(ip::udp::v4(), 1620));
    size_t num = 0;
    while (true)
    {
        std::vector<unsigned char> vec(1500);
        ip::udp::endpoint from;
        socket.async_receive_from(
            boost::asio::buffer(vec),
            from,
            boost::bind(
                asyncReadHandler,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
        bool timeout = false;
        ReceivedDataSize = 0;
        ReadError = "";
        // Creating and starting the timer (by registering the timeout handler)
        deadline_timer timer(io, boost::posix_time::seconds(1));
        timer.async_wait(
            boost::bind(timeoutHandler, boost::asio::placeholders::error, &timeout));
        // Resetting the io_service instance
        io.reset();
        while (io.run_one())
        {
            if (timeout) {
                socket.cancel();
                timer.cancel();
                // Leave the io.run_one() loop
                break;
            }
            else if ((0 != ReceivedDataSize) || (!ReadError.empty())) {
                timer.cancel();
                socket.cancel();
                std::cout << "Received n°" << num++ << ": " << ReceivedDataSize << "\r" << std::flush;
                if (0 != ReceivedDataSize)
                    vec.resize(ReceivedDataSize);
                if (!ReadError.empty())
                    std::cout << "Error: " << ReadError << std::endl;
                bool result = true;
                for (auto x : vec)
                    if (0xA5 != x) { result = false; break; }
                if (false == result) {
                    std::cout << std::endl << "Bad reception" << std::endl << std::hex;
                    for (auto x : vec)
                        std::cout << (int)x << " ";
                    std::cout << std::dec << "\n";
                }
                // Leave the io.run_one() loop
                break;
            }
            else {
                // What shall I do here ???
                // Another potential io.reset() did not bring much
            }
        }
    }
    return 0;
}
void asyncReadHandler(const boost::system::error_code& error, std::size_t bytesTransferred)
{
    // If the read was canceled, simply return...
    if (error == boost::asio::error::operation_aborted) return;
    ReceivedDataSize = 0;
    // If no error, record how many bytes arrived
    if (!error) {
        ReceivedDataSize = bytesTransferred;
    }
    else {
        ReadError = error.message();
    }
}
void timeoutHandler(const boost::system::error_code& error, bool* ptime_out)
{
    // If the timer was canceled, simply return...
    if (error == boost::asio::error::operation_aborted) return;
    // Setting the timeout flag
    *ptime_out = true;
}
Here is the server (server.cc), so that you do not have to roll your own:
#include <iostream>
#include <vector>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <unistd.h>
using namespace boost::asio;
int main(int argc, char * argv[])
{
    io_service io;
    ip::udp::socket socket(io, ip::udp::endpoint(ip::udp::v4(), 0));
    std::vector<char> vec(150, 0xA5);
#if 1
    int separator = 1 * 1000;  // microseconds between frames within a burst
#else
    int separator = 0;         // back-to-back frames
#endif
    while (true)
    {
        // Send a burst of 5 frames, with 'separator' microseconds between them
        for (int i = 0; i < 5; ++i)
        {
            socket.send_to(buffer(vec), ip::udp::endpoint(ip::udp::v4(), 1620));
            if (separator && i < 4)
                usleep(separator);
        }
        // 1 second pause between bursts
        usleep(1000 * 1000);
    }
    return 0;
}
I compiled both with the naive commands below:
g++ client_with_timer.cc -std=c++11 -O2 -Wall -o client_with_timer -lboost_system
g++ server.cc -std=c++11 -O2 -Wall -o server -lboost_system
It produces output like the following when the delay is too small:
[nils@localhost ASIO_C]$ ./client_with_timer
Received n°21: 150
Bad reception
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
Received n°148: 150
Bad reception
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
Received n°166: 150
Bad reception
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
Received n°194: 150
How can I correct the client code to avoid missed frames? Any hints toward a better understanding of the Boost.Asio rationale are welcome.