I am trying to get a feel for concurrency, so I wrote a more flexible version (my_comp()) of Stroustrup's example code (comp4()) from A Tour of C++ (Second Edition), section 15.7.3, page 205. It gives the correct result, but it does not use concurrency to improve the execution time. My question is: why does my_comp() not act as intended, and how can I fix it?
#include <iostream>
#include <chrono>
#include <cmath>
#include <vector>
#include <numeric>
#include <future>
#include <fstream>
using namespace std;
using namespace std::chrono;
constexpr auto sz = 500'000'000;
constexpr int conc_num{ 4 };
double accum(double* beg, double* end, double init)
{
    return accumulate(beg, end, init);
}
double comp4(vector<double>& v)
// From Stroustrup, A Tour of C++ (Second Edition), 15.7.3, page 205
{
    auto v0 = &v[0];
    auto sz = v.size();
    auto f0 = async(accum, v0, v0 + sz / 4, 0.0);
    auto f1 = async(accum, v0 + sz / 4, v0 + sz / 2, 0.0);
    auto f2 = async(accum, v0 + sz / 2, v0 + sz * 3 / 4, 0.0);
    auto f3 = async(accum, v0 + sz * 3 / 4, v0 + sz, 0.0);
    return f0.get() + f1.get() + f2.get() + f3.get();
}
double my_comp(vector<double>& v, int conc = 1)
// My idea of a more flexible version of comp4
{
    if (conc < 1)
        conc = 1;
    auto v0 = &v[0];
    auto sz = v.size();
    vector<future<double>> fv(conc);
    for (int i = 0; i != conc; ++i) {
        auto f = async(accum, v0 + sz * (i / conc), v0 + sz * ((i + 1) / (conc)), 0.0);
        fv[i] = move(f);
    }
    double ret{ 0.0 };
    for (int i = 0; i != fv.size(); ++i) {
        ret += fv[i].get();
    }
    return ret;
}
int main()
{
    cout << "Calculating ..." << "\n\n";
    auto tv0 = high_resolution_clock::now();
    vector<double> vc;
    vc.reserve(sz);
    for (int i = 0; i != sz; ++i) {
        vc.push_back(sin(i)); // Arbitrary test function
    }
    auto tv1 = high_resolution_clock::now();
    auto durtv = duration_cast<milliseconds>(tv1 - tv0).count();
    cout << "vector of size " << vc.size() << ": " << durtv << " msec\n\n";
    ////////////////////////////////////////////
    auto vc_test = vc;
    auto t0 = high_resolution_clock::now();
    auto s1 = accumulate(vc_test.begin(), vc_test.end(), 0.0);
    auto t1 = high_resolution_clock::now();
    auto dur1 = duration_cast<milliseconds>(t1 - t0).count();
    ///////////////////////////////////////////
    vc_test = vc;
    auto tt0 = high_resolution_clock::now();
    auto s2 = my_comp(vc_test, conc_num); // Should be faster
    auto tt1 = high_resolution_clock::now();
    auto dur2 = duration_cast<milliseconds>(tt1 - tt0).count();
    ////////////////////////////////////////////
    vc_test = vc;
    auto ttt0 = high_resolution_clock::now();
    auto s3 = comp4(vc_test); // Really is faster
    auto ttt1 = high_resolution_clock::now();
    auto dur3 = duration_cast<milliseconds>(ttt1 - ttt0).count();
    ///////////////////////////////////////////
    cout << dur1 << " msec\n";
    cout << "Output = " << s1 << " (accumulate)" << "\n\n";
    cout << dur2 << " msec" << " Ratio: " << double(dur2) / double(dur1) << "\n";
    cout << "Output = " << s2 << " (my_comp)" << "\n\n";
    cout << dur3 << " msec" << " Ratio: " << double(dur3) / double(dur1) << "\n";
    cout << "Output = " << s3 << " (comp4)" << "\n\n";
}
Compiled with Visual C++ 2019 (ISO C++17 Standard, /std:c++17), x64 Release. A typical run produces (showing only the timing and output lines):
424 msec
Output = 1.93496 (accumulate)

431 msec Ratio: 1.01651
Output = 1.93496 (my_comp)

117 msec Ratio: 0.275943
Output = 1.93496 (comp4)
I am aware of parallel algorithms and std::reduce. My question is not how to optimize this particular calculation, but rather to learn something about how to write concurrent code that acts as intended.
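For reference, this is the kind of standard-library alternative I mean and am deliberately setting aside (a minimal sketch only, assuming the C++17 <execution> header is available with this toolchain; the helper name reduce_par is just for illustration):

#include <execution>
#include <numeric>
#include <vector>

// Sketch: std::reduce with the C++17 parallel execution policy,
// which sums the elements of v and may use multiple threads internally.
double reduce_par(const std::vector<double>& v)
{
    return std::reduce(std::execution::par, v.begin(), v.end(), 0.0);
}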