Yes, it's certainly possible. Since you don't want any interference between the threads, give each thread its own data to work on; then you don't need to synchronize access to that data with a std::mutex or make it std::atomic. To further minimize interference between threads, align each thread's data according to std::hardware_destructive_interference_size so that data belonging to different threads doesn't end up sharing a cache line (false sharing).
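A rough sketch of that layout (the struct and constant names are just illustrative, and the 64-byte fallback is an assumption about a typical cache-line size, not a guarantee):

#include <cstddef> // std::size_t
#include <cstdint> // std::int64_t
#include <new>     // std::hardware_destructive_interference_size (C++17)

// Prefer the standard constant when the library provides it; otherwise fall
// back to 64 bytes, a common (but not guaranteed) cache-line size.
#ifdef __cpp_lib_hardware_interference_size
constexpr std::size_t cache_line_size = std::hardware_destructive_interference_size;
#else
constexpr std::size_t cache_line_size = 64;
#endif

// One slot per thread; the alignment keeps slots on separate cache lines,
// so threads writing to neighbouring slots don't invalidate each other's caches.
struct alignas(cache_line_size) per_thread_data {
    std::int64_t value;
};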
You can use boost::thread::hardware_concurrency() to get the number of hardware threads available on the current system so that you don't have to hardcode the number of threads to run.
Passing references to the thread is done with std::ref; without it, the thread copies the argument and the worker's reference binds to that copy rather than to your original data.
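A small fragment to illustrate (it assumes the data struct and workerFunc from the full example below):

data d{0};

// Without std::ref the thread would receive and modify its own copy of d,
// as noted above; std::ref makes it work on d itself.
boost::thread t(workerFunc, std::ref(d));
t.join();

std::cout << d.x << '\n'; // reflects the thread's work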
Here I create a std::list of threads and a std::vector of data to work on.
#include <cstdint>    // std::int64_t
#include <functional> // std::ref
#include <iostream>
#include <list>
#include <new>        // std::hardware_destructive_interference_size
#include <vector>

#include <boost/thread.hpp>

unsigned hardware_concurrency() {
    unsigned rv = boost::thread::hardware_concurrency();
    if(rv == 0) rv = 1; // fallback if hardware_concurrency returned 0
    return rv;
}

// if you don't have hardware_destructive_interference_size, use something like
// this instead:
//struct alignas(64) data {
struct alignas(std::hardware_destructive_interference_size) data {
    std::int64_t x;
};

void workerFunc(data& d) {
    // work on the supplied data
    for(int i = 0; i < 1024*1024-1; ++i) d.x -= i;
    for(int i = 0; i < 1024*1024*1024-1; ++i) d.x += i;
}

int main() {
    std::cout << "main: startup" << std::endl;

    size_t number_of_threads = hardware_concurrency();
    std::list<boost::thread> threads;
    std::vector<data> dataset(number_of_threads);

    // create the threads
    for(size_t idx = 0; idx < number_of_threads; ++idx)
        threads.emplace_back(workerFunc, std::ref(dataset[idx]));

    std::cout << "main: waiting for threads" << std::endl;

    // join all threads
    for(auto& th : threads) th.join();

    // display results
    for(const data& d : dataset) std::cout << d.x << "\n";

    std::cout << "main: done" << std::endl;
}
If you are using C++11 (or later), I suggest using std::thread instead.
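For reference, a minimal sketch of the same idea with the standard library only (it reuses the data struct and workerFunc defined above and needs <thread>, <vector> and <functional>):

int main() {
    unsigned n = std::thread::hardware_concurrency();
    if(n == 0) n = 1; // hardware_concurrency may return 0

    std::vector<data> dataset(n);
    std::vector<std::thread> threads;

    // one thread per data element, passed by reference via std::ref
    for(unsigned idx = 0; idx < n; ++idx)
        threads.emplace_back(workerFunc, std::ref(dataset[idx]));

    for(auto& th : threads) th.join();

    for(const data& d : dataset) std::cout << d.x << "\n";
}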