What is the best (fastest) way to send and receive a sparse matrix defined with the Eigen library?
Currently, I create a value, a row, and a column vector (from a pre-defined sparse matrix) holding only the nonzero elements, and I send/receive these three vectors one by one. They are plain std::vector objects:
if (0 == myrank) {
    Sz.insert(0,0) =  0.5;
    Sz.insert(1,1) = -0.5;
    //------ preparing to send ----------
    shape[0] = Sz.rows();
    shape[1] = Sz.cols();
    int size = Sz.nonZeros();
    val.resize(size); inner.resize(size); outer.resize(size);
    cout << "val-size = " << val.size() << endl;
    // copy the nonzeros into the three flat vectors
    int counter = 0;
    for (int k = 0; k < Sz.outerSize(); ++k) {
        for (CrsMatrixType::InnerIterator it(Sz,k); it; ++it) {
            val[counter]   = it.value();
            inner[counter] = it.col(); // column indices
            outer[counter] = it.row(); // row indices
            counter++;
        }
    }
    assert(counter == size);
    MPI_Send(&shape[0], 2,    MPI_INT,    1, 100, MPI_COMM_WORLD);
    MPI_Send(&size,     1,    MPI_INT,    1, 101, MPI_COMM_WORLD);
    MPI_Send(&val[0],   size, MPI_DOUBLE, 1, 102, MPI_COMM_WORLD);
    MPI_Send(&inner[0], size, MPI_INT,    1, 103, MPI_COMM_WORLD);
    MPI_Send(&outer[0], size, MPI_INT,    1, 104, MPI_COMM_WORLD);
}
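(As an aside, one variant I am considering to avoid the copy loop altogether is sending Eigen's internal compressed-storage arrays directly. This assumes the matrix has been compressed with makeCompressed() and that it uses the default int storage index; untested sketch:)

// Untested sketch: send Eigen's internal CRS buffers without copying.
// Assumes Sz.makeCompressed() was called and StorageIndex is the default int.
Sz.makeCompressed();
int nnz = Sz.nonZeros();
MPI_Send(Sz.valuePtr(),      nnz,              MPI_DOUBLE, 1, 102, MPI_COMM_WORLD);
MPI_Send(Sz.innerIndexPtr(), nnz,              MPI_INT,    1, 103, MPI_COMM_WORLD);
MPI_Send(Sz.outerIndexPtr(), Sz.outerSize()+1, MPI_INT,    1, 104, MPI_COMM_WORLD);

On the receiving side, the buffers could presumably be wrapped with Eigen::Map<CrsMatrixType> (Eigen 3.3+) instead of being copied element by element, but I have not benchmarked this.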
Later, I receive them using
if (1 == myrank) {
    //------ preparing to receive ----------
    shape.resize(2);
    int size;
    MPI_Recv(&shape[0], 2,    MPI_INT,    0, 100, MPI_COMM_WORLD, &status);
    MPI_Recv(&size,     1,    MPI_INT,    0, 101, MPI_COMM_WORLD, &status);
    val.resize(size); inner.resize(size); outer.resize(size);
    MPI_Recv(&val[0],   size, MPI_DOUBLE, 0, 102, MPI_COMM_WORLD, &status);
    MPI_Recv(&inner[0], size, MPI_INT,    0, 103, MPI_COMM_WORLD, &status);
    MPI_Recv(&outer[0], size, MPI_INT,    0, 104, MPI_COMM_WORLD, &status);
    Sz.resize(shape[0], shape[1]);
    Sz.reserve(size); // allocate room for the nonzero elements only
    for (int k = 0; k < size; ++k) {              // loop over nonzeros, not outerSize()
        Sz.coeffRef(outer[k], inner[k]) = val[k]; // (row, col) = value
    }
    cout << "my RANK " << myrank << endl;
    cout << Sz << endl;
}
and insert them into the sparse matrix on rank 1.
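One detail I am aware of: Eigen's documentation recommends setFromTriplets over repeated coeffRef inserts for bulk filling, so the receive side could presumably become (minimal sketch, assuming <Eigen/Sparse> and <vector> are included):

// Minimal sketch: rebuild the matrix in one pass via setFromTriplets,
// which Eigen recommends over element-wise coeffRef insertion.
std::vector<Eigen::Triplet<double>> triplets;
triplets.reserve(size);
for (int k = 0; k < size; ++k)
    triplets.emplace_back(outer[k], inner[k], val[k]); // (row, col, value)
Sz.resize(shape[0], shape[1]);
Sz.setFromTriplets(triplets.begin(), triplets.end());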
Is there any better way to do this? Thanks.