
I am broadcasting a vector<vector<int>> (really a 2D matrix) from the root process and receiving it at the remaining processes.

But when I print the result, the output is irregular on every run. Any help would be appreciated.

The matrix is 4x4 (I am working with a fixed size for the time being).

This is the complete code.

#include <mpi.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>

using namespace std;

const int num_rows1 = 4, num_rows2 = 4, num_cols1 = 4, num_cols2 = 4;
const int root = 0;
int i, j, k;

vector<vector<int>> matrix1(num_rows1, vector <int>(num_cols1));
vector<vector<int>> matrix2(num_rows1, vector <int>(num_cols1));

vector<vector<int>> result(num_rows1, vector <int>(num_cols1));
int finale[num_rows1][num_cols2];

vector<vector<int>> transpose_mat(num_rows2, vector <int>(num_cols2));
vector<int> column1;
vector<int> column2;

double start_time, end_time;


//Function generating random matrices
vector<vector<int>> generate_matrix(int nrow, int ncol)
{

    vector<vector<int>> matrix(nrow, vector <int>(ncol));

    for (int i = 0; i < nrow; ++i)
    {
        for (int j = 0; j < ncol; ++j)
        {
            matrix[i][j] = rand() % 16 - 3;   //value in [-3, 12]; 15 * rand() can overflow int
        }
    }

    return matrix;
}


//Function taking the transpose of the matrix passed in
vector<vector<int>> transpose(vector<vector<int>> matrix, int nrow, int ncol)
{
    for (int i = 0; i < nrow; ++i)
        for (int j = 0; j < ncol; ++j)
        {
            transpose_mat[j][i] = matrix[i][j];
        }

    cout << "Transpose" << endl;

    for (int i = 0; i < nrow; ++i)
    {
        for (int j = 0; j < ncol; ++j)
        {
            cout << transpose_mat[i][j] << "     ";
        }
        cout << endl;
    }

    return transpose_mat;
}


//main function
int main(int argc, char *argv[])
{


    MPI_Status status;
    MPI_Request request;
    int tag = 1;
    int rank;
    int world_size;    //Number of processes

    // Initialize the MPI environment
    MPI_Init(&argc, &argv);

    // Get the rank of the process
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Get the name of the processor
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);


    // Get the number of processes
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);


    if (rank == root)
    {

        cout << "I am the root process" << endl;

        cout << "Filling and Displaying the first  matrix" << endl;

        //Filling
        matrix1 = generate_matrix(num_rows1, num_cols1);


        //Display
        for (int i = 0; i < num_rows1; ++i)
        {
            for (int j = 0; j < num_cols1; ++j)
            {
                cout << matrix1[i][j] << "     ";
            }
            cout << endl;
        }

        int x = matrix1.size();
        //The first process (root process -> rank = 0) then broadcasts the first matrix to all the processes
        for (int i = 0; i < num_rows1; ++i)
        {
            for (int j = 0; j < num_cols1; ++j)
            {
                MPI_Bcast(&matrix1[i][0], x, MPI_INT, root, MPI_COMM_WORLD);
            }
        }
    }

    //The second process (-> rank = 1) fills the second matrix with random integers and prints it
    else if (rank == 1)
    {
        srand(time(NULL));

        cout << "Filling and Displaying the second  matrix" << endl;

        //Filling
        matrix2 = generate_matrix(num_rows2, num_cols2);

        //Display
        for (int i = 0; i < num_rows2; ++i)
        {
            for (int j = 0; j < num_cols2; ++j)
            {
                cout << matrix2[i][j] << "     ";
            }
            cout << endl;
        }

        //Compute and display the transpose of the second matrix
        transpose_mat = transpose(matrix2, num_rows2, num_cols2);
    }

    if (rank > root)
    {
        cout << "I am process " << rank << endl;

        int size = matrix1.size();
        result.resize(size);
        for (int i = 0; i < num_rows1; ++i)
        {
            for (int j = 0; j < num_cols1; ++j)
            {
                //Receiving the first matrix broadcast by the root process (-> rank = 0)
                MPI_Bcast(&result[j][0], size, MPI_INT, root, MPI_COMM_WORLD);
            }
        }
    cout << "The received matrix at process " << rank << " is: " << endl;
    for (int i = 0; i < num_rows1; ++i)
    {
        for (int i = 0; i < num_cols1; ++i)
        {
            cout << result[j][i] <<"         ";
        }

        cout << endl;
    }


    MPI_Barrier(MPI_COMM_WORLD);
    //Terminating MPI
    MPI_Finalize();

    return 0;

}

I tried resizing the vector at the receiving processes (i.e. result) as well, but that gave errors too. With the code above, the received 4x4 matrix (result) sometimes contains only the first row of matrix1 and sometimes the program crashes with memory errors.
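
For comparison, here is a minimal sketch of the flattened approach I understand is the usual way to move a vector<vector<int>>: since each row is a separate heap allocation, the rows are packed into one contiguous vector<int> so that a single MPI_Bcast, executed by every rank, can move the whole matrix in one call. The buffer name flat is a placeholder and not part of my code above; I have not verified this in my program yet.

//Sketch: pack the 4x4 matrix into one contiguous buffer and broadcast it once
vector<int> flat(num_rows1 * num_cols1);

if (rank == root)
{
    //Only the root has data to pack
    for (int i = 0; i < num_rows1; ++i)
        for (int j = 0; j < num_cols1; ++j)
            flat[i * num_cols1 + j] = matrix1[i][j];
}

//Every rank (root and receivers alike) must make this identical call
MPI_Bcast(flat.data(), num_rows1 * num_cols1, MPI_INT, root, MPI_COMM_WORLD);

if (rank != root)
{
    //Unpack into the local 2D vector
    for (int i = 0; i < num_rows1; ++i)
        for (int j = 0; j < num_cols1; ++j)
            result[i][j] = flat[i * num_cols1 + j];
}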

  • "I tried resizing the vector" The obvious answer is that you're trying to access vector elements that are out of range - but hard to tell since you've not said what the error is, and you've not shown how you tried to resize the vector. – UKMonkey Oct 19 '16 at 14:49
  • if (rank > root) { cout << "I am process " << rank << endl; int size = matrix1.size(); result.resize(size); for (int i = 0; i < num_rows1; ++i) { for (int j = 0; j < num_cols1; ++j) { //Receiving the first matrix broad casted by root process (-> rank = 0) MPI_Bcast(&result[j][0], size, MPI_INT, root, MPI_COMM_WORLD); //} } } – shaibi Oct 19 '16 at 15:07
  • This is the code at the receiving processes. – shaibi Oct 19 '16 at 15:08
  • What did your debugger tell you about the value of "matrix1.size()" ? – UKMonkey Oct 19 '16 at 15:09
  • it says 4, I tried to run it with setting the value as 16 but then there were some segmentation errors. Moreover I did exactly the same code for arrays and that's working fine. I just wanted to try using vectors since they are better and easy to use. – shaibi Oct 19 '16 at 15:16
  • Have you even tried to work out on a piece of paper how both loops work? The code is completely wrong and has nothing to do with MPI. – Hristo Iliev Oct 19 '16 at 15:31
  • which loop are we talking about ? It was the issue with resizing. Thanks anyways for pointing out. – shaibi Oct 19 '16 at 15:54
  • The inner loop in both the root and non-root code is redundant as each broadcast sends an entire row anyway. It is even the case that in the non-root code the loop that goes over the outer index is the inner one while in the root code is the outer one (might be a desired behaviour, though it seems unlikely). Also, in your original question you were writing past the end of the array in `MPI_Bcast(&result[j][i], 4, MPI_INT, ...);` – Hristo Iliev Oct 19 '16 at 17:57

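Update: following Hristo Iliev's comments, here is a minimal sketch of the row-wise version with the redundant inner loop removed, so that every rank executes the identical sequence of broadcasts. This is untested beyond the fixed 4x4 case, and result is assumed to be pre-sized to 4x4 as in the code above.

//Every rank, root and non-root alike, runs this same loop
for (int i = 0; i < num_rows1; ++i)
{
    //On the root the row already holds the data; on the other ranks it receives it
    int *row = (rank == root) ? matrix1[i].data() : result[i].data();

    //One broadcast per row: the count is the row length (4 ints), not the row count
    MPI_Bcast(row, num_cols1, MPI_INT, root, MPI_COMM_WORLD);
}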