I have to implement the inverse of a matrix algorithm using MPJ express. I succeeded in sending the transpose matrix and the determinant from rootProcess (rank=0) to the other processes.
The root process also assigns the lines interval for each worker process and each worker has to compute the elements of inverse matrix for those lines. This part works, but after computing matrix inverse elements in each worker, I have to send these lines and their index from worker to root process so that the root process will be able to build the result matrix.
This part does not work and the program get stuck. Can you help me to send some arrays and their index (the line for that array in the result matrix) from each worker process to the root?
public static void main(String[] args) {
MPI.Init(args);
comunicator = MPI.COMM_WORLD;
currentProcessorRank = comunicator.Rank();
processorsNumber = comunicator.Size();
if (currentProcessorRank == ROOT_PROCESSOR_RANK) {
rootProcessorAction();
} else {
workerProcessorAction();
}
MPI.Finalize();
}
private static void rootProcessorAction() {
// Util.generateInputData(5);
try (Scanner sc = new Scanner(new FileReader("D:\\workspace\\Inversa-mpj\\in.txt"))) {
final int n = Util.readInt(sc);
final float[][] result = new float[n][n];
int[][] a = Util.readMatrix(n, sc);
final int[][] aTranspus = Inversa.getTranspusa(a, n);
final int[] det = new int[] { Inversa.calculeazaDet(a, n) };
final int[] noLines = new int[] { n };
if (det[0] == 0) {
throw new Exception("Matricea nu e inversabila!");
}
for (int i = 1; i < processorsNumber; i++) {
MPI.COMM_WORLD.Send(noLines, 0, 1, MPI.INT, i, 0);
MPI.COMM_WORLD.Send(det, 0, 1, MPI.INT, i, 0);
MPI.COMM_WORLD.Send(aTranspus, 0, n, MPI.OBJECT, i, 0);
}
computeLinesAssignments(n);
for (int i = 0; i < n; i++) {
int index[] = new int[1];
float line[] = new float[1];
System.out.println("Astept pentru index=" + i);
MPI.COMM_WORLD.Recv(index, 0, 1, MPI.INT, 0, 0);
//MPI.COMM_WORLD.Recv(line, 0, 1, MPI.FLOAT, 0, 0);
result[index[0]] = line;
System.out.println("Am primit de la index=" + index[0]);
}
// Util.displayMatrix(n, result);
} catch (FileNotFoundException e) {
System.err.println("File not found!");
} catch (Exception e) {
System.out.println(e.getMessage());
}
}
private static void computeLinesAssignments(int n) {
final int linesPerThread = n / (processorsNumber - 1);
int rest = n % (processorsNumber - 1);
int start = 0;
int end = 0;
for (int i = 1; i < processorsNumber; i++) {
end = start + linesPerThread;
if (rest != 0) {
end++;
rest--;
}
MPI.COMM_WORLD.Send(new int[] { start }, 0, 1, MPI.INT, i, 0);
MPI.COMM_WORLD.Send(new int[] { end }, 0, 1, MPI.INT, i, 0);
start = end;
}
}
private static void workerProcessorAction() {
int rank = MPI.COMM_WORLD.Rank();
int det[] = new int[1];
int n[] = new int[1];
int start[] = new int[1];
int end[] = new int[1];
MPI.COMM_WORLD.Recv(n, 0, 1, MPI.INT, 0, 0);
MPI.COMM_WORLD.Recv(det, 0, 1, MPI.INT, 0, 0);
int transpusa[][] = new int[n[0]][];
MPI.COMM_WORLD.Recv(transpusa, 0, n[0], MPI.OBJECT, 0, 0);
MPI.COMM_WORLD.Recv(start, 0, 1, MPI.INT, 0, 0);
MPI.COMM_WORLD.Recv(end, 0, 1, MPI.INT, 0, 0);
System.out.println("Procesorul " + rank + " a primit start=" + start[0] + " si end=" + end[0]);
for (int i = start[0]; i < end[0]; i++) {
final float[] line = new float[n[0]];
for (int j = 0; j < n[0]; j++) {
line[j] = (float) (Inversa.calculeazaDet(Inversa.eliminaLinieColoana(transpusa, i, j, n[0]),
(n[0] - 1))) * Inversa.semn(i, j) / det[0];
System.out.println("calculat pentru i=" + i + " si j=" + j + " cu valoarea " + line[j]);
}
MPI.COMM_WORLD.Send(new int[] { i }, 0, 1, MPI.INT, 0, 0);
//MPI.COMM_WORLD.Send(line, 0, 1, MPI.FLOAT, ROOT_PROCESSOR_RANK, 1);
}
}
}