I am learning MPI and I am trying to convert my program from point-to-point communication to MPI collectives.
Below is a fragment of my code for matrix multiplication using MPI point-to-point communication:
int i;
if(rank == 0) {
for(i = 1; i < size; i++){
MPI_Send(&rows, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(&columns, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
}
} else {
MPI_Recv(&rows, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&columns, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
int local_block_size = rows / size;
int process, column_pivot;
if(rank == 0) {
for(i = 1; i < size; i++){
MPI_Send((matrix_1D_mapped + (i * (local_block_size * rows))), (local_block_size * rows), MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
MPI_Send((rhs + (i * local_block_size)), local_block_size, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
}
for(i = 0; i < local_block_size * rows; i++){
matrix_local_block[i] = matrix_1D_mapped[i];
}
for(i = 0; i < local_block_size; i++){
rhs_local_block[i] = rhs[i];
}
} else {
MPI_Recv(matrix_local_block, local_block_size * rows, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
MPI_Recv(rhs_local_block, local_block_size, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
}
I am thinking about replacing MPI_Send with MPI_Bcast. Would that be the correct approach?
For the first communication, the data sent to all receivers is in fact identical, so MPI_Bcast is the correct approach. The second communication distributes different chunks of a larger array to the recipients; this is done as a collective with MPI_Scatter. Note that scatter includes the root rank in the communication, so you can also omit the manual local copy.
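As a rough sketch (using the same variable names as your fragment, and assuming matrix_1D_mapped and rhs are only meaningful on rank 0), the exchange above could become:
MPI_Bcast(&rows, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&columns, 1, MPI_INT, 0, MPI_COMM_WORLD);
int local_block_size = rows / size;
/* the send buffers are only read on the root; the other ranks may pass NULL */
MPI_Scatter(matrix_1D_mapped, local_block_size * rows, MPI_DOUBLE,
            matrix_local_block, local_block_size * rows, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Scatter(rhs, local_block_size, MPI_DOUBLE,
            rhs_local_block, local_block_size, MPI_DOUBLE, 0, MPI_COMM_WORLD);
Every rank, including rank 0, calls the collectives with the same arguments, so the rank checks and the copy loops on the root are no longer needed.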
I need assistance resolving an error in the following code:
#include <iostream>
#include <mpi.h>
using namespace std;
//matrix in two dimension in memory!!
int main(int argc, char** argv)
{
const int WIDTH = 100;
const int HEIGHT = 100;
int id, P;
double tempValue = 0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &P);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
double A[WIDTH][HEIGHT];
double x[HEIGHT], b[WIDTH];
int upperBound, lowerBound = 0;
// Master controls worksharing..
if (id == 0)
{
// Init A & x
for (int i = 0; i < WIDTH; i++)
for (int j = 0; j < HEIGHT; j++)
A[i][j] = 1;
for (int j = 0; j < HEIGHT; j++)
x[j] = 2;
// Send to each node its portion of A to be processed
int portionSize = WIDTH / P;
for (int i = 0; i < P; i++)
{
lowerBound = i * portionSize;
upperBound = (i + 1) * portionSize;
// let the last node process the remainder
if (i == (P - 1))
upperBound += (HEIGHT - portionSize * P);
if (i > 0)// Do not send to master node!!
{
// Send to node i the lower & upper bounds the A portion
//and complete vector x
MPI_Send(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(&A[lowerBound][0], (upperBound - lowerBound) * HEIGHT,
MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
MPI_Send(&x[0], HEIGHT, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
}
}
// master perform part of the job...
for (int i = 0; i < portionSize; i++)
{
tempValue = 0;
for (int j = 0; j < HEIGHT; j++)
tempValue += A[i][j] * x[j];
b[i] = tempValue;
}
//Get the results in order, each node would send their boundaries and data part
for (int i = 1; i < P; i++)
{
MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
MPI_Recv(&P[lowerBound], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
}
// Print the first 2 values to check..
cout << "b[0]=" << b[0] << " b[Width-1]=" << b[WIDTH - 1] << endl;
}
else // the rest of the workers do their parts
{
//Receive the inputs
MPI_Recv(&lowerBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
MPI_Recv(&upperBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
MPI_Recv(&A[lowerBound][0], (upperBound - lowerBound) * WIDTH, MPI_DOUBLE, 0, 0,
MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
MPI_Recv(&x, HEIGHT, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
cout << "Node:" << id << " Received from:" << lowerBound << " to " << upperBound - 1
<< endl;
double* result = new double[upperBound - lowerBound];
//Do the job
for (int i = lowerBound, resultCounter = 0; i < upperBound; i++, resultCounter++)
{
tempValue = 0;
for (int j = 0; j < HEIGHT; j++)
tempValue += A[i][j] * x[j];
result[resultCounter] = tempValue;
}
//send the results
MPI_Send(&lowerBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&upperBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&result[0], upperBound - lowerBound, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
delete[] result;
}
MPI_Finalize();
return 0;
}
When I compile the code in Microsoft Visual Studio 2019, I get this error message:
Error (active) E0142 expression must have pointer-to-object type ConsoleApplication9 C:\Users\m_swe\Desktop\Assignments\Assignments\PrjP2P\MatMPI\MatMPI\Source.cpp 59
Error C2109 subscript requires array or pointer type ConsoleApplication9 C:\Users\m_swe\Desktop\Assignments\Assignments\PrjP2P\MatMPI\MatMPI\Source.cpp 59
I think the problem is on line 59:
MPI_Recv(&P[lowerBound], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
MPI_Recv takes a pointer to a buffer (the first argument) where the incoming data will be stored. Here P is an int holding the number of processes, so P[lowerBound] is not valid, which is what the compiler is complaining about. The data should instead go into a buffer of the matching type, which you could define for the receive loop, for example:
double receivedValues[WIDTH * HEIGHT];
for (int i = 1; i < P; i++)
{
MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&receivedValues[0], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// do your computation here with receivedValues
}
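If the intent is for the master to assemble the complete result vector (which the rest of the code suggests), it could instead receive each worker's slice directly into b, assuming b is meant to hold all WIDTH results:
for (int i = 1; i < P; i++)
{
MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// store worker i's part of the result in place
MPI_Recv(&b[lowerBound], upperBound - lowerBound, MPI_DOUBLE, i, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}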
I was trying to install and run MS-MPI following this tutorial. I have installed MS-MPI and all my system variables are set correctly, see:
I have set all the links in VS.
Having these linked to the project, I would expect MPI to work. In the IDE no syntax errors are shown and the MPI functions are recognized, just as in the next picture. However, compiling a C++ source file with MPI functions produces "undeclared identifier" errors. What am I doing wrong?
Here is my code, if it matters:
/*
* Transmit a message in a 3-process system.
*/
#include <mpi.h>
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#define BUFSIZE 10
int main(int argc, char *argv[])
{ int size, rank;
int slave;
int buf[BUFSIZE];
int n, value;
float rval;
MPI_Status status;
/* Initialize MPI */
MPI_Init(&argc, &argv);
/*
* Determine size in the world group.
*/
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size == 3) { /* Correct number of processes */
/*
* Determine my rank in the world group.
* The master will be rank 0 and the slaves, rank 1...size-1
*/
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) { /* Master */
buf[0] = 5; buf[1] = 1; buf[2] = 8; buf[3] = 7; buf[4] = 6;
buf[5] = 5; buf[6] = 4; buf[7] = 2; buf[8] = 3; buf[9] = 1;
printf("\n Sending the values {5,1,8,7,6,5,4,2,3,1}");
printf("\n -----------------------------");
for (slave = 1; slave < size; slave++) {
printf("\n from master %d to slave %d", rank, slave);
MPI_Send(buf, 10, MPI_INT, slave, 1, MPI_COMM_WORLD);
}
printf("\n\n Receiving the results from slaves");
printf("\n ---------------------------------");
MPI_Recv(&value, 1, MPI_INT, 1, 11, MPI_COMM_WORLD, &status);
printf("\n Minimum %4d from slave 1", value);
MPI_Recv(&value, 1, MPI_INT, 2, 21, MPI_COMM_WORLD, &status);
printf("\n Sum %4d from slave 2", value);
MPI_Recv(&value, 1, MPI_INT, 1, 12, MPI_COMM_WORLD, &status);
printf("\n Maximum %4d from slave 1", value);
MPI_Recv(&rval, 1, MPI_FLOAT, 2, 22, MPI_COMM_WORLD, &status);
printf("\n Average %4.2f from slave 2\n", rval);
}
else {
if (rank == 1) { /* minmax slave */
MPI_Recv(buf, 10, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
value = 100;
for (n = 0; n<BUFSIZE; n++) {
if (value>buf[n]) { value = buf[n]; }
}
MPI_Send(&value, 1, MPI_INT, 0, 11, MPI_COMM_WORLD);
value = 0;
for (n = 0; n<BUFSIZE; n++) {
if (value<buf[n]) { value = buf[n]; }
}
MPI_Send(&value, 1, MPI_INT, 0, 12, MPI_COMM_WORLD);
}
else { /* sumave slave */
MPI_Recv(buf, 10, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
value = 0;
for (n = 0; n<BUFSIZE; n++) {
value = value + buf[n];
}
MPI_Send(&value, 1, MPI_INT, 0, 21, MPI_COMM_WORLD);
rval = (float)value / BUFSIZE;
MPI_Send(&rval, 1, MPI_FLOAT, 0, 22, MPI_COMM_WORLD);
}
}
}
MPI_Finalize();
return(0);
}
After some time I found where the bug was. Everything is fine with my settings; the problem is only the .cpp extension. Changing it to a .c project helped and everything works as expected.
If I want to run it as C++, #include "stdafx.h" must come before mpi.h.
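For the C++ case, this is the include order that works (a sketch; with precompiled headers Visual Studio ignores everything that comes before the stdafx.h include, which is why the MPI declarations were never seen):
#include "stdafx.h" // precompiled header must come first in a .cpp project
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>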
I have the following code:
double * myX;
double * myY;
double * myZ;
int amount;
int count; // number of process
void SomeClass::someMethod(double *x, double *y, double *z, int amount) {
if (myId == 0) {
myX = x;
myY = y;
myZ = z;
amount = amount;
for(int i = 1; i < count; ++i) {
MPI_Send(&amount, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(myX, amount, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
MPI_Send(myY, amount, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
MPI_Send(myX, amount, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
}
}
}
void SomeClass::anotherMethod(void) {
if(myId != 0) {
MPI_Recv(&amount, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(myX, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(myY, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(myZ, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
// rest of code
MPI_Reduce(args);
}
But I have a problem: I get a null buffer pointer when I run this code, or a segmentation fault when I change something, for example putting & before the variable name, and run it again.
MPI_Init and the other required functions are called in another class, where I also create these class objects.
Can someone help me?
MPI_Recv will copy the data it receives into the buffer specified by the first parameter (myX in the case below):
MPI_Recv(myX, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
The problem is that you haven't allocated a buffer to store it in. You could create one with, for example:
myX = new double[amount];
not forgetting to free the memory again afterwards with:
delete[] myX;
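A minimal sketch of anotherMethod with the buffers allocated once the size is known (assuming myX, myY and myZ are not allocated anywhere else on the non-root ranks):
void SomeClass::anotherMethod(void) {
    if (myId != 0) {
        MPI_Recv(&amount, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        // allocate the receive buffers now that amount is known
        myX = new double[amount];
        myY = new double[amount];
        myZ = new double[amount];
        MPI_Recv(myX, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(myY, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(myZ, amount, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    // rest of code
}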
I am trying to simply sum up all variables called "train_hr" and "test_hr" from all 10 processors and store and print the sum on processor 0. I checked to make sure the individual sums are NOT 0 (they are not, they are all in the 1000s). The sum it keeps reporting is 0. I have no idea why. I have looked at many examples of this, and I have done it exactly as instructed. Any help would be appreciated.
double train_hr = 0, test_hr = 0;
double train_hr_global = 0, test_hr_global = 0;
//Master processor
if (my_rank == 0) {
// sends a task to each processor
int curr_task = 0;
for(i = 0; i < num_procs; i++) {
if (curr_task < nsamples_all) {
MPI_Send(&curr_task, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
curr_task++;
}
}
int r;
MPI_Status status;
//keeps sending tasks to processors until there are no more tasks
while (curr_task < nsamples_all) {
MPI_Recv(&r, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &status);
MPI_Send(&curr_task, 1, MPI_INT, status.MPI_SOURCE, 1, MPI_COMM_WORLD);
curr_task++;
}
//tell all processors to stop receiving
int a = -1;
for (i = 0; i < num_procs; i++) {
MPI_Send(&a, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
}
}
//Helper processors
else {
int stop = 1;
while(stop != 0){
int i;
//Receives task OR stop alert from master
MPI_Status status;
MPI_Recv(&i, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
if (i == -1) {
stop = 0;
}
//computations
else{
float r;
//unimportant computations here
train_hr += r;
test_hr += r;
//Tells master processor it is done
MPI_Send(&i, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
}
}
}
//At this point I checked the current values of train_hr and test_hr on each helper processor. They are all non-zero.
MPI_Reduce(&train_hr, &train_hr_global, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&test_hr, &test_hr_global, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
//at this point, the values of train_hr_global and test_hr_global on the master processor (processor 0) are 0 when they should be the sum of all the processes' values.
}
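One thing that stands out (not confirmed in the thread, but it matches the symptom): train_hr and test_hr are doubles, yet the reduce calls pass MPI_INT, so the buffers are reinterpreted with the wrong type and the result is garbage. A sketch with matching datatypes:
MPI_Reduce(&train_hr, &train_hr_global, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&test_hr, &test_hr_global, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);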
I'm writing a program for testing whether numbers are prime. At the beginning I calculate how many numbers to assign to each process, then send this amount to the processes. Next, the calculations are performed and the data is sent back to process 0, which saves the results. The code below works, but when I increase the number of processes my program doesn't speed up. It seems to me that my program doesn't work in parallel. What's wrong? This is my first program in MPI, so any advice is welcome.
I use MPICH2 and I test my program on an Intel Core i7-950.
main.cpp:
if (rank == 0) {
int workers = (size-1);
readFromFile(path);
int elements_per_proc = (N + (workers-1)) / workers;
int rest = N % elements_per_proc;
for (int i=1; i <= workers; i++) {
if((i == workers) && (rest != 0))
MPI_Send(&rest, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
else
MPI_Send(&elements_per_proc, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
}
int it = 1;
for (int i=0; i < N; i++) {
if((i != 0) && ((i % elements_per_proc) == 0))
it++;
MPI_Isend(&input[i], 1, MPI_INT, it, 0, MPI_COMM_WORLD, &send_request);
}
}
if (rank != 0) {
int count;
MPI_Recv(&count, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int j=0; j < count; j++) {
MPI_Recv(&number, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
result = test(number, k);
send_array[0] = number;
send_array[1] = result;
MPI_Send(send_array, 2, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
}
if (rank == 0) {
for (int i=0; i < N; i++) {
MPI_Recv(rec_array, 2, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// save results
}
}
Your implementation probably doesn't scale well to many processes, since you communicate in every step. You currently communicate the numbers and results for each single input, which incurs a large latency overhead. Instead you should think about communicating the input in bulk (i.e., using a single message).
Furthermore, using MPI collective operations (MPI_Scatter/MPI_Gather) instead of loops of MPI_Send/MPI_Recv might increase your performance further.
Additionally, you can utilize the master process to work on a chunk of the input as well.
A much more scalable implementation might then look as follows:
// tell everybody how many elements there are in total
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
// everybody determines how many elements it will work on
// (include the master process)
int num_local_elements = N / size + (rank < N % size ? 1 : 0);
// allocate local size
int* local_input = (int*) malloc(sizeof(int)*num_local_elements);
// distribute the input from master to everybody using MPI_Scatterv
int* counts; int* displs;
if (rank == 0) {
counts = (int*)malloc(sizeof(int) * size);
displs = (int*)malloc(sizeof(int) * size);
for (int i = 0; i < size; i++) {
// the first N % size ranks get one extra element
counts[i] = N / size + (i < N % size ? 1 : 0);
if (i > 0)
displs[i] = displs[i-1] + counts[i-1];
else
displs[i] = 0;
}
// scatter from master
MPI_Scatterv(input, counts, displs, MPI_INT, local_input, num_local_elements, MPI_INT, 0, MPI_COMM_WORLD);
} else {
// receive scattered numbers
MPI_Scatterv(NULL, NULL, NULL, MPI_DATATYPE_NULL, local_input, num_local_elements, MPI_INT, 0, MPI_COMM_WORLD);
}
// perform prime testing
int* local_results = (int*) malloc(sizeof(int)*num_local_elements);
for (int i = 0; i < num_local_elements; ++i) {
local_results[i] = test(local_input[i], k);
}
// gather results back to master process
int* results;
if (rank == 0) {
results = (int*)malloc(sizeof(int)*N);
MPI_Gatherv(local_results, num_local_elements, MPI_INT, results, counts, displs, MPI_INT, 0, MPI_COMM_WORLD);
// TODO: save results on master process
} else {
MPI_Gatherv(local_results, num_local_elements, MPI_INT, NULL, NULL, NULL, MPI_INT, 0, MPI_COMM_WORLD);
}