Fatal error in MPI_Send: Invalid rank, error stack - c++

I am writing code with MPI in C++, and I have done this:
#include <stdio.h>
#include "mpi.h"

#define NMAX 100
#define NTIMES 10

int main(int argc, char **argv)
{
    int rank, size, i, n, lmax;
    double time_start, time, bandwidth, max, a[NMAX];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    time_start = MPI_Wtime();
    n = 0;
    max = 0.0;
    lmax = 0;
    while (n <= NMAX) {
        time_start = MPI_Wtime();
        for (i = 0; i < NTIMES; i++) {
            if (rank == 0) {
                MPI_Send(a, n, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD);
                MPI_Recv(a, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &status);
            }
            if (rank == 1) {
                MPI_Recv(a, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &status);
                MPI_Send(a, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
            }
        }
        time = (MPI_Wtime() - time_start) / (2 * NTIMES);
        bandwidth = (sizeof(double) * n * 1.0 / (1024 * 1024 * time));
        if (max < bandwidth) {
            max = bandwidth;
            lmax = sizeof(double) * n;
        }
        if (rank == 0)
            if (n == 0) printf("latency = %lf seconds\n", time);
            else printf("%d bytes, bandwidth = %lf Mb/s\n", (int)sizeof(double) * n, bandwidth);
        if (n == 0) n = 1;
        else n = 2 * n;
    }
    if (rank == 0) printf("max bandwidth = %lf Mb/s length = %d bytes\n", max, lmax);
    MPI_Finalize();
}
It compiles with no errors, but when I try to run the code, this is what I get:
[1]: https://i.stack.imgur.com/tl0HF.png
Maybe someone knows how I can fix it?

Related

Problem with MPI_Gather: couldn't gather 8 arrays of 32 elements into a big array of 256 elements

I am new to MPI. I have an array of 256 integers and I want to divide each number by 16. It was suggested that I scatter 32 elements to each processor, but I couldn't gather them back, as each return value contains an array of 32.
int globalhistogram[256];
float globalProb[256];
float* localprob = new float[32];
int localpixel[32];

MPI_Scatter(&globalhistogram, 32, MPI_INT, localpixel, 32, MPI_INT, 0, MPI_COMM_WORLD);
for (int i = 0; i < 32; i++)
{
    localprob[i] = (float)localpixel[i] / 16;
}
MPI_Gather(localprob, 32, MPI_FLOAT, &globalprob, 32, MPI_FLOAT, 0, MPI_COMM_WORLD);
I don't understand the issue: the code appears to run correctly after I correct what I assume is a typo, float globalProb[256] -> float globalprob[256].
I agree with @victor-eijkhout about the &globalprob issue, but since &globalprob and globalprob both refer to the same address, it doesn't appear to make a difference here.
If I compile and run the appended code I get the expected answer:
dsh#laptop$ mpicxx -o play play.cpp
dsh#laptop$ mpirun -n 8 ./play
rank 0: globalprob[0] = 0.000000
...
rank 0: globalprob[31] = 31.000000
rank 0: globalprob[32] = 64.000000
...
rank 0: globalprob[255] = 2040.000000
Here's the full code:
#include <stdio.h>
#include <mpi.h>

int main(void)
{
    int rank, size, i;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int globalhistogram[256];
    float globalprob[256];
    float* localprob = new float[32];
    int localpixel[32];

    for (i = 0; i < 256; i++)
    {
        globalhistogram[i] = i;
    }

    MPI_Scatter(&globalhistogram, 32, MPI_INT, localpixel, 32, MPI_INT, 0, MPI_COMM_WORLD);

    for (int i = 0; i < 32; i++)
    {
        localprob[i] = (float)localpixel[i] * (rank + 1);
    }

    MPI_Gather(localprob, 32, MPI_FLOAT, &globalprob, 32, MPI_FLOAT, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        for (i = 0; i < 256; i++)
        {
            printf("rank %d: globalprob[%d] = %f\n", rank, i, globalprob[i]);
        }
    }

    MPI_Finalize();
}
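For reference, a sketch of the scatter and gather calls without the extra & on the array arguments, which is the form the comment about &globalprob presumably suggests (the arrays decay to pointers to their first elements, so both spellings end up passing the same address):
MPI_Scatter(globalhistogram, 32, MPI_INT, localpixel, 32, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Gather(localprob, 32, MPI_FLOAT, globalprob, 32, MPI_FLOAT, 0, MPI_COMM_WORLD);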

Matrix-Vector Multiplication with MPI - compile error

I need assistance to resolve an error in the following code:
#include <iostream>
#include <mpi.h>

using namespace std;

//matrix in two dimension in memory!!
int main(int argc, char** argv)
{
    const int WIDTH = 100;
    const int HEIGHT = 100;
    int id, P;
    double tempValue = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &P);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);

    double A[WIDTH][HEIGHT];
    double x[HEIGHT], b[WIDTH];
    int upperBound, lowerBound = 0;

    // Master controls worksharing..
    if (id == 0)
    {
        // Init A & x
        for (int i = 0; i < WIDTH; i++)
            for (int j = 0; j < HEIGHT; j++)
                A[i][j] = 1;
        for (int j = 0; j < HEIGHT; j++)
            x[j] = 2;

        // Send to each node its portion of A to be processed
        int portionSize = WIDTH / P;
        for (int i = 0; i < P; i++)
        {
            lowerBound = i * portionSize;
            upperBound = (i + 1) * portionSize;
            // let the last node process the remainder
            if (i == (P - 1))
                upperBound += (HEIGHT - portionSize * P);
            if (i > 0) // Do not send to master node!!
            {
                // Send to node i the lower & upper bounds of the A portion
                // and the complete vector x
                MPI_Send(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&A[lowerBound][0], (upperBound - lowerBound) * HEIGHT,
                         MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
                MPI_Send(&x[0], HEIGHT, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
            }
        }
        // master performs part of the job...
        for (int i = 0; i < portionSize; i++)
        {
            tempValue = 0;
            for (int j = 0; j < HEIGHT; j++)
                tempValue += A[i][j] * x[j];
            b[i] = tempValue;
        }
        // Get the results in order; each node sends its boundaries and data part
        for (int i = 1; i < P; i++)
        {
            MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
            MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
            MPI_Recv(&P[lowerBound], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
                     MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        }
        // Print the first 2 values to check..
        cout << "b[0]=" << b[0] << " b[Width-1]=" << b[WIDTH - 1] << endl;
    }
    else // the rest of the workers do their parts
    {
        // Receive the inputs
        MPI_Recv(&lowerBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        MPI_Recv(&upperBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        MPI_Recv(&A[lowerBound][0], (upperBound - lowerBound) * WIDTH, MPI_DOUBLE, 0, 0,
                 MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        MPI_Recv(&x, HEIGHT, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        cout << "Node:" << id << " Received from:" << lowerBound << " to " << upperBound - 1
             << endl;

        double* result = new double[upperBound - lowerBound];
        // Do the job
        for (int i = lowerBound, resultCounter = 0; i < upperBound; i++, resultCounter++)
        {
            tempValue = 0;
            for (int j = 0; j < HEIGHT; j++)
                tempValue += A[i][j] * x[j];
            result[resultCounter] = tempValue;
        }
        // send the results
        MPI_Send(&lowerBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(&upperBound, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(&result[0], upperBound - lowerBound, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
        delete[] result;
    }
    MPI_Finalize();
    return 0;
}
When I compile the code in Microsoft Visual Studio 2019, I get this error message:
Error (active) E0142 expression must have pointer-to-object type ConsoleApplication9 C:\Users\m_swe\Desktop\Assignments\Assignments\PrjP2P\MatMPI\MatMPI\Source.cpp 59
Error C2109 subscript requires array or pointer type ConsoleApplication9 C:\Users\m_swe\Desktop\Assignments\Assignments\PrjP2P\MatMPI\MatMPI\Source.cpp 59
I think the problem is on line 59:
MPI_Recv(&P[lowerBound], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
MPI_Recv takes a pointer to a buffer (the first argument) where the incoming data will be received and stored. In this case it could be a separate variable, which you can define before the for loop, as:
double receivedValues[WIDTH * HEIGHT];
for (int i = 1; i < P; i++)
{
    MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
    MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
    MPI_Recv(&receivedValues[0], (upperBound - lowerBound), MPI_DOUBLE, i, 0,
             MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
    // do your computation here with receivedValues
}
}
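Alternatively, since the workers send doubles, each portion can be received directly into the right slice of the result vector; this is only a sketch, reusing b, lowerBound, upperBound and P from the question's code:
for (int i = 1; i < P; i++)
{
    MPI_Recv(&lowerBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(&upperBound, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    // receive worker i's partial result straight into b[lowerBound..upperBound)
    MPI_Recv(&b[lowerBound], upperBound - lowerBound, MPI_DOUBLE, i, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}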

MPI undefined in Visual Studio 2015

I was trying to install and run MS-MPI following this tutorial. I have installed MS-MPI and all my system variables are set correctly, see:
I have set up all the links in VS.
Having these linked to the project, I would expect MPI to work. In the IDE, no syntax errors are shown and the MPI functions are recognized, just as in the next picture. However, compiling a C++ source file with MPI functions produces "undeclared identifier" errors. What am I doing wrong?
Here is my code, in case it matters:
/*
 * Transmit a message in a 3-process system.
 */
#include <mpi.h>
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>

#define BUFSIZE 10

int main(int argc, char *argv[])
{
    int size, rank;
    int slave;
    int buf[BUFSIZE];
    int n, value;
    float rval;
    MPI_Status status;

    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    /*
     * Determine size in the world group.
     */
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size == 3) { /* Correct number of processes */
        /*
         * Determine my rank in the world group.
         * The master will be rank 0 and the slaves, rank 1...size-1
         */
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0) { /* Master */
            buf[0] = 5; buf[1] = 1; buf[2] = 8; buf[3] = 7; buf[4] = 6;
            buf[5] = 5; buf[6] = 4; buf[7] = 2; buf[8] = 3; buf[9] = 1;
            printf("\n Sending the values {5,1,8,7,6,5,4,2,3,1}");
            printf("\n -----------------------------");
            for (slave = 1; slave < size; slave++) {
                printf("\n from master %d to slave %d", rank, slave);
                MPI_Send(buf, 10, MPI_INT, slave, 1, MPI_COMM_WORLD);
            }
            printf("\n\n Receiving the results from slaves");
            printf("\n ---------------------------------");
            MPI_Recv(&value, 1, MPI_INT, 1, 11, MPI_COMM_WORLD, &status);
            printf("\n Minimum %4d from slave 1", value);
            MPI_Recv(&value, 1, MPI_INT, 2, 21, MPI_COMM_WORLD, &status);
            printf("\n Sum %4d from slave 2", value);
            MPI_Recv(&value, 1, MPI_INT, 1, 12, MPI_COMM_WORLD, &status);
            printf("\n Maximum %4d from slave 1", value);
            MPI_Recv(&rval, 1, MPI_FLOAT, 2, 22, MPI_COMM_WORLD, &status);
            printf("\n Average %4.2f from slave 2\n", rval);
        }
        else {
            if (rank == 1) { /* minmax slave */
                MPI_Recv(buf, 10, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
                value = 100;
                for (n = 0; n < BUFSIZE; n++) {
                    if (value > buf[n]) { value = buf[n]; }
                }
                MPI_Send(&value, 1, MPI_INT, 0, 11, MPI_COMM_WORLD);
                value = 0;
                for (n = 0; n < BUFSIZE; n++) {
                    if (value < buf[n]) { value = buf[n]; }
                }
                MPI_Send(&value, 1, MPI_INT, 0, 12, MPI_COMM_WORLD);
            }
            else { /* sumave slave */
                MPI_Recv(buf, 10, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
                value = 0;
                for (n = 0; n < BUFSIZE; n++) {
                    value = value + buf[n];
                }
                MPI_Send(&value, 1, MPI_INT, 0, 21, MPI_COMM_WORLD);
                rval = (float)value / BUFSIZE;
                MPI_Send(&rval, 1, MPI_FLOAT, 0, 22, MPI_COMM_WORLD);
            }
        }
    }
    MPI_Finalize();
    return(0);
}
After some time I found where the bug was. Everything is fine with my settings; the problem is only with the .cpp extension. Changing it to a .c project helped and everything works as expected.
If I want to build it as C++, the #include "stdafx.h" must come before mpi.h.
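For reference, a minimal sketch of the fixed include order for a .cpp build, assuming the project uses the default precompiled-header setting (MSVC skips everything that appears before the line that includes the precompiled header, which is why the MPI declarations appeared undeclared):
#include "stdafx.h"   // must be first when precompiled headers are enabled
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>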

MPI: Why do I get an error when I use MPI_Barrier in the following example?

I'm new to MPI and I want to work through a problem where I have two arrays, A and B, with 15 elements and 16 processes, each process representing one element of the arrays (I don't use process zero). Array A stores the input data in positions 8...15, which represent the leaves of a tree. In the first step I do a compression over the array: the leaves send their number to the parent, and each parent receives from both of its children, adds the numbers, and sends the result to its own parent. Array A is done when process 1 holds the sum of all the elements. In the second step I do the prefix calculations, starting from process 0 and finishing at the leaves.
To calculate array B, all the other processes need to wait for process 1 to finish its work, and for that I use MPI_Barrier, but I get an error when I execute the code.
int m = 3;
int n = (int)pow(2, m);
int *A = (int*)malloc(2 * n * sizeof(int));
int *B = (int*)malloc(2 * n * sizeof(int));
int id;
MPI_Status status;

A[8] = 4;  A[9] = 8;  A[10] = 5; A[11] = 2;
A[12] = 10; A[13] = 6; A[14] = 9; A[15] = 11;

MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &id);

if (id == 1)
{
    int nr;
    int suma = 0;
    MPI_Recv(&nr, 1, MPI_INT, 2 * id, 99, MPI_COMM_WORLD, &status);
    suma += nr;
    MPI_Recv(&nr, 1, MPI_INT, 2 * id + 1, 99, MPI_COMM_WORLD, &status);
    suma += nr;
    A[id] = suma;
    printf("A[%d]=%d\n", id, A[id]);
    B[id] = A[id];
    printf("B[%d]=%d\n", id, B[id]);
    MPI_Barrier(MPI_COMM_WORLD);
}
else
{
    if (id != 0)
    {
        if (id >= 8)
        {
            MPI_Send(&A[id], 1, MPI_INT, id / 2, 99, MPI_COMM_WORLD);
            printf("%d sent %d to %d\n", id, A[id], id / 2);
            MPI_Barrier(MPI_COMM_WORLD);
        }
        else
        {
            int nr;
            int suma = 0;
            MPI_Recv(&nr, 1, MPI_INT, 2 * id, 99, MPI_COMM_WORLD, &status);
            suma += nr;
            MPI_Recv(&nr, 1, MPI_INT, 2 * id + 1, 99, MPI_COMM_WORLD, &status);
            suma += nr;
            A[id] = suma;
            MPI_Send(&A[id], 1, MPI_INT, id / 2, 99, MPI_COMM_WORLD);
            printf("%d sent %d to %d\n", id, A[id], id / 2);
            MPI_Barrier(MPI_COMM_WORLD);
        }
        if (id % 2 == 1)
        {
            B[id] = B[(id - 1) / 2];
            printf("B[%d]=%d\n", id, B[id]);
        }
        else
        {
            B[id] = B[id / 2] - A[id + 1];
            printf("B[%d]=%d\n", id, B[id]);
        }
    }
    MPI_Finalize();
    free(A);
    return 0;
And I get the following error:
[15]fatal error Fatal error in MPI_Barrier:Other MPI error,
error stack: MPI_Barrier(MPI_COMM_WORLD) failed failed to
attach to a bootstrap queue - 5064:344
What can I do to make the program work?
MPI_Barrier() is a collective operation; it only completes once it has been invoked by all the MPI tasks in the communicator.
If I read your code correctly, task 0 never invokes MPI_Barrier(MPI_COMM_WORLD), so your program will deadlock unless some mechanism in the MPI library aborts it.
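One way to fix it is to hoist the barrier out of the rank-specific branches so that every rank in MPI_COMM_WORLD reaches it, including rank 0. A rough sketch of the question's structure, with the per-branch work reduced to comments:
if (id == 1) {
    /* receive from both children and compute the total sum */
} else if (id != 0) {
    /* leaves send A[id]; inner nodes receive, add, and forward */
}
/* every rank, including 0, must reach the same barrier */
MPI_Barrier(MPI_COMM_WORLD);
/* second phase: top-down prefix computation of B */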

Rank 2 caused collective abort of all ranks

The code below tries to find the maximum of an array using MPI. However, I keep getting the following error:
Rank 2 in job 47 caused collective abort of all ranks.
Exit status of rank 2 : killed by signal 9
Can anyone please tell me what's wrong?
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int myRank, numOfProcesses;
    int source, destination;
    int tag = 0;
    int i = 0, j = 0, k = 0;
    int masterArray[] = {5, 6, 8, 10, 12, 3, 9, -1, 3, 7};
    int max, globalMax = -100000;
    int flag = 0;

    MPI_Init(&argc, &argv);
    MPI_Status status;
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    MPI_Comm_size(MPI_COMM_WORLD, &numOfProcesses);
    printf("Process : %d \n", myRank);

    int masterSize = sizeof(masterArray) / sizeof(int);
    //printf("%d \n", masterSize);
    int slaveSize = masterSize / (numOfProcesses - 1);
    //printf("%d \n", slaveSize);
    int slaveArray[slaveSize];

    if (myRank == 0) {
        for (i = 1; i < numOfProcesses; i++) {
            for (j = 0; j < slaveSize; j++) {
                slaveArray[j] = masterArray[k];
                // printf("%d \n", masterArray[k]);
                k++;
            }
            MPI_Send(slaveArray, slaveSize, MPI_INT, i, tag, MPI_COMM_WORLD);
        }
        for (i = 1; i < numOfProcesses; i++) {
            MPI_Recv(max, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &status);
            if (globalMax < max)
                max = globalMax;
        }
        printf("Global Maximum %d \n", globalMax);
    }
    else {
        MPI_Recv(slaveArray, slaveSize, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        max = slaveArray[0];
        for (i = 0; i < slaveSize; i++) {
            if (slaveArray[i] > max)
                max = slaveArray[i];
        }
        printf("Max in %d %d \n", myRank, max);
        MPI_Send(max, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}
Sending and receiving messages in MPI always works through addresses. In the following:
MPI_Recv(max , 1, MPI_INT, i, tag, MPI_COMM_WORLD, &status);
...
MPI_Send(max , 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
You are passing the value; you must add & to pass the address.
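For example, with the addresses added (keeping the surrounding variables from the question's code):
MPI_Recv(&max, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &status);
...
MPI_Send(&max, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);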
You should also learn to use the appropriate collective operations: MPI_Scatter and MPI_Reduce.
By the way, this assignment is also the wrong way around (it should be globalMax = max):
max = globalMax;
Please also learn to listen to your compiler! Any reasonable compiler at reasonable settings will warn you about passing an integer where an address is expected.
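As a sketch of the collective alternative, assuming each rank already holds its local maximum in max and reusing globalMax and myRank from the question's code:
/* Combine every rank's local max into the global max on rank 0. */
MPI_Reduce(&max, &globalMax, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
if (myRank == 0)
    printf("Global Maximum %d \n", globalMax);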