Bubble sort in C++ using MPI

I am a beginner in MPI and am trying to write a sorting program (bubble sort).
The code works, but it seems like I'm missing something.
Here is the code:
#define N 10
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <stddef.h>
#include "mpi.h"
using namespace std;

int main(int argc, char* argv[])
{
    int i, j, k, rank, size;
    int a[N] = { 10,9,8,7,6,5,4,3,2,1 };
    int c[N];
    int aa[N], cc[N];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Scatter(a, N/size, MPI_INT, aa, N/size, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    int n = N/size;
    for (int i = 0; i < n - 1; i++) {
        for (int j = 0; j < n - i - 1; j++) {
            if (aa[j] > aa[j + 1]) {
                int temp = aa[j];
                aa[j] = aa[j + 1];
                aa[j + 1] = temp;
            }
        }
    }
    for (int i = 0; i < n; i++) {
        cc[i] = aa[i];
    }
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Gather(cc, N/size, MPI_INT, c, N/size, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    cout << cc[9];
    if (rank == 0) {
        cout << "C is look like : " << endl;
        for (int i = 0; i < N; i++) {
            cout << c[i] << " ";
        }
    }
}
Output of the program:
In the end we get errors.
My MPI run is configured with 4 processes.
-858993460 C is look like :
-858993460
-858993460
-858993460
9 10 7 8 5 6 3 4 -858993460 -858993460

There are several issues in your program:
cc[9] is used uninitialized (each rank only fills the first N/size elements of cc).
you only operate on (N/size)*size elements; in your case N=10 and size=4, so only 8 elements are handled. The cure is to use MPI_Scatterv() and MPI_Gatherv() (see the sketch below).
assuming your bubble sort is correct (I did not check that part), your program gathers sorted (sub)arrays, and you cannot naively expect the outcome to be a (full-size) sorted array; a final merge step is still needed.
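As an illustration, here is a minimal sketch of the Scatterv/Gatherv approach; the counts/displs layout and the use of std::sort in place of the bubble sort are my own choices, and the gathered array still needs a final merge:

#define N 10
#include <mpi.h>
#include <algorithm>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int a[N] = { 10,9,8,7,6,5,4,3,2,1 };   // only meaningful on the root

    // spread the remainder N % size over the first ranks
    std::vector<int> counts(size), displs(size);
    for (int r = 0, offset = 0; r < size; ++r) {
        counts[r] = N / size + (r < N % size ? 1 : 0);
        displs[r] = offset;
        offset += counts[r];
    }

    std::vector<int> chunk(counts[rank]);
    MPI_Scatterv(a, counts.data(), displs.data(), MPI_INT,
                 chunk.data(), counts[rank], MPI_INT, 0, MPI_COMM_WORLD);

    std::sort(chunk.begin(), chunk.end());   // stand-in for the bubble sort

    int c[N];
    MPI_Gatherv(chunk.data(), counts[rank], MPI_INT,
                c, counts.data(), displs.data(), MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        // c now holds `size` individually sorted chunks, not one sorted array;
        // a final merge (e.g. repeated std::inplace_merge) is still required
        for (int i = 0; i < N; i++) std::cout << c[i] << " ";
        std::cout << std::endl;
    }
    MPI_Finalize();
}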

MPI_Reduce on different communicators not working as expected

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
using namespace std;

int ceil(int x, int y) {
    return x / y + (x % y > 0);
}

void create_group_and_comm(MPI_Group *world_group, MPI_Group *group, MPI_Comm *comm, int size, bool is_even) {
    int *ranks;
    int count = is_even ? ceil(size, 2) : size / 2;
    ranks = (int *)malloc(count * sizeof(int));
    int i = is_even ? 0 : 1, j = 0;
    while (i < size) {
        ranks[j] = i;
        j++;
        i += 2;
    }
    MPI_Group_incl(*world_group, j, ranks, group);
    MPI_Comm_create(MPI_COMM_WORLD, *group, comm);
    free(ranks);
}

int main(int argc, char *argv[])
{
    int size, rank, *result_odd, *result_even;
    int rank_gr;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Status status;
    MPI_Comm even_comm, odd_comm;
    MPI_Group even_group, odd_group, world_group;
    int *A, *Rows;
    int namelen;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(processor_name, &namelen);
    if (rank == 0)
    {
        A = (int *)malloc(size * size * sizeof(int));
        for (int i = 0; i < size * size; i++) {
            A[i] = rand() / 1000000;
        }
        printf("Initial data:\n");
        for (int i = 0; i < size; i++)
        {
            putchar('|');
            for (int j = 0; j < size; j++)
                printf("%.4d ", A[i*size+j]);
            printf("|\n");
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    else
        MPI_Barrier(MPI_COMM_WORLD);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    create_group_and_comm(&world_group, &even_group, &even_comm, size, true);
    create_group_and_comm(&world_group, &odd_group, &odd_comm, size, false);
    Rows = new int[size];
    MPI_Scatter(A, size, MPI_INT, Rows, size, MPI_INT, 0, MPI_COMM_WORLD);
    result_odd = new int[size];
    result_even = new int[size];
    if (rank % 2 == 0) {
        MPI_Reduce(Rows, result_even, size, MPI_INT, MPI_MAX, 0, even_comm);
    } else {
        MPI_Reduce(Rows, result_odd, size, MPI_INT, MPI_MIN, 0, odd_comm);
    }
    if (rank == 0) {
        printf("Max values for columns on even:\n");
        for (int idx = 0; idx < size; idx++) {
            printf("Column %d: %d\n", idx+1, result_even[idx]);
        }
        printf("Max values for columns on odd:\n");
        for (int idx = 0; idx < size; idx++) {
            printf("Column %d: %d\n", idx+1, result_odd[idx]);
        }
    }
    //MPI_Comm_free(&even_comm);
    //MPI_Comm_free(&odd_comm);
    MPI_Group_free(&even_group);
    MPI_Group_free(&odd_group);
    MPI_Finalize();
    return 0;
}
Hello, I'm writing an application using the MPI library, and I'm trying to create 2 groups, each with its own communicator. One group holds the even-ranked processes and computes the maximum value per column using MPI_Reduce among its members, while the other group computes the minimum for each column of the matrix. For the even ranks MPI_Reduce works as expected, but for the odd-ranked processes it does not work as it should. Can someone tell me what I'm doing wrong? Below is a picture of the problem I described:
image here
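One detail worth illustrating, since the question hinges on it: the root argument of MPI_Reduce is a rank inside the communicator passed to the call, not a rank in MPI_COMM_WORLD. Below is a minimal, self-contained sketch of my own (group names mirror the code above) showing where the odd group's rank 0 actually lives:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Group world_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    int nodd = size / 2;                        // number of odd world ranks
    if (nodd > 0) {
        // odd_group = { 1, 3, 5, ... } taken from MPI_COMM_WORLD
        int *odd_ranks = new int[nodd];
        for (int i = 0; i < nodd; i++) odd_ranks[i] = 2 * i + 1;
        MPI_Group odd_group;
        MPI_Group_incl(world_group, nodd, odd_ranks, &odd_group);

        if (rank == 0) {
            int world_rank = 1, group_rank;
            MPI_Group_translate_ranks(world_group, 1, &world_rank,
                                      odd_group, &group_rank);
            // group_rank is 0: a reduce on the odd communicator with root 0
            // delivers its result to world rank 1, not to world rank 0
            printf("world rank 1 is rank %d inside odd_group\n", group_rank);
        }
        MPI_Group_free(&odd_group);
        delete[] odd_ranks;
    }
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}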

Seg fault while using MPI_Scatter

I have a problem with MPI_Scatter. I don't know how to use it, and my current program crashes with a seg fault when I launch it.
I guess the problem is in the parameters of MPI_Scatter, particularly in calling it with the right operator (& or * or a cast to void), but I've tried almost every combination and nothing actually helped.
#include <iostream>
#include <stdio.h>
#include <mpi.h>

// k = 3, N = 12, 1,2,3, 4,5,6, 7,8,9, 10,11,12
int main(int argc, char **argv) {
    int N, size, myrank;
    int k;
    std::cin >> N;
    std::cin >> k;
    int *mass = new int[N];
    int *recv = new int[k];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    if (myrank == 0) {
        std::cout << "get k and n \n";
        for (int i = 0; i < N; ++i) {
            mass[i] = i;
            std::cout << i << " written\n";
        }
    }
    MPI_Scatter(mass, k, MPI_INT, recv, k, MPI_INT, 0, MPI_COMM_WORLD);
    int sum = 0;
    std::cout << "myrank" << myrank << '\n';
    for (int i = 0; i < k; ++i) {
        std::cout << recv[i] << '\n';
    }
    MPI_Finalize();
    return 0;
}
When I launch this code, it prints this:
N = 12
k = 3
get k and n
0 written
1 written
2 written
3 written
4 written
5 written
6 written
7 written
8 written
9 written
10 written
11 written
myrank0
0
1
2
myrank1
myrank3
myrank2
[1570583203.522390] [calc:32739:0] mpool.c:38 UCX WARN object 0x7fe1f08b2f60 was not returned to mpool mm_recv_desc
[1570583203.523214] [calc:32740:0] mpool.c:38 UCX WARN object 0x7f4643986f60 was not returned to mpool mm_recv_desc
[1570583203.524205] [calc:32741:0] mpool.c:38 UCX WARN object 0x7f22535d4f60 was not returned to mpool mm_recv_desc
MPI typically only forwards stdin to rank 0, so N and k are not correctly set on the other ranks.
Here is a working version of your program:
#include <iostream>
#include <cassert>
#include <stdio.h>
#include <mpi.h>

// k = 3, N = 12, 1,2,3, 4,5,6, 7,8,9, 10,11,12
int main(int argc, char **argv) {
    int k, N, size, myrank;
    int *mass = NULL;   // only allocated and used on rank 0
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    if (myrank == 0) {
        std::cout << "get k and n \n";
        std::cin >> N;
        std::cin >> k;
        assert(N >= k * size);
        mass = new int[N];
        for (int i = 0; i < N; ++i) {
            mass[i] = i;
            std::cout << i << " written\n";
        }
    }
    // every rank needs k before it can allocate its receive buffer
    MPI_Bcast(&k, 1, MPI_INT, 0, MPI_COMM_WORLD);
    int *recv = new int[k];
    MPI_Scatter(mass, k, MPI_INT, recv, k, MPI_INT, 0, MPI_COMM_WORLD);
    std::cout << "myrank" << myrank << '\n';
    for (int i = 0; i < k; ++i) {
        std::cout << recv[i] << '\n';
    }
    MPI_Finalize();
    return 0;
}

Sum of the numbers 1 to 1000 in parallel

The following code uses 2n CPUs to calculate the sum of 1 to 1000. Each processor calculates a portion of this sum and independently displays its output.
The partial results from all processors are collected and aggregated by the first processor, and the final result is displayed.
#include <iostream>
#include <stdio.h>
#include <mpi.h>

static int MyNode, Nodes;
using namespace std;

int main(int* argc, char** argv[])
{
    MPI_Init(argc, argv);
    MyNode = MPI_Comm_rank(MPI_COMM_WORLD, &MyNode);
    Nodes = MPI_Comm_size(MPI_COMM_WORLD, &Nodes);
    MPI_Status status;
    int sum = 0;
    int accum = 0;
    int FIndex = 1000 * MyNode / Nodes + 1;
    int LIndex = 1000 * (MyNode + 1) / Nodes;
    for (int I = FIndex; I <= LIndex; I = I + 1)
        sum += I;
    if (MyNode != 0)
        MPI_Send(&sum, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    else
        for (int J = 1; J < Nodes; J = J + 1) {
            MPI_Recv(&accum, 1, MPI_INT, J, 1, MPI_COMM_WORLD, &status);
            sum += accum;
        }
    if (MyNode == 0) {
        cout << "Total Nodes is " << Nodes << ".The sum from 1 to 1000 is: " << sum << endl;
    }
    MPI_Finalize();
    return 0;
}
After running, I encounter a problem: integer division by zero (in MyNode / Nodes).
Why are MyNode and Nodes zero?
Just pass references to MyNode and Nodes instead of assigning the return values to them:
MPI_Comm_rank(MPI_COMM_WORLD, &MyNode);
MPI_Comm_size(MPI_COMM_WORLD, &Nodes);
MPI_Comm_size returns MPI_SUCCESS on success. Otherwise, the return value is an error code.
The following calls therefore return error codes, not the rank or the size:
MyNode = MPI_Comm_rank(MPI_COMM_WORLD, &MyNode);
Nodes = MPI_Comm_size(MPI_COMM_WORLD, &Nodes);
Since you are storing the error code in MyNode and Nodes (and in this case there is no error), the rank and size that MPI writes into them are immediately overwritten with 0 (MPI_SUCCESS).
Change it to this:
int err;
err = MPI_Comm_rank(MPI_COMM_WORLD, &MyNode);
err = MPI_Comm_size(MPI_COMM_WORLD, &Nodes);
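Putting that fix in context, here is a minimal sketch of the corrected setup (only the initialization differs from the question; the partial-sum and send/receive part stays as it was and is omitted here):

#include <iostream>
#include <mpi.h>
static int MyNode, Nodes;
using namespace std;

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    // the rank and size come back through the output arguments;
    // the return value is only an error code (MPI_SUCCESS on success)
    MPI_Comm_rank(MPI_COMM_WORLD, &MyNode);
    MPI_Comm_size(MPI_COMM_WORLD, &Nodes);

    int FIndex = 1000 * MyNode / Nodes + 1;   // Nodes is no longer zero here
    int LIndex = 1000 * (MyNode + 1) / Nodes;
    cout << "rank " << MyNode << " sums " << FIndex << " to " << LIndex << endl;
    // ... per-rank partial sum and MPI_Send/MPI_Recv as in the question ...
    MPI_Finalize();
    return 0;
}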

MPI - How to partition and communicate my array portions between master and worker processes

I am having a problem executing my master/worker MPI program.
The goal is to have the master pass portions of the integer array to the workers, have the workers sort their portions, and then return their portions to the master process, which then combines them into finalArray[].
I think it has something to do with how I'm passing the portions of the array between processes, but I can't seem to think of anything new to try.
My code:
#include <mpi.h>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <iostream>
#include <memory>

int compare(const void * a, const void * b) // used for quick sort method
{
    if (*(int*)a < *(int*)b) return -1;
    if (*(int*)a > *(int*)b) return 1;
    return 0;
}

const int arraySize = 10000;

int main(int argc, char ** argv)
{
    int rank;
    int numProcesses;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);
    const int PART = floor(arraySize / (numProcesses - 1));
    auto start = std::chrono::high_resolution_clock::now(); //start timer
    //================================= MASTER PROCESS =================================
    if (rank == 0)
    {
        int bigArray[arraySize];
        int finalArray[arraySize];
        for (int i = 0; i < arraySize; i++) //random number generator
        {
            bigArray[i] = rand();
        }
        for (int i = 0; i < numProcesses - 1; i++)
        {
            MPI_Send(&bigArray, PART, MPI_INT, i + 1, 0, MPI_COMM_WORLD); // send elements of the array
        }
        for (int i = 0; i < numProcesses - 1; i++)
        {
            std::unique_ptr<int[]> tmpArray(new int[PART]);
            MPI_Recv(&tmpArray, PART, MPI_INT, i + 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); //receive sorted array from workers
            for (int k = 0; k < PART; k++)
            {
                finalArray[PART * i + k] = tmpArray[k];
            }
        }
        for (int m = 0; m < arraySize; m++)
        {
            printf(" Sorted Array: %d \n", finalArray[m]); //print my sorted array
        }
    }
    //================================ WORKER PROCESSES ===============================
    if (rank != 0)
    {
        std::unique_ptr<int[]> tmpArray(new int[PART]);
        MPI_Recv(&tmpArray, PART, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); //receive data into local initialized array
        qsort(&tmpArray, PART, sizeof(int), compare); // quick sort
        MPI_Send(&tmpArray, PART, MPI_INT, 0, 0, MPI_COMM_WORLD); //send sorted array back to rank 0
    }
    MPI_Barrier(MPI_COMM_WORLD);
    auto end = std::chrono::high_resolution_clock::now(); //end timer
    std::cout << "process took: "
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count() //prints timer
              << " nanoseconds\n ";
    MPI_Finalize();
    return 0;
}
I am fairly new to MPI and C++, so any advice on either subject related to this problem is extremely helpful. I realize there may be many problems with this code, so thank you for all the help in advance.
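Not a full answer, but as a sketch of the passing itself: MPI_Send/MPI_Recv want the raw data pointer (tmpArray.get(), not &tmpArray) and a per-chunk offset into the big array. The structure below is only illustrative and assumes at least two ranks:

#include <mpi.h>
#include <algorithm>
#include <cstdlib>
#include <memory>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, numProcesses;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcesses);

    const int arraySize = 10000;
    const int PART = arraySize / (numProcesses - 1);   // assumes >= 2 ranks

    if (rank == 0) {
        std::unique_ptr<int[]> bigArray(new int[arraySize]);
        for (int i = 0; i < arraySize; i++) bigArray[i] = rand();

        // each worker gets a different offset, not always the start of the array
        for (int i = 0; i < numProcesses - 1; i++)
            MPI_Send(bigArray.get() + PART * i, PART, MPI_INT,
                     i + 1, 0, MPI_COMM_WORLD);

        std::unique_ptr<int[]> finalArray(new int[arraySize]);
        for (int i = 0; i < numProcesses - 1; i++)
            MPI_Recv(finalArray.get() + PART * i, PART, MPI_INT,
                     i + 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        // the chunks come back individually sorted; merging them, and covering
        // the arraySize % (numProcesses - 1) leftover elements, is still needed
    } else {
        std::unique_ptr<int[]> tmpArray(new int[PART]);
        // .get() passes the data pointer, not the address of the smart pointer
        MPI_Recv(tmpArray.get(), PART, MPI_INT, 0, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        std::sort(tmpArray.get(), tmpArray.get() + PART);
        MPI_Send(tmpArray.get(), PART, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}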

MPI_Bcast: one of the processes does not receive

I have a problem with MPI_Bcast. I want to send an array that stores how many numbers each process gets to the other processes, but a seemingly random process (rank 2 and the second-to-last one) doesn't receive anything and crashes. The count per process can differ by about 1. Can anybody help me?
#include <stdio.h> // printf
#include <mpi.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include "EasyBMP.h"
using namespace std;

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int* per_process = new int[size];
    for (int i = 0; i < size; i++) {
        per_process[i] = 0;
    }
    if (rank == 0) {
        for (int i = 0; i < size; i++) {
            int default_for_process = 12 / size;
            int rest = 12 % size;
            if (i < rest) {
                default_for_process++;
            }
            per_process[i] = default_for_process;
        }
    }
    MPI_Bcast(&per_process, size, MPI_INT, 0, MPI_COMM_WORLD);
    for (int i = 0; i < size; i++) {
        cout << rank << " " << per_process[i];
    }
    cout << endl;
    MPI_Finalize();
    return 0;
}
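For reference, a minimal sketch of my own showing a broadcast of a heap-allocated array: the buffer argument must be the data pointer itself (per_process), not the address of the pointer variable (&per_process). The rank-0 fill just mirrors the 12 / size split from the question:

#include <mpi.h>
#include <iostream>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *per_process = new int[size]();   // zero-initialized on every rank
    if (rank == 0)
        for (int i = 0; i < size; i++)
            per_process[i] = 12 / size + (i < 12 % size ? 1 : 0);

    // per_process already *is* the pointer to the data;
    // &per_process would broadcast the pointer variable itself
    MPI_Bcast(per_process, size, MPI_INT, 0, MPI_COMM_WORLD);

    for (int i = 0; i < size; i++)
        std::cout << rank << " " << per_process[i] << " ";
    std::cout << std::endl;

    delete[] per_process;
    MPI_Finalize();
    return 0;
}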