In my parallel programming book, I came across this code, and the book says the slaves generate the data set; however, I think the master actually generates the data set.
This loop in particular is why I believe the master generates the data set:
for (i=0; i < ARRAY_SIZE; i++)
    numbers[i] = i;
Can someone confirm whether the master or the slaves generate the data set?
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#define TRIALS 20
#define ARRAY_SIZE 1000000
int main(int argc, char *argv[])
{
int myid, numprocs;
double startwtime, endwtime;
int namelen;
int* numbers = new int[ARRAY_SIZE];
int i, j, sum, part_sum;
int s, s0, startIndex, endIndex;
double totalTime;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d on %s\n", myid, processor_name);
fflush(stderr);
for (i=0; i < ARRAY_SIZE; i++)
numbers[i] = i;
if (myid == 0)
{
s = (int) floor(ARRAY_SIZE/numprocs);
s0 = s + ARRAY_SIZE%numprocs;
//printf("s=%d , s0= %d\n", s, s0);
}
MPI_Bcast(&s, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&s0, 1, MPI_INT, 0, MPI_COMM_WORLD);
startIndex = s0 + (myid - 1)*s;
endIndex = startIndex + s;
totalTime = 0;
for (j = 1; j <= TRIALS; j++)
{
if (myid == 0)
{
startwtime = MPI_Wtime();
}
sum = 0;
part_sum = 0;
if (myid == 0) // master
{
// compute sum of master's numbers
for (i = 0; i < s0; i++)
{
part_sum += numbers[i];
}
}
else
{
for (i = startIndex; i < endIndex; i++)
{
part_sum += numbers[i];
}
}
MPI_Reduce(&part_sum, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (myid == 0)
{
double runTime;
endwtime = MPI_Wtime();
runTime = endwtime - startwtime;
printf("Trial %d : Execution time (sec) = %f\n", j, runTime);
printf("Sum = %d \n", sum);
totalTime += runTime;
}
} // end for
if (myid == 0)
printf("Average time for %d trials = %f", TRIALS, totalTime/TRIALS);
MPI_Finalize();
}
Both the master and the slaves generate the entire array. You have to remember that your program runs on all nodes, and the loop in question doesn't distinguish between master and slaves, so each process fills in its own copy of numbers. The wording in your book isn't wrong, but it could be clearer. :)
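If the intent were for only the master to generate the data and then share it with the slaves, a minimal sketch (my own variant, not the book's code, reusing myid, numbers and ARRAY_SIZE from the program above) would guard the initialization loop and broadcast the array afterwards:

// Sketch: only rank 0 fills the array, then every rank receives a copy.
if (myid == 0)
{
    for (i = 0; i < ARRAY_SIZE; i++)
        numbers[i] = i;
}
// All ranks must call the broadcast; afterwards each rank holds the same data.
MPI_Bcast(numbers, ARRAY_SIZE, MPI_INT, 0, MPI_COMM_WORLD);

For this particular benchmark, letting every rank generate its own copy avoids that extra communication, which is presumably why the book's code does it that way.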
I have written a C program to run on MPI with 4 nodes.
The program reads in an integer N, the number of elements. This is then broadcast (MPI_Bcast) to the other nodes, and the value is used to dynamically allocate arrays of N elements on each node.
I have tried running this program with inputs from 64 up to 1 million, and it works fine.
When I input 10 million or higher, MPI crashes and occasionally gives the following error:
Fatal error in MPI_Bcast: Other MPI error, error stack:
MPI_Bcast(buf=0x000000000067FD74, count=1, MPI_INT, root=0, MPI_COMM_WORLD) failed
failed to attach to a bootstrap queue - 6664:280
10 million is well within the range of an int, so I'm not sure why this is occurring.
The code is below:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <time.h>

int main(int argc, char *argv[]){
    int process_Rank, size_Of_Cluster;
    int number_of_elements;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size_Of_Cluster);
    MPI_Comm_rank(MPI_COMM_WORLD, &process_Rank);

    if(process_Rank == 0){
        printf("Enter the number of elements:\n");
        fflush(stdout);
        scanf("%d", &number_of_elements);
    }

    MPI_Bcast(&number_of_elements,1,MPI_INT, 0, MPI_COMM_WORLD);

    int *outputs = (int*)malloc(number_of_elements * sizeof(int));
    unsigned long long chunk_size = number_of_elements/ size_Of_Cluster;
    int my_input[chunk_size], my_output[chunk_size];

    for(int i = 0; i < number_of_elements; i++){
        outputs[i] = i+1;
    }

    MPI_Barrier(MPI_COMM_WORLD);
    clock_t begin = clock();

    MPI_Scatter(outputs, chunk_size, MPI_INT, &my_input, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);
    for(int i = 0; i <= chunk_size; i++){
        my_output[i] = my_input[i];
    }
    MPI_Gather(&my_output, chunk_size, MPI_INT, outputs, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);

    int iterate_terms[5] = {2,4,8,4,2};
    int starting_terms[5] = {1,3,7,3,1};
    int subtract_terms[5] = {1,2,4,0,0};
    int adding_terms[5] = {0,0,0,2,1};

    for(int j = 0; j < 5; j++){
        MPI_Scatter(outputs, chunk_size, MPI_INT, &my_input, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);
        for(int i = starting_terms[j]; i <= chunk_size; i+= iterate_terms[j]){
            my_output[i+adding_terms[j]] += my_input[i-subtract_terms[j]];
        }
        MPI_Gather(&my_output, chunk_size, MPI_INT, outputs, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if(process_Rank == 0){
        for(int i = chunk_size-1; i < number_of_elements; i+=chunk_size){
            outputs[i+1] += outputs[i];
            outputs[i+2] += outputs[i];
            outputs[i+3] += outputs[i];
        }
        clock_t end = clock();
        double time_spent = (double)(end-begin) / CLOCKS_PER_SEC;
        for(int i = 0; i < number_of_elements; i++){
            printf("%d \n", outputs[i]);
            fflush(stdout);
        }
        printf("took %f", time_spent);
        fflush(stdout);
    } else {
        clock_t end = clock();
    }

    MPI_Finalize();
    return 0;
}
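One detail worth noting about the code above: my_input and my_output are variable-length arrays on the stack. At 10 million elements, chunk_size is about 2.5 million ints per buffer (roughly 10 MB each), which can easily exceed the default stack size; the <= loop bounds also write one element past the end of those buffers. Below is a heap-allocated sketch of the same buffers (an assumption about the cause, not a confirmed fix):

// Sketch: allocate the per-rank buffers on the heap instead of as stack VLAs.
// chunk_size ints can be several megabytes when N is 10 million or more.
int *my_input  = (int*)malloc(chunk_size * sizeof(int));
int *my_output = (int*)malloc(chunk_size * sizeof(int));
if (my_input == NULL || my_output == NULL) {
    fprintf(stderr, "allocation failed\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
}
// ... use the buffers exactly as before, passing my_input / my_output
// (not &my_input / &my_output, since they are already pointers) ...
free(my_input);
free(my_output);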
Good day. I am having some issues running an MPI program that multiplies matrices.
This is the code (it is not my code); I got it from http://dkl.cs.arizona.edu/teaching/csc522-fall16/examples/hybrid-openmp-mm.c
I would be very grateful if you could help me.
I have also looked for similar problems and solutions, but they didn't solve my problem.
#include <omp.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG 13

int main(int argc, char* argv[]) {
    double** A, ** B, ** C, * tmp;
    double startTime, endTime;
    int numElements, offset, stripSize, myrank, numnodes, N, i, j, k;
    int numThreads, chunkSize = 10;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &numnodes);

    N = atoi(argv[1]);
    numThreads = atoi(argv[2]); // difference from MPI: how many threads/rank?
    omp_set_num_threads(numThreads); // OpenMP call to set threads per rank

    // allocate A, B, and C --- note that you want these to be
    // contiguously allocated. Workers need less memory allocated.
    if (myrank == 0) {
        tmp = (double*)malloc(sizeof(double) * N * N);
        A = (double**)malloc(sizeof(double*) * N);
        for (i = 0; i < N; i++)
            A[i] = &tmp[i * N];
    }
    else {
        tmp = (double*)malloc(sizeof(double) * N * N / numnodes);
        A = (double**)malloc(sizeof(double*) * N / numnodes);
        for (i = 0; i < N / numnodes; i++)
            A[i] = &tmp[i * N];
    }

    tmp = (double*)malloc(sizeof(double) * N * N);
    B = (double**)malloc(sizeof(double*) * N);
    for (i = 0; i < N; i++)
        B[i] = &tmp[i * N];

    if (myrank == 0) {
        tmp = (double*)malloc(sizeof(double) * N * N);
        C = (double**)malloc(sizeof(double*) * N);
        for (i = 0; i < N; i++)
            C[i] = &tmp[i * N];
    }
    else {
        tmp = (double*)malloc(sizeof(double) * N * N / numnodes);
        C = (double**)malloc(sizeof(double*) * N / numnodes);
        for (i = 0; i < N / numnodes; i++)
            C[i] = &tmp[i * N];
    }

    if (myrank == 0) {
        // initialize A and B
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                A[i][j] = 1.0;
                B[i][j] = 1.0;
            }
        }
    }

    // start timer
    if (myrank == 0) {
        startTime = MPI_Wtime();
    }

    stripSize = N / numnodes;

    // send each node its piece of A -- note could be done via MPI_Scatter
    if (myrank == 0) {
        offset = stripSize;
        numElements = stripSize * N;
        for (i = 1; i < numnodes; i++) {
            MPI_Send(A[offset], numElements, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD);
            offset += stripSize;
        }
    }
    else { // receive my part of A
        MPI_Recv(A[0], stripSize * N, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    // everyone gets B
    MPI_Bcast(B[0], N * N, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    // Let each process initialize C to zero
    for (i = 0; i < stripSize; i++) {
        for (j = 0; j < N; j++) {
            C[i][j] = 0.0;
        }
    }

    // do the work---this is the primary difference from the pure MPI program
    #pragma omp parallel for shared(A,B,C,numThreads) private(i,j,k) schedule (static, chunkSize)
    for (i = 0; i < stripSize; i++) {
        for (j = 0; j < N; j++) {
            for (k = 0; k < N; k++) {
                C[i][j] += A[i][k] * B[k][j];
            }
        }
    }

    // master receives from workers -- note could be done via MPI_Gather
    if (myrank == 0) {
        offset = stripSize;
        numElements = stripSize * N;
        for (i = 1; i < numnodes; i++) {
            MPI_Recv(C[offset], numElements, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            offset += stripSize;
        }
    }
    else { // send my contribution to C
        MPI_Send(C[0], stripSize * N, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD);
    }

    // stop timer
    if (myrank == 0) {
        endTime = MPI_Wtime();
        printf("Time is %f\n", endTime - startTime);
    }

    // print out matrix here, if I'm the master
    if (myrank == 0 && N < 10) {
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("%f ", C[i][j]);
            }
            printf("\n");
        }
    }

    MPI_Finalize();
    return 0;
}
And this is my issue
You are doing an MPI_Bcast on B as if it were a contiguous block of N*N elements. However, it's not: it's an array of pointers to N separate arrays of length N. So either you need to allocate B contiguously, or you need to do N broadcasts.
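A minimal sketch of the first option (contiguous allocation), using the N and types from the program above (Bdata is just an illustrative name): allocate one N*N block and point the row pointers into it, so a single broadcast of N*N doubles covers the whole matrix.

// Sketch: contiguous 2D allocation so one MPI_Bcast moves the whole matrix.
double *Bdata = (double*)malloc(sizeof(double) * N * N); // one contiguous block
double **B    = (double**)malloc(sizeof(double*) * N);   // row pointers into it
for (i = 0; i < N; i++)
    B[i] = &Bdata[i * N];
// ... rank 0 fills B ...
MPI_Bcast(&B[0][0], N * N, MPI_DOUBLE, 0, MPI_COMM_WORLD); // valid: the rows are contiguous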
My program is meant to take an array size and the elements of that array from the user.
However, I want the program to be able to distribute the array elements evenly for any number of processors used.
I think the problem is in the displs array, but even after countless attempts I don't seem to be reaching any logical conclusion.
Let's say I enter a sequence of 7 numbers -> 1,2,3,4,5,6,7
I then get output like this:
processor 0
arr[0] = 1
arr[1] = 2
arr[2] = 3
processor 1
arr[0] = 4
arr[1] = 5
processor 2
arr[0] = 7
arr[1] = 32767
The code is the following:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAY_SIZE 100
int main(int argc, char **argv)
{
int myrank, wsize;
int i,N;
int *arr,*displs, *arr_r, *sendcount;
int sum1=0;
int portion,remainder,x,y;
int root;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &wsize);
if(myrank == 0)
{
printf("Enter number N of integers\n");
scanf("%d", &N);
arr = (int*)malloc(N*sizeof(int));
for(i = 0; i < N; i++)
{
printf("Enter number %d\n", i+1);
scanf("%d",&arr[i]);
}
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
portion = N / wsize;
remainder = N % wsize;
x = portion;
y = portion +1;
displs = (int*)malloc(N*sizeof(int));
sendcount = (int*)malloc(N*sizeof(int));
for(i=0; i < N; i++)
{
if(myrank < remainder)
{
sendcount[i] = portion + (remainder);
displs[i] = (portion + (remainder)) * i;
}
else if(remainder == 0)
{
sendcount[i] = portion;
displs[i] = portion *i;
}
else
{
sendcount[i] = portion;
displs[i] = portion * i;
}
}
arr_r = (int*)malloc(N *sizeof(int));
MPI_Scatterv(arr, sendcount, displs, MPI_INT, arr_r, N, MPI_INT, 0, MPI_COMM_WORLD);
if(myrank < remainder)
{
printf("process %d \n",myrank);
for(i = 0; i < portion + 1; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
else if(remainder == 0)
{
printf("process %d \n",myrank);
for(i = 0; i < portion; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
else
{
printf("process %d \n",myrank);
for(i = 0; i < portion; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
MPI_Finalize();
return 0;
}
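For reference, a common way to set up MPI_Scatterv when N does not divide evenly by the number of processes is sketched below, using the variables from the code above (the exact distribution policy is my assumption): the first N % wsize ranks get portion + 1 elements, the rest get portion, and each displacement is the running sum of the previous counts. Note that sendcount and displs need one entry per rank rather than one per element, and they describe all ranks, so they should not depend on myrank.

// Sketch: one sendcount/displacement entry per rank; the remainder is
// spread over the first ranks. Assumes portion and remainder as above.
sendcount = (int*)malloc(wsize * sizeof(int));
displs = (int*)malloc(wsize * sizeof(int));
int offset = 0;
for(i = 0; i < wsize; i++)
{
    sendcount[i] = portion + (i < remainder ? 1 : 0);
    displs[i] = offset;
    offset += sendcount[i];
}
// Each rank receives exactly sendcount[myrank] elements:
arr_r = (int*)malloc(sendcount[myrank] * sizeof(int));
MPI_Scatterv(arr, sendcount, displs, MPI_INT,
             arr_r, sendcount[myrank], MPI_INT, 0, MPI_COMM_WORLD);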
I was trying to calculate the elementwise multiplication of matrix elements.
But I got this error and don't know what to do.
===================================================================================
= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
= PID 16855 RUNNING AT kevlinsky-PC
= EXIT CODE: 139
= CLEANING UP REMAINING PROCESSES
= YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
===================================================================================
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
This typically refers to a problem with your application.
Please see the FAQ page for debugging suggestions
The task was to split the work between processes, calculate the result, and return it to process zero.
Code example:
#include <iostream>
#include <math.h>
#include "mpi.h"

int main(int argc, char *argv[]){
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int n = 4;
    int arrayA[n][n];
    int arrayB[n][n];

    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; i++) {
            arrayA[i][j] = (rand() % 1000) - 500;
        }
        for (int j = 0; j < n; i++) {
            arrayB[i][j] = (rand() % 1000) - 500;
        }
    }

    int getbufA[n];
    int getbufB[n];
    int arrayC[n][n];
    int bufC[n];

    MPI_Scatter(&arrayA, n, MPI_INT, &getbufA, n, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(&arrayB, n, MPI_INT, &getbufB, n, MPI_INT, 0, MPI_COMM_WORLD);

    for (int i = 0; i < n; i++) {
        bufC[i] = getbufA[i] * getbufB[i];
    }

    MPI_Gather(&bufC, n, MPI_INT, &arrayC, n, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("MATRIX C \n");
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                printf("%d ", arrayC[i][j]);
            }
            printf("\n");
        }
    }

    MPI_Finalize();
}
Can someone help with this?
I think this is your error:
for (int j = 0; j < n; i++) {
    arrayA[i][j] = (rand() % 1000) - 500;
}
You need j++ in this loop, and you have this error in two places. j is never incremented and stays 0, while i is incremented indefinitely (because the loop condition is based on j), so you very quickly go out of bounds of the array, hence the segmentation fault.
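For completeness, here is the corrected initialization; the only change is incrementing j instead of i in the inner loops:

for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {   // j++ instead of i++
        arrayA[i][j] = (rand() % 1000) - 500;
    }
    for (int j = 0; j < n; j++) {   // j++ instead of i++
        arrayB[i][j] = (rand() % 1000) - 500;
    }
}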
Can anybody tell me what I am doing wrong that is causing this error?
Code:
#include<stdio.h>
#include<mpi.h>

void transpose(int ** p, int row, int col)
{
    int ** tempVar;
    tempVar = (int *)malloc(sizeof(int *)* row);
    int i = 0;
    for (; i < row; i++)
    {
        tempVar[i] = (int *)malloc(sizeof (int *)* col);
        int j = 0;
        while (j < col)
        {
            tempVar[i][j] = p[j][i];
            j++;
        }
    }
    p = tempVar;
}

void main(int argc, char * argv[])
{
    int rank, size;
    MPI_Init(argc, argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int d[] = { 1000, 1000, 1000, 1000, 1000, 1000 };
    int vt[6] = { 1000, 1000, 1000, 1000, 1000, 1000 };
    int ** p;
    p = (int *)malloc(sizeof(int *)* 6);
    int i = 0;
    int row = 6;
    int col = 6;
    while (i < 6)
    {
        p[i] = (int *)malloc(sizeof(int *)* 6);
        /*int j = 0;
        if (rank == 0)
        {
            while (j < 6)
            {
                scanf("%d", p[i][j]);
                j++;
            }
        }*/
        i++;
    }

    p[0][0] = 0; p[0][1] =2 ; p[0][2] =3 ; p[0][3] =1 ; p[0][4] =1000 ; p[0][5] =1000 ;
    p[1][0] = 2; p[1][1] = 0; p[1][2] = 1000; p[1][3] = 1000; p[1][4] = 5; p[1][5] = 1000;
    p[2][0] = 3; p[2][1] = 1000; p[2][2] = 0; p[2][3] = 1000; p[2][4] = 1000; p[2][5] = 1;
    p[3][0] = 1; p[3][1] = 1000; p[3][2] = 1000; p[3][3] = 0; p[3][4] = 4; p[3][5] = 3;
    p[4][0] = 1000; p[4][1] = 5; p[4][2] = 1000; p[4][3] = 4; p[4][4] = 0; p[4][5] = 2;
    p[5][0] = 1000; p[5][1] = 1000; p[5][2] = 1; p[5][3] = 3; p[5][4] = 2; p[5][5] = 0;

    int smallest;
    if (rank == 0)
    {
        //transpose(&p , row , col);
        smallest = 0;
        vt[smallest] = smallest;
        //MPI_Bcast();
    }

    int vt1, d1;
    vt1 = d1 = 0;
    int roww[6];

    MPI_Scatter(vt, 6, MPI_INT, vt1, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(d, 6, MPI_INT, d1, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(p, row *row, MPI_INT,roww, 6, MPI_INT, 0, MPI_COMM_WORLD);

    i = 0;
    while (i < (row*row)/size)
    {
        MPI_Bcast(smallest, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (vt1 != rank)
        {
            if (roww[smallest] != 1000)
            {
                if (d1 > roww[smallest])
                    d1 = roww[smallest];
            }
        }
        MPI_Gather(d1, 1, MPI_INT, d, row, MPI_INT, 0, MPI_COMM_WORLD);
        if (rank == 0)
        {
            smallest = d[0];
            int k = 1;
            int index = 0;
            while (k < 6)
            {
                if (d[k] < smallest)
                {
                    smallest = d[k];
                    index = k;
                }
                k++;
            }
            vt[k] = index;
        }
        MPI_Scatter(vt, 6, MPI_INT, vt1, (row) / size, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Scatter(d, 6, MPI_INT, d1, (row) / size, MPI_INT, 0, MPI_COMM_WORLD);
        i++;
    }

    MPI_Finalize();
}
The error that I am getting is:
Fatal Error: fatal error in MPI_Scatter: Invalid buffer pointer, error stack:
MPI_Scatter(760): MPI_Scatter(sbuf=0x0085f7ac , scount , MPI_INT , rbuf =0x0000000 , rcount =1, MPI_INT , root= 0 , MPI_COMM_WORLD) failed
The code you provided compiles with lots of warnings that should not be ignored, such as:
passing argument 2 of ‘MPI_Init’ from incompatible pointer type
Look carefully at the prototypes of the functions: int* fun(int* b); is likely to fail if you call it as int d; fun(d);. If the function needs a pointer to the data, fun(&d) will work better. This problem occurs many times as the MPI functions are called.
More: the function transpose(int ** p) tries to modify p by doing p = tempVar. As @WhozCraig pointed out, with int **p; ...; transpose(p, ...), only the copy of p in the scope of transpose() is modified, not p itself. Hence the right prototype for this function is transpose(int ***p, ...), and the right way to call it is int **p; ...; transpose(&p, ...);
Regarding memory allocation: you found a way to allocate a 2D array! But the data is not contiguous in memory, since the rows are allocated one at a time. If you plan to use MPI functions such as MPI_Scatter(), allocating a contiguous 2D array is the right way to go.
Additional advice: call free() at the right time to free the memory and avoid memory leaks, and do not cast the return of malloc().
Here is a piece of code that should compile cleanly with mpicc main.c -o main -Wall (the -Wall option enables all warnings). It seems to run fine, though I did not check whether the result is correct.
#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>

void transpose(int *** p, int row, int col)
{
    int ** tempVar;
    tempVar = malloc(sizeof(int *)* row);
    if (tempVar==NULL){printf("malloc failed\n"); exit (1);}
    tempVar[0] = malloc(sizeof (int )* col*row);
    if (tempVar[0]==NULL){printf("malloc failed\n"); exit (1);}
    int i = 0;
    for (i=0; i < row; i++)
    {
        tempVar[i] = &tempVar[0][col*i];
        int j = 0;
        while (j < col)
        {
            tempVar[i][j] = (*p)[j][i];
            j++;
        }
    }
    free((*p)[0]);
    free(*p);
    *p = tempVar;
}

int main(int argc, char * argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int d[] = { 1000, 1000, 1000, 1000, 1000, 1000 };
    int vt[6] = { 1000, 1000, 1000, 1000, 1000, 1000 };
    int ** p;
    int i = 0;
    int row = 6;
    int col = 6;
    p = malloc(sizeof(int *)* row);
    if (p==NULL){printf("malloc failed\n"); exit (1);}
    p[0] = malloc(sizeof(int )* row*col);
    if (p[0]==NULL) {printf("malloc failed\n"); exit (1);}
    while (i < row)
    {
        p[i] = &p[0][i*col];
        /*int j = 0;
        if (rank == 0)
        {
            while (j < 6)
            {
                scanf("%d", p[i][j]);
                j++;
            }
        }*/
        i++;
    }

    p[0][0] = 0; p[0][1] =2 ; p[0][2] =3 ; p[0][3] =1 ; p[0][4] =1000 ; p[0][5] =1000 ;
    p[1][0] = 2; p[1][1] = 0; p[1][2] = 1000; p[1][3] = 1000; p[1][4] = 5; p[1][5] = 1000;
    p[2][0] = 3; p[2][1] = 1000; p[2][2] = 0; p[2][3] = 1000; p[2][4] = 1000; p[2][5] = 1;
    p[3][0] = 1; p[3][1] = 1000; p[3][2] = 1000; p[3][3] = 0; p[3][4] = 4; p[3][5] = 3;
    p[4][0] = 1000; p[4][1] = 5; p[4][2] = 1000; p[4][3] = 4; p[4][4] = 0; p[4][5] = 2;
    p[5][0] = 1000; p[5][1] = 1000; p[5][2] = 1; p[5][3] = 3; p[5][4] = 2; p[5][5] = 0;

    int smallest;
    if (rank == 0)
    {
        //transpose(&p , row , col);
        smallest = 0;
        vt[smallest] = smallest;
        //MPI_Bcast();
    }

    int vt1, d1;
    vt1 = d1 = 0;
    int roww[col];

    MPI_Scatter(vt, 1, MPI_INT, &vt1, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(d, 1, MPI_INT, &d1, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(&p[0][0], col, MPI_INT,roww, col, MPI_INT, 0, MPI_COMM_WORLD);

    i = 0;
    while (i < (row*row)/size)
    {
        MPI_Bcast(&smallest, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (vt1 != rank)
        {
            if (roww[smallest] != 1000)
            {
                if (d1 > roww[smallest])
                    d1 = roww[smallest];
            }
        }
        MPI_Gather(&d1, 1, MPI_INT, d, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (rank == 0)
        {
            smallest = d[0];
            int k = 1;
            int index = 0;
            while (k < 6)
            {
                if (d[k] < smallest)
                {
                    smallest = d[k];
                    index = k;
                }
                k++;
            }
            vt[k] = index;
        }
        MPI_Scatter(vt, 1, MPI_INT, &vt1, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Scatter(d, 1, MPI_INT, &d1, 1, MPI_INT, 0, MPI_COMM_WORLD);
        i++;
    }

    free(p[0]);
    free(p);
    MPI_Finalize();
    return 0;
}