MPI_Scatter a 2D array into other 2D arrays - c++

I want to scatter a 2D array into other 2D arrays (one for each process), using this specific way of allocating memory:
int (*matrix)[cols] = malloc(sizeof *matrix * rows);
I keep getting this error:
One of the processes started by mpirun has exited with a nonzero exit
code. This typically indicates that the process finished in error.
If your process did not finish in error, be sure to include a "return
0" or "exit(0)" in your C code before exiting the application.
PID 7035 failed on node n0 (127.0.0.1) due to signal 11.
I think the problem is in the scatter, but I am new to parallel programming, so if anyone knows what the issue is, please help me.
Thanks in advance.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mpi.h"
int main(int argc, char** argv) {
int my_rank;
int p;
int root;
int rows = 0;
int cols = 0;
int **matrix;
int i, j;
int local_rows;
int answer = 0;
int broke = 0;
MPI_Init(& argc, & argv);
MPI_Comm_rank(MPI_COMM_WORLD, & my_rank);
MPI_Comm_size(MPI_COMM_WORLD, & p);
if (my_rank == 0) {
do {
printf("Enter Dimensions NxN\n");
scanf("%d", & rows);
scanf("%d", & cols);
if (cols != rows) {
printf("Columns must be the same as rows,enter dimensions again.\n");
}
} while (rows != cols);
int (*matrix)[cols] = malloc(sizeof *matrix * rows);
printf("Fill array %dx%d\n", rows, cols);
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
scanf("%d",&matrix[i][j]);
}
}
printf("\n");
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
printf("%d ",matrix[i][j]);
}
printf("\n");
}
}
root = 0;
MPI_Bcast(&rows, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&cols, 1, MPI_INT, root, MPI_COMM_WORLD);
local_rows = rows / p;
int (*local_matrix)[rows] = malloc(sizeof *local_matrix * local_rows);
MPI_Scatter(matrix, local_rows*rows, MPI_INT,local_matrix, local_rows*rows, MPI_INT, 0, MPI_COMM_WORLD);
printf("\nLocal matrix fo the process %d is :\n", my_rank);
for (i = 0; i < local_rows; i++) {
for (j = 0; j < cols; j++) {
printf("%d ", local_matrix[i][j]);
}
printf("\n");
}
if (my_rank==0){
free(matrix);
free(local_matrix);
}
MPI_Finalize();
}

The problem with your code is that you declared two variables named matrix:
int **matrix;
and
int (*matrix)[cols] = malloc(sizeof *matrix * rows);
Since the latter is declared inside the if (my_rank == 0) {...} block, the matrix used in MPI_Scatter(matrix, local_rows*rows, MPI_INT, local_matrix, local_rows*rows, MPI_INT, 0, MPI_COMM_WORLD);
is the first one, which was never allocated, and not the one you allocated space for. That is why you are getting the error.
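Boiled down to a tiny standalone example (mine, not taken from the original code), the shadowing looks like this:
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int *matrix = NULL;                        /* outer matrix, never allocated */
        int my_rank = 0;
        if (my_rank == 0) {
            int *matrix = malloc(4 * sizeof(int)); /* inner matrix shadows the outer one */
            matrix[0] = 7;                         /* writes into the allocated block */
            free(matrix);
        }                                          /* the inner matrix no longer exists here */
        printf("matrix = %p\n", (void *)matrix);   /* the outer matrix is still NULL */
        return 0;
    }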
Try this:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mpi.h"
int main(int argc, char** argv) {
int my_rank;
int p;
int root;
int rows = 0;
int cols = 0;
int i, j;
int local_rows;
int answer = 0;
int broke = 0;
MPI_Init(& argc, & argv);
MPI_Comm_rank(MPI_COMM_WORLD, & my_rank);
MPI_Comm_size(MPI_COMM_WORLD, & p);
if (my_rank == 0) {
do {
printf("Enter Dimensions NxN\n");
scanf("%d", & rows);
scanf("%d", & cols);
if (cols != rows) {
printf("Columns must be the same as rows,enter dimensions again.\n");
}
} while (rows != cols);
}
root = 0;
MPI_Bcast(&rows, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&cols, 1, MPI_INT, root, MPI_COMM_WORLD);
// declare the pointer only after cols has its real value on every rank,
// otherwise the row type int[cols] is sized while cols is still 0
int (*matrix)[cols] = NULL;
if (my_rank == 0) {
matrix = malloc(sizeof *matrix * rows);
printf("Fill array %dx%d\n", rows, cols);
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
scanf("%d",&matrix[i][j]);
}
}
printf("\n");
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
printf("%d ",matrix[i][j]);
}
printf("\n");
}
}
local_rows = rows / p;
// Changed from the original
int (*local_matrix)[cols] = malloc(sizeof *local_matrix * local_rows);
printf("R = (%d, %d, %d) \n",my_rank, local_rows, cols);
if(my_rank == 0)
{
printf("\n");
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
printf("%d ",matrix[i][j]);
}
printf("\n");
}
}
MPI_Scatter(matrix, local_rows*cols, MPI_INT,local_matrix,
local_rows*cols, MPI_INT, 0, MPI_COMM_WORLD);
...
By the way, I think you meant:
int (*local_matrix)[cols] = malloc(sizeof *local_matrix * local_rows);
and not
int (*local_matrix)[rows] = malloc(sizeof *local_matrix * local_rows);
Also, do not forget to free local_matrix on the other (non-root) ranks as well.
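For the cleanup, a short sketch using the variable names from the code above: every rank allocates local_matrix, so every rank frees it, while only rank 0 owns matrix:
    free(local_matrix);          /* allocated on every rank */
    if (my_rank == 0) {
        free(matrix);            /* allocated only on the root */
    }
    MPI_Finalize();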

Related

How to evenly distribute an array with Scatterv

My program is meant to take an array size and the elements of that particular array from the user.
I want the program to distribute the array elements evenly across any number of processes.
I think the problem is in the displs array, but even after countless attempts I haven't reached any logical conclusion.
Let's say I enter a sequence of 7 numbers -> 1,2,3,4,5,6,7
I get output like this:
processor 0
arr[0] = 1
arr[1] = 2
arr[2] = 3
processor 1
arr[0] = 4
arr[1] = 5
processor 2
arr[0] = 7
arr[1] = 32767
The code is the following:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAY_SIZE 100
int main(int argc, char **argv)
{
int myrank, wsize;
int i,N;
int *arr,*displs, *arr_r, *sendcount;
int sum1=0;
int portion,remainder,x,y;
int root;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &wsize);
if(myrank == 0)
{
printf("Enter number N of integers\n");
scanf("%d", &N);
arr = (int*)malloc(N*sizeof(int));
for(i = 0; i < N; i++)
{
printf("Enter number %d\n", i+1);
scanf("%d",&arr[i]);
}
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
portion = N / wsize;
remainder = N % wsize;
x = portion;
y = portion +1;
displs = (int*)malloc(N*sizeof(int));
sendcount = (int*)malloc(N*sizeof(int));
for(i=0; i < N; i++)
{
if(myrank < remainder)
{
sendcount[i] = portion + (remainder);
displs[i] = (portion + (remainder)) * i;
}
else if(remainder == 0)
{
sendcount[i] = portion;
displs[i] = portion *i;
}
else
{
sendcount[i] = portion;
displs[i] = portion * i;
}
}
arr_r = (int*)malloc(N *sizeof(int));
MPI_Scatterv(arr, sendcount, displs, MPI_INT, arr_r, N, MPI_INT, 0, MPI_COMM_WORLD);
if(myrank < remainder)
{
printf("process %d \n",myrank);
for(i = 0; i < portion + 1; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
else if(remainder == 0)
{
printf("process %d \n",myrank);
for(i = 0; i < portion; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
else
{
printf("process %d \n",myrank);
for(i = 0; i < portion; i++)
{
printf("Arr[%d] = %d\n",i,arr_r[i]);
}
}
MPI_Finalize();
return 0;
}
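As a hedged sketch (my own, not from the thread) of how this is usually done: sendcount and displs need one entry per rank, computed for the destination rank r rather than for myrank, with the first N % wsize ranks getting one extra element, and each rank's receive count matching its own sendcount entry:
    #include <stdio.h>
    #include <stdlib.h>
    #include "mpi.h"

    int main(int argc, char **argv)
    {
        int myrank, wsize, N = 0;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
        MPI_Comm_size(MPI_COMM_WORLD, &wsize);

        int *arr = NULL;
        if (myrank == 0) {
            printf("Enter number N of integers\n");
            scanf("%d", &N);
            arr = malloc(N * sizeof(int));
            for (int i = 0; i < N; i++) {
                printf("Enter number %d\n", i + 1);
                scanf("%d", &arr[i]);
            }
        }
        MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);

        /* one entry per rank, not per element */
        int *sendcount = malloc(wsize * sizeof(int));
        int *displs = malloc(wsize * sizeof(int));
        int offset = 0;
        for (int r = 0; r < wsize; r++) {
            sendcount[r] = N / wsize + (r < N % wsize ? 1 : 0); /* low ranks take the remainder */
            displs[r] = offset;                                 /* where rank r's chunk starts */
            offset += sendcount[r];
        }

        int my_count = sendcount[myrank];
        int *arr_r = malloc(my_count * sizeof(int));
        MPI_Scatterv(arr, sendcount, displs, MPI_INT,
                     arr_r, my_count, MPI_INT, 0, MPI_COMM_WORLD);

        printf("process %d\n", myrank);
        for (int i = 0; i < my_count; i++)
            printf("arr[%d] = %d\n", i, arr_r[i]);

        free(sendcount);
        free(displs);
        free(arr_r);
        if (myrank == 0)
            free(arr);
        MPI_Finalize();
        return 0;
    }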

Segmentation fault by using MPI_Scatter and MPI_Gather

I was trying to calculate the element-wise product of two matrices, but I got this error and don't know what to do.
===================================================================================
= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
= PID 16855 RUNNING AT kevlinsky-PC
= EXIT CODE: 139
= CLEANING UP REMAINING PROCESSES
= YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
===================================================================================
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
This typically refers to a problem with your application.
Please see the FAQ page for debugging suggestions
The task was to split it between processes, calculate the result and return it to the zero process.
Code example:
#include <iostream>
#include <math.h>
#include "mpi.h"
int main(int argc, char *argv[]){
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
const int n = 4;
int arrayA[n][n];
int arrayB[n][n];
for (int i = 0; i < n; i++){
for (int j = 0; j < n; i++) {
arrayA[i][j] = (rand() % 1000) - 500;
}
for (int j = 0; j < n; i++) {
arrayB[i][j] = (rand() % 1000) - 500;
}
}
int getbufA[n];
int getbufB[n];
int arrayC[n][n];
int bufC[n];
MPI_Scatter(&arrayA, n, MPI_INT, &getbufA, n, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Scatter(&arrayB, n, MPI_INT, &getbufB, n, MPI_INT, 0, MPI_COMM_WORLD);
for (int i = 0; i < n; i++) {
bufC[i] = getbufA[i] * getbufB[i];
}
MPI_Gather(&bufC, n, MPI_INT, &arrayC, n, MPI_INT, 0, MPI_COMM_WORLD);
if (rank == 0) {
printf("MATRIX C \n");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
printf("%d ", arrayC[i][j]);
}
printf("\n");
}
}
MPI_Finalize();
}
Can someone help with this?
I think this is your error:
for (int j = 0; j < n; i++) {
arrayA[i][j] = (rand() % 1000) - 500;
}
You need j++ in this loop, and the same mistake appears in two places. As written, j is never incremented and stays 0, so the loop condition j < n is always true while i keeps growing; i quickly runs past the end of the array, hence the segmentation fault.
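With the increments fixed, the fill loops would look like this (a corrected excerpt of the code above):
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            arrayA[i][j] = (rand() % 1000) - 500;
        }
        for (int j = 0; j < n; j++) {
            arrayB[i][j] = (rand() % 1000) - 500;
        }
    }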

How to fix an issue with MPI parallel programming for matrix multiplication with a dynamic 2D array?

I am trying to create three matrices a, b, c where c = a*b, using MPI. I use the same length N for all of them, since they have to be square. Whenever I enter the value of N at runtime I get a segmentation fault, but if I hard-code the value of N in the program it works fine.
I have tried this with scatter and gather as shown here: matrix multiplication using Mpi_Scatter and Mpi_Gather.
Now I have to do it dynamically, so that I can measure how long the program takes to execute. Just to mention, I have already done this with OpenMP, which worked great, but I want to compare which one is really better, i.e. OpenMP or MPI.
#include <iostream>
#include <math.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stddef.h>
#include "mpi.h"
int main(int argc, char *argv[])
{
int i, j, k, rank, size, tag = 99, blksz, sum = 0,N=0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
int aa[N],cc[N];
if(rank ==0)
{
std::cout << "input value of N" << '\n';
std::cin >> N;
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
int **a = new int*[N];
for (int i = 0; i < N; i++)
a[i] = new int[N];
int **b = new int*[N];
for (int i = 0; i < N; i++)
b[i] = new int[N];
int **c = new int*[N];
for (int i = 0; i < N; i++)
c[i] = new int[N];
if (rank == 0)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
a[i][j] =rand() % 10;
std::cout << a[i][j];
}
std::cout << '\n';
}
std::cout << '\n';
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
b[i][j] =rand() % 10;
std::cout << b[i][j];
}
std::cout << '\n';
}
}
MPI_Scatter(a, N*N/size, MPI_INT, aa, N*N/size, MPI_INT,0,MPI_COMM_WORLD);
//broadcast second matrix to all processes
MPI_Bcast(b, N*N, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
//perform vector multiplication by all processes
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
sum = sum + aa[j] * b[j][i]; //MISTAKE_WAS_HERE
}
cc[i] = sum;
sum = 0;
}
MPI_Gather(cc, N*N/size, MPI_INT, c, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
if (rank == 0) //I_ADDED_THIS
{
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
{
std::cout << a[i][j]<< '\n';
}
std::cout << '\n';
}
std::cout << '\n' << '\n';
}
delete *a;
delete *b;
delete *c;
}
The error which I am getting is:
mpirun noticed that process rank 3 with PID 3580 on node localhost exited on signal 11 (Segmentation fault).
I just want the matrix multiplication to work here.
Declaring the array like this
int **a = new int*[N];
for (int i = 0; i < N; i++)
a[i] = new int[N];
does not allocate it in one contiguous block of memory. Replacing the above declaration with one of the following will make the application work.
int a[N][N]; // or
int *a = (int*)malloc(N*N*sizeof(int)); // indexed as a[i*N + j]
MPI_Scatter, MPI_Gather, etc. work on buffers stored in contiguous memory.
#include <iostream>
#include <math.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stddef.h>
#include "mpi.h"
int main(int argc, char *argv[])
{
int i, j, k, rank, size, tag = 99, blksz, sum = 0,N=0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if(rank ==0)
{
std::cout << "input value of N" << '\n';
std::cin >> N;
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
int size_array=(N*N)/size;
int aa[size_array],cc[size_array]; // Declare arrays here since value of N is 0 otherwise
int a[N][N];
int b[N][N];
int c[N][N];
if (rank == 0)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
a[i][j] =rand() % 10;
std::cout << a[i][j];
}
std::cout << '\n';
}
std::cout << '\n';
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
b[i][j] =rand() % 10;
std::cout << b[i][j];
}
std::cout << '\n';
}
}
MPI_Scatter(a, N*N/size, MPI_INT, aa, N*N/size, MPI_INT,0,MPI_COMM_WORLD);
//broadcast second matrix to all processes
MPI_Bcast(b, N*N, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
//perform vector multiplication by all processes
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
sum = sum + aa[j] * b[j][i]; //MISTAKE_WAS_HERE
}
cc[i] = sum;
sum = 0;
}
MPI_Gather(cc, N*N/size, MPI_INT, c, N*N/size, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
if (rank == 0) //I_ADDED_THIS
{
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
{
std::cout << a[i][j]<< '\n';
}
std::cout << '\n';
}
std::cout << '\n' << '\n';
}
}
Also, declare the arrays int aa[N], cc[N]; only after N has been read in (after the scanf and the MPI_Bcast); otherwise they are declared while N is still 0.
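If you prefer to keep the a[i][j] style of indexing while still having contiguous storage, a common alternative is a single block plus an array of row pointers. This is a sketch of mine (reusing N, size, and aa from the code above), not part of the original answer:
    int *storage = (int*)malloc(N * N * sizeof(int));  /* one contiguous block */
    int **a = (int**)malloc(N * sizeof(int*));         /* row pointers into that block */
    for (int i = 0; i < N; i++)
        a[i] = storage + i * N;                        /* a[i][j] is storage[i*N + j] */

    /* pass the contiguous block itself, not the pointer table, to MPI */
    MPI_Scatter(storage, N * N / size, MPI_INT,
                aa, N * N / size, MPI_INT, 0, MPI_COMM_WORLD);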

MPI_Scatter and MPI_Allgather

Okay, so here is another question related to my previous post MPI_Broadcast using vectors
I want to scatter a matrix (4x4) in such a way that each process receives one row (4 processes in total). I am using vectors, which need to be resized, and a bit of playing around. It worked well when using arrays, but with vectors I can't get the desired output.
updated code (Minimal)
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cmath>
#include <chrono>
#include <cmath>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include"mpi.h"
using namespace std;
const int num_rows1 = 4, num_rows2 = 4, num_cols1 = 4, num_cols2 = 4;
const int root = 0;
int i, j, k;
vector<vector<int> > matrix1(num_rows1, vector <int>(num_cols1));
vector<vector<int> > matrix2(num_rows2, vector <int>(num_cols2));
vector<vector<int> > result(num_rows1, vector <int>(num_cols1));
int finale[num_rows1][num_cols2];
vector<vector<int> > transpose_mat(num_cols2, vector <int>(num_rows2));
vector<int> column1(num_rows1);
vector<int> column2(num_cols2);
double start_time, end_time;
int * column3 = new int[];
//Function working with the multiplication
vector<int> mult(vector<vector<int> > A, vector<int> B)
{
//Multiplication
for (int i = 0; i < num_rows1; ++i)
{
int sum = 0;
for (int j = 0; j < num_cols1; ++j)
{
sum += A[i][j] * B[j];
}
column1.push_back(sum);
}
return column1;
}
//Function generating random matrices
vector<vector<int>> generate_matrix(int nrow, int ncol)
{
vector<vector<int>> matrix(nrow, vector <int>(ncol));
for (int i = 0; i < nrow; ++i)
{
for (int j = 0; j < ncol; ++j)
{
matrix[i][j] = (15 *rand() / RAND_MAX - 3);
}
}
return matrix;
}
//function taking the transpose
vector<vector<int>>transpose(vector<vector<int> > matrix , int nrow, int ncol)
{
//Transpose of matrix 2
for (i = 0; i < nrow; ++i)
for (j = 0; j < ncol; ++j)
{
transpose_mat[j][i] = matrix2[i][j];
}
cout << "Transpose " << endl;
for (int i = 0; i < num_rows2; ++i)
{
for (int j = 0; j < num_cols2; ++j)
{
cout << transpose_mat[i][j] << " ";
}
cout << endl;
}
return transpose_mat;
}
//main function
int main(int argc, char *argv[])
{
MPI_Status status;
MPI_Request request;
int tag = 1;
int rank;
int world_size; //Number of processes
// Initialize the MPI environment
MPI_Init(NULL, NULL);
// Get the rank of the process
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
// Get the number of processes
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
if (rank == root)
{
//Filling
matrix1 = generate_matrix(num_rows1, num_cols1);
for (int i = 0; i < num_rows1; ++i)
{
MPI_Bcast(&matrix1[i][0], num_rows1, MPI_INT, root, MPI_COMM_WORLD);
}
}
else if (rank == 1)
{
srand(time(NULL));
//Filling
matrix2 = generate_matrix(num_rows2, num_cols2);
transpose_mat = transpose(matrix2, num_rows2, num_cols2);
}
if (rank > root)
{
int size = matrix1.size();
result.resize(size);
for (int i = 0; i < size; i++){
result[i].resize(size);
}
for (int i = 0; i < size; ++i)
{
MPI_Bcast(&result[i][0], size, MPI_INT, root, MPI_COMM_WORLD);
}
}
int size1 = transpose_mat.size();
column2.resize(size1);
//Scattering the transposed matrix
for (j = 0; j < num_rows2; ++j)
{
MPI_Scatter(&transpose_mat[0][j], size1*size1 / world_size, MPI_INT, &column2[j], size1*size1 / world_size, MPI_INT, 1, MPI_COMM_WORLD);
}
cout << "The scattered data at process " << rank << " is: " << endl;
for (int j = 0; j < num_cols2; ++j)
{
cout << column2[j] << endl;
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
In the updated PNG one can see that only the first two rows of the matrix are scattered: the first one is received by process 0 and the second row by process 2. Why don't process 1 and process 3 give the desired result?
Updated PNG
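For what it's worth, a minimal sketch (my own, not from the post) of the usual workaround: since a vector<vector<int>> is not contiguous, copy it into a single std::vector<int> on the root and scatter that, one row per process:
    #include <iostream>
    #include <vector>
    #include "mpi.h"

    int main(int argc, char *argv[])
    {
        MPI_Init(&argc, &argv);
        int rank, world_size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &world_size);

        const int n = 4;                     // assumes exactly n processes, one row each
        std::vector<int> flat;               // contiguous send buffer, filled on the root only
        if (rank == 0) {
            std::vector<std::vector<int> > matrix(n, std::vector<int>(n));
            for (int i = 0; i < n; ++i)
                for (int j = 0; j < n; ++j)
                    matrix[i][j] = i * n + j;
            for (int i = 0; i < n; ++i)      // flatten row by row
                flat.insert(flat.end(), matrix[i].begin(), matrix[i].end());
        }

        std::vector<int> row(n);             // each process receives one row
        MPI_Scatter(rank == 0 ? flat.data() : NULL, n, MPI_INT,
                    row.data(), n, MPI_INT, 0, MPI_COMM_WORLD);

        std::cout << "rank " << rank << " got:";
        for (int j = 0; j < n; ++j)
            std::cout << " " << row[j];
        std::cout << std::endl;

        MPI_Finalize();
        return 0;
    }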

C++ contiguous memory operation

I have a C++ program in which I am calculating the determinant of a matrix using a normal array, as follows:
/* rand example: guess the number */
#include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
#include <cstdlib>
#include <vector>
using namespace std;
int** generateStandardMatrix(int dimension);
void ijMinor(int *matrix[], int *minorMatrix[], int size, int row, int column);
int determinant(int *matrix[], int size);
void ijMinor(int *matrix[], int *minorMatrix[], int size, int row, int column) {
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
if (i < row) {
if (j < column)minorMatrix[i][j] = matrix[i][j];
else if (j == column)continue;
else minorMatrix[i][j - 1] = matrix[i][j];
}
else if (i == row)continue;
else {
if (j < column)minorMatrix[i - 1][j] = matrix[i][j];
else if (j == column)continue;
else minorMatrix[i - 1][j - 1] = matrix[i][j];
}
}
}
}
int determinant(int *matrix[], int size) {
if (size == 1)return matrix[0][0];
else {
int result = 0, sign = -1;
for (int j = 0; j < size; j++) {
int **minorMatrix;
minorMatrix = new int*[size - 1];
for (int k = 0 ; k < size - 1 ; k++)
minorMatrix[k] = new int[size - 1];
ijMinor(matrix, minorMatrix, size, 0, j);
sign *= -1;
result += sign * matrix[0][j] * determinant(minorMatrix, size - 1);
for (int i = 0; i < size - 1; i++) {
delete minorMatrix[i];
}
}
return result;
}
}
int main (int argc, char* argv[])
{
/* initialize random seed: */
srand (time(NULL));
// int iSecret, iGuess;
int dimension = atoi(argv[1]);
int rowCount = dimension , colCount = dimension;
//2d array storing the integer values
int** ary = new int*[dimension];
//vector of vector storing the indices across the array for the threads to pick up from
vector<vector<int> > vec;
ary = generateStandardMatrix(dimension);
printf("Array value : %d\n", ary[0][0]);
int detVal = determinant(ary, dimension);
printf("determinant value : %d\n", detVal);
return 0;
}
int** generateStandardMatrix(int dimension) {
int** ary = new int*[dimension];
int counter = 0;
for (int i = 0; i < dimension; ++i) {
ary[i] = new int[dimension];
counter = counter + 1;
for (int j = 0; j < dimension; ++j)
{
ary[i][j] = counter;
std::cout << ary[i][j] << "\t" << std::flush;
}
std::cout << std::endl;
}
return ary;
}
I want to replace it with code in which I allocate memory for the array before the start of the algorithm, and then change the determinant and ijMinor functions so that they don't create new arrays but reuse the same one.
The determinant function would take parameters like determinant(int *matrix, int *startOfMyWorkspace, int size) so that it knows where to start.
I am not good at C++ and so far I have not been able to do it.
Can someone please provide some sample code.
I allocated some memory and created an array, but was unable to change the ijMinor and determinant functions to use it.
This is how I am allocating memory:
int main (int argc, char* argv[])
{
/* initialize random seed: */
srand (time(NULL));
// int iSecret, iGuess;
int dimension = atoi(argv[1]);
int *a;
size_t const N_BYTES = dimension * dimension * sizeof(int);
a = (int*)malloc(N_BYTES);
createData(dimension,a);
return 0;
}
void createData(int const dimension, int* const a)
{
int row, col;
srand((unsigned)time(NULL));
int counter;
for(int row = 0; row < dimension; row++) {
counter = counter + 1;
for(int col = 0; col < dimension; col++) {
int i = col + row * dimension;
a[i] = counter;
std::cout << a[i] << "\t" << std::flush;
}
std::cout << std::endl;
}
}
Try this.
Note that if you use new[] to allocate an array, you need to use delete[] to free all of it. You may get away with plain delete (it often won't crash), but it is undefined behavior, so always pair new[] with delete[]. Your other functions are the same as you posted.
You're dynamically allocating space for minorMatrix in the determinant function, and it's hard to see how that could be preallocated. I've modified the determinant function to use allocate_arr and deallocate_arr.
int ** allocate_arr(int dimension)
{
int** a = new int*[dimension];
for (int i = 0; i < dimension; ++i)
a[i] = new int[dimension];
return a;
}
void deallocate_arr(int dimension, int **a)
{
for (int i = 0; i < dimension; ++i)
delete[] a[i];
delete[] a;
}
int determinant(int *matrix[], int size) {
if (size == 1)return matrix[0][0];
else {
int result = 0, sign = -1;
for (int j = 0; j < size; j++) {
int **minorMatrix = allocate_arr(size - 1);
ijMinor(matrix, minorMatrix, size, 0, j);
sign *= -1;
result += sign * matrix[0][j] * determinant(minorMatrix, size - 1);
deallocate_arr(size - 1, minorMatrix);
}
return result;
}
}
void generateStandardMatrix(int dimension, int**ary) {
int counter = 0;
for (int i = 0; i < dimension; ++i) {
counter = counter + 1;
for (int j = 0; j < dimension; ++j)
{
ary[i][j] = counter;
std::cout << ary[i][j] << "\t" << std::flush;
}
std::cout << std::endl;
}
}
int main(int argc, char* argv[])
{
srand(time(NULL));
int dimension = atoi(argv[1]);
int** a = allocate_arr(dimension);
generateStandardMatrix(dimension, a);
printf("Array value : %d\n", a[0][0]);
int detVal = determinant(a, dimension);
printf("determinant value : %d\n", detVal);
// ... do more computations here, reusing `a` ...
deallocate_arr(dimension, a);
return 0;
}