MPI, Fortran, multitasking - fortran

I would like to perform many independent operations (e.g. time integration of an ODE with different initial conditions) using MPI and Fortran. The initial conditions are stored in a $2 \times 1000$ array IC, for example.
do i = 1, 1000
   (x0, y0) = (x(i), y(i))
   solve an ODE with (x0, y0) for a time duration
   save the result at the end of this duration
enddo
Can anyone help with a minimal example using MPI, or a link to something similar?
I have already used OpenMP, but I think MPI would give me access to more CPUs.

If the operations are truly independent (and the number of cases is a multiple of the number of processes) then:
call mpi_scatter to distribute the start points from root
have each process do its own share of the work (the ODE solves)
call mpi_gather to collect the results back on root
Root can then write the results to file.
If the number of processes doesn't divide evenly into the number of cases, you can use mpi_scatterv and mpi_gatherv instead (a sketch of that variant follows the example below).
Example (with trivial work per job, rather than solving ODEs):
program main
   use iso_fortran_env
   use mpi
   implicit none
   integer stat(mpi_status_size), tag, ierr
   integer size, rank
   integer, parameter :: N = 256 * 1000   ! assumes this is a multiple of the number of processors
   integer, parameter :: root = 0
   integer myN
   integer i
   real(real64), allocatable :: Y(:), myY(:)
   real(real64) start, finish

   call mpi_init( ierr )
   call mpi_comm_size( mpi_comm_world, size, ierr )
   call mpi_comm_rank( mpi_comm_world, rank, ierr )

   ! Set initial values for full array, then start timing
   if ( rank == root ) then
      allocate( Y(N) )
      Y = [ ( i + 0.0_real64, i = 1, N ) ]
      start = getTime()
   end if

   ! Root parcels out the work (i.e., distributes the starting points)
   ! Processor with rank r will look at indices 1+r*N/size to (r+1)*N/size
   myN = N / size
   allocate( myY(myN) )
   call mpi_scatter( Y, myN, mpi_double_precision, &
                     myY, myN, mpi_double_precision, root, mpi_comm_world, ierr )

   ! Each processor does its own work
   call myWork( myN, myY )

   ! Root gets its results back
   call mpi_gather ( myY, myN, mpi_double_precision, &
                     Y, myN, mpi_double_precision, root, mpi_comm_world, ierr )

   ! Root concludes timing, then writes to file
   if ( rank == root ) then
      finish = getTime()
      write( *, * ) "Time taken = ", finish - start
      open( 10, file="output.txt" )
      write( 10, "( i8, 1x, es11.4 )" ) ( i, Y(i), i = 1, N )
      close( 10 )
      deallocate( Y )
   end if

   deallocate( myY )
   call mpi_finalize( ierr )

contains

   subroutine myWork( N, Y )
      integer     , intent(in   ) :: N
      real(real64), intent(inout) :: Y(N)
      integer i
      do i = 1, 10000
         Y = 2 * Y - Y   ! silly example, just to use some flops
      end do
   end subroutine myWork

   real(real64) function getTime()
      integer t(8)
      call date_and_time( values=t )
      getTime = 3600 * t(5) + 60 * t(6) + t(7) + 0.001 * t(8)
   end function getTime

end program main
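If N is not an exact multiple of the number of processes, the same structure works with mpi_scatterv/mpi_gatherv. Below is a minimal, untested sketch reusing the names from the example above; counts, displs and r are helper names introduced here (their declarations belong with the others at the top of the program):

! Uneven case: replace the mpi_scatter/mpi_gather calls of the example with
! mpi_scatterv/mpi_gatherv driven by per-rank counts and displacements.
integer, allocatable :: counts(:), displs(:)   ! one entry per rank
integer r

allocate( counts(0:size-1), displs(0:size-1) )
do r = 0, size - 1
   counts(r) = N / size + merge( 1, 0, r < mod( N, size ) )   ! first mod(N,size) ranks get one extra element
   displs(r) = r * ( N / size ) + min( r, mod( N, size ) )    ! offset of rank r's block within Y
end do
myN = counts(rank)
allocate( myY(myN) )

call mpi_scatterv( Y, counts, displs, mpi_double_precision, &
                   myY, myN, mpi_double_precision, root, mpi_comm_world, ierr )
call myWork( myN, myY )
call mpi_gatherv ( myY, myN, mpi_double_precision, &
                   Y, counts, displs, mpi_double_precision, root, mpi_comm_world, ierr )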

Related

PARPACK implementation runs into memory errors

I am writing a module in Fortran 90 to run PARPACK on a given matrix. I have an existing ARPACK code which functions normally, as expected. I tried converting it to PARPACK, and it runs into memory errors during cleanup. I am fairly new to coding and Fortran, so please excuse any blunders I've made.
The code:
!ARPACK module
module parpack
implicit none
contains
subroutine parp
! use mpi
include '/usr/lib/x86_64-linux-gnu/openmpi/include/mpif.h'
integer comm, myid, nprocs, rc, nloc, status(MPI_STATUS_SIZE)
integer, parameter :: pres=8
integer nev, ncv, maxn, maxnev, maxncv
parameter (maxn=10**7, maxnev=maxn-1, maxncv=maxn)
! Arrays for SNAUPD
integer iparam(11), ipntr(14)
logical, allocatable :: select(:)
real(kind=pres), allocatable :: workd(:), workl(:), worktmp1(:), worktmp2(:)
! Scalars for SNAUPD
character bmat*1, which*2
integer ido, n, info, ierr, ldv
integer i, j, ishfts, maxitr, mode1, nconv
integer(kind=pres) lworkl
real(kind=pres) tol
! Arrays for SNEUPD
real(kind=pres), allocatable :: d(:,:), resid(:), v(:,:), workev(:), z(:,:)
! Scalars for SNEUPD
logical rvec, first
real sigmar, sigmai
!==============================================
real(kind=pres), allocatable :: mat(:,:)
open (11, file = 'matrix.dat', status = 'old')
read (11,*) n
!=============================================
! Dimension of the problem
nev = n/10
ncv = nev+2
ldv = n
bmat = 'I'
which = 'LM'
! Additional environment variables
ido = 0
tol = 0.0E+0
info = 0
lworkl = 3*ncv**2+6*ncv
! Algorithm Mode specifications:
ishfts = 1
maxitr = 300
mode1 = 1
iparam(1) = ishfts
iparam(3) = maxitr
iparam(7) = mode1
! Distribution to nodes
!=============================================
! Matrix allocation
allocate (mat(n,n))
! PDNAUPD
allocate (workd(5*n))
allocate (workl(lworkl))
allocate (resid(n))
allocate (worktmp1(n))
allocate (worktmp2(n))
! PDNEUPD
allocate (d(n,3))
allocate (v(ldv,ncv))
allocate (workev(3*n))
allocate (z(ldv,ncv))
allocate (select(ncv))
!===========================================
! Read Matrix from the provided file
mat = 0
read(11,*) mat
mat = transpose(mat)
!===========================================
! MPI Calling
call MPI_INIT(ierr)
comm = MPI_COMM_WORLD
call MPI_COMM_RANK(comm, myid, ierr)
call MPI_COMM_SIZE(comm, nprocs, ierr)
nloc = n/nprocs
! if ( mod(n, nprocs) .gt. myid ) nloc = nloc + n
!===============================================
20 continue
call pdnaupd(comm, ido, bmat, nloc, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info) !Top level solver
call MPI_BARRIER(comm,ierr)
print *, ido, info, iparam(5) !for testing
!===============================================
if (ido .eq. -1 .or. ido .eq. 1) then
worktmp1 = 0
if (myid .ne. 0) then !It is slave
call MPI_SEND(workd(ipntr(1)), nloc, MPI_DOUBLE_PRECISION, 0, 0, comm, ierr)
else !It is host
worktmp1(1:nloc) = workd(ipntr(1):ipntr(1)+nloc-1)
i = nprocs
if (i .gt. 1) then
do i=1,nprocs-1
call MPI_RECV(worktmp1(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 0, comm, status, ierr)
end do
endif
endif
call MPI_BARRIER(comm,ierr)
if (myid .eq. 0) then !It is host
! Matrix multiplication
worktmp2 = 0
call matmultiply(n, mat, worktmp1, worktmp2)
workd(ipntr(2):ipntr(2)+nloc-1) = worktmp2(1:nloc)
i = nprocs
if (i .gt. 1) then
do i=1,nprocs-1
call MPI_SEND(worktmp2(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 100*i, comm, ierr)
end do
endif
else !It is slave
call MPI_RECV(workd(ipntr(2)), nloc, MPI_DOUBLE_PRECISION, 0, 100*myid, comm, status, ierr)
endif
go to 20
! call matmultiply(n, mat, workd(ipntr(1):ipntr(1)+n-1), workd(ipntr(2):ipntr(2)+n-1))
! go to 20
endif
! print *, info !for testing
!===============================================================
! Post-processing for eigenvalues
rvec = .true.
if (myid .eq. 0) then
call pdneupd ( comm, rvec, 'A', select, d, d(1,2), z, ldv, sigmar, sigmai, &
workev, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, &
workd, workl, lworkl, info)
endif
! print *, info !for testing
close(11)
call MPI_FINALIZE(ierr)
return
end subroutine
!==============================================================================================
! Additional Function definitions
subroutine matmultiply(n, mat, v, w)
integer n, i, j
integer, parameter :: pres=8
real(kind = pres) mat(n,n), temp(n)
real(kind = pres) v(n), w(n)
temp = 0
do j = 1,n
do i = 1,n
temp(j) = temp(j) + mat(i,j)*v(i)
end do
end do
w = temp
return
end subroutine
end module
I apologize for the ton of redundant lines and comments; I have yet to clean the code up.
When I run the code on a single thread with ./a.out, I get the following output:
Invalid MIT-MAGIC-COOKIE-1 key 1 0 1629760560
1 0 1629760560
1 0 1629760560
1 0 1629760560
.
.
. <a long chain, as the code exhausts all iterations; the first of the numbers is ido, which starts with 1 instead of -1 for some reason, the second is info, and the third is iparam(5), which is a random number until the final iteration>
.
99 1 1
munmap_chunk(): invalid pointer
Program received signal SIGABRT: Process abort signal.
Backtrace for this error:
#0 0x7f5a863d0d01 in ???
#1 0x7f5a863cfed5 in ???
#2 0x7f5a8620420f in ???
#3 0x7f5a8620418b in ???
#4 0x7f5a861e3858 in ???
#5 0x7f5a8624e3ed in ???
#6 0x7f5a8625647b in ???
#7 0x7f5a862566cb in ???
#8 0x560f05ac1819 in ???
#9 0x560f05abd7bc in checker
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:45
#10 0x560f05abd8d9 in main
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:3
Aborted (core dumped)
Line 45 in ptest is the call parp statement; line 3 is use parpack (parpack being the name of the module).
The main code is as follows:
program checker
use parpack
use arpack
! use lapack
implicit none
!Program to test LAPACK and ARPACK
! 1. Variable definition
integer a,n,i
real, allocatable :: mat(:,:)
real t0, t1
a=2
! Loop
! do 20 a = 1,3
! Open File
open(unit=10, file = 'matrix.dat', status = 'replace')
! 2. Generate Symmetric matrices
n = 10**a
allocate (mat(n,n))
call RANDOM_NUMBER(mat)
! 3. Save symmetric matrices to r.dat
write (10,*) n
do 30 i=1,n
write(10,*) mat(i,:)
30 end do
deallocate(mat)
close(10)
! 4. Test time taken by each of the routines
! call cpu_time(t0)
! call arp
! call cpu_time(t1)
! print *, 'n:', n, 'ARPACK time taken:', t1-t0
call cpu_time(t0)
call parp
call cpu_time(t1)
print *, 'n:', n, 'PARPACK time taken:', t1-t0
!20 end do
end program checker
The memory error occurs at the very end of the subroutine, when the main program tries to return from it. I have verified this by adding print statements as the last line of the subroutine.
And on running mpirun -np 4 a.out, the code just enters pdneupd and sits there for eternity. Could anyone help?

Parallelizing formatted writes in Fortran with OpenMP

I'm trying to parallelize a Fortran code which at one point writes tons of numbers to formatted output. Some simple profiling showed that most of the CPU time is spent in the format conversion, so I had the idea of doing the formatting in parallel into character buffers and later writing the unformatted buffers to the file.
My proof of concept looks like this:
program parawrite
implicit none
integer (kind = 4) :: i, j, tstart, tstop, rate
integer (kind = 4), parameter :: bufsize = 100000, n = 10000000, llen = 22
character (kind=1, len=:), allocatable :: buf
real (kind=8), dimension(n) :: a
! some input
do i = 1, n
a(i) = dble(i) * dble(i)
enddo
! formated writes for reference
open(unit=10, file="out1.txt", form="formatted")
call system_clock(tstart, rate);
do i = 1, n
write(10,"(E21.15)") a(i)
end do
call system_clock(tstop, rate);
print *, 'Formated write: ', dble(tstop - tstart) / dble(rate), 's'
close(10)
! parallel stuff
open(unit=10, file="out2.txt", access="stream", form="unformatted")
call system_clock(tstart, rate);
!$omp parallel private(buf, j)
allocate(character(bufsize * llen) :: buf)
j = 0;
!$omp do ordered schedule(dynamic,bufsize)
do i = 1, n
write (buf(j*llen+1:(j+1)*llen),"(E21.15,A1)") a(i), char(10)
j = j + 1
if (mod(i, bufsize) == 0) then
!$omp ordered
write (10) buf
!$omp end ordered
j = 0
end if
end do
deallocate(buf)
!$omp end parallel
close(10)
call system_clock(tstop, rate);
print *, 'Parallel write: ', dble(tstop - tstart) / dble(rate), 's'
end program parawrite
When I run it, however, not only is the parallel version much slower on a single thread, it also doesn't scale very well...
$ gfortran -O2 -fopenmp writetest.f90
$ OMP_NUM_THREADS=1 ./a.out
Formated write: 11.330000000000000 s
Parallel write: 15.625999999999999 s
$ OMP_NUM_THREADS=6 ./a.out
Formated write: 11.331000000000000 s
Parallel write: 6.1799999999999997 s
My first question is how to make it the same speed on a single thread. The time spent writing the buffer to the file is negligible, so why are the writes to the buffer slower than writing directly to the file?
My second question is why the scaling is so bad. I have an equivalent C code which uses sprintf and fwrite, and there I get almost perfect linear scaling (I can post the code if needed); with Fortran, however, I can only reduce the runtime to around 40% at 6 threads (with C I can reduce it to 18% with the same number of threads). It is still faster than the serial version, but I hope this can be improved.
From some experiments, it seems that an internal file is rather slow if an array element is converted to an internal file one at a time. This is also the case for an external file, but the degree of slowdown seems much greater for internal files (for some reason...). So I've modified the code such that a set of array elements are converted at once and then written to an external file via stream output. Below, four patterns are compared:
Sequential (1): The original code (which writes each element via do-loop)
Sequential (2): Write an array at once (or via implied loop) to an external file
Parallel (1): Make an internal file for many elements and then write to an external file
Parallel (2): Simplest parallel code with a formatted write or sprintf for each element
Among these, Parallel (2) + sprintf (marked with *2 in the code) was the fastest, while Parallel (2) + write for each element (marked with *1) was the slowest (its timing is shown as Parallel (*) below, and it does not scale with OpenMP for some reason). I guess sprintf is the fastest probably because it has the least amount of internal checks and overhead, etc. (just a guess!)
Results (please see the bottom for the modified codes)
$ gcc -O3 -c conv.c && gfortran -O3 -fopenmp test.f90 conv.o
# Machine: Core i7-8550U (1.8GHz), 4-core/8-thread, Ubuntu18.04 (GCC7.3.0)
# Note: The amount of data has been reduced to 1/5 of the
# original code, n = bufsize * 20, but the relative
# timing results remain the same even for larger data.
$ OMP_NUM_THREADS=1 ./a.out
Sequential (1): 2.0080000000000000 s
Sequential (2): 1.6510000000000000 s
Parallel (1): 1.6960000000000000 s
Parallel (2): 1.2640000000000000 s
Parallel (*): 3.1480000000000001 s
$ OMP_NUM_THREADS=2 ./a.out
Sequential (1): 1.9990000000000001 s
Sequential (2): 1.6479999999999999 s
Parallel (1): 0.98599999999999999 s
Parallel (2): 0.72999999999999998 s
Parallel (*): 1.8600000000000001 s
$ OMP_NUM_THREADS=4 ./a.out
Sequential (1): 2.0289999999999999 s
Sequential (2): 1.6499999999999999 s
Parallel (1): 0.61199999999999999 s
Parallel (2): 0.49399999999999999 s
Parallel (*): 1.4470000000000001 s
$ OMP_NUM_THREADS=8 ./a.out
Sequential (1): 2.0059999999999998 s
Sequential (2): 1.6499999999999999 s
Parallel (1): 0.56200000000000006 s
Parallel (2): 0.41299999999999998 s
Parallel (*): 1.7689999999999999 s
main.f90:
program main
   implicit none
   integer :: i, j, k, tstart, tstop, rate, idiv, ind1, ind2
   integer, parameter :: bufsize = 100000, n = bufsize * 20, llen = 22, ndiv = 8
   character(len=:), allocatable :: buf(:), words(:)
   character(llen + 1) :: word
   real(8), allocatable :: a(:)

   allocate( a( n ) )

   ! Some input
   do i = 1, n
      a(i) = dble(i)**2
   enddo

   !.........................................................
   ! Formatted writes (1).
   open(unit=10, file="dat_seq1.txt", form="formatted")
   call system_clock(tstart, rate)
   do i = 1, n
      write(10,"(ES21.15)") a(i)
   end do
   call system_clock(tstop, rate)
   print *, 'Sequential (1):', dble(tstop - tstart) / dble(rate), 's'
   close(10)

   !.........................................................
   ! Formatted writes (2).
   open(unit=10, file="dat_seq2.txt", form="formatted")
   call system_clock(tstart, rate)
   write( 10, "(ES21.15)" ) a
   ! write( 10, "(ES21.15)" ) ( a( k ), k = 1, n )
   call system_clock(tstop, rate)
   print *, 'Sequential (2):', dble(tstop - tstart) / dble(rate), 's'
   close(10)

   !.........................................................
   ! Parallel writes (1): make a formatted string for many elements at once
   allocate( character( llen * bufsize / ndiv ) :: buf( ndiv ) )
   open(unit=10, file="dat_par1.txt", access="stream", form="unformatted")
   call system_clock(tstart, rate)
   do i = 1, n, bufsize
      !$omp parallel do private( idiv, ind1, ind2, k ) shared( i, buf, a )
      do idiv = 1, ndiv
         ind1 = i + (idiv - 1) * bufsize / ndiv
         ind2 = ind1 + bufsize / ndiv - 1
         write( buf( idiv ), "(*(ES21.15, A1))" ) &
               ( a( k ), char(10), k = ind1, ind2 )
      enddo
      !$omp end parallel do
      write(10) buf
   end do
   call system_clock(tstop, rate)
   print *, 'Parallel (1):', dble(tstop - tstart) / dble(rate), 's'
   deallocate(buf)
   close(10)

   !.........................................................
   ! Parallel writes (2): sprintf vs write for each element
   allocate( character( llen ) :: words( n ) )
   open(unit=10, file="dat_par2.txt", access="stream", form="unformatted")
   call system_clock(tstart, rate)
   !$omp parallel do private( i, word ) shared( a, words )
   do i = 1, n
      ! write( word, "(ES21.15, A1)" ) a( i ), char(10)   !! slow (*1)
      call conv( word, a( i ) )                           !! sprintf (*2)
      words( i ) = word( 1 : llen )
   enddo
   !$omp end parallel do
   write( 10 ) words
   call system_clock(tstop, rate)
   print *, 'Parallel (2):', dble(tstop - tstart) / dble(rate), 's'
   close(10)
end program
conv.c:
#include <stdio.h>

void conv_( char *buf, double *val )
{
    sprintf( buf, "%21.15E\n", *val );
}

There is no specific subroutine for the generic ‘mpi_send’

subroutine collect(rank, nprocs, n_local, n_global, u_initial_local)
use mpi
implicit none
integer*8 :: i_local_low, i_local_high
integer*8 :: i_global_low, i_global_high
integer*8 :: i_local, i_global
integer*8 :: n_local, n_global
real*8 :: u_initial_local(n_local)
real*8, dimension(:), allocatable :: u_global
integer :: procs
integer*8 :: n_local_procs
! Data declarations for MPI
integer :: ierr ! error signal variable, Standard value - 0
integer :: rank ! process ID (pid) / Number
integer :: nprocs ! number of processors
! MPI send/ receive arguments
integer :: buffer(2)
integer, parameter :: collect1 = 10
integer, parameter :: collect2 = 20
! status variable - tells the status of send/ received calls
! Needed for receive subroutine
integer, dimension(MPI_STATUS_SIZE) :: status1
i_global_low = (rank *(n_global-1))/nprocs
i_global_high = ((rank+1) *(n_global-1))/nprocs
if (rank > 0) then
i_global_low = i_global_low - 1
end if
i_local_low = 0
i_local_high = i_global_high - i_global_low
if (rank == 0) then
allocate(u_global(1:n_global))
do i_local = i_local_low, i_local_high
i_global = i_global_low + i_local - i_local_low
u_global(i_global) = u_initial_local(i_local)
end do
do procs = 1,nprocs-1
call MPI_RECV(buffer, 2, MPI_INTEGER, procs, collect1, MPI_COMM_WORLD, status1, ierr)
i_global_low = buffer(1)
n_local_procs = buffer(2)
call MPI_RECV(u_global(i_global_low+1), n_local_procs, MPI_DOUBLE_PRECISION, procs, collect2, MPI_COMM_WORLD, status1, ierr)
end do
print *, u_global
else
buffer(1) = i_global_low
buffer(2) = n_local
call MPI_SEND(buffer, 2, MPI_INTEGER, 0, collect1, MPI_COMM_WORLD, ierr)
call MPI_SEND(u_initial_local, n_local, MPI_DOUBLE_PRECISION, 0, collect2, MPI_COMM_WORLD, ierr)
end if
return
end subroutine collect
I am getting the error for the MPI_SEND and MPI_RECV calls with the collect2 tag: "There is no specific subroutine for the generic ‘mpi_recv’ at (1)", where (1) points at the end of the call (.......ierr). The MPI_SEND for the collect2 tag sends an array and the MPI_RECV receives that array.
This does not happen for the collect1 tag.
Your n_local is integer*8 but it must be a default integer (see How to debug Fortran 90 compile error "There is no specific subroutine for the generic 'foo' at (1)"?).
There are many articles (like https://blogs.cisco.com/performance/can-i-mpi_send-and-mpi_recv-with-a-count-larger-than-2-billion) about the problem with large arrays (more than maxint elements) and MPI. If you do have problems with n_local being too large for a default integer, you can use derived types (like MPI_Type_contiguous) to lower the number of elements passed to MPI procedures so that the count fits into a 4-byte integer; a sketch follows.
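For illustration, here is a hedged, untested sketch of that workaround for the send in the code above, assuming (purely for brevity) that n_local is a multiple of a chosen block size; blk, blocktype and nblocks are names introduced here:

! Send u_initial_local in units of a contiguous derived type, so the count
! passed to MPI_SEND is a small default integer even if n_local itself does
! not fit in 4 bytes. Assumes mod(n_local, blk) == 0 for simplicity.
integer, parameter :: blk = 1048576        ! elements per derived-type unit (arbitrary choice)
integer :: blocktype, nblocks
call MPI_TYPE_CONTIGUOUS(blk, MPI_DOUBLE_PRECISION, blocktype, ierr)
call MPI_TYPE_COMMIT(blocktype, ierr)
nblocks = int(n_local / blk)               ! fits in a default integer
call MPI_SEND(u_initial_local, nblocks, blocktype, 0, collect2, MPI_COMM_WORLD, ierr)
call MPI_TYPE_FREE(blocktype, ierr)

The matching MPI_RECV on rank 0 would have to use the same derived type and block count.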

MPI_SCATTERV in Fortran - sending rows of 2D array

I have a 2D array of integers and I want to send its rows to separate processes. I assume that the number of rows (M=5) is not evenly divisible by the number of processes (size = 4), so in my case process 0 will obtain an additional row. The size of the 2D array A is MxN (5x10).
Here is my code:
PROGRAM SCATTERV_MATRIX
INCLUDE 'mpif.h'
integer :: rank, size, ierr, dest, src, tag !MPI variables
integer :: status(MPI_STATUS_SIZE) !MPI variables
INTEGER, PARAMETER :: N = 10 !number of columns
INTEGER, PARAMETER :: M = 5 !number of rows
INTEGER, ALLOCATABLE, DIMENSION(:,:) :: A !MxN matrix A
INTEGER :: NEWTYPE, RESIZEDTYPE !MPI derived data types
INTEGER, ALLOCATABLE, DIMENSION(:,:) :: LOCAL
INTEGER, ALLOCATABLE :: SENDCOUNTS(:), DISPLS(:)
INTEGER :: RECVCOUNT, NRBUF
INTEGER :: MMIN, MEXTRA, INTSIZE, K, I, J
INTEGER :: START, EXTENT !(KIND=MPI_ADRESS_KIND)
CALL MPI_INIT(ierr)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
IF ( rank == 0 ) THEN !allocate and create 2Darray
ALLOCATE( A (M, N) )
K = 1
DO I = 1, M
DO J = 1, N
A(I, J) = K
K = K + 1
END DO
END DO
END IF
ALLOCATE( SENDCOUNTS(0:size-1), DISPLS(0:size-1) )
MMIN = M/size !number of rows divided by number of processors
MEXTRA = MOD(M, size) !extra rows
K = 0
DO I = 0, size-1
IF (I < MEXTRA) THEN !SENDCOUNTS=(/2,1,1,1/)
SENDCOUNTS(I) = MMIN + 1
ELSE
SENDCOUNTS(I) = MMIN
END IF
DISPLS(I) = K !DISPLS=(/0,2,3,4/)
K = K + SENDCOUNTS(I)
END DO
RECVCOUNT = SENDCOUNTS(rank)
ALLOCATE( LOCAL(RECVCOUNT,N) )
CALL MPI_TYPE_VECTOR(N, 1, M, MPI_INTEGER, NEWTYPE, ierr)
CALL MPI_TYPE_COMMIT(NEWTYPE, ierr)
START = 0
CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
EXTENT = 1*INTSIZE
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
CALL MPI_TYPE_COMMIT(RESIZEDTYPE, ierr)
LOCAL(:, :) = 0
CALL MPI_SCATTERV( &
A, SENDCOUNTS, DISPLS, RESIZEDTYPE, &
LOCAL, RECVCOUNT*N, MPI_INTEGER, &
0, MPI_COMM_WORLD, ierr)
WRITE(*,*) rank, ':', LOCAL
CALL MPI_FINALIZE(ierr)
END PROGRAM SCATTERV_MATRIX
After successful compilation I get a "Program Exception - access violation" error. All my previous Fortran MPI programs worked fine. There must be some bug in the code, probably in MPI_SCATTERV.
I was mainly following this answer. I will be grateful for any suggestion. Thank you.
There's an error in your code:
INTEGER :: START, EXTENT !(KIND=MPI_ADRESS_KIND)
This line should be:
INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT
In MPI, anything that is related to a memory address, or to similar concepts such as memory displacements, file sizes, or file cursors, must not be a normal integer. Somehow you had this information in your comment, but you also misspelled MPI_ADDRESS_KIND there.
Vladimir F correctly pointed out that you should 'USE MPI' instead of 'INCLUDE 'mpif.h''. This gives the compiler the opportunity to check the data types. For example, gfortran gives the following error message:
test.f90:59:71:
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
1
Error: There is no specific subroutine for the generic
‘mpi_type_create_resized’ at (1)
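Putting both fixes together, a minimal sketch of only the lines that change relative to the question (everything else stays the same; the INT conversion is just one way to build the address-kind extent):

USE MPI                                    ! at the top of the program, instead of INCLUDE 'mpif.h'
INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT
START = 0_MPI_ADDRESS_KIND
CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
EXTENT = INT(INTSIZE, MPI_ADDRESS_KIND)    ! extent of one default integer, as an address-kind value
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)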

Seg Faults while using MPI (Fortran)

I am very new to MPI and Fortran alike. I have been trying to figure this out for a few hours now, with no luck. In my code below, everything works just fine (besides the fact that my s variable is isolated between processes). When I try to implement the MPI_SEND and MPI_RECV, I get seg faults constantly. I can't seem to figure out what the issue is.
SUBROUTINE do_mpi_simpsons(l, u, n)
INTEGER, INTENT (in) :: l, u, n
! REAL, INTENT (in) :: func
DOUBLE PRECISION :: result, walltime
INTEGER :: clock_start, clock_rate, clock_max, clock_end
DOUBLE PRECISION :: h, s, argVal, finalS
INTEGER :: rank, size, ierror, tag, status(MPI_STATUS_SIZE), count, start, stop
walltime = 0.0D0
h = (u - l) / dble(n)
s = func_hw(dble(l)) + func_hw(dble(u))
CALL system_clock(clock_start, clock_rate, clock_max)
CALL MPI_INIT(ierror)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierror)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierror)
count = n / size
start = rank * count
stop = start + count -1
! WRITE(*,*) "Start: ", start
! WRITE(*,*) "Stop: ", stop
WRITE(*,*) rank
DO i = start, stop, 2
s = s + 4 * func_hw(dble(l)+dble(i)*h)
END DO
DO i = start+1, stop-1, 2
s = s + 2 * func_hw(dble(l)+dble(i)*h)
END DO
! This block is causing the seg faults
IF(rank.eq.0) THEN
finalS = s
DO i = 1, size - 1
CALL MPI_RECV(s, 64, MPI_DOUBLE, i, 1, MPI_COMM_WORLD, status, ierror)
finalS = finalS + s
END DO
ELSE
CALL MPI_SEND(s, 64, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, ierror)
END IF
CALL MPI_FINALIZE(ierror)
CALL system_clock(clock_end, clock_rate, clock_max)
walltime = walltime + real(clock_end - clock_start) / real(clock_rate)
result = s * h / 3
WRITE(*,*) "walltime = ", walltime, " seconds"
WRITE(*,*) "result = ", result
END SUBROUTINE
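For reference, summing per-process partial results is also what a single MPI_REDUCE call does; a hedged sketch using the variables already declared in the subroutine above (note that s is a scalar DOUBLE PRECISION, so the count is 1 and the Fortran datatype is MPI_DOUBLE_PRECISION rather than the C-side name MPI_DOUBLE):

! Combine the per-rank partial sums into finalS on rank 0 in one collective
! call, as an alternative to the explicit MPI_SEND/MPI_RECV loop above.
CALL MPI_REDUCE(s, finalS, 1, MPI_DOUBLE_PRECISION, MPI_SUM, 0, MPI_COMM_WORLD, ierror)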