PARPACK implementation runs into memory errors - Fortran

I am writing a Fortran 90 module to run PARPACK on a given matrix. I have an existing ARPACK code that works as expected; when I converted it to PARPACK, it started crashing with memory errors. I am fairly new to coding and Fortran, so please excuse any blunders I've made.
The code:
! PARPACK module
module parpack
  implicit none
contains

  subroutine parp
    ! use mpi
    include '/usr/lib/x86_64-linux-gnu/openmpi/include/mpif.h'
    integer comm, myid, nprocs, rc, nloc, status(MPI_STATUS_SIZE)
    integer, parameter :: pres = 8
    integer nev, ncv, maxn, maxnev, maxncv
    parameter (maxn = 10**7, maxnev = maxn-1, maxncv = maxn)
    ! Arrays for PDNAUPD
    integer iparam(11), ipntr(14)
    logical, allocatable :: select(:)
    real(kind=pres), allocatable :: workd(:), workl(:), worktmp1(:), worktmp2(:)
    ! Scalars for PDNAUPD
    character bmat*1, which*2
    integer ido, n, info, ierr, ldv
    integer i, j, ishfts, maxitr, mode1, nconv
    integer(kind=pres) lworkl
    real(kind=pres) tol
    ! Arrays for PDNEUPD
    real(kind=pres), allocatable :: d(:,:), resid(:), v(:,:), workev(:), z(:,:)
    ! Scalars for PDNEUPD
    logical rvec, first
    real sigmar, sigmai
    !==============================================
    real(kind=pres), allocatable :: mat(:,:)

    open (11, file = 'matrix.dat', status = 'old')
    read (11,*) n
    !=============================================
    ! Dimension of the problem
    nev = n/10
    ncv = nev + 2
    ldv = n
    bmat = 'I'
    which = 'LM'
    ! Additional environment variables
    ido = 0
    tol = 0.0E+0
    info = 0
    lworkl = 3*ncv**2 + 6*ncv
    ! Algorithm mode specifications:
    ishfts = 1
    maxitr = 300
    mode1 = 1
    iparam(1) = ishfts
    iparam(3) = maxitr
    iparam(7) = mode1
    ! Distribution to nodes
    !=============================================
    ! Matrix allocation
    allocate (mat(n,n))
    ! PDNAUPD
    allocate (workd(5*n))
    allocate (workl(lworkl))
    allocate (resid(n))
    allocate (worktmp1(n))
    allocate (worktmp2(n))
    ! PDNEUPD
    allocate (d(n,3))
    allocate (v(ldv,ncv))
    allocate (workev(3*n))
    allocate (z(ldv,ncv))
    allocate (select(ncv))
    !===========================================
    ! Read the matrix from the provided file
    mat = 0
    read (11,*) mat
    mat = transpose(mat)
    !===========================================
    ! MPI calls
    call MPI_INIT(ierr)
    comm = MPI_COMM_WORLD
    call MPI_COMM_RANK(comm, myid, ierr)
    call MPI_COMM_SIZE(comm, nprocs, ierr)
    nloc = n/nprocs
    ! if ( mod(n, nprocs) .gt. myid ) nloc = nloc + n
    !===============================================
20  continue
    call pdnaupd(comm, ido, bmat, nloc, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info) ! Top-level solver
    call MPI_BARRIER(comm, ierr)
    print *, ido, info, iparam(5) ! for testing
    !===============================================
    if (ido .eq. -1 .or. ido .eq. 1) then
      worktmp1 = 0
      if (myid .ne. 0) then ! It is a slave
        call MPI_SEND(workd(ipntr(1)), nloc, MPI_DOUBLE_PRECISION, 0, 0, comm, ierr)
      else ! It is the host
        worktmp1(1:nloc) = workd(ipntr(1):ipntr(1)+nloc-1)
        i = nprocs
        if (i .gt. 1) then
          do i = 1, nprocs-1
            call MPI_RECV(worktmp1(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 0, comm, status, ierr)
          end do
        endif
      endif
      call MPI_BARRIER(comm, ierr)
      if (myid .eq. 0) then ! It is the host
        ! Matrix multiplication
        worktmp2 = 0
        call matmultiply(n, mat, worktmp1, worktmp2)
        workd(ipntr(2):ipntr(2)+nloc-1) = worktmp2(1:nloc)
        i = nprocs
        if (i .gt. 1) then
          do i = 1, nprocs-1
            call MPI_SEND(worktmp2(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 100*i, comm, ierr)
          end do
        endif
      else ! It is a slave
        call MPI_RECV(workd(ipntr(2)), nloc, MPI_DOUBLE_PRECISION, 0, 100*myid, comm, status, ierr)
      endif
      go to 20
      ! call matmultiply(n, mat, workd(ipntr(1):ipntr(1)+n-1), workd(ipntr(2):ipntr(2)+n-1))
      ! go to 20
    endif
    ! print *, info ! for testing
    !===============================================================
    ! Post-processing for eigenvalues
    rvec = .true.
    if (myid .eq. 0) then
      call pdneupd ( comm, rvec, 'A', select, d, d(1,2), z, ldv, sigmar, sigmai, &
           workev, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, &
           workd, workl, lworkl, info)
    endif
    ! print *, info ! for testing
    close(11)
    call MPI_FINALIZE(ierr)
    return
  end subroutine

  !==============================================================================================
  ! Additional function definitions
  subroutine matmultiply(n, mat, v, w)
    integer n, i, j
    integer, parameter :: pres = 8
    real(kind = pres) mat(n,n), temp(n)
    real(kind = pres) v(n), w(n)
    temp = 0
    do j = 1, n
      do i = 1, n
        temp(j) = temp(j) + mat(i,j)*v(i)
      end do
    end do
    w = temp
    return
  end subroutine

end module
I apologize for the many redundant lines and comments; I have yet to clean the code up for finalization.
When I run the code on a single thread with ./a.out, I get the following output (a long chain, as the code exhausts all iterations; the first number printed is ido, which starts at 1 instead of -1 for some reason, the second is info, and the third is iparam(5), which is a random number until the final iteration):
Invalid MIT-MAGIC-COOKIE-1 key
 1 0 1629760560
 1 0 1629760560
 1 0 1629760560
 ...
 99 1 1
munmap_chunk(): invalid pointer
Program received signal SIGABRT: Process abort signal.
Backtrace for this error:
#0 0x7f5a863d0d01 in ???
#1 0x7f5a863cfed5 in ???
#2 0x7f5a8620420f in ???
#3 0x7f5a8620418b in ???
#4 0x7f5a861e3858 in ???
#5 0x7f5a8624e3ed in ???
#6 0x7f5a8625647b in ???
#7 0x7f5a862566cb in ???
#8 0x560f05ac1819 in ???
#9 0x560f05abd7bc in checker
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:45
#10 0x560f05abd8d9 in main
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:3
Aborted (core dumped)
Line 45 in ptest.f90 is call parp; line 3 is use parpack (parpack being the name of the module).
The main code is as follows:
program checker
  use parpack
  use arpack
  ! use lapack
  implicit none
  ! Program to test LAPACK and ARPACK
  ! 1. Variable definition
  integer a, n, i
  real, allocatable :: mat(:,:)
  real t0, t1

  a = 2
  ! Loop
  ! do 20 a = 1,3
  ! Open file
  open(unit=10, file = 'matrix.dat', status = 'replace')
  ! 2. Generate Symmetric matrices
  n = 10**a
  allocate (mat(n,n))
  call RANDOM_NUMBER(mat)
  ! 3. Save symmetric matrices to r.dat
  write (10,*) n
  do 30 i = 1, n
    write(10,*) mat(i,:)
30 end do
  deallocate(mat)
  close(10)
  ! 4. Test time taken by each of the routines
  ! call cpu_time(t0)
  ! call arp
  ! call cpu_time(t1)
  ! print *, 'n:', n, 'ARPACK time taken:', t1-t0
  call cpu_time(t0)
  call parp
  call cpu_time(t1)
  print *, 'n:', n, 'PARPACK time taken:', t1-t0
  ! 20 end do
end program checker
The memory error occurs at the very end of the subroutine, when the main program tries to return from it. I have verified this by printing statements as the last line of the subroutine.
And when running mpirun -np 4 a.out, the code enters pdneupd and hangs there indefinitely. Could anyone help?
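For reference (not the poster's code): the PARPACK examples, e.g. pdndrv1.f, structure the reverse-communication loop rather differently. Every argument that is n in the serial driver becomes the local block length nloc, the matrix-vector product fills only the calling rank's nloc entries of workd(ipntr(2)), and pdneupd is collective, i.e. called by every rank with nloc, not by rank 0 alone with n. A minimal sketch of that shape, assuming n is divisible by nprocs and that every rank holds the full matrix (as in the code above):

subroutine parp_sketch(n, mat)
  use mpi
  implicit none
  integer, intent(in) :: n
  real(8), intent(in) :: mat(n,n)            ! every rank holds the full matrix here
  integer :: comm, myid, nprocs, ierr, nloc, ido, info, nev, ncv, lworkl
  integer :: iparam(11), ipntr(14)
  logical :: rvec
  logical, allocatable :: select(:)
  real(8) :: tol, sigmar, sigmai
  real(8), allocatable :: resid(:), v(:,:), workd(:), workl(:), workev(:)
  real(8), allocatable :: d(:,:), xfull(:)

  comm = MPI_COMM_WORLD
  call MPI_COMM_RANK(comm, myid, ierr)
  call MPI_COMM_SIZE(comm, nprocs, ierr)
  nloc = n/nprocs                            ! this rank owns rows myid*nloc+1 .. (myid+1)*nloc
  nev = 4; ncv = 10; tol = 0.0d0; ido = 0; info = 0
  lworkl = 3*ncv**2 + 6*ncv                  ! unchanged: depends on ncv only
  allocate (resid(nloc), v(nloc,ncv), workd(3*nloc), workl(lworkl))
  allocate (workev(3*ncv), d(ncv,3), select(ncv), xfull(n))
  iparam = 0; iparam(1) = 1; iparam(3) = 300; iparam(7) = 1

  do
    ! Local dimension nloc everywhere the serial driver had n
    call pdnaupd(comm, ido, 'I', nloc, 'LM', nev, tol, resid, ncv, &
                 v, nloc, iparam, ipntr, workd, workl, lworkl, info)
    if (ido /= -1 .and. ido /= 1) exit
    ! Assemble the full input vector, then write back only the local
    ! nloc entries of y = A*x: no funnelling through rank 0
    call MPI_ALLGATHER(workd(ipntr(1)), nloc, MPI_DOUBLE_PRECISION, &
                       xfull, nloc, MPI_DOUBLE_PRECISION, comm, ierr)
    workd(ipntr(2):ipntr(2)+nloc-1) = matmul(mat(myid*nloc+1:(myid+1)*nloc, :), xfull)
  end do

  ! pdneupd is collective: every rank calls it, again with nloc, not n
  rvec = .true.
  call pdneupd(comm, rvec, 'A', select, d, d(1,2), v, nloc, sigmar, sigmai, &
               workev, 'I', nloc, 'LM', nev, tol, resid, ncv, v, nloc, &
               iparam, ipntr, workd, workl, lworkl, info)
end subroutine parp_sketch

If only rank 0 calls pdneupd, the remaining ranks never enter the call, which would match the observed hang under mpirun -np 4.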

Related

There is no specific subroutine for the generic ‘mpi_send’

subroutine collect(rank, nprocs, n_local, n_global, u_initial_local)
  use mpi
  implicit none
  integer*8 :: i_local_low, i_local_high
  integer*8 :: i_global_low, i_global_high
  integer*8 :: i_local, i_global
  integer*8 :: n_local, n_global
  real*8 :: u_initial_local(n_local)
  real*8, dimension(:), allocatable :: u_global
  integer :: procs
  integer*8 :: n_local_procs
  ! Data declarations for MPI
  integer :: ierr    ! error signal variable, standard value - 0
  integer :: rank    ! process ID (pid) / number
  integer :: nprocs  ! number of processors
  ! MPI send/receive arguments
  integer :: buffer(2)
  integer, parameter :: collect1 = 10
  integer, parameter :: collect2 = 20
  ! Status variable - tells the status of send/receive calls
  ! Needed for the receive subroutine
  integer, dimension(MPI_STATUS_SIZE) :: status1

  i_global_low  = (rank*(n_global-1))/nprocs
  i_global_high = ((rank+1)*(n_global-1))/nprocs
  if (rank > 0) then
    i_global_low = i_global_low - 1
  end if
  i_local_low = 0
  i_local_high = i_global_high - i_global_low

  if (rank == 0) then
    allocate(u_global(1:n_global))
    do i_local = i_local_low, i_local_high
      i_global = i_global_low + i_local - i_local_low
      u_global(i_global) = u_initial_local(i_local)
    end do
    do procs = 1, nprocs-1
      call MPI_RECV(buffer, 2, MPI_INTEGER, procs, collect1, MPI_COMM_WORLD, status1, ierr)
      i_global_low = buffer(1)
      n_local_procs = buffer(2)
      call MPI_RECV(u_global(i_global_low+1), n_local_procs, MPI_DOUBLE_PRECISION, procs, collect2, MPI_COMM_WORLD, status1, ierr)
    end do
    print *, u_global
  else
    buffer(1) = i_global_low
    buffer(2) = n_local
    call MPI_SEND(buffer, 2, MPI_INTEGER, 0, collect1, MPI_COMM_WORLD, ierr)
    call MPI_SEND(u_initial_local, n_local, MPI_DOUBLE_PRECISION, 0, collect2, MPI_COMM_WORLD, ierr)
  end if
  return
end subroutine collect
I am getting the error for the MPI_SEND and MPI_RECV calls with the collect2 tag: "There is no specific subroutine for the generic ‘mpi_recv’ at (1)", where (1) points at the end of the call (...ierr). The MPI_SEND for the collect2 tag sends an array and the MPI_RECV receives that array.
This does not happen for the collect1 tag.
Your n_local is integer*8 but it must be integer (see How to debug Fortran 90 compile error "There is no specific subroutine for the generic 'foo' at (1)"?).
There are many articles (like https://blogs.cisco.com/performance/can-i-mpi_send-and-mpi_recv-with-a-count-larger-than-2-billion) about the problem of passing large arrays (more than maxint elements) to MPI. If n_local really can be too large for a default integer, you can use derived types (like MPI_Type_contiguous) to lower the number of elements passed to the MPI procedures so that the count fits into a 4-byte integer.
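A minimal sketch of that derived-type workaround (my illustration, not from the original answer), assuming for brevity that the element count is a multiple of the chunk size:

subroutine send_big(u, n_big, dest, tag)
  use mpi
  implicit none
  integer*8, intent(in) :: n_big               ! may exceed huge(1)
  real*8, intent(in) :: u(n_big)
  integer, intent(in) :: dest, tag
  integer, parameter :: chunk = 1048576        ! elements per derived-type block
  integer :: chunk_type, nchunks, ierr

  call MPI_TYPE_CONTIGUOUS(chunk, MPI_DOUBLE_PRECISION, chunk_type, ierr)
  call MPI_TYPE_COMMIT(chunk_type, ierr)
  nchunks = int(n_big/chunk)                   ! count now fits in a default integer
  call MPI_SEND(u, nchunks, chunk_type, dest, tag, MPI_COMM_WORLD, ierr)
  call MPI_TYPE_FREE(chunk_type, ierr)
end subroutine send_big

The matching receive posts the same chunk_type and the same default-integer count.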

MPI_Allgather receiving junk

I have the following code:
real :: s_s, d_s, s_r(size), d_r(size)
integer :: k, k_r(size)
! - size = number of processors
! - Do something to initialise s_s, d_s, k
write(*,*) "SENDING >>>>"
write(*,*) s_s, d_s
call MPI_Allgather(s_s, 1, MPI_REAL, &
                   s_r, 1, MPI_REAL, MPI_COMM_PGM, mpi_err)
call MPI_Allgather(d_s, 1, MPI_REAL, &
                   d_r, 1, MPI_REAL, MPI_COMM_PGM, mpi_err)
call MPI_Allgather(k, 1, MPI_INTEGER, &
                   k_r, 1, MPI_INTEGER, MPI_COMM_PGM, mpi_err)
write(*,*) "RECEIVED <<<<"
write(*,*) s_r, d_r, k_r
This generates the following output:
SENDING >>>>
-1803.80339864908 0.616157856320407
RECEIVED <<<<
6.953077622513053E-310 3.565412685916647E-314 1.221334434576037E-314 1.498827614035474E-314
6.952991536467244E-310 6.953288052096687E-310 6.953108563966064E-310 2.350861403096908E-314
4 1 2 3
k_r is gathered correctly; however, s_r and d_r seem to be receiving junk. Could this be because of the MPI datatypes? I tried MPI_REAL, MPI_REAL8, and MPI_DOUBLE, but that didn't work. Furthermore, mpi_err = MPI_SUCCESS.
What could I do to resolve this?
EDIT 1
I worked on the following prototype program:
program allgather
  implicit none
  include "mpif.h"
  real a(4)
  integer rank, size, ierr
  real as(4)
  real ar(16)
  integer i, j, k, z
  a = 1
  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
  if (size .ne. 4) then
    write(*,*) 'Error!: # of processors must be equal to 4'
    write(*,*) 'Program aborting....'
    call MPI_ABORT(ierr)
  endif
  do k = 1, 4
    if (rank == (mod(k, size))) then
      a(k) = k
    else
      a(k) = 0.0
    endif
  enddo
  write(*,*) "Rank :", rank
  write(*,*) a
  call MPI_Allgather(a, 4, MPI_REAL, ar, 4, &
                     MPI_REAL, MPI_COMM_WORLD, ierr)
  write(*,*) "Received array"
  write(*,*) ar
  do i = 1, 16
    if (ar(i) /= 0.0) then
      z = mod(i, size)
      if (z == 0) then
        a(size) = ar(i)
      else
        a(z) = ar(i)
      endif
    endif
  enddo
  write(*,*) "---------"
  write(*,*) a
  write(*,*) "---------"
  call MPI_FINALIZE(ierr)
end
This generates the expected results, i.e. ar does not gather junk. However, I am unable to tell what differs between the two implementations.
It turns out that for the project, the datatype to be used was MPI_FLT. It is strange that MPI_FLT works while MPI_REALx (x = 4, 8) and MPI_FLOAT do not. I grep-ed for MPI_FLT in the project to see what it is defined as, but it did not turn up anywhere.
The OpenMPI version I'm using is:
$ mpirun --version
mpirun (Open MPI) 3.0.0
The compiler I use is:
$ mpifort --version
ifort (IFORT) 19.0.1.144 20181018
In a future edit I will elaborate on the cause.
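Until then, one plausible culprit worth checking (my assumption, not established in the thread): a compiler option that promotes default real to 8 bytes (such as ifort's -r8 or -real-size 64) while MPI_REAL still describes a 4-byte real; gathering with mismatched sizes yields exactly this kind of junk without any MPI error. A quick self-contained check:

program check_type_match
  use mpi
  implicit none
  real :: s_s
  integer :: mpi_bytes, ierr
  call MPI_INIT(ierr)
  call MPI_TYPE_SIZE(MPI_REAL, mpi_bytes, ierr)
  ! storage_size (Fortran 2008) returns bits; MPI_TYPE_SIZE returns bytes
  print *, 'Fortran real:', storage_size(s_s)/8, 'bytes; MPI_REAL:', mpi_bytes, 'bytes'
  call MPI_FINALIZE(ierr)
end program check_type_match

If the two numbers differ, the buffer declarations and the MPI datatype need to be brought back in line with each other.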

Gathering 2D Arrays in Fortran90, MPI_Gather

I have a problem combining several 2D arrays into one big 2D array using MPI in Fortran.
I have equal-size 2D arrays of real numbers, each contained in a different process:
numerical(subdsize,nt)
I want to combine them to one big array
numerical_final(nx,nt)
I am using the following command
CALL MPI_Gather(numerical(1:subdsize,nt),subdsize*nt,MPI_DOUBLE_PRECISION,numerical_final,subdsize*nt,MPI_DOUBLE_PRECISION,0, MPI_COMM_WORLD, mpierror)
Unfortunately, the data in the numerical_final array is a complete mess. I have looked for solutions everywhere; I read this topic, but it did not help me:
sending blocks of 2D array in C using MPI
I am using Intel Fortran 2018 compiler and Ubuntu 16.04.
Full code below.
I will be grateful for the help.
PROGRAM Advection
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER :: nt, nx, i, steptime, tag, j
  DOUBLE PRECISION :: R_dx, R_dt, R_c, R_cfl, R_t
  DOUBLE PRECISION, DIMENSION(3) :: R_input
  DOUBLE PRECISION, ALLOCATABLE, DIMENSION(:) :: xcoord
  DOUBLE PRECISION, ALLOCATABLE, DIMENSION(:,:) :: numerical, numerical_final
  DOUBLE PRECISION :: time_begin, time_end, time_elapsed
  INTEGER :: myrank, nproc, mpierror, xdomains, subdsize
  INTEGER :: status(MPI_STATUS_SIZE)

  CALL MPI_Init(mpierror)
  CALL MPI_Comm_size(MPI_COMM_WORLD, nproc, mpierror)
  CALL MPI_Comm_rank(MPI_COMM_WORLD, myrank, mpierror)
  IF (nproc < 2) THEN
    PRINT*, "Error, only more than 1"
    CALL MPI_ABORT
  END IF
  IF (myrank .EQ. 0) THEN
    OPEN(UNIT = 1, FILE = 'inputdata.dat')
    READ(1,*) R_input(1)
    READ(1,*) R_input(2)
    READ(1,*) R_input(3)
    READ(1,*) nx
    CLOSE(1)
  END IF
  CALL MPI_Bcast(R_input, 3, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, mpierror)
  CALL MPI_Bcast(nx, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, mpierror)
  R_c   = R_input(1)
  R_cfl = R_input(2)
  R_t   = R_input(3)
  R_dx  = 80./(nx-1)
  nt    = 15
  R_dt  = R_t/(nt-1)
  IF (myrank .EQ. 0) THEN
    PRINT*, R_c*R_dt/R_dx
  END IF
  xdomains = nproc
  IF ((MOD(nx,xdomains)) == 0) THEN
    subdsize = nx/xdomains
  ELSE
    DO
      nx = nx + 1
      IF ((MOD(nx,xdomains)) .EQ. 0) THEN
        subdsize = nx/xdomains
        EXIT
      END IF
    END DO
  END IF
  ! Arrays
  ALLOCATE(xcoord(0:subdsize+1))
  ALLOCATE(numerical(0:subdsize+1,nt))
  DO i = 0, subdsize+1
    xcoord(i) = -40.-R_dx+i*R_dx+myrank*R_dx*subdsize
  END DO
  DO i = 0, subdsize+1
    numerical(i,1) = 0.5*(sign(1.,xcoord(i))+1.0)
  END DO
  IF (myrank .EQ. 0) THEN
    DO i = 1, nt
      numerical(0:1,i) = 0.
    END DO
  END IF
  IF (myrank .EQ. nproc-1) THEN
    DO i = 1, nt
      numerical(subdsize:subdsize+1,i) = 1.
    END DO
  END IF
  DO steptime = 1, nt-1
    tag = 1
    IF (myrank .LT. nproc-1) THEN
      CALL MPI_Send(numerical(subdsize,steptime), 1, MPI_DOUBLE_PRECISION, myrank+1, tag, MPI_COMM_WORLD, mpierror)
    END IF
    IF (myrank .GT. 0) THEN
      CALL MPI_Recv(numerical(0,steptime), 1, MPI_DOUBLE_PRECISION, myrank-1, tag, MPI_COMM_WORLD, status, mpierror)
    END IF
    IF (myrank .EQ. 0) THEN
      DO i = 2, subdsize+1
        numerical(i,steptime+1) = numerical(i,steptime) - R_c*R_dt/R_dx*(numerical(i,steptime)-numerical(i-1,steptime))
      END DO
    ELSE
      DO i = 1, subdsize+1
        numerical(i,steptime+1) = numerical(i,steptime) - R_c*R_dt/R_dx*(numerical(i,steptime)-numerical(i-1,steptime))
      END DO
    END IF
  END DO
  ALLOCATE(numerical_final(nx,nt))
  CALL MPI_Gather(numerical(1:subdsize,nt), subdsize*nt, MPI_DOUBLE_PRECISION, numerical_final, subdsize*nt, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, mpierror)
  CALL MPI_Finalize(mpierror)
  DEALLOCATE(numerical, numerical_final)
END PROGRAM
And the input file:
1.5 !c
0.5 !Courant
5.0 !time
100 !x points
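No answer is recorded for this question here, but two things stand out in the gather call: numerical(1:subdsize,nt) is a rank-1 slice of only subdsize elements while the count claims subdsize*nt of them, and the owned block of numerical(0:subdsize+1,:) is not contiguous because of the ghost rows 0 and subdsize+1. A common pattern that avoids both issues is to copy the owned rows into a contiguous buffer and gather that; below is a self-contained sketch of this (my illustration, under the question's arrangement that nx is divisible by nproc):

PROGRAM GATHER_BLOCKS
  IMPLICIT NONE
  INCLUDE 'mpif.h'
  INTEGER, PARAMETER :: nt = 3
  INTEGER :: nx, subdsize, myrank, nproc, mpierror, r
  DOUBLE PRECISION, ALLOCATABLE :: numerical(:,:), sendbuf(:,:)
  DOUBLE PRECISION, ALLOCATABLE :: recvbuf(:,:,:), numerical_final(:,:)
  CALL MPI_Init(mpierror)
  CALL MPI_Comm_size(MPI_COMM_WORLD, nproc, mpierror)
  CALL MPI_Comm_rank(MPI_COMM_WORLD, myrank, mpierror)
  subdsize = 4
  nx = subdsize*nproc
  ALLOCATE(numerical(0:subdsize+1, nt))          ! rows 0 and subdsize+1 are ghosts
  numerical = DBLE(myrank)                       ! stand-in for the real solution
  ALLOCATE(sendbuf(subdsize, nt))
  sendbuf = numerical(1:subdsize, :)             ! contiguous copy of the owned rows
  IF (myrank .EQ. 0) THEN
    ALLOCATE(recvbuf(subdsize, nt, nproc), numerical_final(nx, nt))
  ELSE
    ALLOCATE(recvbuf(1, 1, 1))                   ! the recv buffer is only read on the root
  END IF
  CALL MPI_Gather(sendbuf, subdsize*nt, MPI_DOUBLE_PRECISION, &
                  recvbuf, subdsize*nt, MPI_DOUBLE_PRECISION, &
                  0, MPI_COMM_WORLD, mpierror)
  IF (myrank .EQ. 0) THEN
    DO r = 1, nproc                              ! rank r-1 owns rows (r-1)*subdsize+1 .. r*subdsize
      numerical_final((r-1)*subdsize+1:r*subdsize, :) = recvbuf(:, :, r)
    END DO
    PRINT *, numerical_final(:, nt)
  END IF
  CALL MPI_Finalize(mpierror)
END PROGRAM GATHER_BLOCKS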

MPI_SCATTERV in Fortran - sending rows of 2D array

I have a 2D array of integers and I want to send its rows to separate processes. I assume that the number of rows (M = 5) is not evenly divisible by the number of processes (size = 4), so in my case process 0 obtains an additional row. The size of the 2D array A is MxN (5x10).
Here is my code
PROGRAM SCATTERV_MATRIX
  INCLUDE 'mpif.h'
  integer :: rank, size, ierr, dest, src, tag    ! MPI variables
  integer :: status(MPI_STATUS_SIZE)             ! MPI variables
  INTEGER, PARAMETER :: N = 10                   ! number of columns
  INTEGER, PARAMETER :: M = 5                    ! number of rows
  INTEGER, ALLOCATABLE, DIMENSION(:,:) :: A      ! MxN matrix A
  INTEGER :: NEWTYPE, RESIZEDTYPE                ! MPI derived data types
  INTEGER, ALLOCATABLE, DIMENSION(:,:) :: LOCAL
  INTEGER, ALLOCATABLE :: SENDCOUNTS(:), DISPLS(:)
  INTEGER :: RECVCOUNT, NRBUF
  INTEGER :: MMIN, MEXTRA, INTSIZE, K, I, J
  INTEGER :: START, EXTENT                       ! (KIND=MPI_ADRESS_KIND)
  CALL MPI_INIT(ierr)
  CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
  IF (rank == 0) THEN                            ! allocate and create 2D array
    ALLOCATE( A(M, N) )
    K = 1
    DO I = 1, M
      DO J = 1, N
        A(I, J) = K
        K = K + 1
      END DO
    END DO
  END IF
  ALLOCATE( SENDCOUNTS(0:size-1), DISPLS(0:size-1) )
  MMIN = M/size              ! number of rows divided by number of processors
  MEXTRA = MOD(M, size)      ! extra rows
  K = 0
  DO I = 0, size-1
    IF (I < MEXTRA) THEN     ! SENDCOUNTS=(/2,1,1,1/)
      SENDCOUNTS(I) = MMIN + 1
    ELSE
      SENDCOUNTS(I) = MMIN
    END IF
    DISPLS(I) = K            ! DISPLS=(/0,2,3,4/)
    K = K + SENDCOUNTS(I)
  END DO
  RECVCOUNT = SENDCOUNTS(rank)
  ALLOCATE( LOCAL(RECVCOUNT, N) )
  CALL MPI_TYPE_VECTOR(N, 1, M, MPI_INTEGER, NEWTYPE, ierr)
  CALL MPI_TYPE_COMMIT(NEWTYPE, ierr)
  START = 0
  CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
  EXTENT = 1*INTSIZE
  CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
  CALL MPI_TYPE_COMMIT(RESIZEDTYPE, ierr)
  LOCAL(:, :) = 0
  CALL MPI_SCATTERV( &
       A, SENDCOUNTS, DISPLS, RESIZEDTYPE, &
       LOCAL, RECVCOUNT*N, MPI_INTEGER, &
       0, MPI_COMM_WORLD, ierr)
  WRITE(*,*) rank, ':', LOCAL
  CALL MPI_FINALIZE(ierr)
END PROGRAM SCATTERV_MATRIX
After successful compilation I get a "Program Exception - access violation" error. All my previous Fortran MPI programs worked fine, so there must be some bug in the code, probably in MPI_SCATTERV.
I was mainly following this answer. I will be grateful for any suggestion. Thank you.
There's an error in your code:
INTEGER :: START, EXTENT !(KIND=MPI_ADRESS_KIND)
This line should be:
INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT
In MPI, anything related to a memory address, or to similar concepts such as memory displacements, file sizes, or file cursors, must not be a normal integer. You actually have this information in your comment, but you also misspelled MPI_ADDRESS_KIND there (as MPI_ADRESS_KIND).
Vladimir F correctly pointed out that you should USE MPI instead of INCLUDE 'mpif.h'. This gives the compiler the opportunity to check the data types. For example, gfortran then gives the following error message:
test.f90:59:71:

 CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
                                                                       1
Error: There is no specific subroutine for the generic
‘mpi_type_create_resized’ at (1)
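Putting both fixes together, a minimal compilable sketch (my paraphrase of the answer above, reduced to the type-construction part):

PROGRAM RESIZED_DEMO
  USE MPI                ! instead of INCLUDE 'mpif.h', so generics are checked
  IMPLICIT NONE
  INTEGER, PARAMETER :: M = 5, N = 10
  INTEGER :: NEWTYPE, RESIZEDTYPE, INTSIZE, ierr
  INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT   ! must not be default integer
  CALL MPI_INIT(ierr)
  CALL MPI_TYPE_VECTOR(N, 1, M, MPI_INTEGER, NEWTYPE, ierr)
  CALL MPI_TYPE_COMMIT(NEWTYPE, ierr)
  CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
  START = 0
  EXTENT = INTSIZE       ! one integer: the stride between consecutive rows
  CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
  CALL MPI_TYPE_COMMIT(RESIZEDTYPE, ierr)
  CALL MPI_TYPE_FREE(RESIZEDTYPE, ierr)
  CALL MPI_TYPE_FREE(NEWTYPE, ierr)
  CALL MPI_FINALIZE(ierr)
END PROGRAM RESIZED_DEMO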

Using mpi_scatterv with a 4D Fortran array

I'm trying to break up a 4D array over its third dimension and send the pieces to each node using MPI. Basically, I'm computing derivatives of a matrix, Cpq, with respect to the atom positions in each of the three Cartesian directions. Cpq is of size nat_sl x nat_sl, so dCpqdR is of size nat_sl x nat_sl x nat x 3. At the end of the day, for every s,i pair, I have to compute the matrix product of dCpqdR between the transpose of the eigenvectors of Cpq and the eigenvectors of Cpq, like so:
temp = MATMUL(TRANSPOSE(Cpq), MATMUL(dCpqdR(:, :, s, i), Cpq))
This is fine, but as it turns out, the loop over s and i is now by far the slowest part of my code. Because each iteration can be done independently, I was hoping I could break up dCpqdR and give each task its own s,i pair to compute the derivative for. That is, I'd like task 1 to get dCpqdR(:,:,1,1), task 2 to get dCpqdR(:,:,1,2), etc.
I've got this working in some sense by using a buffered send/recv pair of calls. The root node allocates a temporary array, fills it, and sends the pieces to the relevant nodes, and the relevant nodes do their computations as they wish. This works, but it can be slow and memory inefficient; I'd ideally like to break the array up in a more memory-efficient way.
The logical thing to do, then, is to use mpi_scatterv, but here is where I start running into trouble, as I can't figure out the memory layout for this. I've written this so far:
call mpi_type_create_subarray(4, (/ nat_sl, nat_sl, nat, 3 /), &
                              (/ nat_sl, nat_sl, n_pairs(me_image+1), 3 /), &
                              (/ 0, 0, 0, 0 /), mpi_order_fortran, &
                              mpi_double_precision, subarr_typ, ierr)
call mpi_type_commit(subarr_typ, ierr)
call mpi_scatterv(dCpqdR, n_pairs(me_image+1), f_displs, subarr_typ, &
                  my_dCpqdR, 3*nat_sl*3*nat_sl*3*n_pairs(me_image+1), subarr_typ, &
                  root_image, intra_image_comm, ierr)
I've computed n_pairs using this subroutine:
subroutine mbdvdw_para_init_int_forces()
  implicit none
  integer :: p, s, i, counter, k, cpu_ind
  integer :: num_unique_rpq, n_pairs_per_proc, cpu
  real(dp) :: Rpq(3), Rpq_norm, current_val
  num_pairs = nat
  if(.not.allocated(f_cpu_id)) allocate(f_cpu_id(nat, 3))
  n_pairs_per_proc = floor(dble(num_pairs)/nproc_image)
  cpu = 0
  n_pairs = 0
  counter = 1
  p = 1
  do counter = 0, num_pairs-1, 1
    n_pairs(modulo(counter, nproc_image)+1) = n_pairs(modulo(counter, nproc_image)+1) + 1
  end do
  do s = 1, nat, 1
    f_cpu_id(s) = cpu
    if((counter.lt.num_pairs)) then
      if(p.eq.n_pairs(cpu+1)) then
        cpu = cpu + 1
        p = 0
      end if
    end if
    p = p + 1
  end do
  call mp_set_displs( n_pairs, f_displs, num_pairs, nproc_image)
  f_displs = f_displs*nat_sl*nat_sl*3
end subroutine mbdvdw_para_init_int_forces
and the full subroutine containing the matrix multiplication is:
subroutine mbdvdw_interacting_energy(energy, forcedR, forcedh, forcedV)
  implicit none
  real(dp), intent(out) :: energy
  real(dp), dimension(nat, 3), intent(out) :: forcedR
  real(dp), dimension(3, 3), intent(out) :: forcedh
  real(dp), dimension(nat), intent(out) :: forcedV
  real(dp), dimension(3*nat_sl, 3*nat_sl) :: temp
  real(dp), dimension(:,:,:,:), allocatable :: my_dCpqdR
  integer :: num_negative, i_atom, s, i, j, counter
  integer, parameter :: eigs_check = 200
  integer :: subarr_typ, ierr
  ! LAPACK work variables
  integer :: LWORK, errorflag
  real(dp) :: WORK((3*nat_sl)*(3+(3*nat_sl)/2)), eigenvalues(3*nat_sl)

  call start_clock('mbd_int_energy')
  call mp_sum(Cpq, intra_image_comm)
  eigenvalues = 0.0_DP
  forcedR = 0.0_DP
  energy = 0.0_DP
  num_negative = 0
  forcedV = 0.0_DP
  errorflag = 0
  LWORK = 3*nat_sl*(3+(3*nat_sl)/2)
  call DSYEV('V', 'U', 3*nat_sl, Cpq, 3*nat_sl, eigenvalues, WORK, LWORK, errorflag)
  if(errorflag.eq.0) then
    do i_atom = 1, 3*nat_sl, 1
      ! open (unit=eigs_check, file="eigs.tmp", action="write", status="unknown", position="append")
      ! write(eigs_check, *) eigenvalues(i_atom)
      ! close(eigs_check)
      if(eigenvalues(i_atom).ge.0.0_DP) then
        energy = energy + dsqrt(eigenvalues(i_atom))
      else
        num_negative = num_negative + 1
      end if
    end do
    if(num_negative.ge.1) then
      write(stdout, '(3X," WARNING: Found ", I3, " Negative Eigenvalues.")'), num_negative
    end if
  else
  end if
  energy = energy*nat/nat_sl
  !!!!!!!!!!!!!!!!!!!!
  ! Forces below here. There's going to be some long parallelization business.
  !!!!!!!!!!!!!!!!!!!!
  call start_clock('mbd_int_forces')
  if(.not.allocated(my_dCpqdR)) allocate(my_dCpqdR(nat_sl, nat_sl, n_pairs(me_image+1), 3)), my_dCpqdR = 0.0_DP
  if(mbd_vdw_forces) then
    do s = 1, nat, 1
      if(me_image.eq.(f_cpu_id(s)+1)) then
        do i = 1, 3, 1
          temp = MATMUL(TRANSPOSE(Cpq), MATMUL(my_dCpqdR(:, :, counter, i), Cpq))
          do j = 1, 3*nat_sl, 1
            if(eigenvalues(j).ge.0.0_DP) then
              forcedR(s, i) = forcedR(s, i) + 1.0_DP/(2.0_DP*dsqrt(eigenvalues(j)))*temp(j,j)
            end if
          end do
        end do
        counter = counter + 1
      end if
    end do
    forcedR = forcedR*nat/nat_sl
    do s = 1, 3, 1
      do i = 1, 3, 1
        temp = MATMUL(TRANSPOSE(Cpq), MATMUL(dCpqdh(:, :, s, i), Cpq))
        do j = 1, 3*nat_sl, 1
          if(eigenvalues(j).ge.0.0_DP) then
            forcedh(s, i) = forcedh(s, i) + 1.0_DP/(2.0_DP*dsqrt(eigenvalues(j)))*temp(j,j)
          end if
        end do
      end do
    end do
    forcedh = forcedh*nat/nat_sl
    call mp_sum(forcedR, intra_image_comm)
    call mp_sum(forcedh, intra_image_comm)
  end if
  call stop_clock('mbd_int_forces')
  call stop_clock('mbd_int_energy')
  return
end subroutine mbdvdw_interacting_energy
But when run, it's complaining that
[MathBook Pro:58100] *** An error occurred in MPI_Type_create_subarray
[MathBook Pro:58100] *** reported by process [2560884737,2314885530279477248]
[MathBook Pro:58100] *** on communicator MPI_COMM_WORLD
[MathBook Pro:58100] *** MPI_ERR_ARG: invalid argument of some other kind
[MathBook Pro:58100] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[MathBook Pro:58100] *** and potentially your MPI job)
so something is going wrong, but I have no idea what. I know my description is somewhat sparse, so please let me know what further information would help.
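No resolution is recorded in this thread. One argument-level detail worth checking (my suggestion, not confirmed by the poster): MPI_TYPE_CREATE_SUBARRAY requires 1 <= array_of_subsizes(k) <= array_of_sizes(k) and 0 <= array_of_starts(k) <= array_of_sizes(k) - array_of_subsizes(k) in every dimension, and Open MPI reports MPI_ERR_ARG when this is violated, for instance if n_pairs(me_image+1) works out to 0 on some rank. A sketch of a guard that slots in before the existing call, using the question's variables:

integer :: sizes(4), subsizes(4), starts(4), k

sizes    = (/ nat_sl, nat_sl, nat, 3 /)
subsizes = (/ nat_sl, nat_sl, n_pairs(me_image+1), 3 /)
starts   = (/ 0, 0, 0, 0 /)
do k = 1, 4
  ! the MPI standard requires 1 <= subsize <= size and 0 <= start <= size - subsize
  if (subsizes(k) < 1 .or. subsizes(k) > sizes(k) .or. &
      starts(k) < 0 .or. starts(k) > sizes(k) - subsizes(k)) then
    print *, 'bad subarray argument in dimension', k, ': size', sizes(k), &
             ', subsize', subsizes(k), ', start', starts(k)
  end if
end do
call mpi_type_create_subarray(4, sizes, subsizes, starts, mpi_order_fortran, &
                              mpi_double_precision, subarr_typ, ierr)
call mpi_type_commit(subarr_typ, ierr)

If the guard prints nothing, the invalid argument is elsewhere, and the mpi_scatterv counts and displacements would be the next thing to audit.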