I modified my code and put MPI_RECV before MPI_SEND. This time I did not receive any error message, but the code still seems to deadlock: I open some files (UNIT=11, 12, 13, 14) before the MPI_RECV and MPI_SEND calls, then collect data through these two calls and write it into those files, yet no data is ever written to them. I paste my modified code below. Would you please have a look at it and give me some suggestions? Thank you so much.
PROGRAM MAIN
USE MPI
USE CAL
IMPLICIT NONE
INTEGER :: nb !Number of valence band
DOUBLE PRECISION :: me !Minimum eigen value
DOUBLE COMPLEX, ALLOCATABLE :: u_s1(:,:) !Array to store the contribution of each eigen state to the total spin orbit torque
DOUBLE COMPLEX, ALLOCATABLE :: u_s2(:,:) !Array to store the contribution of each eigen state to the total spin orbit torque
DOUBLE COMPLEX, ALLOCATABLE :: u_t1(:,:) !Array to collect the contribution of each eigen state to the total spin orbit torque from all processors
DOUBLE COMPLEX, ALLOCATABLE :: u_t2(:,:) !Array to collect the contribution of each eigen state to the total spin orbit torque from all processors
DOUBLE COMPLEX :: sr1 !Sum of Fermi surface part for spin orbit torque on all km(1) k points
DOUBLE COMPLEX :: sr2 !Sum of Fermi surface part for spin orbit torque on all km(1) k points
DOUBLE PRECISION, ALLOCATABLE, TARGET :: nme(:) !Array to store the minimum eigen value
INTEGER, ALLOCATABLE, TARGET :: nnb(:) !Array to store the number of valence band
INTEGER :: world_size !MPI
INTEGER :: world_rank, ierr !MPI
INTEGER :: irank, j0 !MPI
!
!Initializing MPI
CALL MPI_Init(ierr)
CALL MPI_Comm_size(MPI_COMM_WORLD, world_size, ierr)
CALL MPI_Comm_rank(MPI_COMM_WORLD, world_rank, ierr)
!
!Opening file that stores the total spin orbit torque value for the Fermi surface part
OPEN (UNIT=11, FILE='SOT_Surface.dat', STATUS='UNKNOWN')
!
!Opening file that stores the spin orbit torque for the Fermi surface part versus energy
OPEN (UNIT=12, FILE='SOT_Surface_sve_xz.dat', STATUS='UNKNOWN')
OPEN (UNIT=13, FILE='SOT_Surface_sve_yz.dat', STATUS='UNKNOWN')
!
!Opening file that stores the minimum eigen value and number of valence band
OPEN (UNIT=14, FILE='SOT_mineig_numval.dat', STATUS='UNKNOWN')
!
!Allocating the array used to store the contribution of each eigen state to the total spin orbit torque
ALLOCATE (u_s1(2,nu_wa*km(1)))
ALLOCATE (u_s2(2,nu_wa*km(1)))
!
!Allocating array to collect the contribution of each eigen state to the total spin orbit torque from all processors
IF (world_rank .EQ. 0) THEN
ALLOCATE (u_t1(2,nu_wa*km(1)*km(2)))
ALLOCATE (u_t2(2,nu_wa*km(1)*km(2)))
END IF
u_t1 = CMPLX(0.0d0, 0.0d0)
u_t2 = CMPLX(0.0d0, 0.0d0)
!
!Allocating array to collect the number of valence band and the minimum eigen value
IF (world_rank .EQ. 0) THEN
ALLOCATE (nme(km(2)))
ALLOCATE (nnb(km(2)))
END IF
nme = 0.0d0
nnb = 0
!
!opening test files
open (unit=21,file='normalisedprefactor.dat',status='unknown')
open (unit=22,file='gd.dat',status='unknown')
open (unit=23,file='con.dat',status='unknown')
open (unit=24,file='par.dat',status='unknown')
open (unit=25,file='grga.dat',status='unknown')
open (unit=26,file='nfdk.dat',status='unknown')
!Reading the Cartesian coordinates of k-point mesh
DO j = 1, km(2), 1
IF (mod(j-1, world_size) .NE. world_rank) CYCLE
DO k = 1, km(1), 1
kp(k,:) = ka(j,k,:)
END DO
!Building up Hamiltonian matrix on k points and diagonalising the matrix to obtain Eigen vectors and values
CALL HAMSUR(vd,kp,nu_wa,nu_nr,km(1),nd1,nd2,nd3,nd4,nd5,hr1,hr2,hr3,hr4,hr5,tb,ec,ev,fermi,an,wf,bv,dk,u_s1,u_s2,sr1,sr2,nb,me)
!
CALL MPI_Barrier(MPI_COMM_WORLD, ierr)
IF (WORLD_RANK .EQ. 0) THEN
u_t1(1:2,1+nu_wa*km(1)*(j-1):nu_wa*km(1)*j) = u_s1
u_t2(1:2,1+nu_wa*km(1)*(j-1):nu_wa*km(1)*j) = u_s2
DO k = 1, WORLD_SIZE-1, 1
IF (j-1+k .EQ. km(2)) EXIT
l = k + 101
n = k + 102
CALL MPI_RECV(u_t1(1,1+nu_wa*km(1)*(j-1+k)), 2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, k,l, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
CALL MPI_RECV(u_t2(1,1+nu_wa*km(1)*(j-1+k)), 2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, k,n, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
END DO
ELSE
l = WORLD_RANK + 101
n = WORLD_RANK + 102
CALL MPI_SEND(u_s1,2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, 0, l, MPI_COMM_WORLD, ierr)
CALL MPI_SEND(u_s2,2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, 0, n, MPI_COMM_WORLD, ierr)
END IF
crr1 = crr1 + sr1
crr2 = crr2 + sr2
IF (WORLD_RANK .EQ. 0) THEN
nme(j-1) = me
nnb(j-1) = nb
DO k = 1, WORLD_SIZE-1, 1
IF (j-1+k .EQ. km(2)) EXIT
l = k + 103
n = k + 104
CALL MPI_RECV(nme(j-1+k), 1, MPI_DOUBLE, k,l, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
CALL MPI_RECV(nnb(j-1+k), 1, MPI_INT, k,n, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
END DO
ELSE
l = WORLD_RANK + 103
n = WORLD_RANK + 104
CALL MPI_SEND(me, 1, MPI_DOUBLE, 0, l, MPI_COMM_WORLD, ierr)
CALL MPI_SEND(nb, 1, MPI_INT, 0, n, MPI_COMM_WORLD, ierr)
END IF
END DO
!
CALL MPI_Barrier(MPI_COMM_WORLD, ierr)
IF (world_rank .EQ. 0) THEN
ALLOCATE (crr1_all(world_size))
ALLOCATE (crr2_all(world_size))
END IF
crr1_all = CMPLX(0.0d0, 0.0d0)
crr2_all = CMPLX(0.0d0, 0.0d0)
CALL MPI_Gather(crr1, 1, MPI_double_complex, crr1_all, 1, MPI_double_complex, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Gather(crr2, 1, MPI_double_complex, crr2_all, 1, MPI_double_complex, 0, MPI_COMM_WORLD, ierr)
!Writing total conductivity value into the file
IF (world_rank .EQ. 0) THEN
crr1_total = CMPLX(0.0d0, 0.0d0)
crr2_total = CMPLX(0.0d0, 0.0d0)
DO i = 1, world_size, 1
crr1_total = crr1_total + crr1_all(i)
crr2_total = crr2_total + crr2_all(i)
END DO
!Finding the minimum eigen value
NULLIFY (p1, p2)
p1 => nme(1)
p2 => nnb(1)
DO i = 2, km(2), 1
IF (p1 .GE. nme(i)) THEN
p1 => nme(i)
END IF
IF (p2 .LE. nnb(i)) THEN
p2 => nnb(i)
END IF
END DO
WRITE (UNIT=14, FMT='(A27,$)') 'The minimum eigen value is:'
WRITE (UNIT=14, FMT=*) p1
WRITE (UNIT=14, FMT='(A30,$)') 'The number of valence band is:'
WRITE (UNIT=14, FMT=*) p2
!
!Constant for the coefficient
pi = DACOS(-1.0d0)
hb = 1.054571817d-34 !(unit - J*s)
es = 1.602176634d-19 !(unit - C)
!
WRITE (UNIT=11, FMT='(A55,$)') 'Spin Orbit Torque without coefficient within x-z plane:'
WRITE (UNIT=11, FMT=*) crr1_total
WRITE (UNIT=11, FMT='(A52,$)') 'Spin Orbit Torque with coefficient within x-z plane:'
WRITE (UNIT=11, FMT=*) crr1_total * es ** 2 * hb / 4.0d0 / pi
WRITE (UNIT=11, FMT='(A55,$)') 'Spin Orbit Torque without coefficient within y-z plane:'
WRITE (UNIT=11, FMT=*) crr2_total
WRITE (UNIT=11, FMT='(A52,$)') 'Spin Orbit Torque with coefficient within y-z plane:'
WRITE (UNIT=11, FMT=*) crr2_total * es ** 2 * hb / 4.0d0 / pi
DO i = 1, nu_wa*km(1)*km(2), 1
WRITE (UNIT=12, FMT=*) u_t1(1:2,i)
WRITE (UNIT=13, FMT=*) u_t2(1:2,i)
END DO
END IF
!
!Finalising MPI
CALL MPI_Finalize(ierr)
!
!Deallocating arrays that store and collect the fermi-surface-part contribution of each eigen state to the total spin orbit torque
DEALLOCATE (u_s1)
DEALLOCATE (u_s2)
DEALLOCATE (u_t1)
DEALLOCATE (u_t2)
!
STOP
END PROGRAM MAIN
Related
I am making a module in Fortran 90 to run PARPACK on a given matrix. I have an existing ARPACK code which functions as expected. I tried converting it to PARPACK, and it runs into memory errors. I am fairly new to coding and Fortran, so please excuse any blunders I've made.
The code:
!ARPACK module
module parpack
implicit none
contains
subroutine parp
! use mpi
include '/usr/lib/x86_64-linux-gnu/openmpi/include/mpif.h'
integer comm, myid, nprocs, rc, nloc, status(MPI_STATUS_SIZE)
integer, parameter :: pres=8
integer nev, ncv, maxn, maxnev, maxncv
parameter (maxn=10**7, maxnev=maxn-1, maxncv=maxn)
! Arrays for SNAUPD
integer iparam(11), ipntr(14)
logical, allocatable :: select(:)
real(kind=pres), allocatable :: workd(:), workl(:), worktmp1(:), worktmp2(:)
! Scalars for SNAUPD
character bmat*1, which*2
integer ido, n, info, ierr, ldv
integer i, j, ishfts, maxitr, mode1, nconv
integer(kind=pres) lworkl
real(kind=pres) tol
! Arrays for SNEUPD
real(kind=pres), allocatable :: d(:,:), resid(:), v(:,:), workev(:), z(:,:)
! Scalars for SNEUPD
logical rvec, first
real sigmar, sigmai
!==============================================
real(kind=pres), allocatable :: mat(:,:)
open (11, file = 'matrix.dat', status = 'old')
read (11,*) n
!=============================================
! Dimension of the problem
nev = n/10
ncv = nev+2
ldv = n
bmat = 'I'
which = 'LM'
! Additional environment variables
ido = 0
tol = 0.0E+0
info = 0
lworkl = 3*ncv**2+6*ncv
! Algorithm Mode specifications:
ishfts = 1
maxitr = 300
mode1 = 1
iparam(1) = ishfts
iparam(3) = maxitr
iparam(7) = mode1
! Distribution to nodes
!=============================================
! Matrix allocation
allocate (mat(n,n))
! PDNAUPD
allocate (workd(5*n))
allocate (workl(lworkl))
allocate (resid(n))
allocate (worktmp1(n))
allocate (worktmp2(n))
! PDNEUPD
allocate (d(n,3))
allocate (v(ldv,ncv))
allocate (workev(3*n))
allocate (z(ldv,ncv))
allocate (select(ncv))
!===========================================
! Read Matrix from the provided file
mat = 0
read(11,*) mat
mat = transpose(mat)
!===========================================
! MPI Calling
call MPI_INIT(ierr)
comm = MPI_COMM_WORLD
call MPI_COMM_RANK(comm, myid, ierr)
call MPI_COMM_SIZE(comm, nprocs, ierr)
nloc = n/nprocs
! if ( mod(n, nprocs) .gt. myid ) nloc = nloc + n
!===============================================
20 continue
call pdnaupd(comm, ido, bmat, nloc, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info) !Top level solver
call MPI_BARRIER(comm,ierr)
print *, ido, info, iparam(5) !for testing
!===============================================
if (ido .eq. -1 .or. ido .eq. 1) then
worktmp1 = 0
if (myid .ne. 0) then !It is slave
call MPI_SEND(workd(ipntr(1)), nloc, MPI_DOUBLE_PRECISION, 0, 0, comm, ierr)
else !It is host
worktmp1(1:nloc) = workd(ipntr(1):ipntr(1)+nloc-1)
i = nprocs
if (i .gt. 1) then
do i=1,nprocs-1
call MPI_RECV(worktmp1(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 0, comm, status, ierr)
end do
endif
endif
call MPI_BARRIER(comm,ierr)
if (myid .eq. 0) then !It is host
! Matrix multiplication
worktmp2 = 0
call matmultiply(n, mat, worktmp1, worktmp2)
workd(ipntr(2):ipntr(2)+nloc-1) = worktmp2(1:nloc)
i = nprocs
if (i .gt. 1) then
do i=1,nprocs-1
call MPI_SEND(worktmp2(i*nloc+1), nloc, MPI_DOUBLE_PRECISION, i, 100*i, comm, ierr)
end do
endif
else !It is slave
call MPI_RECV(workd(ipntr(2)), nloc, MPI_DOUBLE_PRECISION, 0, 100*myid, comm, status, ierr)
endif
go to 20
! call matmultiply(n, mat, workd(ipntr(1):ipntr(1)+n-1), workd(ipntr(2):ipntr(2)+n-1))
! go to 20
endif
! print *, info !for testing
!===============================================================
! Post-processing for eigenvalues
rvec = .true.
if (myid .eq. 0) then
call pdneupd ( comm, rvec, 'A', select, d, d(1,2), z, ldv, sigmar, sigmai, &
workev, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, &
workd, workl, lworkl, info)
endif
! print *, info !for testing
close(11)
call MPI_FINALIZE(ierr)
return
end subroutine
!==============================================================================================
! Additional Function definitions
subroutine matmultiply(n, mat, v, w)
integer n, i, j
integer, parameter :: pres=8
real(kind = pres) mat(n,n), temp(n)
real(kind = pres) v(n), w(n)
temp = 0
do j = 1,n
do i = 1,n
temp(j) = temp(j) + mat(i,j)*v(i)
end do
end do
w = temp
return
end subroutine
end module
I apologize for the ton of redundant lines and comments; I have yet to clean the code up for finalization.
When I run the code on a single thread with ./a.out, I get the following output:
Invalid MIT-MAGIC-COOKIE-1 key 1 0 1629760560
1 0 1629760560
1 0 1629760560
1 0 1629760560
.
.
. <A long chain as the code exhausts all iterations>
. <The first of the numbers is ido, which starts at 1 instead of -1 for some reason; the second is info and the third is iparam(5), which is a random number until the final iteration>
.
99 1 1
munmap_chunk(): invalid pointer
Program received signal SIGABRT: Process abort signal.
Backtrace for this error:
#0 0x7f5a863d0d01 in ???
#1 0x7f5a863cfed5 in ???
#2 0x7f5a8620420f in ???
#3 0x7f5a8620418b in ???
#4 0x7f5a861e3858 in ???
#5 0x7f5a8624e3ed in ???
#6 0x7f5a8625647b in ???
#7 0x7f5a862566cb in ???
#8 0x560f05ac1819 in ???
#9 0x560f05abd7bc in checker
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:45
#10 0x560f05abd8d9 in main
at /home/srivatsank/Desktop/fortran/lap_vs_arp/ptest/ptest.f90:3
Aborted (core dumped)
line 45 in ptest is call parp
line 3 in ptest is use parpack(name of the module)
The main code is as follows:
program checker
use parpack
use arpack
! use lapack
implicit none
!Program to test LAPACK and ARPACK
! 1. Variable definition
integer a,n,i
real, allocatable :: mat(:,:)
real t0, t1
a=2
! Loop
! do 20 a = 1,3
! Open File
open(unit=10, file = 'matrix.dat', status = 'replace')
! 2. Generate Symmetric matrices
n = 10**a
allocate (mat(n,n))
call RANDOM_NUMBER(mat)
! 3. Save symmetric matrices to r.dat
write (10,*) n
do 30 i=1,n
write(10,*) mat(i,:)
30 end do
deallocate(mat)
close(10)
! 4. Test time taken by each of the routines
! call cpu_time(t0)
! call arp
! call cpu_time(t1)
! print *, 'n:', n, 'ARPACK time taken:', t1-t0
call cpu_time(t0)
call parp
call cpu_time(t1)
print *, 'n:', n, 'PARPACK time taken:', t1-t0
!20 end do
end program checker
The memory error occurs at the very end of the subroutine, when the main program tries to exit from the subroutine. I have verified this by putting print statements as the last lines of the subroutine.
And on running mpirun -np 4 a.out, the code just enters the pdneupd process and sits there for eternity. Could anyone help?
I use the MPI_Gather command to collect data from each processor but got the following error information (line 523 in MAINp.f90 is the error line).
forrtl: severe (174): SIGSEGV, segmentation fault occurred
Image PC Routine Line Source
sot 0000000000427FD3 Unknown Unknown Unknown
libpthread-2.26.s 00002AAAB0D1C2F0 Unknown Unknown Unknown
sot 000000000041D2AE MAIN__ 523 MAINp.f90
sot 0000000000409B92 Unknown Unknown Unknown
libc-2.26.so 00002AAAB115034A __libc_start_main Unknown Unknown
sot 0000000000409AAA Unknown Unknown Unknown
srun: error: nid01236: task 19: Exited with exit code 174
srun: Terminating job step 14213926.0
slurmstepd: error: *** STEP 14213926.0 ON nid01236 CANCELLED AT 2020-04-23T06:53:35 ***
I do not know why it is wrong; I just want to collect data from each processor. I only put part of my MAINp.f90 below, and the error line follows the label (!THIS IS THE ERROR LINE). Would anyone please give me some suggestions? Thank you.
PROGRAM MAIN
USE MPI
USE CAL
IMPLICIT NONE
!Variables for setting up the parameters in INPUT.dat file
CHARACTER (LEN=50) :: na(6) !Array to store the names of Hamiltonian files from wannier90
DOUBLE PRECISION :: an !Angle interval
INTEGER :: km(2) !k point mesh
INTEGER :: vd !Velocity direction of the Hamiltonian matrix
DOUBLE PRECISION :: fermi !Fermi energy value
DOUBLE PRECISION :: wf !Energy window
DOUBLE PRECISION :: bv !Broadening value
DOUBLE PRECISION :: pi !pi
DOUBLE PRECISION :: hb !h_bar
DOUBLE PRECISION :: es !Electron volt
!
!Variables for parameters in '.wout' file
INTEGER :: sta !Status of files
DOUBLE PRECISION :: rea_c(3,3) !Lattice constant of unit cell in real space
DOUBLE PRECISION :: rec_c(3,3) !Vectors of unit cell in the reciprocal space
!
!Variables for parameters in Hamiltonian ('_hr.dat') file from wannier90
INTEGER :: nu_wa !Number of wannier function
INTEGER :: nu_nr(5) !Number of Wigner-Seitz grid point
INTEGER, ALLOCATABLE :: nd1(:) !Degeneracy of each Wigner-Seitz grid point with magnetization along z axis
INTEGER, ALLOCATABLE :: nd2(:) !Degeneracy of each Wigner-Seitz grid point with magnetization along different axes
INTEGER, ALLOCATABLE :: nd3(:) !Degeneracy of each Wigner-Seitz grid point with magnetization along different axes
INTEGER, ALLOCATABLE :: nd4(:) !Degeneracy of each Wigner-Seitz grid point with magnetization along different axes
INTEGER, ALLOCATABLE :: nd5(:) !Degeneracy of each Wigner-Seitz grid point with magnetization along different axes
DOUBLE PRECISION, ALLOCATABLE :: hr1(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file, magnetization along z axis
DOUBLE PRECISION, ALLOCATABLE :: hr2(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file, magnetization along other axes
DOUBLE PRECISION, ALLOCATABLE :: hr3(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file, magnetization along other axes
DOUBLE PRECISION, ALLOCATABLE :: hr4(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file, magnetization along other axes
DOUBLE PRECISION, ALLOCATABLE :: hr5(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file, magnetization along other axes
!
!Internal variables
INTEGER :: i, j, k, l, n !Integer for loop
CHARACTER (LEN=100) :: str !String for transitting data
DOUBLE PRECISION :: tr(3) !Array for transitting data
DOUBLE PRECISION, ALLOCATABLE :: kp(:,:) !Array to store the Cartesian coordinate of k-point mesh
DOUBLE PRECISION, ALLOCATABLE :: ka(:,:,:) !Array to store the Cartesian coordinates of all k points
DOUBLE COMPLEX, ALLOCATABLE :: tb(:,:) !Array to store the extracted tight binding Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: ec(:,:) !Array to store the Eigen vector matrix
DOUBLE PRECISION, ALLOCATABLE :: ev(:,:) !Array to store the Eigen value on single k point
DOUBLE PRECISION :: dk(2) !Array to store the Delta kx and ky
INTEGER :: nb !Number of valence band
DOUBLE PRECISION :: me !Minimum eigen value
DOUBLE COMPLEX, ALLOCATABLE :: u_s1(:,:) !Array to store the contribution of each eigen state to the total spin orbit torque
DOUBLE COMPLEX, ALLOCATABLE :: u_s2(:,:) !Array to store the contribution of each eigen state to the total spin orbit torque
DOUBLE COMPLEX, ALLOCATABLE :: u_t1(:,:) !Array to collect the contribution of each eigen state to the total spin orbit torque from all processors
DOUBLE COMPLEX, ALLOCATABLE :: u_t2(:,:) !Array to collect the contribution of each eigen state to the total spin orbit torque from all processors
DOUBLE COMPLEX :: sr1 !Sum of Fermi surface part for spin orbit torque on all km(1) k points
DOUBLE COMPLEX :: sr2 !Sum of Fermi surface part for spin orbit torque on all km(1) k points
DOUBLE COMPLEX, ALLOCATABLE :: crr1_all(:) !Array of ct
DOUBLE COMPLEX, ALLOCATABLE :: crr2_all(:) !Array of ct
DOUBLE COMPLEX :: crr1 !Sum of conductivity on all k points
DOUBLE COMPLEX :: crr2 !Sum of conductivity on all k points
DOUBLE COMPLEX :: crr1_total !Sum of conductivity
DOUBLE COMPLEX :: crr2_total !Sum of conductivity
DOUBLE PRECISION, ALLOCATABLE, TARGET :: nme(:) !Array to store the minimum eigen value
INTEGER, ALLOCATABLE, TARGET :: nnb(:) !Array to store the number of valence band
DOUBLE PRECISION, POINTER :: p1 !Pointer used to find the minimum eigen value
INTEGER, POINTER :: p2 !Pointer used to find the number of valence band
!
!Parameters for timer
INTEGER :: cr, t00, t0, t !Timer variables
DOUBLE PRECISION :: ra !Timer rate
!Parameters for MPI
INTEGER :: world_size !MPI
INTEGER :: world_rank, ierr !MPI
INTEGER :: irank, j0 !MPI
!
!Initializing MPI
CALL MPI_Init(ierr)
CALL MPI_Comm_size(MPI_COMM_WORLD, world_size, ierr)
CALL MPI_Comm_rank(MPI_COMM_WORLD, world_rank, ierr)
!
!Allocating the array used to store the contribution of each eigen state to the total spin orbit torque
ALLOCATE (u_s1(2,nu_wa*km(1)))
ALLOCATE (u_s2(2,nu_wa*km(1)))
!
!Initialising array used to store the total conductivity
cr = CMPLX(0.0d0, 0.0d0)
!
!Allocating array to collect the contribution of each eigen state to the total spin orbit torque from all processors
IF (world_rank .EQ. 0) THEN
ALLOCATE (u_t1(2,nu_wa*km(1)*km(2)))
ALLOCATE (u_t2(2,nu_wa*km(1)*km(2)))
END IF
u_t1 = CMPLX(0.0d0, 0.0d0)
u_t2 = CMPLX(0.0d0, 0.0d0)
!
!Allocating array to collect the number of valence band and the minimum eigen value
IF (world_rank .EQ. 0) THEN
ALLOCATE (nme(km(2)))
ALLOCATE (nnb(km(2)))
END IF
nme = 0.0d0
nnb = 0
!
!Reading the Cartesian coordinates of k-point mesh
DO j = 1, km(2), 1
IF (mod(j-1, world_size) .NE. world_rank) CYCLE
DO k = 1, km(1), 1
kp(k,:) = ka(j,k,:)
END DO
!Building up Hamiltonian matrix on k points and diagonalising the matrix to obtain Eigen vectors and values
CALL HAMSUR(vd,kp,nu_wa,nu_nr,km(1),nd1,nd2,nd3,nd4,nd5,hr1,hr2,hr3,hr4,hr5,tb,ec,ev,fermi,an,wf,bv,dk,u_s1,u_s2,sr1,sr2,nb,me)
!
!THIS IS THE ERROR LINE
CALL MPI_Gather(u_s1, 2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, u_t1(1:2,1+nu_wa*km(1)*(j-1):nu_wa*km(1)*j),&
2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Gather(u_s2, 2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, u_t2(1:2,1+nu_wa*km(1)*(j-1):nu_wa*km(1)*j),&
2*nu_wa*km(1), MPI_DOUBLE_COMPLEX, 0, MPI_COMM_WORLD, ierr)
crr1 = crr1 + sr1
crr2 = crr2 + sr2
CALL MPI_Gather(me, 1, MPI_DOUBLE, nme(j), 1, MPI_INT, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Gather(nb, 1, MPI_INT, nnb(j), 1, MPI_INT, 0, MPI_COMM_WORLD, ierr)
END DO
!
CALL MPI_Barrier(MPI_COMM_WORLD, ierr)
IF (world_rank .EQ. 0) THEN
ALLOCATE (crr1_all(world_size))
ALLOCATE (crr2_all(world_size))
END IF
crr1_all = CMPLX(0.0d0, 0.0d0)
crr2_all = CMPLX(0.0d0, 0.0d0)
CALL MPI_Gather(crr1, 1, MPI_double_complex, crr1_all, 1, MPI_double_complex, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Gather(crr2, 1, MPI_double_complex, crr2_all, 1, MPI_double_complex, 0, MPI_COMM_WORLD, ierr)
!Writing total conductivity value into the file
IF (world_rank .EQ. 0) THEN
crr1_total = CMPLX(0.0d0, 0.0d0)
crr2_total = CMPLX(0.0d0, 0.0d0)
DO i = 1, world_size, 1
crr1_total = crr1_total + crr1_all(i)
crr2_total = crr2_total + crr2_all(i)
END DO
!Finding the minimum eigen value
NULLIFY (p1, p2)
p1 => nme(1)
p2 => nnb(1)
DO i = 2, km(2), 1
IF (p1 .GE. nme(i)) THEN
p1 => nme(i)
END IF
IF (p2 .LE. nnb(i)) THEN
p2 => nnb(i)
END IF
END DO
WRITE (UNIT=14, FMT='(A27,$)') 'The minimum eigen value is:'
WRITE (UNIT=14, FMT=*) p1
WRITE (UNIT=14, FMT='(A30,$)') 'The number of valence band is:'
WRITE (UNIT=14, FMT=*) p2
!
!Constant for the coefficient
pi = DACOS(-1.0d0)
hb = 1.054571817d-34 !(unit - J*s)
es = 1.602176634d-19 !(unit - C)
!
END IF
!
IF (world_rank .EQ. 0) THEN
DEALLOCATE (crr1_all)
DEALLOCATE (crr2_all)
END IF
!Finalising MPI
CALL MPI_Finalize(ierr)
!
!Deallocating arrays that store and collect the fermi-surface-part contribution of each eigen state to the total spin orbit torque
DEALLOCATE (u_s1)
DEALLOCATE (u_s2)
DEALLOCATE (u_t1)
DEALLOCATE (u_t2)
!
STOP
END PROGRAM MAIN
I got my parallel code (conductivityMAINp.f90 and conductivityCALp.f90) to work and have updated it below. Can I ask some more questions?
I find that my serial and parallel codes give almost the same value, but the decimal part of the value differs. I paste one of the test results below. Do you think this difference in the decimal part is normal, or may there still be something wrong with the code? Should the results from the serial and parallel codes be exactly the same or not?
serial version
(-50979.1014624820,-8.548064305026142E-013)
parallel version
(-50979.0937138954,-6.321723719222822E-013)
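For context, floating-point addition is not associative, so summing the per-processor partial results in a different order can change the last digits of the total. A tiny made-up sketch (values invented only to expose the rounding) that shows the effect:
PROGRAM SUM_ORDER
IMPLICIT NONE
DOUBLE PRECISION :: a(4), s1, s2
!Made-up values of very different magnitude so that the rounding is visible
a = (/ 1.0d16, 1.0d0, -1.0d16, 1.0d0 /)
!Left-to-right order, as a serial loop would use
s1 = ((a(1) + a(2)) + a(3)) + a(4)
!A different order, as can happen when per-rank partial sums are combined
s2 = ((a(1) + a(3)) + a(2)) + a(4)
PRINT *, s1, s2 !prints 1.0 and 2.0 here
END PROGRAM SUM_ORDER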
I also compared the files generated by the serial and parallel versions. I find that some files have different sizes, like the files below.
serial version
par.dat 26600
con.dat 3730147
parallel version
par.dat 266
con.dat 37623
I understand that different processes open these files and write data into them separately, so the data in these files are erased and overwritten by the different processes; this is why the data in these files differ between the serial and parallel runs. Do you think there is a way to keep the data from all processes in the same file?
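One workaround I have considered for the test files (a minimal sketch; the unit number and name pattern are only an example, not taken from the code below) is to give every rank its own copy of each file, so nothing gets overwritten:
!Each rank opens its own copy of a test file, e.g. con_0.dat, con_1.dat, ...
CHARACTER (LEN=30) :: fname
WRITE (fname, '(A,I0,A)') 'con_', world_rank, '.dat'
OPEN (UNIT=23, FILE=TRIM(fname), STATUS='UNKNOWN')
The per-rank files can be concatenated afterwards, or the data can be gathered to rank 0 (as is already done for ct) and written by rank 0 alone if a single file is required.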
Would you please recommend some textbooks on MPI? I want to gain a better understanding of parallelization.
The conductivityMAINp.f90 source code
PROGRAM MAIN
USE MPI
USE CAL
IMPLICIT NONE
!Variables for setting up the parameters in INPUT.dat file
CHARACTER (LEN=50) :: na(2) !Array to store the names of Hamiltonian files from wannier90
INTEGER :: km(2) !k point mesh
INTEGER :: vd !Velocity direction of the Hamiltonian matrix
DOUBLE PRECISION :: fermi !Fermi energy value
DOUBLE PRECISION :: bv !Broadening value
!
!Variables for parameters in '.wout' file
INTEGER :: sta !Status of files
DOUBLE PRECISION :: rea_c(3,3) !Lattice constant of unit cell in real space
DOUBLE PRECISION :: rec_c(3,3) !Vectors of unit cell in the reciprocal space
!
!Variables for parameters in Hamiltonian ('_hr.dat') file from wannier90
INTEGER :: nu_wa !Number of wannier function
INTEGER :: nu_nr !Number of Wigner-Seitz grid point
INTEGER, ALLOCATABLE :: nd(:) !Degeneracy of each Wigner-Seitz grid point
DOUBLE PRECISION, ALLOCATABLE :: hr(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file
!
!Internal variables
INTEGER :: i, j, k, l, n !Integer for loop
CHARACTER (LEN=100) :: str !String for transitting data
DOUBLE PRECISION :: tr(3) !Array for transitting data
DOUBLE PRECISION, ALLOCATABLE :: kp(:,:) !Array to store the Cartesian coordinate of k-point mesh
DOUBLE PRECISION, ALLOCATABLE :: ka(:,:,:) !Array to store the Cartesian coordinates of all k points
DOUBLE COMPLEX, ALLOCATABLE :: tb(:,:) !Array to store the extracted tight binding Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: ec(:,:) !Array to store the Eigen vector matrix
DOUBLE PRECISION, ALLOCATABLE :: ev(:,:) !Array to store the Eigen value on single k point
DOUBLE COMPLEX, ALLOCATABLE :: vh(:,:) !Array to store the velocity of Hamiltonian matrix
DOUBLE PRECISION :: dk(2) !Array to store the Delta kx and ky
DOUBLE COMPLEX :: sc !Sum of conductivity on all km(1) k points
DOUBLE COMPLEX, ALLOCATABLE :: ct_all(:) !Array of ct
DOUBLE COMPLEX :: ct !Sum of conductivity on all k points
DOUBLE COMPLEX :: ct_total !Sum of conductivity
!
!Parameters for timer
INTEGER :: cr, t00, t0, t !Timer variables
DOUBLE PRECISION :: ra !Timer rate
!Parameters for MPI
INTEGER :: world_size !MPI
INTEGER :: world_rank, ierr !MPI
INTEGER :: irank, j0 !MPI
!
!Initializing MPI
CALL MPI_Init(ierr)
CALL MPI_Comm_size(MPI_COMM_WORLD, world_size, ierr)
CALL MPI_Comm_rank(MPI_COMM_WORLD, world_rank, ierr)
!
!Initializing timer
IF (world_rank .EQ. 0) THEN
CALL system_clock(count_rate=cr)
ra = REAL(cr)
END IF
!
!Starting timer for reading and broadcasting all input parameters
IF (world_rank .EQ. 0) THEN
CALL system_clock(t00)
CALL system_clock(t0)
END IF
!
!Reading the parameters in the INPUT.dat file
IF (world_rank .EQ. 0) THEN
!Opening INPUT.dat file
OPEN (UNIT=3, FILE='INPUT.dat', STATUS='OLD')
!
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT='(a)') na(1)
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT='(a)') na(2)
DO i = 1, 8, 1
READ (UNIT=3, FMT=*)
END DO
READ (UNIT=3, FMT=*) km
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT=*) vd
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT=*) fermi
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT=*)
READ (UNIT=3, FMT=*) bv
!
!Closing INPUT.dat file
CLOSE(UNIT=3)
!
!Opening files with magnetization along z axis
OPEN (UNIT=4, FILE=TRIM(ADJUSTL(na(2))), STATUS='OLD', IOSTAT=sta)
OPEN (UNIT=6, FILE=TRIM(ADJUSTL(na(1))), STATUS='OLD')
!
END IF
!
!Broadcasting parameters from rank 0 to all other ranks
CALL MPI_Bcast(na, 50*2, MPI_char, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(km, 2, MPI_int, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(vd, 1, MPI_int, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(fermi, 1, MPI_double, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(bv, 1, MPI_double, 0, MPI_COMM_WORLD, ierr)
!
!Allocating array to store Cartesian coordinates of all k points
ALLOCATE (ka(km(2),km(1),3))
!
!Initialising the array to store Cartesian coordinates of all k points
ka = 0.0d0
!
!Reading the '.wout' file, generating coordinates of all k points and computing delta kx and ky
IF (world_rank .EQ. 0) THEN
!Reading Lattice constant in real space
DO WHILE (sta .EQ. 0)
READ (UNIT=4, FMT='(a)', IOSTAT=sta) str
IF (TRIM(ADJUSTL(str)) .EQ. 'Lattice Vectors (Ang)') THEN
DO i = 1, 3, 1
READ (UNIT=4, FMT='(a)', IOSTAT=sta) str
str = ADJUSTL(str)
READ (UNIT=str(4:), FMT=*) rea_c(i,:)
END DO
EXIT
END IF
END DO
!
!Reading Vectors of unit cell in the reciprocal space
DO WHILE (sta .EQ. 0)
READ (UNIT=4, FMT='(a)', IOSTAT=sta) str
IF (TRIM(ADJUSTL(str)) .EQ. 'Reciprocal-Space Vectors (Ang^-1)') THEN
DO i = 1, 3, 1
READ (UNIT=4, FMT='(a)', IOSTAT=sta) str
str = ADJUSTL(str)
READ (UNIT=str(4:), FMT=*) rec_c(i,:)
END DO
EXIT
END IF
END DO
!
!Closing the output file with magnetization along z axis
CLOSE (UNIT=4)
!
!Generating the Cartesian coordinates for Monkhorst k-point mesh
OPEN (UNIT=5, FILE='k_cartesian.dat', STATUS='UNKNOWN')
WRITE (UNIT=5, FMT='(I10)') km(1) * km(2)
DO i = 1, km(2), 1
DO j = 1, km(1), 1
tr(1) = 0.0d0 + 1.0d0 / DBLE(km(1)) * DBLE(j - 1)
tr(2) = 0.0d0 + 1.0d0 / DBLE(km(2)) * DBLE(i - 1)
tr(3) = 0.0d0
ka(i,j,1) = tr(1) * rec_c(1,1) + tr(2) * rec_c(2,1) +&
tr(3) * rec_c(3,1)
ka(i,j,2) = tr(1) * rec_c(1,2) + tr(2) * rec_c(2,2) +&
tr(3) * rec_c(3,2)
ka(i,j,3) = tr(1) * rec_c(1,3) + tr(2) * rec_c(2,3) +&
tr(3) * rec_c(3,3)
WRITE (UNIT=5, FMT='(F15.8,3X,F15.8,3X,F15.8)') ka(i,j,1:3)
END DO
END DO
CLOSE (UNIT=5)
!
!Computing Delta kx and ky
dk(1) = DSQRT(rec_c(1,1) ** 2 + rec_c(1,2) ** 2 + rec_c(1,3) ** 2) / DBLE(km(1))
dk(2) = DSQRT(rec_c(2,1) ** 2 + rec_c(2,2) ** 2 + rec_c(2,3) ** 2) / DBLE(km(2))
!
END IF
!
!Broadcasting lattice constants in both real and reciprocal spaces, the Cartesian coordinates of all k points and
!delta kx and ky from rank 0 to all ranks
CALL MPI_Bcast(rea_c, 3*3, MPI_double, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(rec_c, 3*3, MPI_double, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(ka, km(2)*km(1)*3, MPI_double, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(dk, 2, MPI_double, 0, MPI_COMM_WORLD, ierr)
!
!Stopping timer for reading and broadcasting all input parameters
IF (world_rank .EQ. 0) THEN
CALL system_clock(t)
WRITE (*,'(A,F10.3)') "Time for INIT (seconds):", (t - t0) / ra
END IF
!
!Starting timer for computing conductivity
IF (world_rank .EQ. 0) THEN
CALL system_clock(t0)
END IF
!
!Reading number of wannier function
IF (world_rank .EQ. 0) THEN
READ (UNIT=6, FMT=*)
READ (UNIT=6, FMT=*) nu_wa
!Reading number of Wigner-Seitz grid point in Hamiltonian file
READ (UNIT=6, FMT=*) nu_nr
!
END IF
!
!Broadcasting number of wannier function and the degeneracy of each Wigner-Seitz grid point from rank 0 to all other ranks
CALL MPI_Bcast(nu_wa, 1, MPI_int, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(nu_nr, 1, MPI_int, 0, MPI_COMM_WORLD, ierr)
!
!Allocating the array to store the degeneracy of each Wigner-Seitz grid point
ALLOCATE (nd(nu_nr))
!
!Allocating array to store k point, Hamiltonian matrix, eigen vector matrix and eigen value
!Allocating the array to store the Cartesian coordinates of k-point mesh
ALLOCATE (kp(km(1),3))
!
!Allocating the array to store the extracted tight binding Hamiltonian matrix
ALLOCATE (tb(nu_wa*km(1),nu_wa))
!
!Allocating the array to store the tight binding Eigen vector matrix
ALLOCATE (ec(nu_wa*km(1),nu_wa))
!
!Allocating the array to store the tight binding Eigen value
ALLOCATE (ev(km(1),nu_wa))
!
!Allocating array to store the velocity of Hamiltonian matrix
ALLOCATE (vh(nu_wa*km(1),nu_wa*2))
!
!Allocating the array to store Hamiltonian matrix information in '_hr.dat' file from wannier90
ALLOCATE (hr(nu_wa**2*nu_nr,7))
!
!Reading relevant information in Hamiltonian matrix
!Reading the degeneracy of each Wigner-Seitz grid point
IF (world_rank .EQ. 0) THEN
IF (MOD(nu_nr, 15) .EQ. 0) THEN
DO i = 1, nu_nr / 15, 1
READ (UNIT=6, FMT=*) nd(1+(i-1)*15:i*15)
END DO
ELSE
DO i = 1, nu_nr / 15, 1
READ (UNIT=6, FMT=*) nd(1+(i-1)*15:15+i*15)
END DO
READ (UNIT=6, FMT=*) nd(1+(nu_nr/15)*15:nu_nr)
END IF
!
!Reading the Hamiltonian matrix information in '_hr.dat' file
DO i = 1, nu_wa**2*nu_nr, 1
READ (UNIT=6, FMT=*) hr(i,:)
END DO
!Converting the unit number into coordinate for R in exponent term of phase factor in
!tight binding Hamiltonian matrix for magnetic moment along z axis case
DO i = 1, nu_wa**2*nu_nr, 1
tr(1) = hr(i,1) * rea_c(1,1) + hr(i,2) * rea_c(2,1) + hr(i,3) * rea_c(3,1)
tr(2) = hr(i,1) * rea_c(1,2) + hr(i,2) * rea_c(2,2) + hr(i,3) * rea_c(3,2)
tr(3) = hr(i,1) * rea_c(1,3) + hr(i,2) * rea_c(2,3) + hr(i,3) * rea_c(3,3)
hr(i,1:3) = tr
END DO
!
END IF
!Broadcasting Hamiltonian from rank 0 to all other ranks
CALL MPI_Bcast(nd, nu_nr, MPI_int, 0, MPI_COMM_WORLD, ierr)
CALL MPI_Bcast(hr, nu_wa**2*nu_nr*7, MPI_double, 0, MPI_COMM_WORLD, ierr)
!
!Opening file that stores the total conductivity value
OPEN (UNIT=7, FILE='Conductivity.dat', STATUS='UNKNOWN')
!
!!Building up the Hamiltonian
!Initialising array used to store the total conductivity
ct = CMPLX(0.0d0, 0.0d0)
!
!opening test files
open (unit=21,file='normalisedprefactor.dat',status='unknown')
open (unit=22,file='gd.dat',status='unknown')
open (unit=23,file='con.dat',status='unknown')
open (unit=24,file='par.dat',status='unknown')
open (unit=25,file='grga.dat',status='unknown')
open (unit=26,file='nfdk.dat',status='unknown')
!Reading the Cartesian coordinates of k-point mesh
DO j = 1, km(2), 1
IF (mod(j-1, world_size) .NE. world_rank) CYCLE
DO k = 1, km(1), 1
kp(k,:) = ka(j,k,:)
END DO
!Building up Hamiltonian matrix on k points and diagonalising the matrix to obtain Eigen vectors and values
CALL HAMCON(vd,kp,nu_wa,nu_nr,km(1),nd,hr,tb,ec,ev,vh,fermi,bv,dk,sc)
!
ct = ct + sc
END DO
!
CALL MPI_Barrier(MPI_COMM_WORLD, ierr)
IF (world_rank .EQ. 0) THEN
ALLOCATE (ct_all(world_size))
END IF
ct_all = CMPLX(0.0d0, 0.0d0)
CALL MPI_Gather(ct, 1, MPI_double_complex, ct_all, 1, MPI_double_complex, 0, MPI_COMM_WORLD, ierr)
!Writing total conductivity value into the file
IF (world_rank .EQ. 0) THEN
ct_total = CMPLX(0.0d0, 0.0d0)
DO i = 1, world_size, 1
ct_total = ct_total + ct_all(i)
END DO
WRITE (UNIT=7, FMT='(A33,$)') 'Conductivity without coefficient:'
WRITE (UNIT=7, FMT=*) ct_total
WRITE (UNIT=7, FMT='(A30,$)') 'Conductivity with coefficient:'
WRITE (UNIT=7, FMT=*) !ct_total /
END IF
!
IF (world_rank .EQ. 0) THEN
DEALLOCATE (ct_all)
END IF
!Stopping timer for computing conductivity
IF (world_rank .EQ. 0) THEN
CALL system_clock(t)
WRITE (*,'(A,f10.3)') "Time for HAM&CON (seconds):", (t-t0)/ra
WRITE (*,'(A,f10.3)') "Time for ALL (seconds):", (t-t00)/ra
END IF
!
!Finalising MPI
CALL MPI_Finalize(ierr)
!
!Deallocating array that stores the degeneracy of each Wigner-Seitz grid point
DEALLOCATE (nd)
!
!Deallocating array that stores the Hamiltonian matrix information in '_hr.dat' file
DEALLOCATE (hr)
!
!Deallocating the array to store the Cartesian coordinates of k-point mesh
DEALLOCATE (kp)
!
!Deallocating the array to store the extracted tight binding Hamiltonian matrix
DEALLOCATE (tb)
!
!Deallocating array that stores the tight binding Eigen vector matrix
DEALLOCATE (ec)
!
!Deallocating array that stores the tight binding Eigen value
DEALLOCATE (ev)
!
!Deallocating array to store the velocity of Hamiltonian matrix
DEALLOCATE (vh)
!
!Closing files with magnetization along z axis
CLOSE (UNIT=6)
!
!Closing file that store the total conductivity
CLOSE (UNIT=7)
!
close(unit=21)
close(unit=22)
close(unit=23)
close(unit=24)
close(unit=25)
close(unit=26)
STOP
END PROGRAM MAIN
The conductivityCALp.f90 source code
MODULE CAL
!USE MKL!LAPACK
IMPLICIT NONE
CONTAINS
!Building up tight binding Hamiltonian matrix and computing eigen vector matrix and eigen value
SUBROUTINE HAMCON(vd,kp,nu_wa,nu_ws,nu_kp,nd,hr,tb,ec,ev,vh,fermi,bv,dk,sc)
!External variables
INTEGER :: vd !Velocity direction of the Hamiltonian matrix
DOUBLE PRECISION :: kp(:,:) !Array to store the Cartesian coordinate of k-point mesh
INTEGER :: nu_wa !Number of wannier function
INTEGER :: nu_ws !Number of Wigner-Seitz grid point for different magnetic moment direction cases
INTEGER, ALLOCATABLE :: nd(:) !Degeneracy of each Wigner-Seitz grid point
DOUBLE PRECISION, ALLOCATABLE :: hr(:,:) !Array to store the Hamiltonian matrix information in '_hr.dat' file
DOUBLE COMPLEX, ALLOCATABLE :: tb(:,:) !Array to store the extracted tight binding Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: ec(:,:) !Array to store the Eigen vector matrix
DOUBLE PRECISION :: ev(:,:) !Array to store the Eigen value
DOUBLE COMPLEX, ALLOCATABLE :: vh(:,:) !Array to store the velocity of Hamiltonian matrix
DOUBLE PRECISION :: fermi !Fermi energy value
DOUBLE PRECISION :: bv !Broadening value
DOUBLE PRECISION :: dk(2) !Array to store the Delta kx and ky
DOUBLE COMPLEX :: sc !Sum of conductivity on all km(1) k points
!
!Internal variables
INTEGER :: nu_kp !Number of k point passed by the main code
INTEGER :: i, j, k, l, m !Integer for loop
DOUBLE COMPLEX :: dc(3) !Array to store complex number i
DOUBLE COMPLEX, ALLOCATABLE :: tr1(:,:) !Array for transitting Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: tr2(:,:) !Array for transitting Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: tr3(:,:) !Array for transitting Hamiltonian matrix
DOUBLE COMPLEX, ALLOCATABLE :: tr4(:,:) !Array for transitting Hamiltonian matrix
!
!Variables for ZHEEV subroutine
DOUBLE COMPLEX, ALLOCATABLE :: a(:,:) !Array for transitting the Eigen vector matrix
DOUBLE PRECISION, ALLOCATABLE :: w(:) !Array for transitting the Eigen value
INTEGER :: n, lda, lwork, info !Parameters in ZHEEV subroutine
DOUBLE PRECISION, ALLOCATABLE :: rwork(:) !Parameters in ZHEEV subroutine
DOUBLE COMPLEX, ALLOCATABLE :: work(:) !Parameters in ZHEEV subroutine
!
!Variables for computing conductivity
DOUBLE COMPLEX :: gr(2) !Array to store the retarded Green functions
DOUBLE COMPLEX :: ga(2) !Array to store the advanced Green functions
DOUBLE COMPLEX :: gd(2) !Array to store the Gr - Ga
DOUBLE COMPLEX, ALLOCATABLE :: mt1(:,:) !Array for storing conjugate eigen vectors
DOUBLE COMPLEX, ALLOCATABLE :: mt2(:,:) !Array for storing eigen vectors
DOUBLE COMPLEX, ALLOCATABLE :: mt3(:,:) !Array for storing conjugate eigen vectors
DOUBLE COMPLEX, ALLOCATABLE :: mt4(:,:) !Array for storing eigen vectors
DOUBLE COMPLEX, ALLOCATABLE :: mt5(:,:) !Array for storing velocity of Hamiltonian
DOUBLE PRECISION, ALLOCATABLE :: nm(:) !Array for storage of normalising prefactor
DOUBLE COMPLEX :: oc(2) !Conductivity value on single k point
!
write(unit=24,fmt=*)vd,nu_wa,nu_ws,fermi,bv,dk(1),dk(2)
tb = CMPLX(0.0d0, 0.0d0)
dc(1) = CMPLX(0.0d0, 1.0d0)
!Allocating array to transit Hamiltonian matrix
ALLOCATE (tr1(nu_wa,nu_wa))
ALLOCATE (tr2(nu_wa,nu_wa))
ALLOCATE (tr3(nu_wa,nu_wa))
ALLOCATE (tr4(nu_wa,nu_wa))
!
!Building up Hamiltonian matrix
DO i = 1, nu_kp, 1
tr1 = CMPLX(0.0d0, 0.0d0)
DO j = 1, nu_ws, 1
tr2 = CMPLX(0.0d0, 0.0d0)
DO k = 1, nu_wa**2, 1
l = hr(k+(j-1)*nu_wa**2,4)
m = hr(k+(j-1)*nu_wa**2,5)
dc(2) = CMPLX(hr(k+(j-1)*nu_wa**2,6), hr(k+(j-1)*nu_wa**2,7))
tr2(l,m) = EXP(dc(1) * (kp(i,1)*hr(k+(j-1)*nu_wa**2,1)&
+kp(i,2)*hr(k+(j-1)*nu_wa**2,2)&
+kp(i,3)*hr(k+(j-1)*nu_wa**2,3)))&
* dc(2)
END DO
tr2 = tr2 / DBLE(nd(j))
tr1 = tr1 + tr2
END DO
DO j = 1, nu_wa, 1
l = j + (i-1) * nu_wa
DO k = 1, nu_wa, 1
tb(l,k) = tb(l,k) + tr1(j,k)
END DO
END DO
END DO
!
!Initialising the array to store the Eigen vector matrix
ec = CMPLX(0.0d0, 0.0d0)
!
!Initialising the array to store the Eigen value
ev = 0.0d0
!
!Setting up all parameters used by ZHEEV subroutine
n = nu_wa
lda = nu_wa
ALLOCATE (a(nu_wa,nu_wa)) !Transitting Eigen vector matrix
ALLOCATE (w(nu_wa)) !Transitting Eigen value
ALLOCATE (work(2*nu_wa-1))
lwork = 2 * nu_wa - 1
ALLOCATE (rwork(3*nu_wa-2))
!
!Computing Hamiltonian matrix, Eigen vector matrix and Eigen value on each k point
DO i = 1, nu_kp, 1
!Initialising parameters used by ZHEEV subroutine
a = CMPLX(0.0d0, 0.0d0)
w = 0.0d0
work = CMPLX(0.0d0, 0.0d0)
rwork = 0.0d0
!
DO j = 1, nu_wa, 1
a(j,:) = tb(j+(i-1)*nu_wa,:)
END DO
CALL ZHEEV('V','L',n,a,lda,w,work,lwork,rwork,info)
DO j = 1, nu_wa, 1
ec(1+(i-1)*nu_wa:i*nu_wa,j) = a(:,j)
END DO
ev(i,:) = w
END DO
!
!Computing the velocity of the Hamiltonian matrix
vh = CMPLX(0.0d0, 0.0d0)
DO i = 1, nu_kp, 1
tr1 = CMPLX(0.0d0, 0.0d0)
tr2 = CMPLX(0.0d0, 0.0d0)
DO j = 1, nu_ws, 1
tr3 = CMPLX(0.0d0, 0.0d0)
tr4 = CMPLX(0.0d0, 0.0d0)
DO k = 1, nu_wa**2, 1
l = hr(k+(j-1)*nu_wa**2,4)
m = hr(k+(j-1)*nu_wa**2,5)
dc(2) = CMPLX(hr(k+(j-1)*nu_wa**2,6), hr(k+(j-1)*nu_wa**2,7))
!vx
dc(3) = CMPLX(hr(k+(j-1)*nu_wa**2,1), 0.0d0)
tr3(l,m) = EXP(dc(1) * (kp(i,1)*hr(k+(j-1)*nu_wa**2,1)&
+kp(i,2)*hr(k+(j-1)*nu_wa**2,2)&
+kp(i,3)*hr(k+(j-1)*nu_wa**2,3)))&
* dc(2) * dc(1) * dc(3)
!
!Vy
dc(3) = CMPLX(hr(k+(j-1)*nu_wa**2,2), 0.0d0)
tr4(l,m) = EXP(dc(1) * (kp(i,1)*hr(k+(j-1)*nu_wa**2,1)&
+kp(i,2)*hr(k+(j-1)*nu_wa**2,2)&
+kp(i,3)*hr(k+(j-1)*nu_wa**2,3)))&
* dc(2) * dc(1) * dc(3)
!
END DO
tr3 = tr3 / DBLE(nd(j))
tr4 = tr4 / DBLE(nd(j))
!Vx
tr1 = tr1 + tr3
!
!Vy
tr2 = tr2 + tr3
!
END DO
DO j = 1, nu_wa, 1
l = j + (i-1) * nu_wa
DO k = 1, nu_wa, 1
vh(l,k) = vh(l,k) + tr1(j,k)
vh(l,k+nu_wa) = vh(l,k+nu_wa) + tr2(j,k)
END DO
END DO
END DO
!
!Computing the conductivity
!
!Allocating the arrays that store the eigen vector, velocity of Hamiltonian and normalising prefactor
ALLOCATE (mt1(1,nu_wa))
ALLOCATE (mt2(nu_wa,1))
ALLOCATE (mt3(1,nu_wa))
ALLOCATE (mt4(nu_wa,1))
ALLOCATE (mt5(nu_wa,nu_wa))
ALLOCATE (nm(nu_wa))
!
!Initialising the array that stores the conductivity values on all km(1) k points
sc = CMPLX(0.0d0, 0.0d0)
!
!Computing the conductivity
DO i = 1, nu_kp, 1
!Normalized factor part
DO j = 1, nu_wa, 1
mt1(1,:) = DCONJG(ec(1+(i-1)*nu_wa:i*nu_wa,j))
mt2(:,1) = ec(1+(i-1)*nu_wa:i*nu_wa,j)
nm(j) = REAL(SUM(MATMUL(mt1,mt2)))
WRITE (UNIT=21, FMT=*) SUM(MATMUL(mt1,mt2))
nm(j) = 1.0d0 / DSQRT(nm(j))
END DO
!
!Velocity of Hamiltonian
IF (vd .EQ. 0) THEN
mt5 = vh(1+(i-1)*nu_wa:i*nu_wa,1:nu_wa)
ELSE
mt5 = vh(1+(i-1)*nu_wa:i*nu_wa,1+nu_wa:2*nu_wa)
END IF
!
!Conductivity part
oc = CMPLX(0.0d0, 0.0d0)
DO j = 1, nu_wa, 1
gr(1) = CMPLX (1.0d0, 0.0d0) / CMPLX(fermi - ev(i,j), bv)
ga(1) = CMPLX (1.0d0, 0.0d0) / CMPLX(fermi - ev(i,j), 0.0d0 - bv)
gd(1) = gr(1) - ga(1)
mt1(1,:) = DCONJG(ec(1+(i-1)*nu_wa:i*nu_wa,j))
mt2(:,1) = ec(1+(i-1)*nu_wa:i*nu_wa,j)
DO k = 1, nu_wa, 1
gr(2) = CMPLX (1.0d0, 0.0d0) / CMPLX(fermi - ev(i,k), bv)
ga(2) = CMPLX (1.0d0, 0.0d0) / CMPLX(fermi - ev(i,k), 0.0d0 - bv)
gd(2) = gr(2) - ga(2)
mt3(1,:) = DCONJG(ec(1+(i-1)*nu_wa:i*nu_wa,k))
mt4(:,1) = ec(1+(i-1)*nu_wa:i*nu_wa,k)
oc(1) = SUM(MATMUL(mt1,MATMUL(mt5,mt4)))*SUM(MATMUL(mt3,MATMUL(mt5,mt2)))*&
gd(1)*gd(2)*nm(j)*nm(j)*nm(k)*nm(k)*dk(1)*dk(2)
write(unit=22,fmt=*) SUM(MATMUL(mt1,MATMUL(mt5,mt4))), SUM(MATMUL(mt3,MATMUL(mt5,mt2)))
write(unit=25,fmt=*) gr(1),ga(1),gr(2),ga(2)
write(unit=26,fmt=*) nm(j), nm(k), dk(1), dk(2)
oc(2) = oc(2) + oc(1)
END DO
END DO
sc = sc + oc(2)
write(unit=23,fmt=*) oc(2),sc
!
END DO
!
!Deallocating arrays used for transitting Hamiltonian
DEALLOCATE (tr1)
DEALLOCATE (tr2)
DEALLOCATE (tr3)
DEALLOCATE (tr4)
!
!Deallocating arrays used by ZHEEV subroutine
DEALLOCATE (a)
DEALLOCATE (w)
DEALLOCATE (rwork)
DEALLOCATE (work)
!
!Deallocating arrays used for transitting eigen vectors
DEALLOCATE (mt1)
DEALLOCATE (mt2)
DEALLOCATE (mt3)
DEALLOCATE (mt4)
!
!Deallocating array used for transitting velocity of Hamiltonian
DEALLOCATE (mt5)
!
!Deallocating array used to store the normalising prefactor
DEALLOCATE (nm)
!
RETURN
END SUBROUTINE HAMCON
!
END MODULE CAL
I have a problem with combining several 2D arrays into one big 2D array using MPI in Fortran.
I have equal-size 2D arrays containing real numbers, and every array is contained in a different process:
numerical(subdsize,nt)
I want to combine them to one big array
numerical_final(nx,nt)
I am using the following command
CALL MPI_Gather(numerical(1:subdsize,nt),subdsize*nt,MPI_DOUBLE_PRECISION,numerical_final,subdsize*nt,MPI_DOUBLE_PRECISION,0, MPI_COMM_WORLD, mpierror)
Unfortunately the data contained in the numerical_final array are a complete mess. I have looked for solutions everywhere. I read this topic but it did not help me:
sending blocks of 2D array in C using MPI
I am using Intel Fortran 2018 compiler and Ubuntu 16.04.
Full code below.
I will be grateful for the help.
PROGRAM Advection
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER :: nt,nx,i,steptime,tag,j
DOUBLE PRECISION :: R_dx, R_dt, R_c, R_cfl, R_t
DOUBLE PRECISION, DIMENSION(3) :: R_input
DOUBLE PRECISION, ALLOCATABLE, DIMENSION(:) :: xcoord
DOUBLE PRECISION, ALLOCATABLE, DIMENSION(:,:) :: numerical, numerical_final
DOUBLE PRECISION :: time_begin,time_end,time_elapsed
INTEGER:: myrank,nproc,mpierror,xdomains,subdsize
INTEGER:: status(MPI_STATUS_SIZE)
CALL MPI_Init(mpierror)
CALL MPI_Comm_size(MPI_COMM_WORLD,nproc,mpierror)
CALL MPI_Comm_rank(MPI_COMM_WORLD,myrank,mpierror)
IF (nproc<2) THEN
PRINT*, "Error, only more than 1"
CALL MPI_ABORT
END IF
IF (myrank .EQ. 0) THEN
OPEN(UNIT = 1, FILE = 'inputdata.dat')
READ(1,*) R_input(1)
READ(1,*) R_input(2)
READ(1,*) R_input(3)
READ(1,*) nx
CLOSE(1)
END IF
CALL MPI_Bcast(R_input, 3, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, mpierror)
CALL MPI_Bcast(nx, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, mpierror)
R_c=R_input(1)
R_cfl=R_input(2)
R_t=R_input(3)
R_dx=80./(nx-1)
nt=15
R_dt=R_t/(nt-1)
IF (myrank .EQ. 0) THEN
PRINT*, R_c*R_dt/R_dx
END IF
xdomains = nproc
IF ((MOD(nx,xdomains))==0) THEN
subdsize =nx/xdomains
ELSE
DO
nx=nx+1
IF ((MOD(nx,xdomains)) .EQ. 0) THEN
subdsize=nx/xdomains
EXIT
END IF
END DO
END IF
ALLOCATE(xcoord(0:subdsize+1))
ALLOCATE(numerical(0:subdsize+1,nt))
DO i=0,subdsize+1
xcoord(i) = -40.-R_dx+i*R_dx+myrank*R_dx*subdsize
END DO
DO i = 0,subdsize+1
numerical(i,1)=0.5*(sign(1.,xcoord(i))+1.0)
END DO
IF (myrank .EQ. 0) THEN
DO i=1,nt
numerical(0:1,i)=0.
END DO
END IF
IF (myrank .EQ. nproc-1) THEN
DO i=1,nt
numerical(subdsize:subdsize+1,i)=1.
END DO
END IF
DO steptime=1, nt-1
tag = 1
IF (myrank .LT. nproc-1) THEN
CALL MPI_Send (numerical(subdsize,steptime),1,MPI_DOUBLE_PRECISION,myrank+1,tag,MPI_COMM_WORLD,mpierror)
END IF
IF (myrank .GT. 0) THEN
CALL MPI_Recv (numerical(0,steptime),1,MPI_DOUBLE_PRECISION,myrank-1,tag,MPI_COMM_WORLD,status,mpierror )
END IF
IF (myrank .EQ. 0) THEN
DO i = 2, subdsize+1
numerical(i,steptime+1)=numerical(i,steptime)-R_c*R_dt/R_dx*(numerical(i,steptime)-numerical(i-1,steptime))
END DO
ELSE
DO i = 1, subdsize+1
numerical(i,steptime+1)=numerical(i,steptime)-R_c*R_dt/R_dx*(numerical(i,steptime)-numerical(i-1,steptime))
END DO
END IF
END DO
ALLOCATE(numerical_final(nx,nt))
CALL MPI_Gather(numerical(1:subdsize,nt),subdsize*nt,MPI_DOUBLE_PRECISION,numerical_final,subdsize*nt,MPI_DOUBLE_PRECISION,0, MPI_COMM_WORLD, mpierror)
CALL MPI_Finalize(mpierror)
DEALLOCATE (numerical,numerical_final)
END PROGRAM
And inputfile
1.5 !c
0.5 !Courant
5.0 !time
100 !x points
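For reference, Fortran stores arrays column by column, so numerical(1:subdsize,nt) in the gather above is only one column of subdsize values while the call claims subdsize*nt elements, and the receiving side simply places each rank's block one after the other. A minimal sketch of one possible way around this, gathering one time step (one contiguous column) at a time and assuming numerical_final(nx,nt) is allocated on every rank as in the code above, is:
!Gather the interior points of each time step separately; rank r's block
!lands in rows r*subdsize+1 .. (r+1)*subdsize of column steptime
DO steptime = 1, nt
CALL MPI_Gather(numerical(1:subdsize,steptime), subdsize, MPI_DOUBLE_PRECISION, &
numerical_final(:,steptime), subdsize, MPI_DOUBLE_PRECISION, &
0, MPI_COMM_WORLD, mpierror)
END DO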
I have a 2D array of integers and I want to send its rows to separate processes. I assume that the number of rows (M=5) is not evenly divisible by the number of processes (size = 4), so in my case process 0 will obtain an additional row. The size of the 2D array A is MxN (5x10).
Here is my code
PROGRAM SCATTERV_MATRIX
INCLUDE 'mpif.h'
integer :: rank, size, ierr, dest, src, tag !MPI variables
integer :: status(MPI_STATUS_SIZE) !MPI variables
INTEGER, PARAMETER :: N = 10 !number of columns
INTEGER, PARAMETER :: M = 5 !number of rows
INTEGER, ALLOCATABLE, DIMENSION(:,:) :: A !MxN matrix A
INTEGER :: NEWTYPE, RESIZEDTYPE !MPI derived data types
INTEGER, ALLOCATABLE, DIMENSION(:,:) :: LOCAL
INTEGER, ALLOCATABLE :: SENDCOUNTS(:), DISPLS(:)
INTEGER :: RECVCOUNT, NRBUF
INTEGER :: MMIN, MEXTRA, INTSIZE, K, I, J
INTEGER :: START, EXTENT !(KIND=MPI_ADRESS_KIND)
CALL MPI_INIT(ierr)
CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)
IF ( rank == 0 ) THEN !allocate and create 2Darray
ALLOCATE( A (M, N) )
K = 1
DO I = 1, M
DO J = 1, N
A(I, J) = K
K = K + 1
END DO
END DO
END IF
ALLOCATE( SENDCOUNTS(0:size-1), DISPLS(0:size-1) )
MMIN = M/size !number of rows divided by number of processors
MEXTRA = MOD(M, size) !extra rows
K = 0
DO I = 0, size-1
IF (I < MEXTRA) THEN !SENDCOUNTS=(/2,1,1,1/)
SENDCOUNTS(I) = MMIN + 1
ELSE
SENDCOUNTS(I) = MMIN
END IF
DISPLS(I) = K !DISPLS=(/0,2,3,4/)
K = K + SENDCOUNTS(I)
END DO
RECVCOUNT = SENDCOUNTS(rank)
ALLOCATE( LOCAL(RECVCOUNT,N) )
CALL MPI_TYPE_VECTOR(N, 1, M, MPI_INTEGER, NEWTYPE, ierr)
CALL MPI_TYPE_COMMIT(NEWTYPE, ierr)
START = 0
CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
EXTENT = 1*INTSIZE
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
CALL MPI_TYPE_COMMIT(RESIZEDTYPE, ierr)
LOCAL(:, :) = 0
CALL MPI_SCATTERV( &
A, SENDCOUNTS, DISPLS, RESIZEDTYPE, &
LOCAL, RECVCOUNT*N, MPI_INTEGER, &
0, MPI_COMM_WORLD, ierr)
WRITE(*,*) rank, ':', LOCAL
CALL MPI_FINALIZE(ierr)
END PROGRAM SCATTERV_MATRIX
After successful compilation I got a "Program Exception - access violation" error. All my previous Fortran MPI programs worked fine. There must be some bug in the code, probably in MPI_SCATTERV.
I was mainly following this answer. I will be grateful for any suggestion. Thank you.
There's an error in your code:
INTEGER :: START, EXTENT !(KIND=MPI_ADRESS_KIND)
This line should be:
INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT
In MPI, anything that is related to a memory address, or to similar concepts such as a memory displacement, file size or file cursor, must not be a normal integer. Somehow you have this information in your comment, but you also misspell MPI_ADDRESS_KIND.
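In context, a minimal sketch of how the corrected declaration fits into the type-creation part of your program (only these lines change; everything else stays as posted):
INTEGER(KIND=MPI_ADDRESS_KIND) :: START, EXTENT !address-sized integers
!... remaining declarations and set-up unchanged ...
CALL MPI_TYPE_VECTOR(N, 1, M, MPI_INTEGER, NEWTYPE, ierr)
CALL MPI_TYPE_COMMIT(NEWTYPE, ierr)
CALL MPI_TYPE_SIZE(MPI_INTEGER, INTSIZE, ierr)
START = 0_MPI_ADDRESS_KIND
EXTENT = INT(INTSIZE, MPI_ADDRESS_KIND) !extent of one default integer
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
CALL MPI_TYPE_COMMIT(RESIZEDTYPE, ierr)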
Vladimir F correctly pointed out that you should 'USE MPI' instead of 'INCLUDE 'mpif.h''. This gives the compiler the opportunity to check the data types. For example, gfortran gives the following error message:
test.f90:59:71:
CALL MPI_TYPE_CREATE_RESIZED(NEWTYPE, START, EXTENT, RESIZEDTYPE, ierr)
1
Error: There is no specific subroutine for the generic
‘mpi_type_create_resized’ at (1)