Changeset 4683 for palm/trunk/SOURCE/radiation_model_mod.f90
- Timestamp: Sep 18, 2020 9:23:09 AM
- File: 1 edited
Legend:
- Unmodified: leading space
- Added: leading "+"
- Removed: leading "-"
palm/trunk/SOURCE/radiation_model_mod.f90
--- radiation_model_mod.f90 (revision 4679)
+++ radiation_model_mod.f90 (revision 4683)

 ! -----------------
 ! $Id$
+! Add option to limit the size of MPI_Alltoall calls
+!
+! 4679 2020-09-14 13:53:52Z pavelkrc
 ! Enable warning about unrealistically large radiative fluxes by default
 !
…
 
 !-- configuration parameters (they can be setup in PALM config)
+    INTEGER(iwp) ::  bufsize_alltoall = 0            !< max no. of items to send in mpi_alltoall at once (0=infinite)
     LOGICAL ::  raytrace_mpi_rma = .TRUE.            !< use MPI RMA to access LAD and gridsurf from remote processes during raytracing
     LOGICAL ::  rad_angular_discretization = .TRUE.  !< whether to use fixed resolution discretization of view factors for
…
     NAMELIST /radiation_par/ albedo, albedo_lw_dif, albedo_lw_dir,             &
                              albedo_sw_dif, albedo_sw_dir, albedo_type,        &
+                             bufsize_alltoall,                                 &
                              constant_albedo, dt_radiation, emissivity,        &
                              lw_radiation, max_raytracing_dist,                &
…
     NAMELIST /radiation_parameters/ albedo, albedo_lw_dif, albedo_lw_dir,      &
                              albedo_sw_dif, albedo_sw_dir, albedo_type,        &
+                             bufsize_alltoall,                                 &
                              constant_albedo, dt_radiation, emissivity,        &
                              lw_radiation, max_raytracing_dist,                &
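The new namelist parameter bufsize_alltoall is read from both &radiation_par and
&radiation_parameters; its default of 0 keeps the previous behaviour of a single, unlimited
MPI_Alltoall(v) exchange. A minimal sketch of how it could be set in a PALM parameter file
(the value 500000 is an arbitrary illustration, not a recommendation from this changeset):

   &radiation_parameters
      bufsize_alltoall = 500000,   ! assumed example: at most 500000 records per MPI_Alltoall(v) call
   /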
…
     IMPLICIT NONE
 
-    INTEGER(iwp) :: i, j, k, d, ip, jp
+    INTEGER(iwp) :: i, j, k, ip, jp
     INTEGER(iwp) :: isvf, ksvf, icsf, kcsf, npcsfl, isvf_surflt, imrt, imrtf, ipcgb
     INTEGER(iwp) :: sd, td
…
     INTEGER(iwp), DIMENSION(:), ALLOCATABLE, TARGET ::  kcsflt_l, kpcsflt_l
     INTEGER(iwp), DIMENSION(:,:), POINTER ::  kcsflt, kpcsflt
-    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  icsflt, dcsflt, ipcsflt, dpcsflt
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  icsflt
     REAL(wp), DIMENSION(3) ::  uv
     LOGICAL ::  visible
…
     kcsflt(1:kdcsf,1:udim) => kcsflt_l(1:kdcsf*udim)
     ALLOCATE( icsflt(0:numprocs-1) )
-    ALLOCATE( dcsflt(0:numprocs-1) )
-    ALLOCATE( ipcsflt(0:numprocs-1) )
-    ALLOCATE( dpcsflt(0:numprocs-1) )
 
 !-- fill out arrays of csf values and
…
 !-- for particular precessors
     icsflt = 0
-    dcsflt = 0
     ip = -1
     j = -1
-    d = 0
     DO kcsf = 1, ncsfl
        j = j+1
…
 !--       number of elements of previous block
           IF ( ip>=0 ) icsflt(ip) = j
-          d = d+j
 !--       blank blocks
           DO jp = ip+1, acsf(kcsf)%ip-1
 !--          number of elements is zero, displacement is equal to previous
              icsflt(jp) = 0
-             dcsflt(jp) = d
           ENDDO
 !--       the actual block
           ip = acsf(kcsf)%ip
-          dcsflt(ip) = d
           j = 0
        ENDIF
…
     j = j+1
     IF ( ip>=0 ) icsflt(ip) = j
-    d = d+j
     DO jp = ip+1, numprocs-1
 !--    number of elements is zero, displacement is equal to previous
        icsflt(jp) = 0
-       dcsflt(jp) = d
     ENDDO
 
…
 
 #if defined( __parallel )
-!-- scatter and gather the number of elements to and from all processor
-!-- and calculate displacements
-    IF ( debug_output ) CALL debug_message( 'Scatter and gather the number of elements to and from all processor', 'info' )
-
-    CALL MPI_AlltoAll(icsflt,1,MPI_INTEGER,ipcsflt,1,MPI_INTEGER,comm2d, ierr)
-
-    IF ( ierr /= 0 ) THEN
-       WRITE(9,*) 'Error MPI_AlltoAll1:', ierr, SIZE(icsflt), SIZE(ipcsflt)
-       FLUSH(9)
-    ENDIF
-
-    npcsfl = SUM(ipcsflt)
-    d = 0
-    DO i = 0, numprocs-1
-       dpcsflt(i) = d
-       d = d + ipcsflt(i)
-    ENDDO
-
-!-- exchange csf fields between processors
+!
+!-- Exchange csf fields between processors
     IF ( debug_output ) CALL debug_message( 'Exchange CSF fields between processors', 'start' )
-    udim = max(npcsfl,1)
-    ALLOCATE( pcsflt_l(ndcsf*udim) )
-    pcsflt(1:ndcsf,1:udim) => pcsflt_l(1:ndcsf*udim)
-    ALLOCATE( kpcsflt_l(kdcsf*udim) )
-    kpcsflt(1:kdcsf,1:udim) => kpcsflt_l(1:kdcsf*udim)
-    CALL MPI_AlltoAllv(csflt_l, ndcsf*icsflt, ndcsf*dcsflt, MPI_REAL,           &
-                       pcsflt_l, ndcsf*ipcsflt, ndcsf*dpcsflt, MPI_REAL, comm2d, ierr)
-    IF ( ierr /= 0 ) THEN
-       WRITE(9,*) 'Error MPI_AlltoAllv1:', ierr, SIZE(ipcsflt), ndcsf*icsflt,   &
-                  ndcsf*dcsflt, SIZE(pcsflt_l), ndcsf*ipcsflt, ndcsf*dpcsflt
-       FLUSH(9)
-    ENDIF
-    IF ( debug_output ) CALL debug_message( 'Exchange CSF fields: finished first part', 'info' )
-
-    CALL MPI_AlltoAllv(kcsflt_l, kdcsf*icsflt, kdcsf*dcsflt, MPI_INTEGER,       &
-                       kpcsflt_l, kdcsf*ipcsflt, kdcsf*dpcsflt, MPI_INTEGER, comm2d, ierr)
-    IF ( ierr /= 0 ) THEN
-       WRITE(9,*) 'Error MPI_AlltoAllv2:', ierr, SIZE(kcsflt_l), kdcsf*icsflt,  &
-                  kdcsf*dcsflt, SIZE(kpcsflt_l), kdcsf*ipcsflt, kdcsf*dpcsflt
-       FLUSH(9)
-    ENDIF
+
+    CALL radiation_exchange_alltoall(icsflt, kdcsf, ndcsf, kcsflt_l, csflt_l,   &
+                                     npcsfl, kpcsflt_l, pcsflt_l)
+    pcsflt(1:ndcsf,1:npcsfl) => pcsflt_l(0:ndcsf*npcsfl-1)
+    kpcsflt(1:kdcsf,1:npcsfl) => kpcsflt_l(0:kdcsf*npcsfl-1)
+
     IF ( debug_output ) CALL debug_message( 'Exchange CSF fields between processors', 'end' )
…
     DEALLOCATE( kcsflt_l )
     DEALLOCATE( icsflt )
-    DEALLOCATE( dcsflt )
-    DEALLOCATE( ipcsflt )
-    DEALLOCATE( dpcsflt )
 
 !-- sort csf ( a version of quicksort )
…
 
 
+!--------------------------------------------------------------------------------------------------!
+!
+! Description:
+! ------------
+!> Performs MPI alltoall exchange for integer and floating-point data, optionally splitting the
+!> exchange to multiple iterations with maximum number of items per iteration.
+!--------------------------------------------------------------------------------------------------!
+ SUBROUTINE radiation_exchange_alltoall( ntosend, npint, npfloat, isendbuf, fsendbuf,              &
+                                         nrecv, irecvbuf, frecvbuf )
+    IMPLICIT NONE
+
+    INTEGER(iwp), DIMENSION(0:), INTENT(IN)              ::  ntosend   !< number of records to send
+                                                                       !< to each process
+    INTEGER(iwp), INTENT(IN)                             ::  npint     !< no. of integers in a record
+    INTEGER(iwp), INTENT(IN)                             ::  npfloat   !< no. of floats in a record
+    INTEGER(iwp), DIMENSION(0:), INTENT(IN)              ::  isendbuf  !< send buffer with integers
+    REAL(wp), DIMENSION(0:), INTENT(IN)                  ::  fsendbuf  !< send buffer with floats
+    INTEGER(iwp), INTENT(OUT)                            ::  nrecv     !< total no. of records received
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE, INTENT(OUT) ::  irecvbuf  !< int receive buffer (will be
+                                                                       !< allocated to proper size
+    REAL(wp), DIMENSION(:), ALLOCATABLE, INTENT(OUT)     ::  frecvbuf  !< float receive buffer
+
+    INTEGER(iwp) ::  i, j      !< iterators
+    INTEGER(iwp) ::  iproc     !< process iterator
+    INTEGER(iwp) ::  iter      !< current iteration
+    INTEGER(iwp) ::  niters    !< local number of iterations needed
+    INTEGER(iwp) ::  nitersg   !< global no. of iterations needed
+    INTEGER(iwp) ::  nmaxsend  !< max no. of records sent to each process
+                               !< in each iteration
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  drecv     !< received data displacements per proc
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  drecvnow  !< current receive displacements
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  dsend     !< sent data displacements per process
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  dsendnow  !< current send displacements
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nrecvnow  !< no. of items to receive in current iteration
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nsendnow  !< no. of items to send in current iteration
+    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  ntorecv   !< no. of records to receive from each process
+
+    ALLOCATE( ntorecv(0:numprocs-1) )
+    ALLOCATE( dsend(0:numprocs) )
+    ALLOCATE( drecv(0:numprocs) )
+    ALLOCATE( dsendnow(0:numprocs-1) )
+    ALLOCATE( drecvnow(0:numprocs-1) )
+    ALLOCATE( nsendnow(0:numprocs-1) )
+    ALLOCATE( nrecvnow(0:numprocs-1) )
+!
+!-- Exchange send and receive sizes
+    CALL MPI_Alltoall(ntosend, 1, MPI_INTEGER, ntorecv, 1, MPI_INTEGER, comm2d, ierr)
+    IF ( ierr /= 0 ) THEN
+       WRITE (9,*) 'Error at MPI_Alltoall 1:', ierr, ntosend, ntorecv
+       FLUSH (9)
+    ENDIF
+!
+!-- Calculate initial displacements
+    i = 0
+    j = 0
+    DO iproc = 0, numprocs-1
+       dsend(iproc) = i
+       dsendnow(iproc) = i
+       drecv(iproc) = j
+       drecvnow(iproc) = j
+       i = i + ntosend(iproc)
+       j = j + ntorecv(iproc)
+    ENDDO
+    dsend(numprocs) = i  ! behind last pos = sum of all to send
+    drecv(numprocs) = j  ! behind last pos = sum of all to receive
+    nrecv = j
+!
+!-- Allocate receive buffers
+    ALLOCATE( irecvbuf(0:nrecv*npint-1) )
+    ALLOCATE( frecvbuf(0:nrecv*npfloat-1) )
+!
+!-- Determine number of iterations among all processes
+!-- (e.g. this process may have nothing to send and receive, yet some other still might)
+    IF ( bufsize_alltoall <= 0 ) THEN
+       nitersg = 1
+       nmaxsend = HUGE(nitersg)
+    ELSE
+       nmaxsend = bufsize_alltoall
+       niters = (MAXVAL(ntosend(:)) + nmaxsend - 1) / nmaxsend
+       CALL MPI_Allreduce(niters, nitersg, 1, MPI_INTEGER, MPI_MAX, comm2d, ierr)
+       IF ( nitersg > 1 ) THEN
+          WRITE( debug_string, '("The MPI AllToAll call has been split to ' //                     &
+                 '",I8," iterations of max. ",I12," records ' //                                   &
+                 'each.")' )                                                                       &
+                 nitersg, bufsize_alltoall
+          CALL debug_message( debug_string, 'info' )
+       ENDIF
+    ENDIF
+!
+!-- Iterate alltoall using max-sized buffers
+    DO iter = 1, nitersg
+       nsendnow(:) = MIN(dsend(1:) - dsendnow(:), nmaxsend)
+       nrecvnow(:) = MIN(drecv(1:) - drecvnow(:), nmaxsend)
+!
+!--    Send integer data
+       CALL MPI_Alltoallv(isendbuf, nsendnow(:)*npint, dsendnow(:)*npint, MPI_INTEGER,             &
+                          irecvbuf, nrecvnow(:)*npint, drecvnow(:)*npint, MPI_INTEGER,             &
+                          comm2d, ierr)
+       IF ( ierr /= 0 ) THEN
+          WRITE (9,*) 'Error at MPI_Alltoallv 1:', ierr, iter, nmaxsend, dsend, dsendnow, nsendnow,&
+                      drecv, drecvnow, nrecvnow
+          FLUSH (9)
+       ENDIF
+!
+!--    Send floating point data
+       CALL MPI_Alltoallv(fsendbuf, nsendnow(:)*npfloat, dsendnow(:)*npfloat, MPI_REAL,            &
+                          frecvbuf, nrecvnow(:)*npfloat, drecvnow(:)*npfloat, MPI_REAL,            &
+                          comm2d, ierr)
+       IF ( ierr /= 0 ) THEN
+          WRITE (9,*) 'Error at MPI_Alltoallv 2:', ierr, iter, nmaxsend, dsend, dsendnow, nsendnow,&
+                      drecv, drecvnow, nrecvnow
+          FLUSH (9)
+       ENDIF
+!
+!--    Shift displacements for next iteration
+       dsendnow(:) = dsendnow(:) + nsendnow(:)
+       drecvnow(:) = drecvnow(:) + nrecvnow(:)
+    ENDDO
+
+    DEALLOCATE( ntorecv, dsend, drecv, dsendnow, drecvnow, nsendnow, nrecvnow )
+ END SUBROUTINE radiation_exchange_alltoall
+
 !------------------------------------------------------------------------------!
 !
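As a worked illustration of the splitting logic (the numbers are assumed, not taken from the
changeset): with bufsize_alltoall = 100000 and a largest per-target block of MAXVAL(ntosend) =
250000 records, the ceiling division above gives niters = 3 on that rank; the MPI_Allreduce with
MPI_MAX then makes every rank in comm2d execute the same global number of iterations nitersg,
because MPI_Alltoallv is collective and must be entered by all ranks even after a rank has nothing
left to send. The standalone sketch below reproduces just that arithmetic:

    PROGRAM alltoall_chunk_demo
!--    Standalone sketch of the iteration count used in radiation_exchange_alltoall;
!--    variable names and values here are assumed examples, not part of the changeset.
       IMPLICIT NONE
       INTEGER ::  bufsize_alltoall = 100000   !< max records per chunk (0 would mean unlimited)
       INTEGER ::  max_tosend       = 250000   !< largest per-target record count on this rank
       INTEGER ::  niters                      !< local number of alltoall iterations

!--    Same ceiling division as in radiation_exchange_alltoall
       niters = ( max_tosend + bufsize_alltoall - 1 ) / bufsize_alltoall
       PRINT *, 'local iterations needed: ', niters   ! prints 3
    END PROGRAM alltoall_chunk_demo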