Ignore:
Timestamp:
Apr 12, 2013 6:19:32 AM (8 years ago)
Author:
raasch
Message:

asynchronous transfer of ghost point data for acc-optimized version

File:
1 edited

Legend:

Unmodified
Added
Removed
  • palm/trunk/SOURCE/exchange_horiz.f90

    r1114 r1128  
    2020! Current revisions:
    2121! -----------------
    22 !
     22! modifications for asynchronous transfer,
     23! local variables req, wait_stat are global now, and have been moved to module
     24! pegrid
    2325!
    2426! Former revisions:
     
    9092
    9193#if ! defined( __check )
     94
     95    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )
     96
    9297#if defined( __parallel )
    93     INTEGER, DIMENSION(4)                 ::  req
    94     INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
    95 #endif
    96 
    97 
    98     CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )
    99 
    100 #if defined( __parallel )
    101 
    102 !
    103 !-- Exchange of lateral boundary values
     98
     99!
     100!-- Exchange in x-direction of lateral boundaries
    104101    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
    105102!
     
    130127       ELSE
    131128
    132           req = 0
    133 !
    134 !--       Send left boundary, receive right one (asynchronous)
    135           CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
    136                           pleft, 0, comm2d, req(1), ierr )
    137           CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
    138                           pright, 0, comm2d, req(2), ierr )
    139 !
    140 !--       Send right boundary, receive left one (asynchronous)
    141           CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
    142                           type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
    143           CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
    144                           type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )
    145 
    146           CALL MPI_WAITALL( 4, req, wait_stat, ierr )
     129!
     130!--       In case of background communication switched on, exchange is done
     131!--       either along x or along y
     132          IF ( send_receive == 'lr'  .OR.  send_receive == 'al' )  THEN
     133
     134             IF ( .NOT. sendrecv_in_background )  THEN
     135                req(1:4)  = 0
     136                req_count = 0
     137             ENDIF
     138!
     139!--          Send left boundary, receive right one (asynchronous)
     140             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
     141                             pleft, req_count, comm2d, req(req_count+1), ierr )
     142             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
     143                             pright, req_count, comm2d, req(req_count+2), ierr )
     144!
     145!--          Send right boundary, receive left one (asynchronous)
     146             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
     147                             type_yz(grid_level), pright, req_count+1, comm2d,    &
     148                             req(req_count+3), ierr )
     149             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
     150                             type_yz(grid_level), pleft,  req_count+1, comm2d,    &
     151                             req(req_count+4), ierr )
     152
     153             IF ( .NOT. sendrecv_in_background )  THEN
     154                CALL MPI_WAITALL( 4, req, wait_stat, ierr )
     155             ELSE
     156                req_count = req_count + 4
     157             ENDIF
     158
     159          ENDIF
    147160
    148161       ENDIF
     
    179192       ELSE
    180193
    181           req = 0
    182 !
    183 !--       Send front boundary, receive rear one (asynchronous)
    184           CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
    185                           psouth, 0, comm2d, req(1), ierr )
    186           CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
    187                           pnorth, 0, comm2d, req(2), ierr )
    188 !
    189 !--       Send rear boundary, receive front one (asynchronous)
    190           CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
    191                           type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
    192           CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
    193                           type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )
    194 
    195           CALL MPI_WAITALL( 4, req, wait_stat, ierr )
     194!
     195!--       In case of background communication switched on, exchange is done
     196!--       either along x or along y
     197          IF ( send_receive == 'ns'  .OR.  send_receive == 'al' )  THEN
     198
     199             IF ( .NOT. sendrecv_in_background )  THEN
     200                req(1:4)  = 0
     201                req_count = 0
     202             ENDIF
     203
     204!
     205!--          Send front boundary, receive rear one (asynchronous)
     206             CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
     207                             psouth, req_count, comm2d, req(req_count+1), ierr )
     208             CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
     209                             pnorth, req_count, comm2d, req(req_count+2), ierr )
     210!
     211!--          Send rear boundary, receive front one (asynchronous)
     212             CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
     213                             type_xz(grid_level), pnorth, req_count+1, comm2d,    &
     214                             req(req_count+3), ierr )
     215             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
     216                             type_xz(grid_level), psouth, req_count+1, comm2d,    &
     217                             req(req_count+4), ierr )
     218
     219             IF ( .NOT. sendrecv_in_background )  THEN
     220                CALL MPI_WAITALL( 4, req, wait_stat, ierr )
     221             ELSE
     222                req_count = req_count + 4
     223             ENDIF
     224
     225          ENDIF
    196226
    197227       ENDIF
Note: See TracChangeset for help on using the changeset viewer.