 SUBROUTINE exchange_horiz( ar, xrp, ynp )

!------------------------------------------------------------------------------!
! Actual revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 4 2007-02-13 11:33:16Z raasch $
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  xrp, ynp

#if defined( __parallel )
    INTEGER ::  typexz
    INTEGER, DIMENSION(4) ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif

    REAL ::  ar(nzb:nzt+1,nys-1:nyn+ynp+1,nxl-1:nxr+xrp+1)


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,nys:nyn,nxl-1) = ar(:,nys:nyn,nxr)
          ar(:,nys:nyn,nxr+1) = ar(:,nys:nyn,nxl)
       ENDIF

    ELSE

       req = 0
!
!--    Send left boundary, receive right one
       CALL MPI_ISEND(                                                        &
                 ar(nzb,nys-1,nxl), ngp_yz(grid_level), MPI_REAL, pleft, 0,   &
                 comm2d, req(1), ierr )
       CALL MPI_IRECV(                                                        &
                 ar(nzb,nys-1,nxr+1), ngp_yz(grid_level), MPI_REAL, pright, 0,&
                 comm2d, req(2), ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_ISEND(                                                        &
                 ar(nzb,nys-1,nxr), ngp_yz(grid_level), MPI_REAL, pright, 1,  &
                 comm2d, req(3), ierr )
       CALL MPI_IRECV(                                                        &
                 ar(nzb,nys-1,nxl-1), ngp_yz(grid_level), MPI_REAL, pleft, 1, &
                 comm2d, req(4), ierr )

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-1,:) = ar(:,nyn,:)
          ar(:,nyn+1,:) = ar(:,nys,:)
       ENDIF

    ELSE
!
!--    Set the MPI data type, which depends on the size of the array
!--    (the v array has an additional gridpoint along y in case of non-cyclic
!--    boundary conditions)
       IF ( ynp == 0 )  THEN
          typexz = type_xz(grid_level)
       ELSE
          typexz = type_xz_p
       ENDIF

       req = 0
!
!--    Send front boundary, receive rear one
       CALL MPI_ISEND( ar(nzb,nys,nxl-1),   1, typexz, psouth, 0, comm2d,     &
                       req(1), ierr )
       CALL MPI_IRECV( ar(nzb,nyn+1,nxl-1), 1, typexz, pnorth, 0, comm2d,     &
                       req(2), ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_ISEND( ar(nzb,nyn,nxl-1),   1, typexz, pnorth, 1, comm2d,     &
                       req(3), ierr )
       CALL MPI_IRECV( ar(nzb,nys-1,nxl-1), 1, typexz, psouth, 1, comm2d,     &
                       req(4), ierr )

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,nys:nyn,nxl-1) = ar(:,nys:nyn,nxr)
       ar(:,nys:nyn,nxr+1) = ar(:,nys:nyn,nxl)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-1,:) = ar(:,nyn,:)
       ar(:,nyn+1,:) = ar(:,nys,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz
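
!
!-- Usage sketch (illustrative only): exchange_horiz is typically called right
!-- after a prognostic array has been updated. The array names and the xrp/ynp
!-- values below are assumptions inferred from the typexz selection above and
!-- are not prescribed by this file.
!--
!--    CALL exchange_horiz( pt, 0, 0 )   ! scalar on the standard grid
!--    CALL exchange_horiz( v,  0, 1 )   ! v, assuming one extra grid point
!--                                      ! along y under non-cyclic boundary
!--                                      ! conditions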