 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 668 2010-12-23 13:22:58Z heinze $
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the individual grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are not regarded any more
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16 2006/02/23 12:19:08 raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1 1997/07/24 11:13:29 raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- Select the MPI datatype index: the current multigrid level, or 0 for the
!-- normal grid
    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       req = 0
!
!--    Send left boundary, receive right one
       CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl), 1, type_yz(i), pleft, 0,   &
                       comm2d, req(1), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0,&
                       comm2d, req(2), ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,            &
                       type_yz(i), pright, 1, comm2d, req(3), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,              &
                       type_yz(i), pleft, 1, comm2d, req(4), ierr )

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       req = 0
!
!--    Send front boundary, receive rear one. The initial send address has
!--    been shifted; a single instance of type_xz(i) already covers all
!--    nbgp_local ghost lines.
       CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local), 1, type_xz(i), psouth, 0,  &
                       comm2d, req(1), ierr )
       CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0,&
                       comm2d, req(2), ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,            &
                       type_xz(i), pnorth, 1, comm2d, req(3), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,              &
                       type_xz(i), psouth, 1, comm2d, req(4), ierr )

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz
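

!
!-- The derived MPI datatypes type_yz(:) and type_xz(:) used above must be
!-- created and committed elsewhere before exchange_horiz is called (in PALM
!-- this is done during the processor-grid setup). The routine below is only
!-- a minimal, hedged sketch of how a strided vector type for the north/south
!-- (xz) ghost exchange could be built for an array dimensioned as ar above;
!-- the subroutine name and all argument names are hypothetical and not part
!-- of the original file.
#if defined( __parallel )
 SUBROUTINE build_xz_ghost_type_sketch( nzb, nzt, nys, nyn, nxl, nxr,         &
                                        nbgp_local, type_xz_sketch )

    IMPLICIT NONE

    INCLUDE 'mpif.h'

    INTEGER ::  nzb, nzt, nys, nyn, nxl, nxr, nbgp_local, type_xz_sketch
    INTEGER ::  ierr, ngp_z, ngp_yz

!
!-- Points along z (including one layer below nzb and above nzt) and in a
!-- complete yz-plane of the subdomain array including y ghost rows
    ngp_z  = nzt - nzb + 2
    ngp_yz = ngp_z * ( nyn - nys + 1 + 2 * nbgp_local )

!
!-- One block of nbgp_local contiguous y-rows (z varies fastest) per x-plane
!-- of the subdomain, blocks separated by the stride of a full yz-plane
    CALL MPI_TYPE_VECTOR( nxr - nxl + 1 + 2 * nbgp_local,                     &
                          nbgp_local * ngp_z, ngp_yz, MPI_REAL,               &
                          type_xz_sketch, ierr )
    CALL MPI_TYPE_COMMIT( type_xz_sketch, ierr )

 END SUBROUTINE build_xz_ghost_type_sketch
#endif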