 SUBROUTINE exchange_horiz_2d( ar )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 842 2012-02-28 12:37:31Z gryschka $
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 707 2011-03-29 11:39:40Z raasch
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.9 2006/05/12 19:15:52 letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1 1998/01/23 09:58:21 raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    REAL    ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,          &
                          ar(nysg,nxlg), 1, type_y, pleft,  1,                &
                          comm2d, status, ierr )
    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )
    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
    IF ( inflow_l .OR. outflow_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF

    IF ( inflow_r .OR. outflow_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF

    IF ( inflow_s .OR. outflow_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF

    IF ( inflow_n .OR. outflow_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )
#endif

 END SUBROUTINE exchange_horiz_2d
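
!
!-- Usage note (an illustrative sketch, not part of the original file): the
!-- routine above can be called with any 2D REAL array that is declared with
!-- the ghost-point bounds (nysg:nyng,nxlg:nxrg) used in the declaration of
!-- ar, for example
!--
!--    CALL exchange_horiz_2d( z0 )
!--
!-- The array name z0 is an assumption for illustration only; any array with
!-- matching bounds can be exchanged.
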
 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright, 1,      &
                          ar(nysg,nxlg), 1, type_y_int, pleft,  1,            &
                          comm2d, status, ierr )
    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )
    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )
#endif

 END SUBROUTINE exchange_horiz_2d_int
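
!
!-- Note on the MPI derived datatypes used above: type_x/type_y and their
!-- _int variants are not defined in this file; they are assumed to be set up
!-- and committed elsewhere, presumably during grid/PE initialization. For the
!-- column-major array ar(nysg:nyng,nxlg:nxrg), a minimal sketch of such
!-- definitions could look as follows (the names ngp_y and ngp_x are
!-- illustrative assumptions, not taken from this file):
!--
!--    ngp_y = nyng - nysg + 1    ! points per column, including ghost layers
!--    ngp_x = nxrg - nxlg + 1    ! number of columns, including ghost layers
!--
!--    !-- nbgp full (contiguous) columns for the left/right exchange
!--    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
!--    CALL MPI_TYPE_COMMIT( type_y, ierr )
!--
!--    !-- nbgp rows out of every column for the south/north exchange
!--    CALL MPI_TYPE_VECTOR( ngp_x, nbgp, ngp_y, MPI_REAL, type_x, ierr )
!--    CALL MPI_TYPE_COMMIT( type_x, ierr )
!--
!-- The _int variants would use MPI_INTEGER as the old type.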