 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 686 2011-02-20 13:45:08Z gryschka $
! bugfix: replaced "==" by ".EQ." in case of a logical expression
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid; the remaining types are used for the coarser
! grid levels. Exchange is done via MPI vectors with a dynamic number of
! ghost points, which depends on the advection scheme. The exchange between
! left and right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional gridpoints along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16 2006/02/23 12:19:08 raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1 1997/07/24 11:13:29 raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of ghost point layers at the lateral boundaries (on parallel
! computers) and application of cyclic lateral boundary conditions,
! respectively.
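!
! A typical call might look as follows (a sketch, not taken from this file;
! in PALM the standard number of ghost layers is held in the variable nbgp):
!
!    CALL exchange_horiz( u, nbgp )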
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
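!
!-- Request handles and status array for the four nonblocking transfers
!-- issued in the asynchronous branch below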
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local,                  &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- In the Poisson multigrid solver arrays with coarser grids are used.
!-- Set i appropriately, because the coarser grids have different
!-- MPI datatypes type_xz, type_yz.
    IF ( exchange_mg ) THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF
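!
!-- Note: type_xz(i) and type_yz(i) are assumed to be MPI derived datatypes
!-- that each describe one block of nbgp_local ghost planes (in PALM they
!-- are set up in init_pegrid); index 0 refers to the normal grid, the
!-- higher indices to the coarser multigrid levels.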

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 .OR. mg_switch_to_pe0 ) THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' ) THEN
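!
!--       Copy the nbgp_local rightmost interior columns into the left ghost
!--       layers and the nbgp_local leftmost interior columns into the right
!--       ghost layers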
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange ) THEN
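!
!--       Each send/receive buffer below is passed as the start address of
!--       the first plane to be transferred; the nbgp_local-plane extent is
!--       assumed to be carried by the MPI derived datatype type_yz(i)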
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0,          &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0,          &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV(                                                      &
              ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1,  &
              ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1,  &
              comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl), 1, type_yz(i), pleft,    &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
                          0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(i), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,            &
                          type_yz(i), pleft, 1, comm2d, req(4), ierr )

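!
!--       Wait until all four transfers have completed before the ghost
!--       layers are used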
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

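!
!-- The same exchange as above, now for the y direction (front/rear
!-- neighbours psouth and pnorth)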

    IF ( pdims(2) == 1 .OR. mg_switch_to_pe0 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' ) THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange ) THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0,          &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0,          &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV(                                                      &
              ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1,  &
              ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1,  &
              comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local), 1, type_xz(i), psouth,   &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
                          0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,            &
                          type_xz(i), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' ) THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' ) THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz