!> @file exchange_horiz_mod.f90
!------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software
! Foundation, either version 3 of the License, or (at your option) any later
! version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2020 Leibniz Universitaet Hannover
!------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_mod.f90 4474 2020-03-26 09:32:18Z monakurppa $
! bugfix for correct usage of alternative communicators in case of
! 1d-decompositions and in non-parallel mode
!
! 4461 2020-03-12 16:51:59Z raasch
! optional communicator added to exchange_horiz
!
! 4457 2020-03-11 14:20:43Z raasch
! routine has been modularized, file exchange_horiz_2d has been merged
!
! 4429 2020-02-27 15:24:30Z raasch
! bugfix: cpp-directives added for serial mode
!
! 4360 2020-01-07 11:25:50Z suehring
! Corrected "Former revisions" section
!
! 3761 2019-02-25 15:31:42Z raasch
! OpenACC directives re-formatted
!
! 3657 2019-01-07 20:14:18Z knoop
! OpenACC port for SPEC
!
! Revision 1.1 1997/07/24 11:13:29 raasch
! Initial revision
!
!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
!------------------------------------------------------------------------------!
 MODULE exchange_horiz_mod

    USE kinds

    USE pegrid

    IMPLICIT NONE

    PRIVATE
    PUBLIC exchange_horiz, exchange_horiz_int, exchange_horiz_2d,              &
           exchange_horiz_2d_byte, exchange_horiz_2d_int

    INTERFACE exchange_horiz
       MODULE PROCEDURE exchange_horiz
    END INTERFACE exchange_horiz

    INTERFACE exchange_horiz_int
       MODULE PROCEDURE exchange_horiz_int
    END INTERFACE exchange_horiz_int

    INTERFACE exchange_horiz_2d
       MODULE PROCEDURE exchange_horiz_2d
    END INTERFACE exchange_horiz_2d

    INTERFACE exchange_horiz_2d_byte
       MODULE PROCEDURE exchange_horiz_2d_byte
    END INTERFACE exchange_horiz_2d_byte

    INTERFACE exchange_horiz_2d_int
       MODULE PROCEDURE exchange_horiz_2d_int
    END INTERFACE exchange_horiz_2d_int


 CONTAINS


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
!> This routine is for REAL 3d-arrays.
!------------------------------------------------------------------------------!
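!> A typical call for a prognostic 3d-array with the default number of ghost
!> layers might look as follows (illustrative sketch only; 'pt' stands for any
!> suitably dimensioned 3d-array):
!>    CALL exchange_horiz( pt, nbgp )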
 SUBROUTINE exchange_horiz( ar, nbgp_local, alternative_communicator )

    USE control_parameters,                                                    &
        ONLY:  bc_lr_cyc, bc_ns_cyc

#if defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  grid_level, mg_switch_to_pe0, synchronous_exchange
#endif

    USE cpulog,                                                                &
        ONLY:  cpu_log, log_point_s

    USE indices,                                                               &
        ONLY:  nxl, nxr, nyn, nys, nzb, nzt


#if defined( _OPENACC )
    INTEGER(iwp) ::  i  !< loop index for the OpenACC update loops
#endif

    INTEGER(iwp), OPTIONAL ::  alternative_communicator  !< alternative MPI communicator to be used

    INTEGER(iwp) ::  communicator  !< communicator that is used as argument in MPI calls
    INTEGER(iwp) ::  left_pe       !< id of left pe that is used as argument in MPI calls
    INTEGER(iwp) ::  nbgp_local    !< number of ghost point layers
    INTEGER(iwp) ::  north_pe      !< id of north pe that is used as argument in MPI calls
    INTEGER(iwp) ::  right_pe      !< id of right pe that is used as argument in MPI calls
    INTEGER(iwp) ::  south_pe      !< id of south pe that is used as argument in MPI calls

    REAL(wp), DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local,               &
                        nxl-nbgp_local:nxr+nbgp_local) ::  ar  !< 3d-array for which exchange is done


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( _OPENACC )
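!
!-- In GPU runs the outermost x-boundary layers are first copied from the
!-- device to the host, because the MPI calls further below operate on host
!-- memory (this explanatory note assumes a non-GPU-aware MPI; the received
!-- ghost layers are copied back to the device after the exchange).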
    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC HOST(ar(:,:,nxr-nbgp_local+1:nxr)) &
    !$ACC HOST(ar(:,:,nxl:nxl+nbgp_local-1))

!
!-- Wait for first UPDATE to complete before starting the others.
    !$ACC WAIT(1) ASYNC(2)
    ! ar(:,:,nxl-nbgp_local:nxl-1) is overwritten by first part below
    ! ar(:,:,nxl:nxl+nbgp_local-1) has been transferred above
    DO  i = nxl+nbgp_local, nxr-nbgp_local
       !$ACC UPDATE IF_PRESENT ASYNC(2) &
       !$ACC HOST(ar(:,nyn-nbgp_local+1:nyn,i)) &
       !$ACC HOST(ar(:,nys:nys+nbgp_local-1,i))
    ENDDO
    ! ar(:,:,nxr-nbgp_local+1:nxr) has been transferred above
    ! ar(:,:,nxr+1:nxr+nbgp_local) is overwritten by first part below

!
!-- Wait for first UPDATE to complete before starting MPI.
    !$ACC WAIT(1)
#endif

!
!-- Set the communicator to be used
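!-- (the value of alternative_communicator selects an entry of
!-- communicator_configurations; as the checks further below show, values 1
!-- and 2 imply cyclic conditions along x, values 1 and 3 cyclic conditions
!-- along y)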
    IF ( PRESENT( alternative_communicator ) )  THEN
!
!--    Alternative communicator is to be used
       communicator = communicator_configurations(alternative_communicator)%mpi_communicator
       left_pe  = communicator_configurations(alternative_communicator)%pleft
       right_pe = communicator_configurations(alternative_communicator)%pright
       south_pe = communicator_configurations(alternative_communicator)%psouth
       north_pe = communicator_configurations(alternative_communicator)%pnorth

    ELSE
!
!--    Main communicator is to be used
       communicator = comm2d
       left_pe  = pleft
       right_pe = pright
       south_pe = psouth
       north_pe = pnorth

    ENDIF

#if defined( __parallel )

!
!-- Exchange in x-direction of lateral boundaries
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( PRESENT( alternative_communicator ) )  THEN
          IF ( alternative_communicator <= 2 )  THEN
             ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
             ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
          ENDIF
       ELSE
          IF ( bc_lr_cyc )  THEN
             ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
             ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
          ENDIF
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,  0,    &
                             ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe, 0,    &
                             communicator, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,                           &
                             type_yz(grid_level), right_pe, 1,                                     &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,                             &
                             type_yz(grid_level), left_pe,  1,                                     &
                             communicator, status, ierr )

       ELSE

!
!--       Asynchronous exchange
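!--       (two MPI_ISEND/MPI_IRECV pairs are posted and then completed
!--       together by the MPI_WAITALL further below)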
          IF ( send_receive == 'lr'  .OR.  send_receive == 'al' )  THEN

             req(1:4)  = 0
             req_count = 0
!
!--          Send left boundary, receive right one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,        &
                             req_count, communicator, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe,       &
                             req_count, communicator, req(req_count+2), ierr )
!
!--          Send right boundary, receive left one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(grid_level),      &
                             right_pe, req_count+1, communicator, req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(grid_level),      &
                             left_pe,  req_count+1, communicator, req(req_count+4), ierr )

             CALL MPI_WAITALL( 4, req, wait_stat, ierr )

          ENDIF

       ENDIF

    ENDIF

    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC DEVICE(ar(:,:,nxl-nbgp_local:nxl-1)) &
    !$ACC DEVICE(ar(:,:,nxr+1:nxr+nbgp_local))

!
!-- Wait for UPDATES above to complete before starting MPI.
    !$ACC WAIT(2)

    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( PRESENT( alternative_communicator ) )  THEN
          IF ( alternative_communicator == 1  .OR.  alternative_communicator == 3 )  THEN
             ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
             ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
          ENDIF
       ELSE
          IF ( bc_ns_cyc )  THEN
             ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
             ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
          ENDIF
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe, 0,    &
                             ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe, 0,    &
                             communicator, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,                           &
                             type_xz(grid_level), north_pe, 1,                                     &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,                             &
                             type_xz(grid_level), south_pe, 1,                                     &
                             communicator, status, ierr )

       ELSE

!
!--       Asynchronous exchange
          IF ( send_receive == 'ns'  .OR.  send_receive == 'al' )  THEN

             req(1:4)  = 0
             req_count = 0

!
!--          Send front boundary, receive rear one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe,       &
                             req_count, communicator, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe,       &
                             req_count, communicator, req(req_count+2), ierr )
!
!--          Send rear boundary, receive front one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(grid_level),      &
                             north_pe, req_count+1, communicator, req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(grid_level),      &
                             south_pe, req_count+1, communicator, req(req_count+4), ierr )

             CALL MPI_WAITALL( 4, req, wait_stat, ierr )

          ENDIF

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case.
!-- Case dependent, because in GPU mode not all arrays are on the device yet.
!-- This workaround has to be removed later.
    IF ( PRESENT( alternative_communicator ) )  THEN
       IF ( alternative_communicator <= 2 )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF
    ELSE
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF
    ENDIF

    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC DEVICE(ar(:,:,nxl-nbgp_local:nxl-1)) &
    !$ACC DEVICE(ar(:,:,nxr+1:nxr+nbgp_local))

!
!-- Wait for UPDATES above to complete before the north/south ghost layers
!-- are set.
    !$ACC WAIT(2)

    IF ( PRESENT( alternative_communicator ) )  THEN
       IF ( alternative_communicator == 1  .OR.  alternative_communicator == 3 )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF
    ELSE
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF
    ENDIF

#endif

#if defined( _OPENACC )
    DO  i = nxl-nbgp_local, nxr+nbgp_local
       !$ACC UPDATE IF_PRESENT ASYNC(2) &
       !$ACC DEVICE(ar(:,nys-nbgp_local:nys-1,i)) &
       !$ACC DEVICE(ar(:,nyn+1:nyn+nbgp_local,i))
    ENDDO

!
!-- Wait for all UPDATEs to finish.
    !$ACC WAIT
#endif

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
!> This routine is for INTEGER 3d-arrays on the given grid level.
!------------------------------------------------------------------------------!
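!> Illustrative call on the finest grid level (sketch only; 'flag_array' is a
!> placeholder for any suitably dimensioned INTEGER 3d-array):
!>    CALL exchange_horiz_int( flag_array, nys, nyn, nxl, nxr, nzt, nbgp )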
 SUBROUTINE exchange_horiz_int( ar, nys_l, nyn_l, nxl_l, nxr_l, nzt_l, nbgp_local )


    USE control_parameters,                                                    &
        ONLY:  bc_lr_cyc, bc_ns_cyc

#if defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  grid_level
#endif

    USE indices,                                                               &
        ONLY:  nzb

    INTEGER(iwp) ::  nxl_l       !< local index bound at current grid level, left side
    INTEGER(iwp) ::  nxr_l       !< local index bound at current grid level, right side
    INTEGER(iwp) ::  nyn_l       !< local index bound at current grid level, north side
    INTEGER(iwp) ::  nys_l       !< local index bound at current grid level, south side
    INTEGER(iwp) ::  nzt_l       !< local index bound at current grid level, top
    INTEGER(iwp) ::  nbgp_local  !< number of ghost points

    INTEGER(iwp), DIMENSION(nzb:nzt_l+1,nys_l-nbgp_local:nyn_l+nbgp_local,     &
                            nxl_l-nbgp_local:nxr_l+nbgp_local) ::  ar  !< treated array


#if defined( __parallel )
    IF ( pdims(1) == 1 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl_l-nbgp_local:nxl_l-1) = ar(:,:,nxr_l-nbgp_local+1:nxr_l)
          ar(:,:,nxr_l+1:nxr_l+nbgp_local) = ar(:,:,nxl_l:nxl_l+nbgp_local-1)
       ENDIF
    ELSE
!
!--    Send left boundary, receive right one (synchronous)
       CALL MPI_SENDRECV( ar(nzb,nys_l-nbgp_local,nxl_l),   1,                 &
                          type_yz_int(grid_level), pleft,  0,                  &
                          ar(nzb,nys_l-nbgp_local,nxr_l+1), 1,                 &
                          type_yz_int(grid_level), pright, 0,                  &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one (synchronous)
       CALL MPI_SENDRECV( ar(nzb,nys_l-nbgp_local,nxr_l+1-nbgp_local), 1,      &
                          type_yz_int(grid_level), pright, 1,                  &
                          ar(nzb,nys_l-nbgp_local,nxl_l-nbgp_local),   1,      &
                          type_yz_int(grid_level), pleft,  1,                  &
                          comm2d, status, ierr )
    ENDIF


    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys_l-nbgp_local:nys_l-1,:) = ar(:,nyn_l-nbgp_local+1:nyn_l,:)
          ar(:,nyn_l+1:nyn_l+nbgp_local,:) = ar(:,nys_l:nys_l+nbgp_local-1,:)
       ENDIF

    ELSE

!
!--    Send front boundary, receive rear one (synchronous)
       CALL MPI_SENDRECV( ar(nzb,nys_l,nxl_l-nbgp_local),   1,                 &
                          type_xz_int(grid_level), psouth, 0,                  &
                          ar(nzb,nyn_l+1,nxl_l-nbgp_local), 1,                 &
                          type_xz_int(grid_level), pnorth, 0,                  &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one (synchronous)
       CALL MPI_SENDRECV( ar(nzb,nyn_l-nbgp_local+1,nxl_l-nbgp_local), 1,      &
                          type_xz_int(grid_level), pnorth, 1,                  &
                          ar(nzb,nys_l-nbgp_local,nxl_l-nbgp_local),   1,      &
                          type_xz_int(grid_level), psouth, 1,                  &
                          comm2d, status, ierr )

    ENDIF

#else

    IF ( bc_lr_cyc )  THEN
       ar(:,:,nxl_l-nbgp_local:nxl_l-1) = ar(:,:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,:,nxr_l+1:nxr_l+nbgp_local) = ar(:,:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(:,nys_l-nbgp_local:nys_l-1,:) = ar(:,nyn_l-nbgp_local+1:nyn_l,:)
       ar(:,nyn_l+1:nyn_l+nbgp_local,:) = ar(:,nys_l:nys_l+nbgp_local-1,:)
    ENDIF

#endif

 END SUBROUTINE exchange_horiz_int


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!
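!> Illustrative call (sketch only; 'surf_2d' is a placeholder for any 2d-array
!> dimensioned (nysg:nyng,nxlg:nxrg)):
!>    CALL exchange_horiz_2d( surf_2d )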
 SUBROUTINE exchange_horiz_2d( ar )

    USE control_parameters,                                                    &
        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r,                 &
               bc_dirichlet_s, bc_radiation_l,                                 &
               bc_radiation_n, bc_radiation_r, bc_radiation_s

    USE cpulog,                                                                &
        ONLY:  cpu_log, log_point_s

    USE indices,                                                               &
        ONLY:  nbgp, nxl, nxlg, nxr, nxrg, nyn, nyng, nys, nysg

#if ! defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  bc_lr_cyc, bc_ns_cyc
#endif


    INTEGER(iwp) ::  i  !< dummy index to set zero-gradient conditions at in/outflow boundaries

    REAL(wp) ::  ar(nysg:nyng,nxlg:nxrg)  !< 2d-array for which exchange is done


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl),   1, type_y, pleft,  0,                &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,                &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,           &
                          ar(nysg,nxlg),       1, type_y, pleft,  1,           &
                          comm2d, status, ierr )


    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg),   1, type_x, psouth, 0,                &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,                &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,           &
                          ar(nysg,nxlg),       1, type_x, psouth, 1,           &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann-conditions at inflow/outflow/nested boundaries
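!-- (each ghost layer is filled with the value at the respective boundary
!-- grid point, i.e. a zero-gradient condition results)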
    IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r  .OR.  bc_radiation_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s  .OR.  bc_radiation_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n  .OR.  bc_radiation_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D 8-bit integer arrays.
!------------------------------------------------------------------------------!
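!> Illustrative call (sketch only; 'byte_flags' is a placeholder for a
!> suitably dimensioned 8-bit INTEGER array):
!>    CALL exchange_horiz_2d_byte( byte_flags, nys, nyn, nxl, nxr, nbgp )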
 SUBROUTINE exchange_horiz_2d_byte( ar, nys_l, nyn_l, nxl_l, nxr_l, nbgp_local )


    USE control_parameters,                                                    &
        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
               bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s

    USE cpulog,                                                                &
        ONLY:  cpu_log, log_point_s

#if ! defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  bc_lr_cyc, bc_ns_cyc
#endif

    INTEGER(iwp) ::  i           !< dummy index to set zero-gradient conditions at in/outflow boundaries
    INTEGER(iwp) ::  nxl_l       !< local index bound at current grid level, left side
    INTEGER(iwp) ::  nxr_l       !< local index bound at current grid level, right side
    INTEGER(iwp) ::  nyn_l       !< local index bound at current grid level, north side
    INTEGER(iwp) ::  nys_l       !< local index bound at current grid level, south side
    INTEGER(iwp) ::  nbgp_local  !< number of ghost layers to be exchanged

    INTEGER(KIND=1), DIMENSION(nys_l-nbgp_local:nyn_l+nbgp_local,              &
                               nxl_l-nbgp_local:nxr_l+nbgp_local) ::  ar  !< treated array

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxl_l),   1,                     &
                          type_y_byte, pleft,  0,                              &
                          ar(nys_l-nbgp_local,nxr_l+1), 1,                     &
                          type_y_byte, pright, 0,                              &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxr_l+1-nbgp_local), 1,          &
                          type_y_byte, pright, 1,                              &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,          &
                          type_y_byte, pleft,  1,                              &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)


    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys_l,nxl_l-nbgp_local),   1,                     &
                          type_x_byte, psouth, 0,                              &
                          ar(nyn_l+1,nxl_l-nbgp_local), 1,                     &
                          type_x_byte, pnorth, 0,                              &
                          comm2d, status, ierr )

!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn_l+1-nbgp_local,nxl_l-nbgp_local), 1,          &
                          type_x_byte, pnorth, 1,                              &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,          &
                          type_x_byte, psouth, 1,                              &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)
    ENDIF

#endif
!
!-- Neumann-conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
       DO  i = nbgp_local, 1, -1
          ar(:,nxl_l-i) = ar(:,nxl_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r  .OR.  bc_radiation_r )  THEN
       DO  i = 1, nbgp_local
          ar(:,nxr_l+i) = ar(:,nxr_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s  .OR.  bc_radiation_s )  THEN
       DO  i = nbgp_local, 1, -1
          ar(nys_l-i,:) = ar(nys_l,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n  .OR.  bc_radiation_n )  THEN
       DO  i = 1, nbgp_local
          ar(nyn_l+i,:) = ar(nyn_l,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_byte


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D 32-bit integer arrays.
!------------------------------------------------------------------------------!
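!> Illustrative call (sketch only; 'int_flags' is a placeholder for a suitably
!> dimensioned 32-bit INTEGER array):
!>    CALL exchange_horiz_2d_int( int_flags, nys, nyn, nxl, nxr, nbgp )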
 SUBROUTINE exchange_horiz_2d_int( ar, nys_l, nyn_l, nxl_l, nxr_l, nbgp_local )


    USE control_parameters,                                                    &
        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
               bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s

#if defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  grid_level
#endif

    USE cpulog,                                                                &
        ONLY:  cpu_log, log_point_s

#if ! defined( __parallel )
    USE control_parameters,                                                    &
        ONLY:  bc_lr_cyc, bc_ns_cyc
#endif

    INTEGER(iwp) ::  i           !< dummy index to set zero-gradient conditions at in/outflow boundaries
    INTEGER(iwp) ::  nxl_l       !< local index bound at current grid level, left side
    INTEGER(iwp) ::  nxr_l       !< local index bound at current grid level, right side
    INTEGER(iwp) ::  nyn_l       !< local index bound at current grid level, north side
    INTEGER(iwp) ::  nys_l       !< local index bound at current grid level, south side
    INTEGER(iwp) ::  nbgp_local  !< number of ghost layers to be exchanged

    INTEGER(iwp), DIMENSION(nys_l-nbgp_local:nyn_l+nbgp_local,                 &
                            nxl_l-nbgp_local:nxr_l+nbgp_local) ::  ar  !< treated array

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxl_l),   1,                     &
                          type_y_int(grid_level), pleft,  0,                   &
                          ar(nys_l-nbgp_local,nxr_l+1), 1,                     &
                          type_y_int(grid_level), pright, 0,                   &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxr_l+1-nbgp_local), 1,          &
                          type_y_int(grid_level), pright, 1,                   &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,          &
                          type_y_int(grid_level), pleft,  1,                   &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)


    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys_l,nxl_l-nbgp_local),   1,                     &
                          type_x_int(grid_level), psouth, 0,                   &
                          ar(nyn_l+1,nxl_l-nbgp_local), 1,                     &
                          type_x_int(grid_level), pnorth, 0,                   &
                          comm2d, status, ierr )

!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn_l+1-nbgp_local,nxl_l-nbgp_local), 1,          &
                          type_x_int(grid_level), pnorth, 1,                   &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,          &
                          type_x_int(grid_level), psouth, 1,                   &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)
    ENDIF

#endif
!
!-- Neumann-conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
       DO  i = nbgp_local, 1, -1
          ar(:,nxl_l-i) = ar(:,nxl_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r  .OR.  bc_radiation_r )  THEN
       DO  i = 1, nbgp_local
          ar(:,nxr_l+i) = ar(:,nxr_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s  .OR.  bc_radiation_s )  THEN
       DO  i = nbgp_local, 1, -1
          ar(nys_l-i,:) = ar(nys_l,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n  .OR.  bc_radiation_n )  THEN
       DO  i = 1, nbgp_local
          ar(nyn_l+i,:) = ar(nyn_l,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int


 END MODULE exchange_horiz_mod