!> @file exchange_horiz_mod.f90
!------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software
! Foundation, either version 3 of the License, or (at your option) any later
! version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2020 Leibniz Universitaet Hannover
!------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_mod.f90 4461 2020-03-12 16:51:59Z schwenkel $
! optional communicator added to exchange_horiz
!
! 4457 2020-03-11 14:20:43Z raasch
! routine has been modularized, file exchange_horiz_2d has been merged
!
! 4429 2020-02-27 15:24:30Z raasch
! bugfix: cpp-directives added for serial mode
!
! 4360 2020-01-07 11:25:50Z suehring
! Corrected "Former revisions" section
!
! 3761 2019-02-25 15:31:42Z raasch
! OpenACC directives re-formatted
!
! 3657 2019-01-07 20:14:18Z knoop
! OpenACC port for SPEC
!
! Revision 1.1 1997/07/24 11:13:29 raasch
! Initial revision
!
!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
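!>
!> Minimal usage sketch (illustrative only; the array name pt and the use of
!> the ghost-layer count nbgp from the indices module are assumptions of this
!> example, not requirements of the module):
!>
!>    USE exchange_horiz_mod
!>    ...
!>    CALL exchange_horiz( pt, nbgp )    ! ghost point exchange for a REAL 3d-array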
!------------------------------------------------------------------------------!
 MODULE exchange_horiz_mod

    USE kinds

    USE pegrid

    IMPLICIT NONE

    PRIVATE
    PUBLIC exchange_horiz, exchange_horiz_int, exchange_horiz_2d, exchange_horiz_2d_byte, &
           exchange_horiz_2d_int

    INTERFACE exchange_horiz
       MODULE PROCEDURE exchange_horiz
    END INTERFACE exchange_horiz

    INTERFACE exchange_horiz_int
       MODULE PROCEDURE exchange_horiz_int
    END INTERFACE exchange_horiz_int

    INTERFACE exchange_horiz_2d
       MODULE PROCEDURE exchange_horiz_2d
    END INTERFACE exchange_horiz_2d

    INTERFACE exchange_horiz_2d_byte
       MODULE PROCEDURE exchange_horiz_2d_byte
    END INTERFACE exchange_horiz_2d_byte

    INTERFACE exchange_horiz_2d_int
       MODULE PROCEDURE exchange_horiz_2d_int
    END INTERFACE exchange_horiz_2d_int


 CONTAINS


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
!> This routine is for REAL 3d-arrays.
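!>
!> Illustrative calls (the array name u and the ghost-layer count nbgp are
!> assumptions of this example). Without the optional argument, the main
!> communicator comm2d is used; otherwise the argument selects an entry of
!> communicator_configurations:
!>
!>    CALL exchange_horiz( u, nbgp )
!>    CALL exchange_horiz( u, nbgp, alternative_communicator = 1 )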
!------------------------------------------------------------------------------!
 SUBROUTINE exchange_horiz( ar, nbgp_local, alternative_communicator )

    USE control_parameters, &
        ONLY: bc_lr_cyc, bc_ns_cyc

#if defined( __parallel )
    USE control_parameters, &
        ONLY: grid_level, mg_switch_to_pe0, synchronous_exchange
#endif

    USE cpulog, &
        ONLY: cpu_log, log_point_s

    USE indices, &
        ONLY: nxl, nxr, nyn, nys, nzb, nzt


#if defined( _OPENACC )
    INTEGER(iwp) :: i !< loop index for the block-wise host/device update of ghost layers
#endif

    INTEGER(iwp), OPTIONAL :: alternative_communicator !< index of an alternative MPI communicator configuration to be used
    INTEGER(iwp) :: communicator !< communicator that is used as argument in MPI calls
    INTEGER(iwp) :: left_pe      !< id of left pe that is used as argument in MPI calls
    INTEGER(iwp) :: nbgp_local   !< number of ghost point layers
    INTEGER(iwp) :: north_pe     !< id of north pe that is used as argument in MPI calls
    INTEGER(iwp) :: right_pe     !< id of right pe that is used as argument in MPI calls
    INTEGER(iwp) :: south_pe     !< id of south pe that is used as argument in MPI calls

    REAL(wp), DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                        nxl-nbgp_local:nxr+nbgp_local) :: ar !< 3d-array for which exchange is done


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( _OPENACC )
    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC HOST(ar(:,:,nxr-nbgp_local+1:nxr)) &
    !$ACC HOST(ar(:,:,nxl:nxl+nbgp_local-1))

!
!-- Wait for first UPDATE to complete before starting the others.
    !$ACC WAIT(1) ASYNC(2)
    ! ar(:,:,nxl-nbgp_local:nxl-1) is overwritten by first part below
    ! ar(:,:,nxl:nxl+nbgp_local-1) has been transferred above
    DO i = nxl+nbgp_local, nxr-nbgp_local
       !$ACC UPDATE IF_PRESENT ASYNC(2) &
       !$ACC HOST(ar(:,nyn-nbgp_local+1:nyn,i)) &
       !$ACC HOST(ar(:,nys:nys+nbgp_local-1,i))
    ENDDO
    ! ar(:,:,nxr-nbgp_local+1:nxr) has been transferred above
    ! ar(:,:,nxr+1:nxr+nbgp_local) is overwritten by first part below

!
!-- Wait for first UPDATE to complete before starting MPI.
    !$ACC WAIT(1)
#endif

!
!-- Set the communicator to be used
    IF ( PRESENT( alternative_communicator ) ) THEN
!
!--    Alternative communicator is to be used
       communicator = communicator_configurations(alternative_communicator)%mpi_communicator
       left_pe  = communicator_configurations(alternative_communicator)%pleft
       right_pe = communicator_configurations(alternative_communicator)%pright
       south_pe = communicator_configurations(alternative_communicator)%psouth
       north_pe = communicator_configurations(alternative_communicator)%pnorth

    ELSE
!
!--    Main communicator is to be used
       communicator = comm2d
       left_pe  = pleft
       right_pe = pright
       south_pe = psouth
       north_pe = pnorth

    ENDIF

#if defined( __parallel )

!
!-- Exchange in x-direction of lateral boundaries
    IF ( pdims(1) == 1 .OR. mg_switch_to_pe0 ) THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc ) THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange ) THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,  0, &
                             ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe, 0, &
                             communicator, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), right_pe, 1, &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, &
                             type_yz(grid_level), left_pe, 1, &
                             communicator, status, ierr )

       ELSE

!
!--       Asynchronous exchange
          IF ( send_receive == 'lr' .OR. send_receive == 'al' ) THEN

             req(1:4)  = 0
             req_count = 0
!
!--          Send left boundary, receive right one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe, &
                             req_count, communicator, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe, &
                             req_count, communicator, req(req_count+2), ierr )
!
!--          Send right boundary, receive left one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(grid_level), &
                             right_pe, req_count+1, communicator, req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_yz(grid_level), &
                             left_pe, req_count+1, communicator, req(req_count+4), ierr )

             CALL MPI_WAITALL( 4, req, wait_stat, ierr )

          ENDIF

       ENDIF

    ENDIF

    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC DEVICE(ar(:,:,nxl-nbgp_local:nxl-1)) &
    !$ACC DEVICE(ar(:,:,nxr+1:nxr+nbgp_local))

!
!-- Wait for UPDATES above to complete before starting MPI.
    !$ACC WAIT(2)

    IF ( pdims(2) == 1 .OR. mg_switch_to_pe0 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc ) THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange ) THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe, 0, &
                             ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe, 0, &
                             communicator, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), north_pe, 1, &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, &
                             type_xz(grid_level), south_pe, 1, &
                             communicator, status, ierr )

       ELSE

!
!--       Asynchronous exchange
          IF ( send_receive == 'ns' .OR. send_receive == 'al' ) THEN

             req(1:4)  = 0
             req_count = 0

!
!--          Send front boundary, receive rear one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe, &
                             req_count, communicator, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe, &
                             req_count, communicator, req(req_count+2), ierr )
!
!--          Send rear boundary, receive front one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                             north_pe, req_count+1, communicator, req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_xz(grid_level), &
                             south_pe, req_count+1, communicator, req(req_count+4), ierr )

             CALL MPI_WAITALL( 4, req, wait_stat, ierr )

          ENDIF

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case.
!-- Case dependent, because in GPU mode still not all arrays are on device. This
!-- workaround has to be removed later.
    IF ( bc_lr_cyc ) THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    !$ACC UPDATE IF_PRESENT ASYNC(1) &
    !$ACC DEVICE(ar(:,:,nxl-nbgp_local:nxl-1)) &
    !$ACC DEVICE(ar(:,:,nxr+1:nxr+nbgp_local))

!
!-- Wait for UPDATEs above to complete before the exchange in y-direction starts.
    !$ACC WAIT(2)

    IF ( bc_ns_cyc ) THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif

#if defined( _OPENACC )
    DO i = nxl-nbgp_local, nxr+nbgp_local
       !$ACC UPDATE IF_PRESENT ASYNC(2) &
       !$ACC DEVICE(ar(:,nys-nbgp_local:nys-1,i)) &
       !$ACC DEVICE(ar(:,nyn+1:nyn+nbgp_local,i))
    ENDDO

!
!-- Wait for all UPDATEs to finish.
    !$ACC WAIT
#endif

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of ghost point layers for subdomains (in parallel mode) and setting
!> of cyclic lateral boundary conditions for the total domain.
!> This routine is for INTEGER 3d-arrays.
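!>
!> Illustrative call for a 3d INTEGER array on the current grid level (all
!> names are assumptions of this example):
!>
!>    CALL exchange_horiz_int( flags_l, nys_l, nyn_l, nxl_l, nxr_l, nzt_l, nbgp )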
!------------------------------------------------------------------------------!
 SUBROUTINE exchange_horiz_int( ar, nys_l, nyn_l, nxl_l, nxr_l, nzt_l, nbgp_local )


    USE control_parameters, &
        ONLY: bc_lr_cyc, bc_ns_cyc

#if defined( __parallel )
    USE control_parameters, &
        ONLY: grid_level
#endif

    USE indices, &
        ONLY: nzb

    INTEGER(iwp) :: nxl_l      !< local index bound at current grid level, left side
    INTEGER(iwp) :: nxr_l      !< local index bound at current grid level, right side
    INTEGER(iwp) :: nyn_l      !< local index bound at current grid level, north side
    INTEGER(iwp) :: nys_l      !< local index bound at current grid level, south side
    INTEGER(iwp) :: nzt_l      !< local index bound at current grid level, top
    INTEGER(iwp) :: nbgp_local !< number of ghost points

    INTEGER(iwp), DIMENSION(nzb:nzt_l+1,nys_l-nbgp_local:nyn_l+nbgp_local, &
                            nxl_l-nbgp_local:nxr_l+nbgp_local) :: ar !< treated array


#if defined( __parallel )
    IF ( pdims(1) == 1 ) THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc ) THEN
          ar(:,:,nxl_l-nbgp_local:nxl_l-1) = ar(:,:,nxr_l-nbgp_local+1:nxr_l)
          ar(:,:,nxr_l+1:nxr_l+nbgp_local) = ar(:,:,nxl_l:nxl_l+nbgp_local-1)
       ENDIF
    ELSE
!
!--    Send left boundary, receive right one (synchronous)
       CALL MPI_SENDRECV( &
           ar(nzb,nys_l-nbgp_local,nxl_l),   1, type_yz_int(grid_level), pleft,  0, &
           ar(nzb,nys_l-nbgp_local,nxr_l+1), 1, type_yz_int(grid_level), pright, 0, &
           comm2d, status, ierr )
!
!--    Send right boundary, receive left one (synchronous)
       CALL MPI_SENDRECV( &
           ar(nzb,nys_l-nbgp_local,nxr_l+1-nbgp_local), 1, type_yz_int(grid_level), &
           pright, 1, &
           ar(nzb,nys_l-nbgp_local,nxl_l-nbgp_local),   1, type_yz_int(grid_level), &
           pleft,  1, &
           comm2d, status, ierr )
    ENDIF


    IF ( pdims(2) == 1 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc ) THEN
          ar(:,nys_l-nbgp_local:nys_l-1,:) = ar(:,nyn_l-nbgp_local+1:nyn_l,:)
          ar(:,nyn_l+1:nyn_l+nbgp_local,:) = ar(:,nys_l:nys_l+nbgp_local-1,:)
       ENDIF

    ELSE

!
!--    Send front boundary, receive rear one (synchronous)
       CALL MPI_SENDRECV( &
           ar(nzb,nys_l,nxl_l-nbgp_local),   1, type_xz_int(grid_level), psouth, 0, &
           ar(nzb,nyn_l+1,nxl_l-nbgp_local), 1, type_xz_int(grid_level), pnorth, 0, &
           comm2d, status, ierr )
!
!--    Send rear boundary, receive front one (synchronous)
       CALL MPI_SENDRECV( ar(nzb,nyn_l-nbgp_local+1,nxl_l-nbgp_local), 1, &
                          type_xz_int(grid_level), pnorth, 1, &
                          ar(nzb,nys_l-nbgp_local,nxl_l-nbgp_local), 1, &
                          type_xz_int(grid_level), psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

    IF ( bc_lr_cyc ) THEN
       ar(:,:,nxl_l-nbgp_local:nxl_l-1) = ar(:,:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,:,nxr_l+1:nxr_l+nbgp_local) = ar(:,:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc ) THEN
       ar(:,nys_l-nbgp_local:nys_l-1,:) = ar(:,nyn_l-nbgp_local+1:nyn_l,:)
       ar(:,nyn_l+1:nyn_l+nbgp_local,:) = ar(:,nys_l:nys_l+nbgp_local-1,:)
    ENDIF

#endif

 END SUBROUTINE exchange_horiz_int

!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D-arrays.
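!>
!> Illustrative call; the 2d-array must be dimensioned (nysg:nyng,nxlg:nxrg).
!> The array name ar_2d is an assumption of this example:
!>
!>    CALL exchange_horiz_2d( ar_2d )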
!------------------------------------------------------------------------------!
 SUBROUTINE exchange_horiz_2d( ar )

    USE control_parameters, &
        ONLY: bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, &
              bc_dirichlet_s, bc_radiation_l, &
              bc_radiation_n, bc_radiation_r, bc_radiation_s

    USE cpulog, &
        ONLY: cpu_log, log_point_s

    USE indices, &
        ONLY: nbgp, nxl, nxlg, nxr, nxrg, nyn, nyng, nys, nysg

#if ! defined( __parallel )
    USE control_parameters, &
        ONLY: bc_lr_cyc, bc_ns_cyc
#endif


    INTEGER(iwp) :: i !< dummy index for setting zero-gradient conditions at in/outflow boundaries

    REAL(wp) :: ar(nysg:nyng,nxlg:nxrg) !< treated 2d-array


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 ) THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl),   1, type_y, pleft,  0, &
                          ar(nysg,nxr+1), 1, type_y, pright, 0, &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1, &
                          ar(nysg,nxlg),       1, type_y, pleft,  1, &
                          comm2d, status, ierr )


    ENDIF

    IF ( pdims(2) == 1 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg),   1, type_x, psouth, 0, &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0, &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1, &
                          ar(nysg,nxlg),       1, type_x, psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc ) THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc ) THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l .OR. bc_radiation_l ) THEN
       DO i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r .OR. bc_radiation_r ) THEN
       DO i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s .OR. bc_radiation_s ) THEN
       DO i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n .OR. bc_radiation_n ) THEN
       DO i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D 8-bit integer arrays.
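!>
!> Illustrative call (the array name byte_flags is an assumption of this
!> example):
!>
!>    CALL exchange_horiz_2d_byte( byte_flags, nys, nyn, nxl, nxr, nbgp )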
!------------------------------------------------------------------------------!
 SUBROUTINE exchange_horiz_2d_byte( ar, nys_l, nyn_l, nxl_l, nxr_l, nbgp_local )


    USE control_parameters, &
        ONLY: bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
              bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s

    USE cpulog, &
        ONLY: cpu_log, log_point_s

#if ! defined( __parallel )
    USE control_parameters, &
        ONLY: bc_lr_cyc, bc_ns_cyc
#endif

    INTEGER(iwp) :: i !< dummy index for setting zero-gradient conditions at in/outflow boundaries
    INTEGER(iwp) :: nxl_l      !< local index bound at current grid level, left side
    INTEGER(iwp) :: nxr_l      !< local index bound at current grid level, right side
    INTEGER(iwp) :: nyn_l      !< local index bound at current grid level, north side
    INTEGER(iwp) :: nys_l      !< local index bound at current grid level, south side
    INTEGER(iwp) :: nbgp_local !< number of ghost layers to be exchanged

    INTEGER(KIND=1), DIMENSION(nys_l-nbgp_local:nyn_l+nbgp_local, &
                               nxl_l-nbgp_local:nxr_l+nbgp_local) :: ar !< treated array

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 ) THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxl_l),   1, &
                          type_y_byte, pleft,  0, &
                          ar(nys_l-nbgp_local,nxr_l+1), 1, &
                          type_y_byte, pright, 0, &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxr_l+1-nbgp_local), 1, &
                          type_y_byte, pright, 1, &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1, &
                          type_y_byte, pleft,  1, &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)


    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys_l,nxl_l-nbgp_local),   1, &
                          type_x_byte, psouth, 0, &
                          ar(nyn_l+1,nxl_l-nbgp_local), 1, &
                          type_x_byte, pnorth, 0, &
                          comm2d, status, ierr )

!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn_l+1-nbgp_local,nxl_l-nbgp_local), 1, &
                          type_x_byte, pnorth, 1, &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1, &
                          type_x_byte, psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc ) THEN
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc ) THEN
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)
    ENDIF

#endif
!
!-- Neumann conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l .OR. bc_radiation_l ) THEN
       DO i = nbgp_local, 1, -1
          ar(:,nxl_l-i) = ar(:,nxl_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r .OR. bc_radiation_r ) THEN
       DO i = 1, nbgp_local
          ar(:,nxr_l+i) = ar(:,nxr_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s .OR. bc_radiation_s ) THEN
       DO i = nbgp_local, 1, -1
          ar(nys_l-i,:) = ar(nys_l,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n .OR. bc_radiation_n ) THEN
       DO i = 1, nbgp_local
          ar(nyn_l+i,:) = ar(nyn_l,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_byte


!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D 32-bit integer arrays.
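!>
!> Illustrative call (the array name int_flags is an assumption of this
!> example):
!>
!>    CALL exchange_horiz_2d_int( int_flags, nys, nyn, nxl, nxr, nbgp )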
!------------------------------------------------------------------------------!
 SUBROUTINE exchange_horiz_2d_int( ar, nys_l, nyn_l, nxl_l, nxr_l, nbgp_local )


    USE control_parameters, &
        ONLY: bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
              bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s

#if defined( __parallel )
    USE control_parameters, &
        ONLY: grid_level
#endif

    USE cpulog, &
        ONLY: cpu_log, log_point_s

#if ! defined( __parallel )
    USE control_parameters, &
        ONLY: bc_lr_cyc, bc_ns_cyc
#endif

    INTEGER(iwp) :: i !< dummy index for setting zero-gradient conditions at in/outflow boundaries
    INTEGER(iwp) :: nxl_l      !< local index bound at current grid level, left side
    INTEGER(iwp) :: nxr_l      !< local index bound at current grid level, right side
    INTEGER(iwp) :: nyn_l      !< local index bound at current grid level, north side
    INTEGER(iwp) :: nys_l      !< local index bound at current grid level, south side
    INTEGER(iwp) :: nbgp_local !< number of ghost layers to be exchanged

    INTEGER(iwp), DIMENSION(nys_l-nbgp_local:nyn_l+nbgp_local, &
                            nxl_l-nbgp_local:nxr_l+nbgp_local) :: ar !< treated array

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 ) THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxl_l),   1, &
                          type_y_int(grid_level), pleft,  0, &
                          ar(nys_l-nbgp_local,nxr_l+1), 1, &
                          type_y_int(grid_level), pright, 0, &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxr_l+1-nbgp_local), 1, &
                          type_y_int(grid_level), pright, 1, &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1, &
                          type_y_int(grid_level), pleft,  1, &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 ) THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)


    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys_l,nxl_l-nbgp_local),   1, &
                          type_x_int(grid_level), psouth, 0, &
                          ar(nyn_l+1,nxl_l-nbgp_local), 1, &
                          type_x_int(grid_level), pnorth, 0, &
                          comm2d, status, ierr )

!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn_l+1-nbgp_local,nxl_l-nbgp_local), 1, &
                          type_x_int(grid_level), pnorth, 1, &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1, &
                          type_x_int(grid_level), psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc ) THEN
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc ) THEN
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)
    ENDIF

#endif
!
!-- Neumann conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l .OR. bc_radiation_l ) THEN
       DO i = nbgp_local, 1, -1
          ar(:,nxl_l-i) = ar(:,nxl_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r .OR. bc_radiation_r ) THEN
       DO i = 1, nbgp_local
          ar(:,nxr_l+i) = ar(:,nxr_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s .OR. bc_radiation_s ) THEN
       DO i = nbgp_local, 1, -1
          ar(nys_l-i,:) = ar(nys_l,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n .OR. bc_radiation_n ) THEN
       DO i = 1, nbgp_local
          ar(nyn_l+i,:) = ar(nyn_l,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int


 END MODULE exchange_horiz_mod