!> @file init_pegrid.f90
!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2014 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
! cpp-statements for nesting removed
!
! Former revisions:
! -----------------
! $Id: init_pegrid.f90 1764 2016-02-28 12:45:19Z raasch $
!
! 1762 2016-02-25 12:31:13Z hellstea
! Introduction of nested domain feature
!
! 1682 2015-10-07 23:56:08Z knoop
! Code annotations made doxygen readable
!
! 1677 2015-10-02 13:25:23Z boeske
! New MPI-data types for exchange of 3D integer arrays.
!
! 1575 2015-03-27 09:56:27Z raasch
! adjustments for psolver-queries, calculation of ngp_xz added
!
! 1565 2015-03-09 20:59:31Z suehring
! Refine if-clause for setting nbgp.
!
! 1557 2015-03-05 16:43:04Z suehring
! Adjustment for monotonic limiter
!
! 1468 2014-09-24 14:06:57Z maronga
! Adapted for use on up to 6-digit processor cores
!
! 1435 2014-07-21 10:37:02Z keck
! bugfix: added missing parameter coupling_mode_remote to ONLY-attribute
!
! 1402 2014-05-09 14:25:13Z raasch
! location messages modified
!
! 1384 2014-05-02 14:31:06Z raasch
! location messages added
!
! 1353 2014-04-08 15:21:23Z heinze
! REAL constants provided with KIND-attribute
!
! 1322 2014-03-20 16:38:49Z raasch
! REAL functions provided with KIND-attribute
!
! 1320 2014-03-20 08:40:49Z raasch
! ONLY-attribute added to USE-statements,
! kind-parameters added to all INTEGER and REAL declaration statements,
! kinds are defined in new module kinds,
! revision history before 2012 removed,
! comment fields (!:) to be used for variable explanations added to
! all variable declaration statements
!
! 1304 2014-03-12 10:29:42Z raasch
! bugfix: single core MPI runs missed some settings of transpose indices
!
! 1212 2013-08-15 08:46:27Z raasch
! error message for poisfft_hybrid removed
!
! 1159 2013-05-21 11:58:22Z fricke
! dirichlet/neumann and neumann/dirichlet removed
!
! 1139 2013-04-18 07:25:03Z raasch
! bugfix for calculating the id of the PE carrying the recycling plane
!
! 1111 2013-03-08 23:54:10Z raasch
! initialization of poisfft moved to module poisfft
!
! 1092 2013-02-02 11:24:22Z raasch
! unused variables removed
!
! 1056 2012-11-16 15:28:04Z raasch
! Indices for arrays n.._mg start from zero due to definition of arrays f2 and
! p2 as automatic arrays in recursive subroutine next_mg_level
!
! 1041 2012-11-06 02:36:29Z raasch
! a 2d virtual processor topology is used by default for all machines
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 1003 2012-09-14 14:35:53Z raasch
! subdomains must have identical size (grid matching = "match" removed)
!
! 1001 2012-09-13 14:08:46Z raasch
! all actions concerning upstream-spline-method removed
!
! 978 2012-08-09 08:28:32Z fricke
! dirichlet/neumann and neumann/dirichlet added
! nxlu and nysv are also calculated for inflow boundary
!
! 809 2012-01-30 13:32:58Z maronga
! Bugfix: replaced .AND. and .NOT. with && and ! in the preprocessor directives
!
! 807 2012-01-25 11:53:51Z maronga
! New cpp directive "__check" implemented which is used by check_namelist_files
!
! Revision 1.1 1997/07/24 11:15:09 raasch
! Initial revision
!
!
! Description:
! ------------
---|
!> Determination of the virtual processor topology (if not prescribed by the
!> user) and computation of the grid point number and array bounds of the local
!> domains.
---|
!------------------------------------------------------------------------------!
SUBROUTINE init_pegrid


    USE control_parameters, &
        ONLY: bc_lr, bc_ns, coupling_mode, coupling_mode_remote, &
        coupling_topology, dt_dosp, gathered_size, grid_level, &
        grid_level_count, host, inflow_l, inflow_n, inflow_r, inflow_s, &
        io_blocks, io_group, maximum_grid_level, &
        maximum_parallel_io_streams, message_string, &
        mg_switch_to_pe0_level, momentum_advec, nest_bound_l, &
        nest_bound_n, nest_bound_r, nest_bound_s, neutral, psolver, &
        outflow_l, outflow_n, outflow_r, outflow_s, recycling_width, &
        scalar_advec, subdomain_size

    USE grid_variables, &
        ONLY: dx

    USE indices, &
        ONLY: mg_loc_ind, nbgp, nnx, nny, nnz, nx, nx_a, nx_o, nxl, nxl_mg, &
        nxlu, nxr, nxr_mg, ny, ny_a, ny_o, nyn, nyn_mg, nys, nys_mg, &
        nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2, &
        wall_flags_3, wall_flags_4, wall_flags_5, wall_flags_6, &
        wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10

    USE kinds

    USE pegrid

    USE pmc_interface, &
        ONLY: cpl_npex, cpl_npey, nested_run

    USE transpose_indices, &
        ONLY: nxl_y, nxl_yd, nxl_z, nxr_y, nxr_yd, nxr_z, nyn_x, nyn_z, nys_x, &
        nys_z, nzb_x, nzb_y, nzb_yd, nzt_x, nzt_yd, nzt_y

    IMPLICIT NONE

---|
    INTEGER(iwp) :: i                         !< running index over PEs, grid points, and grid levels
    INTEGER(iwp) :: id_inflow_l               !< local contribution to id_inflow (myidx on the inflow PE, 0 elsewhere)
    INTEGER(iwp) :: id_recycling_l            !< local contribution to id_recycling (myidx on the recycling-plane PE, 0 elsewhere)
    INTEGER(iwp) :: ind(5)                    !< index bounds of the local subdomain at the switch level
    INTEGER(iwp) :: j                         !< running index over PEs, grid points, and grid levels
    INTEGER(iwp) :: k                         !< running index over array elements
    INTEGER(iwp) :: maximum_grid_level_l      !< maximum number of multigrid levels of the total domain
    INTEGER(iwp) :: mg_levels_x               !< number of possible multigrid coarsening levels in x-direction
    INTEGER(iwp) :: mg_levels_y               !< number of possible multigrid coarsening levels in y-direction
    INTEGER(iwp) :: mg_levels_z               !< number of possible multigrid coarsening levels in z-direction
    INTEGER(iwp) :: mg_switch_to_pe0_level_l  !< tentative level at which multigrid data are gathered on PE0
    INTEGER(iwp) :: nnx_y                     !< number of x grid points per PE after the x --> y transposition
    INTEGER(iwp) :: nnx_z                     !< number of x grid points per PE after the y --> z transposition
    INTEGER(iwp) :: nny_x                     !< number of y grid points per PE after the z --> x transposition
    INTEGER(iwp) :: nny_z                     !< number of y grid points per PE after the y --> z transposition
    INTEGER(iwp) :: nnz_x                     !< number of z grid points per PE after the z --> x transposition
    INTEGER(iwp) :: nnz_y                     !< number of z grid points per PE after the x --> y transposition
    INTEGER(iwp) :: numproc_sqr               !< square root of the number of PEs (used for the automatic topology)
    INTEGER(iwp) :: nxl_l                     !< left index bound on the current grid level
    INTEGER(iwp) :: nxr_l                     !< right index bound on the current grid level
    INTEGER(iwp) :: nyn_l                     !< north index bound on the current grid level
    INTEGER(iwp) :: nys_l                     !< south index bound on the current grid level
    INTEGER(iwp) :: nzb_l                     !< bottom index bound on the current grid level
    INTEGER(iwp) :: nzt_l                     !< top index bound on the current grid level
    INTEGER(iwp) :: omp_get_num_threads       !< OpenMP library function returning the number of threads

    INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: ind_all  !< index bounds of all subdomains at the switch level
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: nxlf     !< left index bounds of all subdomains along x
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: nxrf     !< right index bounds of all subdomains along x
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: nynf     !< north index bounds of all subdomains along y
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: nysf     !< south index bounds of all subdomains along y

    INTEGER(iwp), DIMENSION(2) :: pdims_remote          !< processor topology of the coupled (remote) model

#if defined( __mpi2 )
    LOGICAL :: found                                    !< flag indicating that the coupling flag file exists
#endif

---|
!
!-- Get the number of OpenMP threads
!$OMP PARALLEL
#if defined( __intel_openmp_bug )
    threads_per_task = omp_get_num_threads()
#else
!$ threads_per_task = omp_get_num_threads()
#endif
!$OMP END PARALLEL


#if defined( __parallel )

    CALL location_message( 'creating virtual PE grids + MPI derived data types', &
        .FALSE. )

    IF ( nested_run ) THEN
!
!-- In case of nested-domain runs, the processor grid is explicitly given
!-- by the user in the nestpar-NAMELIST
       pdims(1) = cpl_npex
       pdims(2) = cpl_npey

    ELSE
!
!-- Determine the processor topology or check it, if prescribed by the user
       IF ( npex == -1 .AND. npey == -1 ) THEN

!
!-- Automatic determination of the topology
          numproc_sqr = SQRT( REAL( numprocs, KIND=wp ) )
          pdims(1) = MAX( numproc_sqr , 1 )
          DO WHILE ( MOD( numprocs , pdims(1) ) /= 0 )
             pdims(1) = pdims(1) - 1
          ENDDO
          pdims(2) = numprocs / pdims(1)
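!
!-- Illustration (hypothetical values): with numprocs = 24, numproc_sqr
!-- evaluates to 4, MOD( 24, 4 ) = 0 already holds, and the resulting
!-- topology is pdims = (/ 4, 6 /)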
---|

       ELSEIF ( npex /= -1 .AND. npey /= -1 ) THEN

!
!-- Prescribed by user. Number of processors on the prescribed topology
!-- must be equal to the number of PEs available to the job
          IF ( ( npex * npey ) /= numprocs ) THEN
             WRITE( message_string, * ) 'number of PEs of the prescribed ', &
                 'topology (', npex*npey,') does not match & the number of ', &
                 'PEs available to the job (', numprocs, ')'
             CALL message( 'init_pegrid', 'PA0221', 1, 2, 0, 6, 0 )
          ENDIF
          pdims(1) = npex
          pdims(2) = npey

       ELSE
!
!-- If the processor topology is prescribed by the user, the number of
!-- PEs must be given in both directions
          message_string = 'if the processor topology is prescribed by th' // &
              'e user& both values of "npex" and "npey" must be given' // &
              ' in the &NAMELIST-parameter file'
          CALL message( 'init_pegrid', 'PA0222', 1, 2, 0, 6, 0 )

       ENDIF

    ENDIF


!
!-- For communication speedup, set barriers in front of collective
!-- communications by default on SGI-type systems
    IF ( host(3:5) == 'sgi' ) collective_wait = .TRUE.

!
!-- If necessary, set horizontal boundary conditions to non-cyclic
    IF ( bc_lr /= 'cyclic' ) cyclic(1) = .FALSE.
    IF ( bc_ns /= 'cyclic' ) cyclic(2) = .FALSE.


#if ! defined( __check)
!
!-- Create the virtual processor grid
    CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder, &
        comm2d, ierr )
    CALL MPI_COMM_RANK( comm2d, myid, ierr )
    WRITE (myid_char,'(''_'',I6.6)') myid

    CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
    CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft, pright, ierr )
    CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )

!
!-- Determine sub-topologies for transpositions
!-- Transposition from z to x:
    remain_dims(1) = .TRUE.
    remain_dims(2) = .FALSE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dx, ierr )
    CALL MPI_COMM_RANK( comm1dx, myidx, ierr )
!
!-- Transposition from x to y
    remain_dims(1) = .FALSE.
    remain_dims(2) = .TRUE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dy, ierr )
    CALL MPI_COMM_RANK( comm1dy, myidy, ierr )

#endif

!
!-- Calculate array bounds along x-direction for every PE.
    ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1), &
        nysf(0:pdims(2)-1) )

    IF ( MOD( nx+1 , pdims(1) ) /= 0 ) THEN
       WRITE( message_string, * ) 'x-direction: gridpoint number (',nx+1,') ',&
---|
           'is not an& integral divisor of the number of ', &
---|
           'processors (', pdims(1),')'
       CALL message( 'init_pegrid', 'PA0225', 1, 2, 0, 6, 0 )
    ELSE
       nnx = ( nx + 1 ) / pdims(1)
       IF ( nnx*pdims(1) - ( nx + 1) > nnx ) THEN
          WRITE( message_string, * ) 'x-direction: nx does not match the', &
              'requirements given by the number of PEs &used', &
              '& please use nx = ', nx - ( pdims(1) - ( nnx*pdims(1) &
              - ( nx + 1 ) ) ), ' instead of nx =', nx
          CALL message( 'init_pegrid', 'PA0226', 1, 2, 0, 6, 0 )
       ENDIF
    ENDIF

!
!-- Left and right array bounds, number of gridpoints
    DO i = 0, pdims(1)-1
       nxlf(i) = i * nnx
       nxrf(i) = ( i + 1 ) * nnx - 1
    ENDDO
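!
!-- Illustration (hypothetical values): nx = 63 and pdims(1) = 4 give
!-- nnx = 16, nxlf = (/ 0, 16, 32, 48 /) and nxrf = (/ 15, 31, 47, 63 /)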
---|

!
!-- Calculate array bounds in y-direction for every PE.
    IF ( MOD( ny+1 , pdims(2) ) /= 0 ) THEN
       WRITE( message_string, * ) 'y-direction: gridpoint number (',ny+1,') ', &
---|
           'is not an& integral divisor of the number of ', &
---|
           'processors (', pdims(2),')'
       CALL message( 'init_pegrid', 'PA0227', 1, 2, 0, 6, 0 )
    ELSE
       nny = ( ny + 1 ) / pdims(2)
       IF ( nny*pdims(2) - ( ny + 1) > nny ) THEN
          WRITE( message_string, * ) 'y-direction: ny does not match the', &
              'requirements given by the number of PEs &used ', &
---|
              '& please use ny = ', ny - ( pdims(2) - ( nny*pdims(2) &
---|
              - ( ny + 1 ) ) ), ' instead of ny =', ny
          CALL message( 'init_pegrid', 'PA0228', 1, 2, 0, 6, 0 )
       ENDIF
    ENDIF

!
!-- South and north array bounds
    DO j = 0, pdims(2)-1
       nysf(j) = j * nny
       nynf(j) = ( j + 1 ) * nny - 1
    ENDDO

!
!-- Local array bounds of the respective PEs
    nxl = nxlf(pcoord(1))
    nxr = nxrf(pcoord(1))
    nys = nysf(pcoord(2))
    nyn = nynf(pcoord(2))
    nzb = 0
    nzt = nz
    nnz = nz

!
!-- Set switches to define if the PE is situated at the border of the virtual
!-- processor grid
    IF ( nxl == 0 ) left_border_pe = .TRUE.
    IF ( nxr == nx ) right_border_pe = .TRUE.
    IF ( nys == 0 ) south_border_pe = .TRUE.
    IF ( nyn == ny ) north_border_pe = .TRUE.

!
!-- Calculate array bounds and gridpoint numbers for the transposed arrays
!-- (needed in the pressure solver)
!-- For the transposed arrays, cyclic boundaries as well as top and bottom
!-- boundaries are omitted, because they are obstructive to the transposition

!
!-- 1. transposition z --> x
---|
!-- This transposition is not necessary in case of a 1d-decomposition along x
---|
    nys_x = nys
    nyn_x = nyn
    nny_x = nny
    nnz_x = nz / pdims(1)
    nzb_x = 1 + myidx * nnz_x
    nzt_x = ( myidx + 1 ) * nnz_x
    sendrecvcount_zx = nnx * nny * nnz_x
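!
!-- Illustration (hypothetical values): nz = 64 and pdims(1) = 4 give
!-- nnz_x = 16, so that e.g. the PE with myidx = 2 holds the vertical levels
!-- nzb_x = 33 to nzt_x = 48 after this transposition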
---|

    IF ( pdims(2) /= 1 ) THEN
       IF ( MOD( nz , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition z --> x:', &
---|
              '&nz=',nz,' is not an integral divisor of pdims(1)=', &
---|
              pdims(1)
          CALL message( 'init_pegrid', 'PA0230', 1, 2, 0, 6, 0 )
       ENDIF
    ENDIF

!
!-- 2. transposition x --> y
    nnz_y = nnz_x
    nzb_y = nzb_x
    nzt_y = nzt_x
    IF ( MOD( nx+1 , pdims(2) ) /= 0 ) THEN
       WRITE( message_string, * ) 'transposition x --> y:', &
           '&nx+1=',nx+1,' is not an integral divisor of ',&
           'pdims(2)=',pdims(2)
       CALL message( 'init_pegrid', 'PA0231', 1, 2, 0, 6, 0 )
    ENDIF
    nnx_y = (nx+1) / pdims(2)
    nxl_y = myidy * nnx_y
    nxr_y = ( myidy + 1 ) * nnx_y - 1
    sendrecvcount_xy = nnx_y * nny_x * nnz_y

!
!-- 3. transposition y --> z (ELSE: x --> y in case of 1D-decomposition
!-- along x)
    nnx_z = nnx_y
    nxl_z = nxl_y
    nxr_z = nxr_y
    nny_z = (ny+1) / pdims(1)
    nys_z = myidx * nny_z
    nyn_z = ( myidx + 1 ) * nny_z - 1
    sendrecvcount_yz = nnx_y * nny_z * nnz_y

    IF ( pdims(2) /= 1 ) THEN
!
!-- y --> z
---|
!-- This transposition is not necessary in case of a 1d-decomposition
!-- along x, unless the upstream-spline method is switched on
---|
       IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition y --> z:', &
              '& ny+1=',ny+1,' is not an integral divisor of ',&
              'pdims(1)=',pdims(1)
          CALL message( 'init_pegrid', 'PA0232', 1, 2, 0, 6, 0 )
       ENDIF

    ELSE
!
!-- x --> y. This condition must be fulfilled for a 1D-decomposition along x
       IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition x --> y:', &
              '& ny+1=',ny+1,' is not an integral divisor of ',&
              'pdims(1)=',pdims(1)
          CALL message( 'init_pegrid', 'PA0233', 1, 2, 0, 6, 0 )
       ENDIF

    ENDIF

!
!-- Indices for direct transpositions z --> y (used for calculating spectra)
    IF ( dt_dosp /= 9999999.9_wp ) THEN
       IF ( MOD( nz, pdims(2) ) /= 0 ) THEN
          WRITE( message_string, * ) 'direct transposition z --> y (needed ', &
              'for spectra):& nz=',nz,' is not an integral divisor of ',&
              'pdims(2)=',pdims(2)
          CALL message( 'init_pegrid', 'PA0234', 1, 2, 0, 6, 0 )
       ELSE
          nxl_yd = nxl
          nxr_yd = nxr
          nzb_yd = 1 + myidy * ( nz / pdims(2) )
          nzt_yd = ( myidy + 1 ) * ( nz / pdims(2) )
          sendrecvcount_zyd = nnx * nny * ( nz / pdims(2) )
       ENDIF
    ENDIF

!
!-- Indices for direct transpositions y --> x (they are only possible in case
!-- of a 1d-decomposition along x)
    IF ( pdims(2) == 1 ) THEN
       nny_x = nny / pdims(1)
       nys_x = myid * nny_x
       nyn_x = ( myid + 1 ) * nny_x - 1
       nzb_x = 1
       nzt_x = nz
       sendrecvcount_xy = nnx * nny_x * nz
    ENDIF

!
!-- Indices for direct transpositions x --> y (they are only possible in case
!-- of a 1d-decomposition along y)
    IF ( pdims(1) == 1 ) THEN
       nnx_y = nnx / pdims(2)
       nxl_y = myid * nnx_y
       nxr_y = ( myid + 1 ) * nnx_y - 1
       nzb_y = 1
       nzt_y = nz
       sendrecvcount_xy = nnx_y * nny * nz
    ENDIF

!
---|
!-- Arrays for storing the array bounds are not needed any more
---|
    DEALLOCATE( nxlf , nxrf , nynf , nysf )


#if ! defined( __check)
!
!-- Collect index bounds from other PEs (to be written to restart file later)
    ALLOCATE( hor_index_bounds(4,0:numprocs-1) )

    IF ( myid == 0 ) THEN

       hor_index_bounds(1,0) = nxl
       hor_index_bounds(2,0) = nxr
       hor_index_bounds(3,0) = nys
       hor_index_bounds(4,0) = nyn

!
!-- Receive data from all other PEs
       DO i = 1, numprocs-1
          CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
              ierr )
          hor_index_bounds(:,i) = ibuf(1:4)
       ENDDO

    ELSE
!
!-- Send index bounds to PE0
       ibuf(1) = nxl
       ibuf(2) = nxr
       ibuf(3) = nys
       ibuf(4) = nyn
       CALL MPI_SEND( ibuf, 4, MPI_INTEGER, 0, myid, comm2d, ierr )

    ENDIF

#endif

#if defined( __print )
!
!-- Control output
    IF ( myid == 0 ) THEN
       PRINT*, '*** processor topology ***'
       PRINT*, ' '
       PRINT*, 'myid pcoord left right south north idx idy nxl: nxr',&
           &' nys: nyn'
       PRINT*, '------------------------------------------------------------',&
           &'-----------'
       WRITE (*,1000) 0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, &
           myidx, myidy, nxl, nxr, nys, nyn
1000 FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3, &
          2(2X,I4,':',I4))

!
!-- Receive data from the other PEs
       DO i = 1,numprocs-1
          CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
              ierr )
          WRITE (*,1000) i, ( ibuf(j) , j = 1,12 )
       ENDDO
    ELSE

!
!-- Send data to PE0
       ibuf(1) = pcoord(1); ibuf(2) = pcoord(2); ibuf(3) = pleft
       ibuf(4) = pright; ibuf(5) = psouth; ibuf(6) = pnorth; ibuf(7) = myidx
       ibuf(8) = myidy; ibuf(9) = nxl; ibuf(10) = nxr; ibuf(11) = nys
       ibuf(12) = nyn
       CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr )
    ENDIF
#endif

#if defined( __parallel ) && ! defined( __check)
#if defined( __mpi2 )
!
!-- In case of coupled runs, get the port name on PE0 of the atmosphere model
!-- and pass it to PE0 of the ocean model
    IF ( myid == 0 ) THEN

       IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

          CALL MPI_OPEN_PORT( MPI_INFO_NULL, port_name, ierr )

          CALL MPI_PUBLISH_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, &
              ierr )

!
!-- Write a flag file for the ocean model and the other atmosphere
!-- processes.
!-- There seems to be a bug in MPICH2 which causes hanging processes
!-- in case that execution of LOOKUP_NAME is continued too early
!-- (i.e. before the port has been created)
          OPEN( 90, FILE='COUPLING_PORT_OPENED', FORM='FORMATTED' )
          WRITE ( 90, '(''TRUE'')' )
          CLOSE ( 90 )

       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

!
!-- Continue only if the atmosphere model has created the port.
!-- There seems to be a bug in MPICH2 which causes hanging processes
!-- in case that execution of LOOKUP_NAME is continued too early
!-- (i.e. before the port has been created)
          INQUIRE( FILE='COUPLING_PORT_OPENED', EXIST=found )
          DO WHILE ( .NOT. found )
             INQUIRE( FILE='COUPLING_PORT_OPENED', EXIST=found )
          ENDDO

          CALL MPI_LOOKUP_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, ierr )

       ENDIF

    ENDIF

!
!-- In case of coupled runs, establish the connection between the atmosphere
!-- and the ocean model and define the intercommunicator (comm_inter)
    CALL MPI_BARRIER( comm2d, ierr )
    IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

       CALL MPI_COMM_ACCEPT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &
           comm_inter, ierr )
       coupling_mode_remote = 'ocean_to_atmosphere'

    ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

       CALL MPI_COMM_CONNECT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &
           comm_inter, ierr )
       coupling_mode_remote = 'atmosphere_to_ocean'

    ENDIF
#endif

!
!-- Determine the number of ghost point layers
    IF ( ( scalar_advec == 'ws-scheme' .AND. .NOT. neutral ) .OR. &
        scalar_advec == 'ws-scheme-mono' .OR. &
        momentum_advec == 'ws-scheme' ) THEN
       nbgp = 3
    ELSE
       nbgp = 1
    ENDIF
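!
!-- Background: the Wicker-Skamarock ('ws-scheme') advection schemes use a
!-- fifth-order discretization whose stencil extends three grid points to
!-- each side, which is why three ghost layers are required in that case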
---|

!
!-- Create a new MPI derived datatype for the exchange of surface (xy) data,
!-- which is needed for coupled atmosphere-ocean runs.
!-- First, calculate number of grid points of an xy-plane.
    ngp_xy = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp )
    CALL MPI_TYPE_VECTOR( ngp_xy, 1, nzt-nzb+2, MPI_REAL, type_xy, ierr )
    CALL MPI_TYPE_COMMIT( type_xy, ierr )
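!
!-- Note: since the 3d arrays are dimensioned with the vertical index running
!-- fastest (nzb:nzt+1, ...), the vector type above picks one value out of
!-- each of the ngp_xy vertical columns (stride nzt-nzb+2) and thus describes
!-- a complete horizontal cross-section with a single datatype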
---|

    IF ( TRIM( coupling_mode ) /= 'uncoupled' ) THEN

!
!-- Pass the number of grid points of the atmosphere model to
!-- the ocean model and vice versa
       IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

          nx_a = nx
          ny_a = ny

          IF ( myid == 0 ) THEN

             CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, &
                 ierr )
             CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter, &
                 ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, &
                 ierr )
             CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, &
                 status, ierr )
             CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, &
                 status, ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, &
                 comm_inter, status, ierr )
          ENDIF

          CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr )

       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

          nx_o = nx
          ny_o = ny

          IF ( myid == 0 ) THEN

             CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, &
                 ierr )
             CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, &
                 ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, &
                 status, ierr )
             CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr )
             CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, 0, 6, comm_inter, ierr )
          ENDIF

          CALL MPI_BCAST( nx_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr)

       ENDIF

       ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp )
       ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp )

!
!-- Determine if the horizontal grid and the number of PEs in ocean and
---|
!-- atmosphere are the same or not
---|
       IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. &
           pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) &
       THEN
          coupling_topology = 0
       ELSE
          coupling_topology = 1
       ENDIF

!
!-- Determine the target PEs for the exchange between ocean and
!-- atmosphere (comm2d)
       IF ( coupling_topology == 0 ) THEN
!
!-- In case of identical topologies, every atmosphere PE has exactly one
!-- ocean PE counterpart and vice versa
          IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
             target_id = myid + numprocs
          ELSE
             target_id = myid
          ENDIF

       ELSE
!
!-- In case of nonequivalent topology in ocean and atmosphere only for
!-- PE0 in ocean and PE0 in atmosphere a target_id is needed, since
---|
!-- data exchange between ocean and atmosphere will be done only
---|
!-- between these PEs.
          IF ( myid == 0 ) THEN

             IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
                target_id = numprocs
             ELSE
                target_id = 0
             ENDIF

          ENDIF

       ENDIF

    ENDIF


#endif

#else

!
!-- Array bounds when running on a single PE (respectively a non-parallel
!-- machine)
    nxl = 0
    nxr = nx
    nnx = nxr - nxl + 1
    nys = 0
    nyn = ny
    nny = nyn - nys + 1
    nzb = 0
    nzt = nz
    nnz = nz

    ALLOCATE( hor_index_bounds(4,0:0) )
    hor_index_bounds(1,0) = nxl
    hor_index_bounds(2,0) = nxr
    hor_index_bounds(3,0) = nys
    hor_index_bounds(4,0) = nyn

!
!-- Array bounds for the pressure solver (in the parallel code, these bounds
!-- are the ones for the transposed arrays)
    nys_x = nys
    nyn_x = nyn
    nzb_x = nzb + 1
    nzt_x = nzt

    nxl_y = nxl
    nxr_y = nxr
    nzb_y = nzb + 1
    nzt_y = nzt

    nxl_z = nxl
    nxr_z = nxr
    nys_z = nys
    nyn_z = nyn

#endif

!
!-- Calculate number of grid levels necessary for the multigrid poisson solver
!-- as well as the gridpoint indices on each level
    IF ( psolver(1:9) == 'multigrid' ) THEN

!
!-- First calculate number of possible grid levels for the subdomains
       mg_levels_x = 1
       mg_levels_y = 1
       mg_levels_z = 1

       i = nnx
       DO WHILE ( MOD( i, 2 ) == 0 .AND. i /= 2 )
          i = i / 2
          mg_levels_x = mg_levels_x + 1
       ENDDO

       j = nny
       DO WHILE ( MOD( j, 2 ) == 0 .AND. j /= 2 )
          j = j / 2
          mg_levels_y = mg_levels_y + 1
       ENDDO

       k = nz ! do not use nnz because it might be > nz due to transposition
              ! requirements
       DO WHILE ( MOD( k, 2 ) == 0 .AND. k /= 2 )
          k = k / 2
          mg_levels_z = mg_levels_z + 1
       ENDDO

       maximum_grid_level = MIN( mg_levels_x, mg_levels_y, mg_levels_z )
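!
!-- Illustration (hypothetical values): for nnx = nny = 32 and nz = 64 the
!-- loops above give mg_levels_x = mg_levels_y = 5 and mg_levels_z = 6, so
!-- that maximum_grid_level = 5 results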
---|

!
!-- Find out, if the total domain allows more levels. These additional
!-- levels are identically processed on all PEs.
       IF ( numprocs > 1 .AND. mg_switch_to_pe0_level /= -1 ) THEN

          IF ( mg_levels_z > MIN( mg_levels_x, mg_levels_y ) ) THEN

             mg_switch_to_pe0_level_l = maximum_grid_level

             mg_levels_x = 1
             mg_levels_y = 1

             i = nx+1
             DO WHILE ( MOD( i, 2 ) == 0 .AND. i /= 2 )
                i = i / 2
                mg_levels_x = mg_levels_x + 1
             ENDDO

             j = ny+1
             DO WHILE ( MOD( j, 2 ) == 0 .AND. j /= 2 )
                j = j / 2
                mg_levels_y = mg_levels_y + 1
             ENDDO

             maximum_grid_level_l = MIN( mg_levels_x, mg_levels_y, mg_levels_z )

             IF ( maximum_grid_level_l > mg_switch_to_pe0_level_l ) THEN
                mg_switch_to_pe0_level_l = maximum_grid_level_l - &
                    mg_switch_to_pe0_level_l + 1
             ELSE
                mg_switch_to_pe0_level_l = 0
             ENDIF

          ELSE
             mg_switch_to_pe0_level_l = 0
             maximum_grid_level_l = maximum_grid_level

          ENDIF

!
!-- Use switch level calculated above only if it is not pre-defined
!-- by user
          IF ( mg_switch_to_pe0_level == 0 ) THEN
             IF ( mg_switch_to_pe0_level_l /= 0 ) THEN
                mg_switch_to_pe0_level = mg_switch_to_pe0_level_l
                maximum_grid_level = maximum_grid_level_l
             ENDIF

          ELSE
!
---|
!-- Check pre-defined value and reset to default, if necessary
---|
             IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l .OR. &
                 mg_switch_to_pe0_level >= maximum_grid_level_l ) THEN
                message_string = 'mg_switch_to_pe0_level ' // &
                    'out of range and reset to default (=0)'
                CALL message( 'init_pegrid', 'PA0235', 0, 1, 0, 6, 0 )
                mg_switch_to_pe0_level = 0
             ELSE
!
!-- Use the largest number of possible levels anyway and recalculate
!-- the switch level to this largest number of possible values
                maximum_grid_level = maximum_grid_level_l

             ENDIF

          ENDIF

       ENDIF

       ALLOCATE( grid_level_count(maximum_grid_level), &
           nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level), &
           nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level), &
           nzt_mg(0:maximum_grid_level) )

       grid_level_count = 0
!
!-- Index zero required as dummy due to definition of arrays f2 and p2 in
!-- recursive subroutine next_mg_level
       nxl_mg(0) = 0; nxr_mg(0) = 0; nyn_mg(0) = 0; nys_mg(0) = 0; nzt_mg(0) = 0

       nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzt_l = nzt

       DO i = maximum_grid_level, 1 , -1

          IF ( i == mg_switch_to_pe0_level ) THEN
#if defined( __parallel ) && ! defined( __check )
!
!-- Save the grid size of the subdomain at the switch level, because
!-- it is needed in poismg.
             ind(1) = nxl_l; ind(2) = nxr_l
             ind(3) = nys_l; ind(4) = nyn_l
             ind(5) = nzt_l
             ALLOCATE( ind_all(5*numprocs), mg_loc_ind(5,0:numprocs-1) )
             CALL MPI_ALLGATHER( ind, 5, MPI_INTEGER, ind_all, 5, &
                 MPI_INTEGER, comm2d, ierr )
             DO j = 0, numprocs-1
                DO k = 1, 5
                   mg_loc_ind(k,j) = ind_all(k+j*5)
                ENDDO
             ENDDO
             DEALLOCATE( ind_all )
!
!-- Calculate the grid size of the total domain
             nxr_l = ( nxr_l-nxl_l+1 ) * pdims(1) - 1
             nxl_l = 0
             nyn_l = ( nyn_l-nys_l+1 ) * pdims(2) - 1
             nys_l = 0
!
!-- The size of this gathered array must not be larger than the
!-- array tend, which is used in the multigrid scheme as a temporary
---|
!-- array. Therefore the subdomain size of a PE is calculated and
---|
!-- the size of the gathered grid. These values are used in
!-- routines pres and poismg
             subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) * &
                 ( nyn - nys + 2 * nbgp + 1 ) * ( nzt - nzb + 2 )
             gathered_size = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) * &
                 ( nzt_l - nzb + 2 )

#elif ! defined ( __parallel )
             message_string = 'multigrid gather/scatter impossible ' // &
                 'in non parallel mode'
             CALL message( 'init_pegrid', 'PA0237', 1, 2, 0, 6, 0 )
#endif
          ENDIF

          nxl_mg(i) = nxl_l
          nxr_mg(i) = nxr_l
          nys_mg(i) = nys_l
          nyn_mg(i) = nyn_l
          nzt_mg(i) = nzt_l

          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO
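!
!-- Illustration (hypothetical values): for the lower-left corner PE of the
!-- example above (nxl_l = nys_l = 0, nxr_l = nyn_l = 31, nzt_l = 64) and
!-- mg_switch_to_pe0_level = 0, the loop yields
!-- nxr_mg(0:5) = (/ 0, 1, 3, 7, 15, 31 /) and
!-- nzt_mg(0:5) = (/ 0, 4, 8, 16, 32, 64 /)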
---|

!
---|
!-- Temporary problem: Currently calculation of maxerror in routine poismg crashes
---|
!-- if grid data are collected on PE0 already on the finest grid level.
!-- To be solved later.
       IF ( maximum_grid_level == mg_switch_to_pe0_level ) THEN
          message_string = 'grid coarsening on subdomain level cannot be performed'
          CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )
       ENDIF

    ELSE

       maximum_grid_level = 0

    ENDIF

!
!-- Default level 0 tells exchange_horiz that all ghost planes have to be
!-- exchanged. grid_level is adjusted in poismg, where only one ghost plane
!-- is required.
    grid_level = 0

#if defined( __parallel ) && ! defined ( __check )
!
!-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)
    ngp_y = nyn - nys + 1 + 2 * nbgp

!
!-- Define new MPI derived datatypes for the exchange of ghost points in
!-- x- and y-direction for 2D-arrays (line)
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, &
        ierr )
    CALL MPI_TYPE_COMMIT( type_x, ierr )
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, &
        type_x_int, ierr )
    CALL MPI_TYPE_COMMIT( type_x_int, ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
    CALL MPI_TYPE_COMMIT( type_y, ierr )
    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int, ierr )
    CALL MPI_TYPE_COMMIT( type_y_int, ierr )
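!
!-- Note: for the 2d arrays, which are dimensioned (y,x) with y running
!-- fastest, type_x describes nbgp consecutive y-values out of each of the
!-- nxr-nxl+1+2*nbgp x-columns (stride ngp_y), i.e. the strip exchanged with
!-- the south/north neighbours, while type_y describes nbgp complete,
!-- contiguous y-lines as exchanged with the left/right neighbours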
---|


!
!-- Calculate gridpoint numbers for the exchange of ghost points along x
!-- (yz-plane for 3D-arrays) and define MPI derived data type(s) for the
!-- exchange of ghost points in y-direction (xz-plane).
!-- Do these calculations for the model grid and (if necessary) also
!-- for the coarser grid levels used in the multigrid method
    ALLOCATE ( ngp_xz(0:maximum_grid_level), ngp_yz(0:maximum_grid_level), &
        type_xz(0:maximum_grid_level), type_yz(0:maximum_grid_level) )

    nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt

!
!-- Discern between the model grid, which needs nbgp ghost points and
!-- grid levels for the multigrid scheme. In the latter case only one
!-- ghost point is necessary.
!-- First definition of MPI-datatypes for exchange of ghost layers on normal
!-- grid. The following loop is needed for data exchange in poismg.f90.
!
!-- Determine number of grid points of yz-layer for exchange
    ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)

!
!-- Define an MPI-datatype for the exchange of left/right boundaries.
!-- Although data are contiguous in physical memory (which does not
!-- necessarily require an MPI-derived datatype), the data exchange between
!-- left and right PE's using the MPI-derived type is 10% faster than without.
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), &
        MPI_REAL, type_xz(0), ierr )
    CALL MPI_TYPE_COMMIT( type_xz(0), ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), &
        ierr )
    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )

!
!-- Definition of MPI-datatypes for multigrid method (coarser level grids)
    IF ( psolver(1:9) == 'multigrid' ) THEN
!
---|
!-- Definition of MPI-datatypes as above, but only 1 ghost level is used
---|
       DO i = maximum_grid_level, 1 , -1

          ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
          ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)

          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), &
              MPI_REAL, type_xz(i), ierr )
          CALL MPI_TYPE_COMMIT( type_xz(i), ierr )

          CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), &
              ierr )
          CALL MPI_TYPE_COMMIT( type_yz(i), ierr )

          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO

    ENDIF
!
!-- Define data types for exchange of 3D Integer arrays.
    ngp_yz_int = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)

    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int, &
        MPI_INTEGER, type_xz_int, ierr )
    CALL MPI_TYPE_COMMIT( type_xz_int, ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int, ngp_yz_int, MPI_INTEGER, type_yz_int, &
        ierr )
    CALL MPI_TYPE_COMMIT( type_yz_int, ierr )

#endif

#if defined( __parallel ) && ! defined ( __check )
!
!-- Setting of flags for inflow/outflow/nesting conditions in case of non-cyclic
!-- horizontal boundary conditions.
    IF ( pleft == MPI_PROC_NULL ) THEN
       IF ( bc_lr == 'dirichlet/radiation' ) THEN
          inflow_l = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet' ) THEN
          outflow_l = .TRUE.
       ELSEIF ( bc_lr == 'nested' ) THEN
          nest_bound_l = .TRUE.
       ENDIF
    ENDIF

    IF ( pright == MPI_PROC_NULL ) THEN
       IF ( bc_lr == 'dirichlet/radiation' ) THEN
          outflow_r = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet' ) THEN
          inflow_r = .TRUE.
       ELSEIF ( bc_lr == 'nested' ) THEN
          nest_bound_r = .TRUE.
       ENDIF
    ENDIF

    IF ( psouth == MPI_PROC_NULL ) THEN
       IF ( bc_ns == 'dirichlet/radiation' ) THEN
          outflow_s = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet' ) THEN
          inflow_s = .TRUE.
       ELSEIF ( bc_ns == 'nested' ) THEN
          nest_bound_s = .TRUE.
       ENDIF
    ENDIF

    IF ( pnorth == MPI_PROC_NULL ) THEN
       IF ( bc_ns == 'dirichlet/radiation' ) THEN
          inflow_n = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet' ) THEN
          outflow_n = .TRUE.
       ELSEIF ( bc_ns == 'nested' ) THEN
          nest_bound_n = .TRUE.
       ENDIF
    ENDIF

!
!-- Broadcast the id of the inflow PE
    IF ( inflow_l ) THEN
       id_inflow_l = myidx
    ELSE
       id_inflow_l = 0
    ENDIF
    IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, &
        comm1dx, ierr )

!
!-- Broadcast the id of the recycling plane
!-- WARNING: needs to be adjusted in case of inflows other than from left side!
    IF ( NINT( recycling_width / dx ) >= nxl .AND. &
        NINT( recycling_width / dx ) <= nxr ) THEN
       id_recycling_l = myidx
    ELSE
       id_recycling_l = 0
    ENDIF
    IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, &
        comm1dx, ierr )

    CALL location_message( 'finished', .TRUE. )

#elif ! defined ( __parallel )
    IF ( bc_lr == 'dirichlet/radiation' ) THEN
       inflow_l = .TRUE.
       outflow_r = .TRUE.
    ELSEIF ( bc_lr == 'radiation/dirichlet' ) THEN
       outflow_l = .TRUE.
       inflow_r = .TRUE.
    ENDIF

    IF ( bc_ns == 'dirichlet/radiation' ) THEN
       inflow_n = .TRUE.
       outflow_s = .TRUE.
    ELSEIF ( bc_ns == 'radiation/dirichlet' ) THEN
       outflow_n = .TRUE.
       inflow_s = .TRUE.
    ENDIF
#endif

!
!-- At the inflow or outflow, u or v, respectively, have to be calculated for
!-- one more grid point.
    IF ( inflow_l .OR. outflow_l .OR. nest_bound_l ) THEN
       nxlu = nxl + 1
    ELSE
       nxlu = nxl
    ENDIF
    IF ( inflow_s .OR. outflow_s .OR. nest_bound_s ) THEN
       nysv = nys + 1
    ELSE
       nysv = nys
    ENDIF

!
!-- Allocate wall flag arrays used in the multigrid solver
    IF ( psolver(1:9) == 'multigrid' ) THEN

       DO i = maximum_grid_level, 1, -1

          SELECT CASE ( i )

             CASE ( 1 )
                ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 2 )
                ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 3 )
                ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 4 )
                ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 5 )
                ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 6 )
                ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 7 )
                ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 8 )
                ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 9 )
                ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 10 )
                ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1, &
                    nys_mg(i)-1:nyn_mg(i)+1, &
                    nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE DEFAULT
                message_string = 'more than 10 multigrid levels'
                CALL message( 'init_pegrid', 'PA0238', 1, 2, 0, 6, 0 )

          END SELECT

       ENDDO

    ENDIF

!
!-- Calculate the number of groups into which parallel I/O is split.
!-- The default for files which are opened by all PEs (or where each
---|
!-- PE opens its own independent file) is that all PEs do input/output
---|
!-- in parallel at the same time. This might cause performance or even more
!-- severe problems depending on the configuration of the underlying file
!-- system.
!-- First, set the default:
    IF ( maximum_parallel_io_streams == -1 .OR. &
        maximum_parallel_io_streams > numprocs ) THEN
       maximum_parallel_io_streams = numprocs
    ENDIF

!
!-- Now calculate the number of io_blocks and the io_group to which the
!-- respective PE belongs. I/O of the groups is done in serial, but in parallel
!-- for all PEs belonging to the same group. A preliminary setting with myid
!-- based on MPI_COMM_WORLD has been done in parin.
    io_blocks = numprocs / maximum_parallel_io_streams
    io_group = MOD( myid+1, io_blocks )
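!
!-- Illustration (hypothetical values): numprocs = 64 and
!-- maximum_parallel_io_streams = 8 give io_blocks = 8, i.e. the PEs are
!-- split into 8 groups (io_group = MOD( myid+1, 8 )) which perform their
!-- I/O one after the other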
---|


END SUBROUTINE init_pegrid
---|