!> @file init_pegrid.f90
!------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software
! Foundation, either version 3 of the License, or (at your option) any later
! version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2019 Leibniz Universitaet Hannover
!------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
!
!
! Former revisions:
! -----------------
! $Id: init_pegrid.f90 3897 2019-04-15 11:51:14Z gronemeier $
! Minor revision of multigrid check; give warning instead of an abort.
!
! 3890 2019-04-12 15:59:20Z suehring
! Check if grid coarsening is possible on subdomain, in order to avoid that
! multigrid approach effectively reduces to a Gauss-Seidel scheme.
!
! 3885 2019-04-11 11:29:34Z kanani
! Changes related to global restructuring of location messages and introduction
! of additional debug messages
!
! 3884 2019-04-10 13:31:55Z Giersch
! id_recycling is only calculated in case of turbulent inflow
!
! 3761 2019-02-25 15:31:42Z raasch
! unused variable removed
!
! 3655 2019-01-07 16:51:22Z knoop
! variables documented
!
! 3552 2018-11-22 10:28:35Z suehring
! Introduce new MPI-datatype for ghostpoint exchange of 2D 8-bit Integer arrays
!
! 3542 2018-11-20 17:04:13Z suehring
! Bugfix in setting number of ghost layers in neutral case
!
! 3341 2018-10-15 10:31:27Z suehring
! unused variables removed
!
! 3183 2018-07-27 14:25:55Z suehring
! Rename variables and boundary conditions in mesoscale-offline nesting mode
!
! 3182 2018-07-27 13:36:03Z suehring
! bugfix: wrong error number in r3057 revised
!
! 3057 2018-06-05 09:03:41Z raasch
! bugfix: check that nz is even in case that optimized multigrid is used
!
! 3049 2018-05-29 13:52:36Z Giersch
! Error messages revised
!
! 3045 2018-05-28 07:55:41Z Giersch
! Error messages revised
!
! 2938 2018-03-27 15:52:42Z suehring
! - No checks for domain decomposition in case of turbulence generator
!   (is done in stg module)
! - Introduce ids to indicate lateral processors for turbulence generator
!
! 2936 2018-03-27 14:49:27Z suehring
! Variable use_synthetic_turbulence_generator has been abbreviated
!
! 2718 2018-01-02 08:49:38Z maronga
! Corrected "Former revisions" section
!
! 2696 2017-12-14 17:12:51Z kanani
! Change in file header (GPL part)
! 3D-Integer exchange on multigrid level (MS)
! Forcing implemented (MS)
!
! 2600 2017-11-01 14:11:20Z raasch
! calculation of block-I/O quantities removed (is now done in parin)
!
! 2516 2017-10-04 11:03:04Z suehring
! Remove tabs
!
! 2514 2017-10-04 09:52:37Z suehring
! Redundant preprocessor directives removed
!
! 2372 2017-08-25 12:37:32Z sward
! Shifted cyclic boundary conditions implemented
!
! 2365 2017-08-21 14:59:59Z kanani
! Vertical nesting implemented (SadiqHuq)
!
! 2300 2017-06-29 13:31:14Z raasch
! host-specific settings removed
!
! 2298 2017-06-29 09:28:18Z raasch
! MPI2 related parts removed
!
! 2271 2017-06-09 12:34:55Z sward
! Error message changed
!
! 2259 2017-06-08 09:09:11Z gronemeier
! Implemented synthetic turbulence generator
!
! 2238 2017-05-31 16:49:16Z suehring
! Remove unnecessary module load of pmc_interface
!
! 2231 2017-05-30 16:44:33Z suehring
!
! 2200 2017-04-11 11:37:51Z suehring
! monotonic_adjustment removed
!
! 2197 2017-03-24 02:25:00Z raasch
! bugfix: do not allow odd values for nz at the coarsest grid level in case of
! optimized multigrid solver
!
! 2180 2017-03-17 13:33:05Z hellstea
! Checks to ensure (2178) that pdims match the grid dimensions in the
! automatic determination of pdims are canceled as unnecessary
!
! 2178 2017-03-17 11:07:39Z hellstea
! Checks to ensure that pdims match the grid dimensions are added in the
! automatic determination of pdims
!
! 2050 2016-11-08 15:00:55Z gronemeier
! Implement turbulent outflow condition
!
! 2000 2016-08-20 18:09:15Z knoop
! Forced header and separation lines into 80 columns
!
! 1968 2016-07-18 12:01:49Z suehring
! Extend MPI-datatypes for exchange of 2D-INTEGER arrays on coarser multigrid
! level
!
! 1964 2016-07-14 15:35:18Z hellstea
! Bugfix: erroneous setting of nest_bound_l/r/s/n = .TRUE. for vertical nesting
! mode removed.
!
! 1923 2016-05-31 16:37:07Z boeske
! Initial version of purely vertical nesting introduced.
!
! 1922 2016-05-31 16:36:08Z boeske
! Bugfix: array transposition checks restricted to cases if a fourier
! transform is used, removed unused variable nnx_z
!
! 1833 2016-04-07 14:23:03Z raasch
! spectra related variables moved to spectra_mod
!
! 1815 2016-04-06 13:49:59Z raasch
! cpp-directives for intel openmp bug removed
!
! 1804 2016-04-05 16:30:18Z maronga
! Removed code for parameter file check (__check)
!
! 1779 2016-03-03 08:01:28Z raasch
! changes regarding nested domain removed: virtual PE grid will be automatically
! calculated for nested runs too
!
! 1764 2016-02-28 12:45:19Z raasch
! cpp-statements for nesting removed
!
! 1762 2016-02-25 12:31:13Z hellstea
! Introduction of nested domain feature
!
! 1682 2015-10-07 23:56:08Z knoop
! Code annotations made doxygen readable
!
! 1677 2015-10-02 13:25:23Z boeske
! New MPI-data types for exchange of 3D integer arrays.
!
! 1575 2015-03-27 09:56:27Z raasch
! adjustments for psolver-queries, calculation of ngp_xz added
!
! 1565 2015-03-09 20:59:31Z suehring
! Refine if-clause for setting nbgp.
!
! 1557 2015-03-05 16:43:04Z suehring
! Adjustment for monotonic limiter
!
! 1468 2014-09-24 14:06:57Z maronga
! Adapted for use on up to 6-digit processor cores
!
! 1435 2014-07-21 10:37:02Z keck
! bugfix: added missing parameter coupling_mode_remote to ONLY-attribute
!
! 1402 2014-05-09 14:25:13Z raasch
! location messages modified
!
! 1384 2014-05-02 14:31:06Z raasch
! location messages added
!
! 1353 2014-04-08 15:21:23Z heinze
! REAL constants provided with KIND-attribute
!
! 1322 2014-03-20 16:38:49Z raasch
! REAL functions provided with KIND-attribute
!
! 1320 2014-03-20 08:40:49Z raasch
! ONLY-attribute added to USE-statements,
! kind-parameters added to all INTEGER and REAL declaration statements,
! kinds are defined in new module kinds,
! revision history before 2012 removed,
! comment fields (!:) to be used for variable explanations added to
! all variable declaration statements
!
! 1304 2014-03-12 10:29:42Z raasch
! bugfix: single core MPI runs missed some settings of transpose indices
!
! 1212 2013-08-15 08:46:27Z raasch
! error message for poisfft_hybrid removed
!
! 1159 2013-05-21 11:58:22Z fricke
! dirichlet/neumann and neumann/dirichlet removed
!
! 1139 2013-04-18 07:25:03Z raasch
! bugfix for calculating the id of the PE carrying the recycling plane
!
! 1111 2013-03-08 23:54:10Z raasch
! initialization of poisfft moved to module poisfft
!
! 1092 2013-02-02 11:24:22Z raasch
! unused variables removed
!
! 1056 2012-11-16 15:28:04Z raasch
! Indices for arrays n.._mg start from zero due to definition of arrays f2 and
! p2 as automatic arrays in recursive subroutine next_mg_level
!
! 1041 2012-11-06 02:36:29Z raasch
! a 2d virtual processor topology is used by default for all machines
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 1003 2012-09-14 14:35:53Z raasch
! subdomains must have identical size (grid matching = "match" removed)
!
! 1001 2012-09-13 14:08:46Z raasch
! all actions concerning upstream-spline-method removed
!
! 978 2012-08-09 08:28:32Z fricke
! dirichlet/neumann and neumann/dirichlet added
! nxlu and nysv are also calculated for inflow boundary
!
! 809 2012-01-30 13:32:58Z maronga
! Bugfix: replaced .AND. and .NOT. with && and ! in the preprocessor directives
!
! 807 2012-01-25 11:53:51Z maronga
! New cpp directive "__check" implemented which is used by check_namelist_files
!
! Revision 1.1  1997/07/24 11:15:09  raasch
! Initial revision
!
!
! Description:
! ------------
!> Determination of the virtual processor topology (if not prescribed by the
!> user) and computation of the grid point number and array bounds of the
!> local domains.
!> @todo: remove MPI-data types for 2D exchange on coarse multigrid level (not
!>        used any more)
!------------------------------------------------------------------------------!
 SUBROUTINE init_pegrid


    USE control_parameters,                                                    &
        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
               bc_lr, bc_ns, bc_radiation_l, bc_radiation_n, bc_radiation_r,   &
               bc_radiation_s, coupling_mode, coupling_topology, gathered_size,&
               grid_level, grid_level_count, maximum_grid_level,               &
               message_string, mg_switch_to_pe0_level, momentum_advec,         &
               psolver, outflow_source_plane, recycling_width, scalar_advec,   &
               subdomain_size, turbulent_inflow, turbulent_outflow, y_shift

    USE grid_variables,                                                        &
        ONLY:  dx

    USE indices,                                                               &
        ONLY:  mg_loc_ind, nbgp, nnx, nny, nnz, nx, nx_a, nx_o, nxl, nxl_mg,   &
               nxlu, nxr, nxr_mg, ny, ny_a, ny_o, nyn, nyn_mg, nys, nys_mg,    &
               nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2,         &
               wall_flags_3, wall_flags_4, wall_flags_5, wall_flags_6,         &
               wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10

    USE kinds

    USE pegrid

    USE spectra_mod,                                                           &
        ONLY:  calculate_spectra, dt_dosp

    USE synthetic_turbulence_generator_mod,                                    &
        ONLY:  id_stg_left, id_stg_north, id_stg_right, id_stg_south,          &
               use_syn_turb_gen

    USE transpose_indices,                                                     &
        ONLY:  nxl_y, nxl_yd, nxl_z, nxr_y, nxr_yd, nxr_z, nyn_x, nyn_z, nys_x,&
               nys_z, nzb_x, nzb_y, nzb_yd, nzt_x, nzt_yd, nzt_y

    USE vertical_nesting_mod,                                                  &
        ONLY:  vnested, vnest_init_pegrid_domain, vnest_init_pegrid_rank

    IMPLICIT NONE

    INTEGER(iwp) ::  i                        !< running index over number of processors or number of multigrid level
    INTEGER(iwp) ::  id_inflow_l              !< ID indicating processors located at the left inflow boundary
    INTEGER(iwp) ::  id_outflow_l             !< local value of id_outflow
    INTEGER(iwp) ::  id_outflow_source_l      !< local value of id_outflow_source
    INTEGER(iwp) ::  id_recycling_l           !< ID indicating processors located at the recycling plane
    INTEGER(iwp) ::  id_stg_left_l            !< left lateral boundary local core id in case of turbulence generator
    INTEGER(iwp) ::  id_stg_north_l           !< north lateral boundary local core id in case of turbulence generator
    INTEGER(iwp) ::  id_stg_right_l           !< right lateral boundary local core id in case of turbulence generator
    INTEGER(iwp) ::  id_stg_south_l           !< south lateral boundary local core id in case of turbulence generator
    INTEGER(iwp) ::  ind(5)                   !< array containing the subdomain bounds
    INTEGER(iwp) ::  j                        !< running index, used for various loops
    INTEGER(iwp) ::  k                        !< number of vertical grid points in different multigrid level
    INTEGER(iwp) ::  maximum_grid_level_l     !< maximum number of grid level without switching to PE 0
    INTEGER(iwp) ::  mg_levels_x              !< maximum number of grid level allowed along x-direction
    INTEGER(iwp) ::  mg_levels_y              !< maximum number of grid level allowed along y-direction
    INTEGER(iwp) ::  mg_levels_z              !< maximum number of grid level allowed along z-direction
    INTEGER(iwp) ::  mg_switch_to_pe0_level_l !< maximum number of grid level with switching to PE 0
    INTEGER(iwp) ::  nnx_y                    !< quotient of number of grid points along x-direction and number of PEs used along y-direction
    INTEGER(iwp) ::  nny_x                    !< quotient of number of grid points along y-direction and number of PEs used along x-direction
    INTEGER(iwp) ::  nny_z                    !< quotient of number of grid points along y-direction and number of PEs used along x-direction
    INTEGER(iwp) ::  nnz_x                    !< quotient of number of grid points along z-direction and number of PEs used along x-direction
    INTEGER(iwp) ::  nnz_y                    !< quotient of number of grid points along z-direction and number of PEs used along x-direction
    INTEGER(iwp) ::  numproc_sqr              !< square root of the number of processors
    INTEGER(iwp) ::  nxl_l                    !< lower index bound along x-direction on subdomain and different multigrid level
    INTEGER(iwp) ::  nxr_l                    !< upper index bound along x-direction on subdomain and different multigrid level
    INTEGER(iwp) ::  nyn_l                    !< upper index bound along y-direction on subdomain and different multigrid level
    INTEGER(iwp) ::  nys_l                    !< lower index bound along y-direction on subdomain and different multigrid level
    INTEGER(iwp) ::  nzb_l                    !< lower index bound along z-direction on subdomain and different multigrid level
    INTEGER(iwp) ::  nzt_l                    !< upper index bound along z-direction on subdomain and different multigrid level
!$  INTEGER(iwp) ::  omp_get_num_threads      !< number of OpenMP threads

    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  ind_all !< dummy array containing index bounds on subdomain, used for gathering
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nxlf    !< lower index bound along x-direction for every PE
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nxrf    !< upper index bound along x-direction for every PE
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nynf    !< upper index bound along y-direction for every PE
    INTEGER(iwp), DIMENSION(:), ALLOCATABLE ::  nysf    !< lower index bound along y-direction for every PE

    INTEGER(iwp), DIMENSION(2) ::  pdims_remote !< number of PEs used for coupled model (only in atmosphere-ocean coupling)
    INTEGER(iwp)               ::  lcoord(2)    !< PE coordinates of left neighbor along x and y
    INTEGER(iwp)               ::  rcoord(2)    !< PE coordinates of right neighbor along x and y

!
!-- Get the number of OpenMP threads
    !$OMP PARALLEL
!$  threads_per_task = omp_get_num_threads()
    !$OMP END PARALLEL


#if defined( __parallel )

    CALL location_message( 'creating virtual PE grids + MPI derived data types', 'start' )
---|

!
!-- Determine the processor topology or check it, if prescribed by the user
    IF ( npex == -1  .AND.  npey == -1 )  THEN

!
!--    Automatic determination of the topology
       numproc_sqr = SQRT( REAL( numprocs, KIND=wp ) )
       pdims(1)    = MAX( numproc_sqr , 1 )
       DO  WHILE ( MOD( numprocs , pdims(1) ) /= 0 )
          pdims(1) = pdims(1) - 1
       ENDDO
       pdims(2) = numprocs / pdims(1)
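!
!--    Illustrative example (not part of the original code path): for
!--    numprocs = 24, numproc_sqr evaluates to 4 and MOD( 24, 4 ) == 0, so
!--    the loop is not entered and pdims = (/ 4, 6 /) results, i.e. the most
!--    square-like PE grid that still divides numprocs without remainder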
---|

    ELSEIF ( npex /= -1  .AND.  npey /= -1 )  THEN

!
!--    Prescribed by user. Number of processors on the prescribed topology
!--    must be equal to the number of PEs available to the job
       IF ( ( npex * npey ) /= numprocs )  THEN
          WRITE( message_string, * ) 'number of PEs of the prescribed ',       &
              'topology (', npex*npey,') does not match & the number of ',     &
              'PEs available to the job (', numprocs, ')'
          CALL message( 'init_pegrid', 'PA0221', 1, 2, 0, 6, 0 )
       ENDIF
       pdims(1) = npex
       pdims(2) = npey

    ELSE
!
!--    If the processor topology is prescribed by the user, the number of
!--    PEs must be given in both directions
       message_string = 'if the processor topology is prescribed by the ' //  &
                        'user & both values of "npex" and "npey" must be ' // &
                        'given in the &NAMELIST-parameter file'
       CALL message( 'init_pegrid', 'PA0222', 1, 2, 0, 6, 0 )

    ENDIF

!
!-- If necessary, set horizontal boundary conditions to non-cyclic
    IF ( bc_lr /= 'cyclic' )  cyclic(1) = .FALSE.
    IF ( bc_ns /= 'cyclic' )  cyclic(2) = .FALSE.
---|


!
!-- Create the virtual processor grid
    CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder,             &
                          comm2d, ierr )
    CALL MPI_COMM_RANK( comm2d, myid, ierr )
    WRITE (myid_char,'(''_'',I6.6)')  myid

    CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
    CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft, pright, ierr )
    CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )
---|
!
!-- In case of cyclic boundary conditions, a y-shift at the boundaries in
!-- x-direction can be introduced via parameter y_shift. The shift is done
!-- by modifying the processor grid in such a way that processors located
!-- at the x-boundary communicate across it to processors with y-coordinate
!-- shifted by y_shift relative to their own. This feature cannot be used
!-- in combination with an FFT pressure solver. It has been implemented to
!-- counter the effect of streak structures in case of cyclic boundary
!-- conditions. For a description of these see Munters
!-- (2016; dx.doi.org/10.1063/1.4941912)
!--
!-- Get coordinates of left and right neighbor on PE grid
    IF ( y_shift /= 0 ) THEN

       IF ( bc_lr /= 'cyclic'  .OR.  bc_ns /= 'cyclic' )  THEN
          message_string = 'y_shift /= 0 is only allowed for cyclic ' //       &
                           'boundary conditions in both directions '
          CALL message( 'check_parameters', 'PA0467', 1, 2, 0, 6, 0 )
       ENDIF
       IF ( TRIM( psolver ) /= 'multigrid' .AND.                               &
            TRIM( psolver ) /= 'multigrid_noopt')                              &
       THEN
          message_string = 'y_shift /= 0 requires a multigrid pressure solver '
          CALL message( 'check_parameters', 'PA0468', 1, 2, 0, 6, 0 )
       ENDIF

       CALL MPI_CART_COORDS( comm2d, pright, ndim, rcoord, ierr )
       CALL MPI_CART_COORDS( comm2d, pleft, ndim, lcoord, ierr )

!
!--    If the x(y)-coordinate of the right (left) neighbor is smaller (greater)
!--    than that of the calling process, then the calling process is located on
!--    the right (left) boundary of the processor grid. In that case,
!--    the y-coordinate of that neighbor is increased (decreased) by y_shift.
!--    The rank of the process with that coordinate is then inquired and the
!--    neighbor rank for MPI_SENDRECV, pright (pleft) is set to it.
!--    In this way, the calling process receives a new right (left) neighbor
!--    for all future MPI_SENDRECV calls. That neighbor has a y-coordinate
!--    of y+(-)y_shift, where y is the original right (left) neighbor's
!--    y-coordinate. The modulo-operation ensures that if the neighbor's
!--    y-coordinate exceeds the grid-boundary, it will be relocated to
!--    the opposite part of the grid cyclically.
       IF ( rcoord(1) < pcoord(1) ) THEN
          rcoord(2) = MODULO( rcoord(2) + y_shift, pdims(2) )
          CALL MPI_CART_RANK( comm2d, rcoord, pright, ierr )
       ENDIF

       IF ( lcoord(1) > pcoord(1) ) THEN
          lcoord(2) = MODULO( lcoord(2) - y_shift, pdims(2) )
          CALL MPI_CART_RANK( comm2d, lcoord, pleft, ierr )
       ENDIF
    ENDIF
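!
!-- Illustrative example of the shift: for pdims(2) = 4 and y_shift = 1, a PE
!-- at the right x-boundary whose original right neighbor has y-coordinate 3
!-- now communicates with the PE at y-coordinate MODULO( 3 + 1, 4 ) = 0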
---|
!
!-- Vertical nesting: store four lists that identify partner ranks to exchange
!-- data
    IF ( vnested )  CALL vnest_init_pegrid_rank

!
!-- Determine sub-topologies for transpositions
!-- Transposition from z to x:
    remain_dims(1) = .TRUE.
    remain_dims(2) = .FALSE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dx, ierr )
    CALL MPI_COMM_RANK( comm1dx, myidx, ierr )
!
!-- Transposition from x to y
    remain_dims(1) = .FALSE.
    remain_dims(2) = .TRUE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dy, ierr )
    CALL MPI_COMM_RANK( comm1dy, myidy, ierr )
---|


!
!-- Calculate array bounds along x-direction for every PE.
    ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1),      &
              nysf(0:pdims(2)-1) )

    IF ( MOD( nx+1 , pdims(1) ) /= 0 )  THEN
       WRITE( message_string, * ) 'x-direction: gridpoint number (',nx+1,') ', &
                                  'is not an& integral divisor of the number ',&
                                  'of processors (', pdims(1),')'
       CALL message( 'init_pegrid', 'PA0225', 1, 2, 0, 6, 0 )
    ELSE
       nnx = ( nx + 1 ) / pdims(1)
    ENDIF

!
!-- Left and right array bounds, number of gridpoints
    DO  i = 0, pdims(1)-1
       nxlf(i)   = i * nnx
       nxrf(i)   = ( i + 1 ) * nnx - 1
    ENDDO
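!
!-- Illustrative example: nx = 47 (i.e. 48 grid points along x) on
!-- pdims(1) = 4 PE columns gives nnx = 12, so column i covers
!-- nxlf(i) = 12*i to nxrf(i) = 12*i + 11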
---|

!
!-- Calculate array bounds in y-direction for every PE.
    IF ( MOD( ny+1 , pdims(2) ) /= 0 )  THEN
       WRITE( message_string, * ) 'y-direction: gridpoint number (',ny+1,') ', &
                                  'is not an& integral divisor of the number ',&
                                  'of processors (', pdims(2),')'
       CALL message( 'init_pegrid', 'PA0227', 1, 2, 0, 6, 0 )
    ELSE
       nny = ( ny + 1 ) / pdims(2)
    ENDIF

!
!-- South and north array bounds
    DO  j = 0, pdims(2)-1
       nysf(j)   = j * nny
       nynf(j)   = ( j + 1 ) * nny - 1
    ENDDO
---|

!
!-- Local array bounds of the respective PEs
    nxl = nxlf(pcoord(1))
    nxr = nxrf(pcoord(1))
    nys = nysf(pcoord(2))
    nyn = nynf(pcoord(2))
    nzb = 0
    nzt = nz
    nnz = nz

!
!-- Set switches to define if the PE is situated at the border of the virtual
!-- processor grid
    IF ( nxl == 0 )   left_border_pe  = .TRUE.
    IF ( nxr == nx )  right_border_pe = .TRUE.
    IF ( nys == 0 )   south_border_pe = .TRUE.
    IF ( nyn == ny )  north_border_pe = .TRUE.

!
!-- Calculate array bounds and gridpoint numbers for the transposed arrays
!-- (needed in the pressure solver)
!-- For the transposed arrays, cyclic boundaries as well as top and bottom
!-- boundaries are omitted, because they are obstructive to the transposition
---|

!
!-- 1. transposition  z --> x
!-- This transposition is not necessary in case of a 1d-decomposition along x
    IF ( psolver == 'poisfft'  .OR.  calculate_spectra )  THEN

       IF ( pdims(2) /= 1 )  THEN
          IF ( MOD( nz , pdims(1) ) /= 0 )  THEN
             WRITE( message_string, * ) 'transposition z --> x:',              &
                       '& nz=',nz,' is not an integral divisor of pdims(1)=',  &
                       pdims(1)
             CALL message( 'init_pegrid', 'PA0230', 1, 2, 0, 6, 0 )
          ENDIF
       ENDIF

       nys_x = nys
       nyn_x = nyn
       nny_x = nny
       nnz_x = nz / pdims(1)
       nzb_x = 1 + myidx * nnz_x
       nzt_x = ( myidx + 1 ) * nnz_x
       sendrecvcount_zx = nnx * nny * nnz_x
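!
!--    Illustrative example: nz = 64 on pdims(1) = 4 gives nnz_x = 16; the PE
!--    with myidx = 1 then holds the vertical slab k = 17 to 32 after the
!--    z --> x transposition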
---|

    ENDIF


    IF ( psolver == 'poisfft' )  THEN
!
!--    2. transposition  x --> y
       IF ( MOD( nx+1 , pdims(2) ) /= 0 )  THEN
          WRITE( message_string, * ) 'transposition x --> y:',                 &
                            '& nx+1=',nx+1,' is not an integral divisor of ',  &
                            'pdims(2)=',pdims(2)
          CALL message( 'init_pegrid', 'PA0231', 1, 2, 0, 6, 0 )
       ENDIF

       nnz_y = nnz_x
       nzb_y = nzb_x
       nzt_y = nzt_x
       nnx_y = (nx+1) / pdims(2)
       nxl_y = myidy * nnx_y
       nxr_y = ( myidy + 1 ) * nnx_y - 1
       sendrecvcount_xy = nnx_y * nny_x * nnz_y
!
!--    3. transposition  y --> z
!--    (ELSE:  x --> y  in case of 1D-decomposition along x)
       nxl_z = nxl_y
       nxr_z = nxr_y
       nny_z = (ny+1) / pdims(1)
       nys_z = myidx * nny_z
       nyn_z = ( myidx + 1 ) * nny_z - 1
       sendrecvcount_yz = nnx_y * nny_z * nnz_y

       IF ( pdims(2) /= 1 )  THEN
!
!--       y --> z
!--       This transposition is not necessary in case of a 1d-decomposition
!--       along x, except when the upstream-spline method is switched on
          IF ( MOD( ny+1 , pdims(1) ) /= 0 )  THEN
             WRITE( message_string, * ) 'transposition y --> z:',              &
                               '& ny+1=',ny+1,' is not an integral divisor of',&
                               ' pdims(1)=',pdims(1)
             CALL message( 'init_pegrid', 'PA0232', 1, 2, 0, 6, 0 )
          ENDIF

       ELSE
!
!--       x --> y
!--       This condition must be fulfilled for a 1D-decomposition along x
          IF ( MOD( ny+1 , pdims(1) ) /= 0 )  THEN
             WRITE( message_string, * ) 'transposition x --> y:',              &
                               '& ny+1=',ny+1,' is not an integral divisor of',&
                               ' pdims(1)=',pdims(1)
             CALL message( 'init_pegrid', 'PA0233', 1, 2, 0, 6, 0 )
          ENDIF

       ENDIF

    ENDIF
---|

!
!-- Indices for direct transpositions z --> y (used for calculating spectra)
    IF ( calculate_spectra )  THEN
       IF ( MOD( nz, pdims(2) ) /= 0 )  THEN
          WRITE( message_string, * ) 'direct transposition z --> y (needed ',  &
                    'for spectra): nz=',nz,' is not an integral divisor of ',  &
                    'pdims(2)=',pdims(2)
          CALL message( 'init_pegrid', 'PA0234', 1, 2, 0, 6, 0 )
       ELSE
          nxl_yd = nxl
          nxr_yd = nxr
          nzb_yd = 1 + myidy * ( nz / pdims(2) )
          nzt_yd = ( myidy + 1 ) * ( nz / pdims(2) )
          sendrecvcount_zyd = nnx * nny * ( nz / pdims(2) )
       ENDIF
    ENDIF

    IF ( psolver == 'poisfft'  .OR.  calculate_spectra )  THEN
!
!--    Indices for direct transpositions y --> x
!--    (they are only possible in case of a 1d-decomposition along x)
       IF ( pdims(2) == 1 )  THEN
          nny_x = nny / pdims(1)
          nys_x = myid * nny_x
          nyn_x = ( myid + 1 ) * nny_x - 1
          nzb_x = 1
          nzt_x = nz
          sendrecvcount_xy = nnx * nny_x * nz
       ENDIF

    ENDIF

    IF ( psolver == 'poisfft' )  THEN
!
!--    Indices for direct transpositions x --> y
!--    (they are only possible in case of a 1d-decomposition along y)
       IF ( pdims(1) == 1 )  THEN
          nnx_y = nnx / pdims(2)
          nxl_y = myid * nnx_y
          nxr_y = ( myid + 1 ) * nnx_y - 1
          nzb_y = 1
          nzt_y = nz
          sendrecvcount_xy = nnx_y * nny * nz
       ENDIF

    ENDIF
---|

!
!-- Arrays for storing the array bounds are not needed any more
    DEALLOCATE( nxlf , nxrf , nynf , nysf )
---|


!
!-- Collect index bounds from other PEs (to be written to restart file later)
    ALLOCATE( hor_index_bounds(4,0:numprocs-1) )

    IF ( myid == 0 )  THEN

       hor_index_bounds(1,0) = nxl
       hor_index_bounds(2,0) = nxr
       hor_index_bounds(3,0) = nys
       hor_index_bounds(4,0) = nyn

!
!--    Receive data from all other PEs
       DO  i = 1, numprocs-1
          CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
                         ierr )
          hor_index_bounds(:,i) = ibuf(1:4)
       ENDDO

    ELSE
!
!--    Send index bounds to PE0
       ibuf(1) = nxl
       ibuf(2) = nxr
       ibuf(3) = nys
       ibuf(4) = nyn
       CALL MPI_SEND( ibuf, 4, MPI_INTEGER, 0, myid, comm2d, ierr )

    ENDIF
---|


#if defined( __print )
!
!-- Control output
    IF ( myid == 0 )  THEN
       PRINT*, '*** processor topology ***'
       PRINT*, ' '
       PRINT*, 'myid   pcoord    left right  south north  idx idy   nxl: nxr', &
             &'   nys: nyn'
       PRINT*, '------------------------------------------------------------', &
             &'-----------'
       WRITE (*,1000)  0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth,  &
                       myidx, myidy, nxl, nxr, nys, nyn
1000   FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3,     &
               2(2X,I4,':',I4))

!
!--    Receive data from the other PEs
       DO  i = 1,numprocs-1
          CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
                         ierr )
          WRITE (*,1000)  i, ( ibuf(j) , j = 1,12 )
       ENDDO
    ELSE

!
!--    Send data to PE0
       ibuf(1) = pcoord(1); ibuf(2) = pcoord(2); ibuf(3) = pleft
       ibuf(4) = pright; ibuf(5) = psouth; ibuf(6) = pnorth; ibuf(7) = myidx
       ibuf(8) = myidy; ibuf(9) = nxl; ibuf(10) = nxr; ibuf(11) = nys
       ibuf(12) = nyn
       CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr )
    ENDIF
#endif
---|

!
!-- Determine the number of ghost point layers
    IF ( scalar_advec   == 'ws-scheme'  .OR.                                   &
         momentum_advec == 'ws-scheme' )  THEN
       nbgp = 3
    ELSE
       nbgp = 1
    ENDIF
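!
!-- Background: the stencil of the 5th-order Wicker-Skamarock advection
!-- scheme reaches three grid points beyond the subdomain boundary, hence
!-- three ghost layers are needed; a single layer suffices for the other
!-- schemes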
---|

!
!-- Create a new MPI derived datatype for the exchange of surface (xy) data,
!-- which is needed for coupled atmosphere-ocean runs.
!-- First, calculate number of grid points of an xy-plane.
    ngp_xy  = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp )
    CALL MPI_TYPE_VECTOR( ngp_xy, 1, nzt-nzb+2, MPI_REAL, type_xy, ierr )
    CALL MPI_TYPE_COMMIT( type_xy, ierr )
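!
!-- Note: since PALM stores 3D arrays with k as the fastest running index,
!-- the values of one xy-plane are ngp_xy elements spaced nzt-nzb+2 apart in
!-- memory; the vector type above (ngp_xy blocks of length 1 with stride
!-- nzt-nzb+2) describes exactly this access pattern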
---|

    IF ( TRIM( coupling_mode ) /= 'uncoupled'  .AND.  .NOT. vnested )  THEN

!
!--    Pass the number of grid points of the atmosphere model to
!--    the ocean model and vice versa
       IF ( coupling_mode == 'atmosphere_to_ocean' )  THEN

          nx_a = nx
          ny_a = ny

          IF ( myid == 0 )  THEN

             CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter,     &
                            ierr )
             CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter,     &
                            ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter,    &
                            ierr )
             CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter,     &
                            status, ierr )
             CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter,     &
                            status, ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6,         &
                            comm_inter, status, ierr )
          ENDIF

          CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr )

       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' )  THEN

          nx_o = nx
          ny_o = ny

          IF ( myid == 0 ) THEN

             CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status,    &
                            ierr )
             CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status,    &
                            ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter,    &
                            status, ierr )
             CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr )
             CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, 0, 6, comm_inter, ierr )
          ENDIF

          CALL MPI_BCAST( nx_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr)

       ENDIF

       ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp )
       ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp )

!
!--    Determine if the horizontal grid and the number of PEs in ocean and
!--    atmosphere are the same or not
       IF ( nx_o == nx_a  .AND.  ny_o == ny_a  .AND.                           &
            pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) )    &
       THEN
          coupling_topology = 0
       ELSE
          coupling_topology = 1
       ENDIF

!
!--    Determine the target PEs for the exchange between ocean and
!--    atmosphere (comm2d)
       IF ( coupling_topology == 0 )  THEN
!
!--       In case of identical topologies, every atmosphere PE has exactly one
!--       ocean PE counterpart and vice versa
          IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
             target_id = myid + numprocs
          ELSE
             target_id = myid
          ENDIF

       ELSE
!
!--       In case of nonequivalent topology in ocean and atmosphere only for
!--       PE0 in ocean and PE0 in atmosphere a target_id is needed, since
!--       data exchange between ocean and atmosphere will be done only
!--       between these PEs.
          IF ( myid == 0 )  THEN

             IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' )  THEN
                target_id = numprocs
             ELSE
                target_id = 0
             ENDIF

          ENDIF

       ENDIF

    ENDIF

!
!-- Store partner grid point co-ordinates as lists.
!-- Create custom MPI vector datatypes for contiguous data transfer
    IF ( vnested )  CALL vnest_init_pegrid_domain
---|

#else

!
!-- Array bounds when running on a single PE (respectively a non-parallel
!-- machine)
    nxl = 0
    nxr = nx
    nnx = nxr - nxl + 1
    nys = 0
    nyn = ny
    nny = nyn - nys + 1
    nzb = 0
    nzt = nz
    nnz = nz

    ALLOCATE( hor_index_bounds(4,0:0) )
    hor_index_bounds(1,0) = nxl
    hor_index_bounds(2,0) = nxr
    hor_index_bounds(3,0) = nys
    hor_index_bounds(4,0) = nyn

!
!-- Array bounds for the pressure solver (in the parallel code, these bounds
!-- are the ones for the transposed arrays)
    nys_x = nys
    nyn_x = nyn
    nzb_x = nzb + 1
    nzt_x = nzt

    nxl_y = nxl
    nxr_y = nxr
    nzb_y = nzb + 1
    nzt_y = nzt

    nxl_z = nxl
    nxr_z = nxr
    nys_z = nys
    nyn_z = nyn

#endif
---|

!
!-- Calculate number of grid levels necessary for the multigrid poisson solver
!-- as well as the gridpoint indices on each level
    IF ( psolver(1:9) == 'multigrid' )  THEN

!
!--    First calculate number of possible grid levels for the subdomains
       mg_levels_x = 1
       mg_levels_y = 1
       mg_levels_z = 1

       i = nnx
       DO WHILE ( MOD( i, 2 ) == 0  .AND.  i /= 2 )
          i = i / 2
          mg_levels_x = mg_levels_x + 1
       ENDDO

       j = nny
       DO WHILE ( MOD( j, 2 ) == 0  .AND.  j /= 2 )
          j = j / 2
          mg_levels_y = mg_levels_y + 1
       ENDDO

       k = nz    ! do not use nnz because it might be > nz due to transposition
                 ! requirements
       DO WHILE ( MOD( k, 2 ) == 0  .AND.  k /= 2 )
          k = k / 2
          mg_levels_z = mg_levels_z + 1
       ENDDO
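!
!--    Illustrative example: a subdomain with nnx = 20 can be coarsened twice
!--    (20 -> 10 -> 5 points), giving mg_levels_x = 3; the halving stops as
!--    soon as the number of points becomes odd or is reduced to 2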
---|
!
!--    The optimized MG-solver does not allow odd values for nz at the coarsest
!--    grid level
       IF ( TRIM( psolver ) /= 'multigrid_noopt' )  THEN
          IF ( MOD( k, 2 ) /= 0 )  mg_levels_z = mg_levels_z - 1
!
!--       An odd value of nz does not work. The finest level must have an even
!--       value.
          IF ( mg_levels_z == 0 )  THEN
             message_string = 'optimized multigrid method requires nz to be even'
             CALL message( 'init_pegrid', 'PA0495', 1, 2, 0, 6, 0 )
          ENDIF
       ENDIF

       maximum_grid_level = MIN( mg_levels_x, mg_levels_y, mg_levels_z )
!
!--    Check if the subdomain sizes prevent any coarsening.
!--    In this case, the maximum number of grid levels is 1, i.e. effectively
!--    a Gauss-Seidel scheme is applied rather than a multigrid approach.
!--    Give a warning in this case.
       IF ( maximum_grid_level == 1  .AND.  mg_switch_to_pe0_level == -1 )  THEN
          message_string = 'No grid coarsening possible, multigrid ' //        &
                           'approach effectively reduces to a Gauss-Seidel ' //&
                           'scheme.'

          CALL message( 'poismg', 'PA0648', 0, 1, 0, 6, 0 )
       ENDIF
---|

!
!--    Find out if the total domain allows more levels. These additional
!--    levels are identically processed on all PEs.
       IF ( numprocs > 1  .AND.  mg_switch_to_pe0_level /= -1 )  THEN

          IF ( mg_levels_z > MIN( mg_levels_x, mg_levels_y ) )  THEN

             mg_switch_to_pe0_level_l = maximum_grid_level

             mg_levels_x = 1
             mg_levels_y = 1

             i = nx+1
             DO WHILE ( MOD( i, 2 ) == 0  .AND.  i /= 2 )
                i = i / 2
                mg_levels_x = mg_levels_x + 1
             ENDDO

             j = ny+1
             DO WHILE ( MOD( j, 2 ) == 0  .AND.  j /= 2 )
                j = j / 2
                mg_levels_y = mg_levels_y + 1
             ENDDO

             maximum_grid_level_l = MIN( mg_levels_x, mg_levels_y, mg_levels_z )

             IF ( maximum_grid_level_l > mg_switch_to_pe0_level_l )  THEN
                mg_switch_to_pe0_level_l = maximum_grid_level_l -              &
                                           mg_switch_to_pe0_level_l + 1
             ELSE
                mg_switch_to_pe0_level_l = 0
             ENDIF

          ELSE

             mg_switch_to_pe0_level_l = 0
             maximum_grid_level_l = maximum_grid_level

          ENDIF

!
!--       Use switch level calculated above only if it is not pre-defined
!--       by user
          IF ( mg_switch_to_pe0_level == 0 )  THEN
             IF ( mg_switch_to_pe0_level_l /= 0 )  THEN
                mg_switch_to_pe0_level = mg_switch_to_pe0_level_l
                maximum_grid_level     = maximum_grid_level_l
             ENDIF

          ELSE
!
!--          Check pre-defined value and reset to default, if necessary
             IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l  .OR.      &
                  mg_switch_to_pe0_level >= maximum_grid_level_l )  THEN
                message_string = 'mg_switch_to_pe0_level ' //                  &
                                 'out of range and reset to 0'
                CALL message( 'init_pegrid', 'PA0235', 0, 1, 0, 6, 0 )
                mg_switch_to_pe0_level = 0
             ELSE
!
!--             Use the largest number of possible levels anyway and recalculate
!--             the switch level to this largest number of possible values
                maximum_grid_level = maximum_grid_level_l

             ENDIF

          ENDIF

       ENDIF
---|

       ALLOCATE( grid_level_count(maximum_grid_level),                         &
                 nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level),   &
                 nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level),   &
                 nzt_mg(0:maximum_grid_level) )

       grid_level_count = 0
!
!--    Index zero required as dummy due to definition of arrays f2 and p2 in
!--    recursive subroutine next_mg_level
       nxl_mg(0) = 0; nxr_mg(0) = 0; nyn_mg(0) = 0; nys_mg(0) = 0; nzt_mg(0) = 0

       nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzt_l = nzt
---|

       DO  i = maximum_grid_level, 1 , -1

          IF ( i == mg_switch_to_pe0_level )  THEN
#if defined( __parallel )
!
!--          Save the grid size of the subdomain at the switch level, because
!--          it is needed in poismg.
             ind(1) = nxl_l; ind(2) = nxr_l
             ind(3) = nys_l; ind(4) = nyn_l
             ind(5) = nzt_l
             ALLOCATE( ind_all(5*numprocs), mg_loc_ind(5,0:numprocs-1) )
             CALL MPI_ALLGATHER( ind, 5, MPI_INTEGER, ind_all, 5,              &
                                 MPI_INTEGER, comm2d, ierr )
             DO  j = 0, numprocs-1
                DO  k = 1, 5
                   mg_loc_ind(k,j) = ind_all(k+j*5)
                ENDDO
             ENDDO
             DEALLOCATE( ind_all )
!
!--          Calculate the grid size of the total domain
             nxr_l = ( nxr_l-nxl_l+1 ) * pdims(1) - 1
             nxl_l = 0
             nyn_l = ( nyn_l-nys_l+1 ) * pdims(2) - 1
             nys_l = 0
!
!--          The size of this gathered array must not be larger than the
!--          array tend, which is used in the multigrid scheme as a temporary
!--          array. Therefore the subdomain size of a PE is calculated and
!--          the size of the gathered grid. These values are used in
!--          routines pres and poismg
             subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) *                   &
                              ( nyn - nys + 2 * nbgp + 1 ) * ( nzt - nzb + 2 )
             gathered_size  = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) *  &
                              ( nzt_l - nzb + 2 )

#else
             message_string = 'multigrid gather/scatter impossible ' //        &
                              'in non parallel mode'
             CALL message( 'init_pegrid', 'PA0237', 1, 2, 0, 6, 0 )
#endif
          ENDIF

          nxl_mg(i) = nxl_l
          nxr_mg(i) = nxr_l
          nys_mg(i) = nys_l
          nyn_mg(i) = nyn_l
          nzt_mg(i) = nzt_l

          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO
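!
!--    Illustrative example: for maximum_grid_level = 4 and a subdomain with
!--    nxl = 0 and nxr = 31 (no switch to PE0), the loop above stores
!--    nxr_mg(4) = 31, nxr_mg(3) = 15, nxr_mg(2) = 7 and nxr_mg(1) = 3,
!--    i.e. 32, 16, 8 and 4 grid points along x on the respective levels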
---|

!
!--    Temporary problem: Currently, the calculation of maxerror in routine
!--    poismg crashes if grid data are collected on PE0 already on the finest
!--    grid level. To be solved later.
       IF ( maximum_grid_level == mg_switch_to_pe0_level )  THEN
          message_string = 'grid coarsening on subdomain level cannot be performed'
          CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )
       ENDIF

    ELSE

       maximum_grid_level = 0

    ENDIF
---|

!
!-- Default level 0 tells exchange_horiz that all ghost planes have to be
!-- exchanged. grid_level is adjusted in poismg, where only one ghost plane
!-- is required.
    grid_level = 0

#if defined( __parallel )
!
!-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)
    ngp_y  = nyn - nys + 1 + 2 * nbgp

!
!-- Define new MPI derived datatypes for the exchange of ghost points in
!-- x- and y-direction for 2D-arrays (line)
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x,     &
                          ierr )
    CALL MPI_TYPE_COMMIT( type_x, ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
    CALL MPI_TYPE_COMMIT( type_y, ierr )
!
!-- Define new MPI derived datatypes for the exchange of ghost points in
!-- x- and y-direction for 2D-INTEGER arrays (line) - on normal grid.
!-- Define types for 32-bit and 8-bit Integer. The 8-bit Integer types are
!-- only required on the normal grid, while 32-bit Integer types may also be
!-- required on coarser grid levels in case of the multigrid solver.
!
!-- 8-bit Integer
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_BYTE,             &
                          type_x_byte, ierr )
    CALL MPI_TYPE_COMMIT( type_x_byte, ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_BYTE,                        &
                          type_y_byte, ierr )
    CALL MPI_TYPE_COMMIT( type_y_byte, ierr )
!
!-- 32-bit Integer
    ALLOCATE( type_x_int(0:maximum_grid_level),                                &
              type_y_int(0:maximum_grid_level) )

    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER,          &
                          type_x_int(0), ierr )
    CALL MPI_TYPE_COMMIT( type_x_int(0), ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int(0), ierr )
    CALL MPI_TYPE_COMMIT( type_y_int(0), ierr )
---|
!
!-- Calculate gridpoint numbers for the exchange of ghost points along x
!-- (yz-plane for 3D-arrays) and define MPI derived data type(s) for the
!-- exchange of ghost points in y-direction (xz-plane).
!-- Do these calculations for the model grid and (if necessary) also
!-- for the coarser grid levels used in the multigrid method.
    ALLOCATE ( ngp_xz(0:maximum_grid_level),                                  &
               ngp_xz_int(0:maximum_grid_level),                              &
               ngp_yz(0:maximum_grid_level),                                  &
               ngp_yz_int(0:maximum_grid_level),                              &
               type_xz(0:maximum_grid_level),                                 &
               type_xz_int(0:maximum_grid_level),                             &
               type_yz(0:maximum_grid_level),                                 &
               type_yz_int(0:maximum_grid_level) )

    nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt

!
!-- Discern between the model grid, which needs nbgp ghost points, and the
!-- grid levels of the multigrid scheme, where only one ghost point is
!-- necessary.
!-- First definition of MPI-datatypes for exchange of ghost layers on the
!-- normal grid. The following loop is needed for data exchange in poismg.f90.
!
!-- Determine number of grid points of yz-layer for exchange
    ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)

!
!-- Define an MPI-datatype for the exchange of left/right boundaries.
!-- Although data are contiguous in physical memory (which does not
!-- necessarily require an MPI-derived datatype), the data exchange between
!-- left and right PEs using the MPI-derived type is 10% faster than without.
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0),      &
                          MPI_REAL, type_xz(0), ierr )
    CALL MPI_TYPE_COMMIT( type_xz(0), ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0),   &
                          ierr )
    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
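!
!-- How to read the two definitions above (3D arrays are dimensioned
!-- (z,y,x), i.e. z varies fastest in memory): type_xz(0) takes, at every
!-- x-index, nbgp complete z-columns at consecutive y-positions (blocklength
!-- nbgp*(nzt-nzb+2)) out of one full yz-slab of size ngp_yz(0), while
!-- type_yz(0) consists of nbgp complete, contiguous yz-slabs (blocklength
!-- equals stride).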

!
!-- Define data types for exchange of 3D Integer arrays.
    ngp_yz_int(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)

    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int(0),  &
                          MPI_INTEGER, type_xz_int(0), ierr )
    CALL MPI_TYPE_COMMIT( type_xz_int(0), ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int(0), ngp_yz_int(0), MPI_INTEGER,    &
                          type_yz_int(0), ierr )
    CALL MPI_TYPE_COMMIT( type_yz_int(0), ierr )

!
!-- Definition of MPI-datatypes for the multigrid method (coarser level grids)
    IF ( psolver(1:9) == 'multigrid' )  THEN
!
!--    Definition of MPI-datatypes as above, but only 1 ghost level is used
       DO  i = maximum_grid_level, 1 , -1
!
!--       For 3D-exchange on the different multigrid levels, one ghost point
!--       for REAL arrays, two ghost points for INTEGER arrays
          ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
          ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)

          ngp_xz_int(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
          ngp_yz_int(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)
!
!--       MPI data type for REAL arrays, for xz-layers
          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i),      &
                                MPI_REAL, type_xz(i), ierr )
          CALL MPI_TYPE_COMMIT( type_xz(i), ierr )

!
!--       MPI data type for INTEGER arrays, for xz-layers
          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz_int(i),  &
                                MPI_INTEGER, type_xz_int(i), ierr )
          CALL MPI_TYPE_COMMIT( type_xz_int(i), ierr )

!
!--       MPI data type for REAL arrays, for yz-layers
          CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL,            &
                                type_yz(i), ierr )
          CALL MPI_TYPE_COMMIT( type_yz(i), ierr )

!
!--       MPI data type for INTEGER arrays, for yz-layers
          CALL MPI_TYPE_VECTOR( 1, ngp_yz_int(i), ngp_yz_int(i), MPI_INTEGER, &
                                type_yz_int(i), ierr )
          CALL MPI_TYPE_COMMIT( type_yz_int(i), ierr )

!
!--       Define data types for the 2D-exchange of INTEGER arrays on the
!--       coarser grid levels, where 2 ghost points need to be exchanged.
!--       Only required for 32-bit Integer arrays.
          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+5, 2, nyn_l-nys_l+5, MPI_INTEGER, &
                                type_x_int(i), ierr )
          CALL MPI_TYPE_COMMIT( type_x_int(i), ierr )

          CALL MPI_TYPE_VECTOR( 2, nyn_l-nys_l+5, nyn_l-nys_l+5, MPI_INTEGER, &
                                type_y_int(i), ierr )
          CALL MPI_TYPE_COMMIT( type_y_int(i), ierr )
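
!
!--       Prepare the next coarser grid level: each coarsening step halves
!--       the subdomain index bounds below. Illustration only: a subdomain
!--       covering i = 32..63 on the current level becomes i = 16..31 on the
!--       next coarser level, i.e. half the number of grid points per
!--       direction.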
          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO

    ENDIF

#endif

#if defined( __parallel )
!
!-- Setting of flags for inflow/outflow/nesting conditions.
    IF ( pleft == MPI_PROC_NULL )  THEN
       IF ( bc_lr == 'dirichlet/radiation'  .OR.  bc_lr == 'nested'  .OR.     &
            bc_lr == 'nesting_offline' )  THEN
          bc_dirichlet_l = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet' )  THEN
          bc_radiation_l = .TRUE.
       ENDIF
    ENDIF

    IF ( pright == MPI_PROC_NULL )  THEN
       IF ( bc_lr == 'dirichlet/radiation' )  THEN
          bc_radiation_r = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet'  .OR.  bc_lr == 'nested'  .OR. &
                bc_lr == 'nesting_offline' )  THEN
          bc_dirichlet_r = .TRUE.
       ENDIF
    ENDIF

    IF ( psouth == MPI_PROC_NULL )  THEN
       IF ( bc_ns == 'dirichlet/radiation' )  THEN
          bc_radiation_s = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet'  .OR.  bc_ns == 'nested'  .OR. &
                bc_ns == 'nesting_offline' )  THEN
          bc_dirichlet_s = .TRUE.
       ENDIF
    ENDIF

    IF ( pnorth == MPI_PROC_NULL )  THEN
       IF ( bc_ns == 'dirichlet/radiation'  .OR.  bc_ns == 'nested'  .OR.     &
            bc_ns == 'nesting_offline' )  THEN
          bc_dirichlet_n = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet' )  THEN
          bc_radiation_n = .TRUE.
       ENDIF
    ENDIF
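
!
!-- Summary of the mapping above: a PE lies at an outer model boundary if
!-- the respective neighbour id equals MPI_PROC_NULL. For bc_lr the first
!-- part of the string applies at the left and the second at the right
!-- boundary, while for bc_ns the first part applies at the north and the
!-- second at the south boundary; 'nested' and 'nesting_offline' imply
!-- Dirichlet conditions at all lateral boundaries.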
!
!-- In case of the synthetic turbulence generator, determine the ids.
!-- Note that if neither forcing nor nesting is applied, the generator is
!-- used only at the left lateral boundary.
    IF ( use_syn_turb_gen )  THEN
       IF ( bc_dirichlet_l )  THEN
          id_stg_left_l = myidx
       ELSE
          id_stg_left_l = 0
       ENDIF
       IF ( bc_dirichlet_r )  THEN
          id_stg_right_l = myidx
       ELSE
          id_stg_right_l = 0
       ENDIF
       IF ( bc_dirichlet_s )  THEN
          id_stg_south_l = myidy
       ELSE
          id_stg_south_l = 0
       ENDIF
       IF ( bc_dirichlet_n )  THEN
          id_stg_north_l = myidy
       ELSE
          id_stg_north_l = 0
       ENDIF
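
!
!--    The MPI_ALLREDUCE calls with MPI_SUM below act as a broadcast of the
!--    boundary-PE id: along each 1d communicator only the PE located at the
!--    respective boundary contributes its (possibly zero) process
!--    coordinate, all other PEs contribute 0, so the sum equals the id of
!--    the boundary PE.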
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_stg_left_l, id_stg_left, 1, MPI_INTEGER,        &
                           MPI_SUM, comm1dx, ierr )

       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_stg_right_l, id_stg_right, 1, MPI_INTEGER,      &
                           MPI_SUM, comm1dx, ierr )

       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_stg_south_l, id_stg_south, 1, MPI_INTEGER,      &
                           MPI_SUM, comm1dy, ierr )

       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_stg_north_l, id_stg_north, 1, MPI_INTEGER,      &
                           MPI_SUM, comm1dy, ierr )

    ENDIF

!
!-- Broadcast the id of the inflow PE
    IF ( bc_dirichlet_l )  THEN
       id_inflow_l = myidx
    ELSE
       id_inflow_l = 0
    ENDIF
    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM,      &
                        comm1dx, ierr )

!
!-- Broadcast the id of the recycling plane
!-- WARNING: needs to be adjusted in case of inflows other than from left side!
    IF ( turbulent_inflow )  THEN

       IF ( NINT( recycling_width / dx ) >= nxl  .AND.                        &
            NINT( recycling_width / dx ) <= nxr )  THEN
          id_recycling_l = myidx
       ELSE
          id_recycling_l = 0
       ENDIF
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER,      &
                           MPI_SUM, comm1dx, ierr )

    ENDIF
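
!
!-- Illustration only: with dx = 50 m and recycling_width = 1000 m the
!-- recycling plane lies at grid index i = NINT(1000/50) = 20; the PE whose
!-- subdomain contains i = 20 (nxl <= 20 <= nxr) provides its myidx, which
!-- the summation above then distributes to all PEs of comm1dx.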

!
!-- Broadcast the id of the outflow PE and outflow-source plane
    IF ( turbulent_outflow )  THEN

       IF ( bc_radiation_r )  THEN
          id_outflow_l = myidx
       ELSE
          id_outflow_l = 0
       ENDIF
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_outflow_l, id_outflow, 1, MPI_INTEGER, MPI_SUM, &
                           comm1dx, ierr )

       IF ( NINT( outflow_source_plane / dx ) >= nxl  .AND.                   &
            NINT( outflow_source_plane / dx ) <= nxr )  THEN
          id_outflow_source_l = myidx
       ELSE
          id_outflow_source_l = 0
       ENDIF
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( id_outflow_source_l, id_outflow_source, 1,         &
                           MPI_INTEGER, MPI_SUM, comm1dx, ierr )

    ENDIF

    CALL location_message( 'creating virtual PE grids + MPI derived data types', 'finished' )

#else
    IF ( bc_lr == 'dirichlet/radiation' )  THEN
       bc_dirichlet_l = .TRUE.
       bc_radiation_r = .TRUE.
    ELSEIF ( bc_lr == 'radiation/dirichlet' )  THEN
       bc_radiation_l = .TRUE.
       bc_dirichlet_r = .TRUE.
    ENDIF

    IF ( bc_ns == 'dirichlet/radiation' )  THEN
       bc_dirichlet_n = .TRUE.
       bc_radiation_s = .TRUE.
    ELSEIF ( bc_ns == 'radiation/dirichlet' )  THEN
       bc_radiation_n = .TRUE.
       bc_dirichlet_s = .TRUE.
    ENDIF
#endif

!
!-- At the inflow or outflow, u or v, respectively, has to be calculated for
!-- one more grid point.
    IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
       nxlu = nxl + 1
    ELSE
       nxlu = nxl
    ENDIF
    IF ( bc_dirichlet_s  .OR.  bc_radiation_s )  THEN
       nysv = nys + 1
    ELSE
       nysv = nys
    ENDIF
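
!
!-- Background: on the staggered grid the velocity component normal to an
!-- inflow/outflow boundary is prescribed by the boundary condition at the
!-- boundary itself, so its prognostic computation starts one grid point
!-- further inside (nxlu for u at a left boundary, nysv for v at a south
!-- boundary).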

!
!-- Allocate wall flag arrays used in the multigrid solver
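!-- Note that each level carries exactly one ghost layer in y and x (index
!-- offsets -1/+1 in the allocations below), matching the single ghost point
!-- exchanged on the coarser multigrid levels.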
    IF ( psolver(1:9) == 'multigrid' )  THEN

       DO  i = maximum_grid_level, 1, -1

          SELECT CASE ( i )

             CASE ( 1 )
                ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 2 )
                ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 3 )
                ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 4 )
                ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 5 )
                ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 6 )
                ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 7 )
                ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 8 )
                ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 9 )
                ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1,                       &
                                       nys_mg(i)-1:nyn_mg(i)+1,               &
                                       nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 10 )
                ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1,                      &
                                        nys_mg(i)-1:nyn_mg(i)+1,              &
                                        nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE DEFAULT
                message_string = 'more than 10 multigrid levels'
                CALL message( 'init_pegrid', 'PA0238', 1, 2, 0, 6, 0 )

          END SELECT

       ENDDO

    ENDIF

 END SUBROUTINE init_pegrid