 SUBROUTINE init_pegrid

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: init_pegrid.f90 1004 2012-09-14 14:56:50Z letzel $
!
! 1003 2012-09-14 14:35:53Z raasch
! subdomains must have identical size (grid matching = "match" removed)
!
! 1001 2012-09-13 14:08:46Z raasch
! all actions concerning upstream-spline-method removed
!
! 978 2012-08-09 08:28:32Z fricke
! dirichlet/neumann and neumann/dirichlet added
! nxlu and nysv are also calculated for inflow boundary
!
! 809 2012-01-30 13:32:58Z maronga
! Bugfix: replaced .AND. and .NOT. with && and ! in the preprocessor directives
!
! 807 2012-01-25 11:53:51Z maronga
! New cpp directive "__check" implemented which is used by check_namelist_files
!
! 780 2011-11-10 07:16:47Z raasch
! Bugfix for rev 778: Misplaced error message moved to the right place
!
! 778 2011-11-07 14:18:25Z fricke
! Calculation of subdomain_size now considers the number of ghost points.
! Further coarsening on PE0 is now possible for multigrid solver if the
! collected field has more grid points than the subdomain of a PE.
!
! 759 2011-09-15 13:58:31Z raasch
! calculation of number of io_blocks and the io_group to which the respective
! PE belongs
!
! 755 2011-08-29 09:55:16Z witha
! 2d-decomposition is default for lcflow (ForWind cluster in Oldenburg)
!
! 722 2011-04-11 06:21:09Z raasch
! Bugfix: bc_lr/ns_cyc/dirrad/raddir replaced by bc_lr/ns, because variables
! are not yet set here; grid_level set to 0
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! bc_lr/ns replaced by bc_lr/ns_cyc/dirrad/raddir
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Moved determination of target_id's from init_coupling
! Determination of parameters needed for coupling (coupling_topology, ngp_a,
! ngp_o) with different grid/processor-topology in ocean and atmosphere
! Adaption of ngp_xy, ngp_y to a dynamic number of ghost points.
! The maximum_grid_level changed from 1 to 0. 0 is the normal grid, 1 to
! maximum_grid_level the grids for multigrid, in which 0 and 1 are normal grids.
! This distinction is due to reasons of data exchange and performance for the
! normal grid and grids in poismg.
! The definition of MPI-Vectors adapted to a dynamic number of ghost points.
! New MPI-Vectors for data exchange between left and right boundaries added.
! This is due to reasons of performance (10% faster).
!
! 646 2010-12-15 13:03:52Z raasch
! lctit is now using a 2d decomposition by default
!
! 622 2010-12-10 08:08:13Z raasch
! optional barriers included in order to speed up collective operations
!
! 438 2010-02-01 04:32:43Z raasch
! 2d-decomposition is default for Cray-XT machines
!
! 274 2009-03-26 15:11:21Z heinze
! Output of messages replaced by message handling routine.
!
! 206 2008-10-13 14:59:11Z raasch
! Implementation of a MPI-1 coupling: added __parallel within the __mpi2 part
! 2d-decomposition is default on SGI-ICE systems
!
! 197 2008-09-16 15:29:03Z raasch
! multigrid levels are limited by subdomains if mg_switch_to_pe0_level = -1,
! nz is used instead of nnz for calculating mg-levels
! Collect on PE0 horizontal index bounds from all other PEs,
! broadcast the id of the inflow PE (using the respective communicator)
!
! 114 2007-10-10 00:03:15Z raasch
! Allocation of wall flag arrays for multigrid solver
!
! 108 2007-08-24 15:10:38Z letzel
! Intercommunicator (comm_inter) and derived data type (type_xy) for
! coupled model runs created, assign coupling_mode_remote,
! indices nxlu and nysv are calculated (needed for non-cyclic boundary
! conditions)
!
! 82 2007-04-16 15:40:52Z raasch
! Cpp-directive lcmuk changed to intel_openmp_bug, setting of host on lcmuk by
! cpp-directive removed
!
! 75 2007-03-22 09:54:05Z raasch
! uxrp, vynp eliminated,
! dirichlet/neumann changed to dirichlet/radiation, etc.,
! poisfft_init is only called if fft-solver is switched on
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.28 2006/04/26 13:23:32 raasch
! lcmuk does not understand the !$ comment so a cpp-directive is required
!
! Revision 1.1 1997/07/24 11:15:09 raasch
! Initial revision
!
!
! Description:
! ------------
! Determination of the virtual processor topology (if not prescribed by the
! user) and computation of the grid point number and array bounds of the local
! domains.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE fft_xy
    USE grid_variables
    USE indices
    USE pegrid
    USE poisfft_mod
    USE poisfft_hybrid_mod
    USE statistics
    USE transpose_indices



    IMPLICIT NONE

    INTEGER :: i, id_inflow_l, id_recycling_l, ind(5), j, k, &
               maximum_grid_level_l, mg_switch_to_pe0_level_l, mg_levels_x, &
               mg_levels_y, mg_levels_z, nnx_y, nnx_z, nny_x, nny_z, nnz_x, &
               nnz_y, numproc_sqr, nx_total, nxl_l, nxr_l, nyn_l, nys_l, &
               nzb_l, nzt_l, omp_get_num_threads

    INTEGER, DIMENSION(:), ALLOCATABLE :: ind_all, nxlf, nxrf, nynf, nysf

    INTEGER, DIMENSION(2) :: pdims_remote

    LOGICAL :: found

!
!-- Get the number of OpenMP threads
!$OMP PARALLEL
#if defined( __intel_openmp_bug )
    threads_per_task = omp_get_num_threads()
#else
!$  threads_per_task = omp_get_num_threads()
#endif
!$OMP END PARALLEL


#if defined( __parallel )

!
!-- Determine the processor topology or check it, if prescribed by the user
    IF ( npex == -1 .AND. npey == -1 ) THEN

!
!--    Automatic determination of the topology
!--    The default on SMP- and cluster-hosts is a 1d-decomposition along x
       IF ( host(1:3) == 'ibm' .OR. host(1:3) == 'nec' .OR. &
            ( host(1:2) == 'lc' .AND. host(3:5) /= 'sgi' .AND. &
              host(3:4) /= 'xt' .AND. host(3:5) /= 'tit' .AND. &
              host(3:6) /= 'flow' ) .OR. host(1:3) == 'dec' ) THEN

          pdims(1) = numprocs
          pdims(2) = 1

       ELSE

          numproc_sqr = SQRT( REAL( numprocs ) )
          pdims(1) = MAX( numproc_sqr , 1 )
          DO WHILE ( MOD( numprocs , pdims(1) ) /= 0 )
             pdims(1) = pdims(1) - 1
          ENDDO
          pdims(2) = numprocs / pdims(1)

       ENDIF
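
!
!--    Illustrative example (added, not part of the original code): for
!--    numprocs = 24 the start value is numproc_sqr = 4 and MOD( 24, 4 ) = 0
!--    already holds, so the loop above yields pdims = (/ 4, 6 /); for
!--    numprocs = 14 the start value 3 is reduced to 2, giving
!--    pdims = (/ 2, 7 /).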

    ELSEIF ( npex /= -1 .AND. npey /= -1 ) THEN

!
!--    Prescribed by user. Number of processors on the prescribed topology
!--    must be equal to the number of PEs available to the job
       IF ( ( npex * npey ) /= numprocs ) THEN
          WRITE( message_string, * ) 'number of PEs of the prescribed ', &
                 'topology (', npex*npey,') does not match & the number of ', &
                 'PEs available to the job (', numprocs, ')'
          CALL message( 'init_pegrid', 'PA0221', 1, 2, 0, 6, 0 )
       ENDIF
       pdims(1) = npex
       pdims(2) = npey

    ELSE
!
!--    If the processor topology is prescribed by the user, the number of
!--    PEs must be given in both directions
       message_string = 'if the processor topology is prescribed by the ' // &
                        'user& both values of "npex" and "npey" must be given ' // &
                        'in the &NAMELIST-parameter file'
       CALL message( 'init_pegrid', 'PA0222', 1, 2, 0, 6, 0 )

    ENDIF

!
!-- The hybrid solver can only be used in case of a 1d-decomposition along x
    IF ( pdims(2) /= 1 .AND. psolver == 'poisfft_hybrid' ) THEN
       message_string = 'psolver = "poisfft_hybrid" can only be' // &
                        '& used in case of a 1d-decomposition along x'
       CALL message( 'init_pegrid', 'PA0223', 1, 2, 0, 6, 0 )
    ENDIF

!
!-- For communication speedup, set barriers in front of collective
!-- communications by default on SGI-type systems
    IF ( host(3:5) == 'sgi' ) collective_wait = .TRUE.

!
!-- If necessary, set horizontal boundary conditions to non-cyclic
    IF ( bc_lr /= 'cyclic' ) cyclic(1) = .FALSE.
    IF ( bc_ns /= 'cyclic' ) cyclic(2) = .FALSE.


#if ! defined( __check )
!
!-- Create the virtual processor grid
    CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder, &
                          comm2d, ierr )
    CALL MPI_COMM_RANK( comm2d, myid, ierr )
    WRITE (myid_char,'(''_'',I4.4)') myid

    CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
    CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft, pright, ierr )
    CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )

!
!-- Determine sub-topologies for transpositions
!-- Transposition from z to x:
    remain_dims(1) = .TRUE.
    remain_dims(2) = .FALSE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dx, ierr )
    CALL MPI_COMM_RANK( comm1dx, myidx, ierr )
!
!-- Transposition from x to y
    remain_dims(1) = .FALSE.
    remain_dims(2) = .TRUE.
    CALL MPI_CART_SUB( comm2d, remain_dims, comm1dy, ierr )
    CALL MPI_COMM_RANK( comm1dy, myidy, ierr )

#endif

!
!-- Calculate array bounds along x-direction for every PE.
    ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1), &
              nysf(0:pdims(2)-1) )

    IF ( MOD( nx+1 , pdims(1) ) /= 0 ) THEN
       WRITE( message_string, * ) 'x-direction: gridpoint number (',nx+1,') ',&
                                  'is not an& integral divisor of the number of ', &
                                  'processors (', pdims(1),')'
       CALL message( 'init_pegrid', 'PA0225', 1, 2, 0, 6, 0 )
    ELSE
       nnx = ( nx + 1 ) / pdims(1)
       IF ( nnx*pdims(1) - ( nx + 1) > nnx ) THEN
          WRITE( message_string, * ) 'x-direction: nx does not match the', &
                                     'requirements given by the number of PEs &used', &
                                     '& please use nx = ', nx - ( pdims(1) - ( nnx*pdims(1) &
                                     - ( nx + 1 ) ) ), ' instead of nx =', nx
          CALL message( 'init_pegrid', 'PA0226', 1, 2, 0, 6, 0 )
       ENDIF
    ENDIF

!
!-- Left and right array bounds, number of gridpoints
    DO i = 0, pdims(1)-1
       nxlf(i) = i * nnx
       nxrf(i) = ( i + 1 ) * nnx - 1
    ENDDO
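
!
!-- Illustrative example (added, not part of the original code): with nx = 63
!-- and pdims(1) = 4 the above gives nnx = 16 and the subdomain bounds
!-- nxlf = (/ 0, 16, 32, 48 /) and nxrf = (/ 15, 31, 47, 63 /).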

!
!-- Calculate array bounds in y-direction for every PE.
    IF ( MOD( ny+1 , pdims(2) ) /= 0 ) THEN
       WRITE( message_string, * ) 'y-direction: gridpoint number (',ny+1,') ', &
                                  'is not an& integral divisor of the number of', &
                                  'processors (', pdims(2),')'
       CALL message( 'init_pegrid', 'PA0227', 1, 2, 0, 6, 0 )
    ELSE
       nny = ( ny + 1 ) / pdims(2)
       IF ( nny*pdims(2) - ( ny + 1) > nny ) THEN
          WRITE( message_string, * ) 'y-direction: ny does not match the', &
                                     'requirements given by the number of PEs &used ', &
                                     '& please use ny = ', ny - ( pdims(2) - ( nny*pdims(2) &
                                     - ( ny + 1 ) ) ), ' instead of ny =', ny
          CALL message( 'init_pegrid', 'PA0228', 1, 2, 0, 6, 0 )
       ENDIF
    ENDIF

!
!-- South and north array bounds
    DO j = 0, pdims(2)-1
       nysf(j) = j * nny
       nynf(j) = ( j + 1 ) * nny - 1
    ENDDO

!
!-- Local array bounds of the respective PEs
    nxl = nxlf(pcoord(1))
    nxr = nxrf(pcoord(1))
    nys = nysf(pcoord(2))
    nyn = nynf(pcoord(2))
    nzb = 0
    nzt = nz
    nnz = nz

!
!-- Set switches to define if the PE is situated at the border of the virtual
!-- processor grid
    IF ( nxl == 0 ) left_border_pe = .TRUE.
    IF ( nxr == nx ) right_border_pe = .TRUE.
    IF ( nys == 0 ) south_border_pe = .TRUE.
    IF ( nyn == ny ) north_border_pe = .TRUE.

!
!-- Calculate array bounds and gridpoint numbers for the transposed arrays
!-- (needed in the pressure solver)
!-- For the transposed arrays, cyclic boundaries as well as top and bottom
!-- boundaries are omitted, because they are obstructive to the transposition

!
!-- 1. transposition z --> x
!-- This transposition is not necessary in case of a 1d-decomposition along x
    IF ( pdims(2) /= 1 ) THEN

       nys_x = nys
       nyn_x = nyn
       nny_x = nny
       IF ( MOD( nz , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition z --> x:', &
                                     '&nz=',nz,' is not an integral divisor of pdims(1)=', &
                                     pdims(1)
          CALL message( 'init_pegrid', 'PA0230', 1, 2, 0, 6, 0 )
       ENDIF
       nnz_x = nz / pdims(1)
       nzb_x = 1 + myidx * nnz_x
       nzt_x = ( myidx + 1 ) * nnz_x
       sendrecvcount_zx = nnx * nny * nnz_x
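
!
!--    Illustrative example (added, not part of the original code): for nz = 64
!--    and pdims(1) = 8 each PE obtains nnz_x = 8 vertical layers; the PE with
!--    myidx = 2 then holds the layers nzb_x = 17 to nzt_x = 24 after this
!--    transposition.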

    ELSE
!
!--    Setting of dummy values because otherwise variables are undefined in
!--    the next step x --> y
!--    WARNING: This case has still to be clarified!!!!!!!!!!!!
       nnz_x = 1
       nzb_x = 1
       nzt_x = 1
       nny_x = nny

    ENDIF

!
!-- 2. transposition x --> y
    nnz_y = nnz_x
    nzb_y = nzb_x
    nzt_y = nzt_x
    IF ( MOD( nx+1 , pdims(2) ) /= 0 ) THEN
       WRITE( message_string, * ) 'transposition x --> y:', &
                                  '&nx+1=',nx+1,' is not an integral divisor of ',&
                                  'pdims(2)=',pdims(2)
       CALL message( 'init_pegrid', 'PA0231', 1, 2, 0, 6, 0 )
    ENDIF
    nnx_y = (nx+1) / pdims(2)
    nxl_y = myidy * nnx_y
    nxr_y = ( myidy + 1 ) * nnx_y - 1
    sendrecvcount_xy = nnx_y * nny_x * nnz_y

!
!-- 3. transposition y --> z (ELSE: x --> y in case of 1D-decomposition
!-- along x)
    IF ( pdims(2) /= 1 ) THEN
!
!--    y --> z
!--    This transposition is not necessary in case of a 1d-decomposition
!--    along x, except when the upstream-spline method is switched on
       nnx_z = nnx_y
       nxl_z = nxl_y
       nxr_z = nxr_y
       IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition y --> z:', &
                                     '& ny+1=',ny+1,' is not an integral divisor of ',&
                                     'pdims(1)=',pdims(1)
          CALL message( 'init_pegrid', 'PA0232', 1, 2, 0, 6, 0 )
       ENDIF
       nny_z = (ny+1) / pdims(1)
       nys_z = myidx * nny_z
       nyn_z = ( myidx + 1 ) * nny_z - 1
       sendrecvcount_yz = nnx_y * nny_z * nnz_y

    ELSE
!
!--    x --> y. This condition must be fulfilled for a 1D-decomposition along x
       IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN
          WRITE( message_string, * ) 'transposition x --> y:', &
                                     '& ny+1=',ny+1,' is not an integral divisor of ',&
                                     'pdims(1)=',pdims(1)
          CALL message( 'init_pegrid', 'PA0233', 1, 2, 0, 6, 0 )
       ENDIF

    ENDIF

!
!-- Indices for direct transpositions z --> y (used for calculating spectra)
    IF ( dt_dosp /= 9999999.9 ) THEN
       IF ( MOD( nz, pdims(2) ) /= 0 ) THEN
          WRITE( message_string, * ) 'direct transposition z --> y (needed ', &
                                     'for spectra):& nz=',nz,' is not an integral divisor of ',&
                                     'pdims(2)=',pdims(2)
          CALL message( 'init_pegrid', 'PA0234', 1, 2, 0, 6, 0 )
       ELSE
          nxl_yd = nxl
          nxr_yd = nxr
          nzb_yd = 1 + myidy * ( nz / pdims(2) )
          nzt_yd = ( myidy + 1 ) * ( nz / pdims(2) )
          sendrecvcount_zyd = nnx * nny * ( nz / pdims(2) )
       ENDIF
    ENDIF

!
!-- Indices for direct transpositions y --> x (they are only possible in case
!-- of a 1d-decomposition along x)
    IF ( pdims(2) == 1 ) THEN
       nny_x = nny / pdims(1)
       nys_x = myid * nny_x
       nyn_x = ( myid + 1 ) * nny_x - 1
       nzb_x = 1
       nzt_x = nz
       sendrecvcount_xy = nnx * nny_x * nz
    ENDIF

!
!-- Indices for direct transpositions x --> y (they are only possible in case
!-- of a 1d-decomposition along y)
    IF ( pdims(1) == 1 ) THEN
       nnx_y = nnx / pdims(2)
       nxl_y = myid * nnx_y
       nxr_y = ( myid + 1 ) * nnx_y - 1
       nzb_y = 1
       nzt_y = nz
       sendrecvcount_xy = nnx_y * nny * nz
    ENDIF

!
!-- The arrays for storing the array bounds are not needed anymore
    DEALLOCATE( nxlf , nxrf , nynf , nysf )


#if ! defined( __check )
!
!-- Collect index bounds from other PEs (to be written to restart file later)
    ALLOCATE( hor_index_bounds(4,0:numprocs-1) )

    IF ( myid == 0 ) THEN

       hor_index_bounds(1,0) = nxl
       hor_index_bounds(2,0) = nxr
       hor_index_bounds(3,0) = nys
       hor_index_bounds(4,0) = nyn

!
!--    Receive data from all other PEs
       DO i = 1, numprocs-1
          CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
                         ierr )
          hor_index_bounds(:,i) = ibuf(1:4)
       ENDDO

    ELSE
!
!--    Send index bounds to PE0
       ibuf(1) = nxl
       ibuf(2) = nxr
       ibuf(3) = nys
       ibuf(4) = nyn
       CALL MPI_SEND( ibuf, 4, MPI_INTEGER, 0, myid, comm2d, ierr )

    ENDIF

#endif

#if defined( __print )
!
!-- Control output
    IF ( myid == 0 ) THEN
       PRINT*, '*** processor topology ***'
       PRINT*, ' '
       PRINT*, 'myid pcoord left right south north idx idy nxl: nxr',&
               &' nys: nyn'
       PRINT*, '------------------------------------------------------------',&
               &'-----------'
       WRITE (*,1000) 0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, &
                      myidx, myidy, nxl, nxr, nys, nyn
1000   FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3, &
               2(2X,I4,':',I4))

!
!--    Receive data from the other PEs
       DO i = 1,numprocs-1
          CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
                         ierr )
          WRITE (*,1000) i, ( ibuf(j) , j = 1,12 )
       ENDDO
    ELSE

!
!--    Send data to PE0
       ibuf(1) = pcoord(1); ibuf(2) = pcoord(2); ibuf(3) = pleft
       ibuf(4) = pright; ibuf(5) = psouth; ibuf(6) = pnorth; ibuf(7) = myidx
       ibuf(8) = myidy; ibuf(9) = nxl; ibuf(10) = nxr; ibuf(11) = nys
       ibuf(12) = nyn
       CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr )
    ENDIF
#endif

#if defined( __parallel ) && ! defined( __check )
#if defined( __mpi2 )
!
!-- In case of coupled runs, get the port name on PE0 of the atmosphere model
!-- and pass it to PE0 of the ocean model
    IF ( myid == 0 ) THEN

       IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

          CALL MPI_OPEN_PORT( MPI_INFO_NULL, port_name, ierr )

          CALL MPI_PUBLISH_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, &
                                 ierr )

!
!--       Write a flag file for the ocean model and the other atmosphere
!--       processes.
!--       There seems to be a bug in MPICH2 which causes hanging processes
!--       in case that execution of LOOKUP_NAME is continued too early
!--       (i.e. before the port has been created)
          OPEN( 90, FILE='COUPLING_PORT_OPENED', FORM='FORMATTED' )
          WRITE ( 90, '(''TRUE'')' )
          CLOSE ( 90 )

       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

!
!--       Continue only if the atmosphere model has created the port.
!--       There seems to be a bug in MPICH2 which causes hanging processes
!--       in case that execution of LOOKUP_NAME is continued too early
!--       (i.e. before the port has been created)
          INQUIRE( FILE='COUPLING_PORT_OPENED', EXIST=found )
          DO WHILE ( .NOT. found )
             INQUIRE( FILE='COUPLING_PORT_OPENED', EXIST=found )
          ENDDO

          CALL MPI_LOOKUP_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, ierr )

       ENDIF

    ENDIF

!
!-- In case of coupled runs, establish the connection between the atmosphere
!-- and the ocean model and define the intercommunicator (comm_inter)
    CALL MPI_BARRIER( comm2d, ierr )
    IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

       CALL MPI_COMM_ACCEPT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &
                             comm_inter, ierr )
       coupling_mode_remote = 'ocean_to_atmosphere'

    ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

       CALL MPI_COMM_CONNECT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &
                              comm_inter, ierr )
       coupling_mode_remote = 'atmosphere_to_ocean'

    ENDIF
#endif

!
!-- Determine the number of ghost point layers
    IF ( scalar_advec == 'ws-scheme' .OR. momentum_advec == 'ws-scheme' ) THEN
       nbgp = 3
    ELSE
       nbgp = 1
    ENDIF

!
!-- Create a new MPI derived datatype for the exchange of surface (xy) data,
!-- which is needed for coupled atmosphere-ocean runs.
!-- First, calculate number of grid points of an xy-plane.
    ngp_xy = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp )
    CALL MPI_TYPE_VECTOR( ngp_xy, 1, nzt-nzb+2, MPI_REAL, type_xy, ierr )
    CALL MPI_TYPE_COMMIT( type_xy, ierr )
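
!
!-- Note (added, not part of the original code): following the semantics of
!-- MPI_TYPE_VECTOR, type_xy describes ngp_xy single REAL values which are
!-- spaced nzt-nzb+2 elements apart, i.e. one value per vertical grid column.
!-- Assuming the usual PALM storage order of 3d-arrays with z as the fastest
!-- varying index, this corresponds to one horizontal (xy) cross-section.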

    IF ( TRIM( coupling_mode ) /= 'uncoupled' ) THEN

!
!--    Pass the number of grid points of the atmosphere model to
!--    the ocean model and vice versa
       IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN

          nx_a = nx
          ny_a = ny

          IF ( myid == 0 ) THEN

             CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, &
                            ierr )
             CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter, &
                            ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, &
                            ierr )
             CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, &
                            status, ierr )
             CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, &
                            status, ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, &
                            comm_inter, status, ierr )
          ENDIF

          CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr )
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr )

       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN

          nx_o = nx
          ny_o = ny

          IF ( myid == 0 ) THEN

             CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, &
                            ierr )
             CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, &
                            ierr )
             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, &
                            status, ierr )
             CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr )
             CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr )
             CALL MPI_SEND( pdims, 2, MPI_INTEGER, 0, 6, comm_inter, ierr )
          ENDIF

          CALL MPI_BCAST( nx_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr)
          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr)

       ENDIF

       ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp )
       ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp )

!
!--    Determine if the horizontal grid and the number of PEs in ocean and
!--    atmosphere are the same or not
       IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. &
            pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) &
       THEN
          coupling_topology = 0
       ELSE
          coupling_topology = 1
       ENDIF

!
!--    Determine the target PEs for the exchange between ocean and
!--    atmosphere (comm2d)
       IF ( coupling_topology == 0 ) THEN
!
!--       In case of identical topologies, every atmosphere PE has exactly one
!--       ocean PE counterpart and vice versa
          IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
             target_id = myid + numprocs
          ELSE
             target_id = myid
          ENDIF

       ELSE
!
!--       In case of non-equivalent topologies in ocean and atmosphere, a
!--       target_id is only needed for PE0 in the ocean and PE0 in the
!--       atmosphere, since data exchange between ocean and atmosphere will
!--       only be done between these PEs.
          IF ( myid == 0 ) THEN

             IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
                target_id = numprocs
             ELSE
                target_id = 0
             ENDIF

          ENDIF

       ENDIF

    ENDIF


#endif

#else

!
!-- Array bounds when running on a single PE (respectively a non-parallel
!-- machine)
    nxl = 0
    nxr = nx
    nnx = nxr - nxl + 1
    nys = 0
    nyn = ny
    nny = nyn - nys + 1
    nzb = 0
    nzt = nz
    nnz = nz

    ALLOCATE( hor_index_bounds(4,0:0) )
    hor_index_bounds(1,0) = nxl
    hor_index_bounds(2,0) = nxr
    hor_index_bounds(3,0) = nys
    hor_index_bounds(4,0) = nyn

!
!-- Array bounds for the pressure solver (in the parallel code, these bounds
!-- are the ones for the transposed arrays)
    nys_x = nys
    nyn_x = nyn
    nzb_x = nzb + 1
    nzt_x = nzt

    nxl_y = nxl
    nxr_y = nxr
    nzb_y = nzb + 1
    nzt_y = nzt

    nxl_z = nxl
    nxr_z = nxr
    nys_z = nys
    nyn_z = nyn

#endif

!
!-- Calculate number of grid levels necessary for the multigrid Poisson solver
!-- as well as the gridpoint indices on each level
    IF ( psolver == 'multigrid' ) THEN

!
!--    First calculate number of possible grid levels for the subdomains
       mg_levels_x = 1
       mg_levels_y = 1
       mg_levels_z = 1

       i = nnx
       DO WHILE ( MOD( i, 2 ) == 0 .AND. i /= 2 )
          i = i / 2
          mg_levels_x = mg_levels_x + 1
       ENDDO

       j = nny
       DO WHILE ( MOD( j, 2 ) == 0 .AND. j /= 2 )
          j = j / 2
          mg_levels_y = mg_levels_y + 1
       ENDDO

       k = nz    ! do not use nnz because it might be > nz due to transposition
                 ! requirements
       DO WHILE ( MOD( k, 2 ) == 0 .AND. k /= 2 )
          k = k / 2
          mg_levels_z = mg_levels_z + 1
       ENDDO

       maximum_grid_level = MIN( mg_levels_x, mg_levels_y, mg_levels_z )
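
!
!--    Illustrative example (added, not part of the original code): for
!--    nnx = nny = 32 and nz = 64 the halving loops above give
!--    mg_levels_x = mg_levels_y = 5 and mg_levels_z = 6, so that
!--    maximum_grid_level = 5.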

!
!--    Find out, if the total domain allows more levels. These additional
!--    levels are identically processed on all PEs.
       IF ( numprocs > 1 .AND. mg_switch_to_pe0_level /= -1 ) THEN

          IF ( mg_levels_z > MIN( mg_levels_x, mg_levels_y ) ) THEN

             mg_switch_to_pe0_level_l = maximum_grid_level

             mg_levels_x = 1
             mg_levels_y = 1

             i = nx+1
             DO WHILE ( MOD( i, 2 ) == 0 .AND. i /= 2 )
                i = i / 2
                mg_levels_x = mg_levels_x + 1
             ENDDO

             j = ny+1
             DO WHILE ( MOD( j, 2 ) == 0 .AND. j /= 2 )
                j = j / 2
                mg_levels_y = mg_levels_y + 1
             ENDDO

             maximum_grid_level_l = MIN( mg_levels_x, mg_levels_y, mg_levels_z )

             IF ( maximum_grid_level_l > mg_switch_to_pe0_level_l ) THEN
                mg_switch_to_pe0_level_l = maximum_grid_level_l - &
                                           mg_switch_to_pe0_level_l + 1
             ELSE
                mg_switch_to_pe0_level_l = 0
             ENDIF

          ELSE
             mg_switch_to_pe0_level_l = 0
             maximum_grid_level_l = maximum_grid_level

          ENDIF

!
!--       Use switch level calculated above only if it is not pre-defined
!--       by user
          IF ( mg_switch_to_pe0_level == 0 ) THEN
             IF ( mg_switch_to_pe0_level_l /= 0 ) THEN
                mg_switch_to_pe0_level = mg_switch_to_pe0_level_l
                maximum_grid_level = maximum_grid_level_l
             ENDIF

          ELSE
!
!--          Check pre-defined value and reset to default, if necessary
             IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l .OR. &
                  mg_switch_to_pe0_level >= maximum_grid_level_l ) THEN
                message_string = 'mg_switch_to_pe0_level ' // &
                                 'out of range and reset to default (=0)'
                CALL message( 'init_pegrid', 'PA0235', 0, 1, 0, 6, 0 )
                mg_switch_to_pe0_level = 0
             ELSE
!
!--             Use the largest number of possible levels anyway and recalculate
!--             the switch level to this largest number of possible values
                maximum_grid_level = maximum_grid_level_l

             ENDIF

          ENDIF

       ENDIF

       ALLOCATE( grid_level_count(maximum_grid_level), &
                 nxl_mg(maximum_grid_level), nxr_mg(maximum_grid_level), &
                 nyn_mg(maximum_grid_level), nys_mg(maximum_grid_level), &
                 nzt_mg(maximum_grid_level) )

       grid_level_count = 0

       nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzt_l = nzt

       DO i = maximum_grid_level, 1 , -1

          IF ( i == mg_switch_to_pe0_level ) THEN
#if defined( __parallel ) && ! defined( __check )
!
!--          Save the grid size of the subdomain at the switch level, because
!--          it is needed in poismg.
             ind(1) = nxl_l; ind(2) = nxr_l
             ind(3) = nys_l; ind(4) = nyn_l
             ind(5) = nzt_l
             ALLOCATE( ind_all(5*numprocs), mg_loc_ind(5,0:numprocs-1) )
             CALL MPI_ALLGATHER( ind, 5, MPI_INTEGER, ind_all, 5, &
                                 MPI_INTEGER, comm2d, ierr )
             DO j = 0, numprocs-1
                DO k = 1, 5
                   mg_loc_ind(k,j) = ind_all(k+j*5)
                ENDDO
             ENDDO
             DEALLOCATE( ind_all )
!
!--          Calculate the grid size of the total domain
             nxr_l = ( nxr_l-nxl_l+1 ) * pdims(1) - 1
             nxl_l = 0
             nyn_l = ( nyn_l-nys_l+1 ) * pdims(2) - 1
             nys_l = 0
!
!--          The size of this gathered array must not be larger than the
!--          array tend, which is used in the multigrid scheme as a temporary
!--          array. Therefore the subdomain size of a PE and the size of the
!--          gathered grid are calculated. These values are used in the
!--          routines pres and poismg
             subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) * &
                              ( nyn - nys + 2 * nbgp + 1 ) * ( nzt - nzb + 2 )
             gathered_size = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) * &
                             ( nzt_l - nzb + 2 )

#elif ! defined ( __parallel )
             message_string = 'multigrid gather/scatter impossible ' // &
                              'in non parallel mode'
             CALL message( 'init_pegrid', 'PA0237', 1, 2, 0, 6, 0 )
#endif
          ENDIF

          nxl_mg(i) = nxl_l
          nxr_mg(i) = nxr_l
          nys_mg(i) = nys_l
          nyn_mg(i) = nyn_l
          nzt_mg(i) = nzt_l

          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO

!
!--    Temporary problem: Currently the calculation of maxerror in routine
!--    poismg crashes if grid data are collected on PE0 already on the finest
!--    grid level. To be solved later.
       IF ( maximum_grid_level == mg_switch_to_pe0_level ) THEN
          message_string = 'grid coarsening on subdomain level cannot be performed'
          CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )
       ENDIF

    ELSE

       maximum_grid_level = 0

    ENDIF

!
!-- Default level 0 tells exchange_horiz that all ghost planes have to be
!-- exchanged. grid_level is adjusted in poismg, where only one ghost plane
!-- is required.
    grid_level = 0

#if defined( __parallel ) && ! defined ( __check )
!
!-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)
    ngp_y = nyn - nys + 1 + 2 * nbgp

!
!-- Define new MPI derived datatypes for the exchange of ghost points in
!-- x- and y-direction for 2D-arrays (line)
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, &
                          ierr )
    CALL MPI_TYPE_COMMIT( type_x, ierr )
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, &
                          type_x_int, ierr )
    CALL MPI_TYPE_COMMIT( type_x_int, ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
    CALL MPI_TYPE_COMMIT( type_y, ierr )
    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int, ierr )
    CALL MPI_TYPE_COMMIT( type_y_int, ierr )
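
!
!-- Note (added, not part of the original code): by the semantics of
!-- MPI_TYPE_VECTOR, type_x selects nbgp consecutive elements out of each of
!-- nxr-nxl+1+2*nbgp segments of length ngp_y, while type_y describes
!-- nbgp * ngp_y contiguous elements. Assuming the usual PALM layout of
!-- 2d-arrays, ar(nys-nbgp:nyn+nbgp,nxl-nbgp:nxr+nbgp) with y as the fastest
!-- varying index, type_y thus covers all y grid points of nbgp consecutive
!-- x indices, whereas type_x covers nbgp consecutive y grid points of every
!-- x index.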


!
!-- Calculate gridpoint numbers for the exchange of ghost points along x
!-- (yz-plane for 3D-arrays) and define MPI derived data type(s) for the
!-- exchange of ghost points in y-direction (xz-plane).
!-- Do these calculations for the model grid and (if necessary) also
!-- for the coarser grid levels used in the multigrid method
    ALLOCATE ( ngp_yz(0:maximum_grid_level), type_xz(0:maximum_grid_level),&
               type_yz(0:maximum_grid_level) )

    nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt

!
!-- Discern between the model grid, which needs nbgp ghost points and
!-- grid levels for the multigrid scheme. In the latter case only one
!-- ghost point is necessary.
!-- First definition of MPI-datatypes for exchange of ghost layers on normal
!-- grid. The following loop is needed for data exchange in poismg.f90.
!
!-- Determine number of grid points of yz-layer for exchange
    ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)

!
!-- Define an MPI-datatype for the exchange of left/right boundaries.
!-- Although data are contiguous in physical memory (which does not
!-- necessarily require an MPI-derived datatype), the data exchange between
!-- left and right PE's using the MPI-derived type is 10% faster than without.
    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), &
                          MPI_REAL, type_xz(0), ierr )
    CALL MPI_TYPE_COMMIT( type_xz(0), ierr )

    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), &
                          ierr )
    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )

!
!-- Definition of MPI-datatypes for multigrid method (coarser level grids)
    IF ( psolver == 'multigrid' ) THEN
!
!--    Definition of MPI-datatype as above, but only 1 ghost level is used
       DO i = maximum_grid_level, 1 , -1

          ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)

          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), &
                                MPI_REAL, type_xz(i), ierr )
          CALL MPI_TYPE_COMMIT( type_xz(i), ierr )

          CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), &
                                ierr )
          CALL MPI_TYPE_COMMIT( type_yz(i), ierr )

          nxl_l = nxl_l / 2
          nxr_l = nxr_l / 2
          nys_l = nys_l / 2
          nyn_l = nyn_l / 2
          nzt_l = nzt_l / 2

       ENDDO

    ENDIF
#endif

#if defined( __parallel ) && ! defined ( __check )
!
!-- Setting of flags for inflow/outflow conditions in case of non-cyclic
!-- horizontal boundary conditions.
    IF ( pleft == MPI_PROC_NULL ) THEN
       IF ( bc_lr == 'dirichlet/radiation' .OR. bc_lr == 'dirichlet/neumann' ) THEN
          inflow_l = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet' .OR. bc_lr == 'neumann/dirichlet' ) THEN
          outflow_l = .TRUE.
       ENDIF
    ENDIF

    IF ( pright == MPI_PROC_NULL ) THEN
       IF ( bc_lr == 'dirichlet/radiation' .OR. bc_lr == 'dirichlet/neumann' ) THEN
          outflow_r = .TRUE.
       ELSEIF ( bc_lr == 'radiation/dirichlet' .OR. bc_lr == 'neumann/dirichlet' ) THEN
          inflow_r = .TRUE.
       ENDIF
    ENDIF

    IF ( psouth == MPI_PROC_NULL ) THEN
       IF ( bc_ns == 'dirichlet/radiation' .OR. bc_ns == 'dirichlet/neumann' ) THEN
          outflow_s = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet' .OR. bc_ns == 'neumann/dirichlet' ) THEN
          inflow_s = .TRUE.
       ENDIF
    ENDIF

    IF ( pnorth == MPI_PROC_NULL ) THEN
       IF ( bc_ns == 'dirichlet/radiation' .OR. bc_ns == 'dirichlet/neumann' ) THEN
          inflow_n = .TRUE.
       ELSEIF ( bc_ns == 'radiation/dirichlet' .OR. bc_ns == 'neumann/dirichlet' ) THEN
          outflow_n = .TRUE.
       ENDIF
    ENDIF

!
!-- Broadcast the id of the inflow PE
    IF ( inflow_l ) THEN
       id_inflow_l = myidx
    ELSE
       id_inflow_l = 0
    ENDIF
    IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, &
                        comm1dx, ierr )

!
!-- Broadcast the id of the recycling plane
!-- WARNING: needs to be adjusted in case of inflows other than from left side!
    IF ( ( recycling_width / dx ) >= nxl .AND. &
         ( recycling_width / dx ) <= nxr ) THEN
       id_recycling_l = myidx
    ELSE
       id_recycling_l = 0
    ENDIF
    IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, &
                        comm1dx, ierr )

#elif ! defined ( __parallel )
    IF ( bc_lr == 'dirichlet/radiation' .OR. bc_lr == 'dirichlet/neumann' ) THEN
       inflow_l = .TRUE.
       outflow_r = .TRUE.
    ELSEIF ( bc_lr == 'radiation/dirichlet' .OR. bc_lr == 'neumann/dirichlet' ) THEN
       outflow_l = .TRUE.
       inflow_r = .TRUE.
    ENDIF

    IF ( bc_ns == 'dirichlet/radiation' .OR. bc_ns == 'dirichlet/neumann' ) THEN
       inflow_n = .TRUE.
       outflow_s = .TRUE.
    ELSEIF ( bc_ns == 'radiation/dirichlet' .OR. bc_ns == 'neumann/dirichlet' ) THEN
       outflow_n = .TRUE.
       inflow_s = .TRUE.
    ENDIF
#endif

!
!-- At the inflow or outflow, u or v, respectively, have to be calculated for
!-- one more grid point.
    IF ( inflow_l .OR. outflow_l ) THEN
       nxlu = nxl + 1
    ELSE
       nxlu = nxl
    ENDIF
    IF ( inflow_s .OR. outflow_s ) THEN
       nysv = nys + 1
    ELSE
       nysv = nys
    ENDIF

    IF ( psolver == 'poisfft_hybrid' ) THEN
       CALL poisfft_hybrid_ini
    ELSEIF ( psolver == 'poisfft' ) THEN
       CALL poisfft_init
    ENDIF

!
!-- Allocate wall flag arrays used in the multigrid solver
    IF ( psolver == 'multigrid' ) THEN

       DO i = maximum_grid_level, 1, -1

          SELECT CASE ( i )

             CASE ( 1 )
                ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 2 )
                ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 3 )
                ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 4 )
                ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 5 )
                ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 6 )
                ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 7 )
                ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 8 )
                ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 9 )
                ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE ( 10 )
                ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1, &
                          nys_mg(i)-1:nyn_mg(i)+1, &
                          nxl_mg(i)-1:nxr_mg(i)+1) )

             CASE DEFAULT
                message_string = 'more than 10 multigrid levels'
                CALL message( 'init_pegrid', 'PA0238', 1, 2, 0, 6, 0 )

          END SELECT

       ENDDO

    ENDIF

!
!-- Calculate the number of groups into which parallel I/O is split.
!-- The default for files which are opened by all PEs (or where each
!-- PE opens its own independent file) is that all PEs do input/output
!-- in parallel at the same time. This might cause performance or even more
!-- severe problems depending on the configuration of the underlying file
!-- system.
!-- First, set the default:
    IF ( maximum_parallel_io_streams == -1 .OR. &
         maximum_parallel_io_streams > numprocs ) THEN
       maximum_parallel_io_streams = numprocs
    ENDIF

!
!-- Now calculate the number of io_blocks and the io_group to which the
!-- respective PE belongs. I/O of the groups is done in serial, but in parallel
!-- for all PEs belonging to the same group. A preliminary setting with myid
!-- based on MPI_COMM_WORLD has been done in parin.
    io_blocks = numprocs / maximum_parallel_io_streams
    io_group = MOD( myid+1, io_blocks )
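
!
!-- Illustrative example (added, not part of the original code): with
!-- numprocs = 64 and maximum_parallel_io_streams = 16 this gives
!-- io_blocks = 4, i.e. the PEs are split into 4 groups
!-- (io_group = MOD( myid+1, 4 )) which perform their I/O one group after
!-- the other.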


 END SUBROUTINE init_pegrid