Changeset 4648 for palm/trunk/SOURCE/init_pegrid.f90
Timestamp: Aug 25, 2020 7:52:08 AM
File: 1 edited
palm/trunk/SOURCE/init_pegrid.f90
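As the diff below shows, the change is a pure re-formatting: the file is adapted to the PALM coding standard, which mainly means that the comment separator lines are lengthened and that the continuation characters (&) of wrapped statements are moved out to the wider line limit, with the statement bodies re-flowed accordingly. A minimal before/after illustration, taken from the USE statement for grid_variables that appears in the diff (the exact target column is whatever the coding standard prescribes):

    ! r4564 (old): continuation character at the former, narrower line limit
    USE grid_variables,                                                        &
        ONLY:  dx

    ! r4648 (new): same statement, continuation character moved to the wider limit
    USE grid_variables,                                                                            &
        ONLY:  dx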
r4564 r4648 1 1 !> @file init_pegrid.f90 2 !------------------------------------------------------------------------------ !2 !--------------------------------------------------------------------------------------------------! 3 3 ! This file is part of the PALM model system. 4 4 ! 5 ! PALM is free software: you can redistribute it and/or modify it under the 6 ! terms of the GNU General Public License as published by the Free Software 7 ! Foundation, either version 3 of the License, or (at your option) any later 8 ! version. 9 ! 10 ! PALM is distributed in the hope that it will be useful, but WITHOUT ANY 11 ! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR 12 ! A PARTICULAR PURPOSE. See the GNU General Public License for more details. 13 ! 14 ! You should have received a copy of the GNU General Public License along with 15 ! PALM. If not, see <http://www.gnu.org/licenses/>. 16 ! 17 ! Copyright 1997-2020 Leibniz Universitaet Hannover 18 !------------------------------------------------------------------------------! 5 ! PALM is free software: you can redistribute it and/or modify it under the terms of the GNU General 6 ! Public License as published by the Free Software Foundation, either version 3 of the License, or 7 ! (at your option) any later version. 8 ! 9 ! PALM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the 10 ! implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General 11 ! Public License for more details. 12 ! 13 ! You should have received a copy of the GNU General Public License along with PALM. If not, see 14 ! <http://www.gnu.org/licenses/>. 19 15 ! 20 16 ! Current revisions: 21 17 ! ------------------ 22 ! 23 ! 18 ! 19 ! 24 20 ! Former revisions: 25 21 ! ----------------- 26 22 ! $Id$ 23 ! file re-formatted to follow the PALM coding standard 24 ! 25 ! 4564 2020-06-12 14:03:36Z raasch 27 26 ! Vertical nesting method of Huq et al. (2019) removed 28 ! 27 ! 29 28 ! 4461 2020-03-12 16:51:59Z raasch 30 29 ! communicator configurations for four virtual pe grids defined 31 ! 30 ! 32 31 ! 4444 2020-03-05 15:59:50Z raasch 33 32 ! bugfix: cpp-directives for serial mode added 34 ! 33 ! 35 34 ! 4360 2020-01-07 11:25:50Z suehring 36 35 ! changed message PA0467 37 ! 36 ! 38 37 ! 4264 2019-10-15 16:00:23Z scharf 39 38 ! corrected error message string 40 ! 39 ! 41 40 ! 4241 2019-09-27 06:32:47Z raasch 42 ! Check added to ensure that subdomain grid has at least the size as given by the number 43 ! of ghostpoints44 ! 41 ! Check added to ensure that subdomain grid has at least the size as given by the number of ghost 42 ! points 43 ! 45 44 ! 4182 2019-08-22 15:20:23Z scharf 46 45 ! Corrected "Former revisions" section 47 ! 46 ! 48 47 ! 4045 2019-06-21 10:58:47Z raasch 49 ! bugfix: kind attribute added to nint function to allow for large integers which may appear in 50 ! caseof default recycling width and small grid spacings51 ! 48 ! bugfix: kind attribute added to nint function to allow for large integers which may appear in case 49 ! of default recycling width and small grid spacings 50 ! 52 51 ! 3999 2019-05-23 16:09:37Z suehring 53 52 ! Spend 3 ghost points also in case of pw-scheme when nesting is applied 54 ! 53 ! 55 54 ! 3897 2019-04-15 11:51:14Z suehring 56 55 ! Minor revision of multigrid check; give warning instead of an abort. 57 ! 56 ! 58 57 ! 3890 2019-04-12 15:59:20Z suehring 59 ! Check if grid coarsening is possible on subdomain, in order to avoid that 60 ! 
multigrid approacheffectively reduces to a Gauss-Seidel scheme.61 ! 58 ! Check if grid coarsening is possible on subdomain, in order to avoid that multigrid approach 59 ! effectively reduces to a Gauss-Seidel scheme. 60 ! 62 61 ! 3885 2019-04-11 11:29:34Z kanani 63 ! Changes related to global restructuring of location messages and introduction 64 ! of additional debugmessages65 ! 62 ! Changes related to global restructuring of location messages and introduction of additional debug 63 ! messages 64 ! 66 65 ! 3884 2019-04-10 13:31:55Z Giersch 67 66 ! id_recycling is only calculated in case of tubulent inflow 68 ! 67 ! 69 68 ! 3761 2019-02-25 15:31:42Z raasch 70 69 ! unused variable removed 71 ! 70 ! 72 71 ! 3655 2019-01-07 16:51:22Z knoop 73 72 ! variables documented … … 79 78 ! Description: 80 79 ! ------------ 81 !> Determination of the virtual processor topology (if not prescribed by the 82 !> user)and computation of the grid point number and array bounds of the local 83 !> domains. 84 !> @todo: remove MPI-data types for 2D exchange on coarse multigrid level (not 85 !> used any more) 86 !------------------------------------------------------------------------------! 80 !> Determination of the virtual processor topology (if not prescribed by the user) and computation 81 !> of the grid point number and array bounds of the local domains. 82 !> @todo: remove MPI-data types for 2D exchange on coarse multigrid level (not used any more) 83 !--------------------------------------------------------------------------------------------------! 87 84 SUBROUTINE init_pegrid 88 89 90 USE control_parameters, & 91 ONLY: bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, & 92 bc_lr, bc_ns, bc_radiation_l, bc_radiation_n, bc_radiation_r, & 93 bc_radiation_s, & 94 grid_level, grid_level_count, maximum_grid_level, & 95 message_string, mg_switch_to_pe0_level, & 96 psolver 85 86 87 USE control_parameters, & 88 ONLY: bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, bc_lr, bc_ns, & 89 bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s, grid_level, & 90 grid_level_count, maximum_grid_level, message_string, mg_switch_to_pe0_level, psolver 97 91 98 92 99 93 #if defined( __parallel ) 100 USE control_parameters, &101 ONLY: coupling_mode, coupling_topology, gathered_size, momentum_advec, &102 outflow_source_plane, recycling_width, scalar_advec, subdomain_size, &94 USE control_parameters, & 95 ONLY: coupling_mode, coupling_topology, gathered_size, momentum_advec, & 96 outflow_source_plane, recycling_width, scalar_advec, subdomain_size, & 103 97 turbulent_inflow, turbulent_outflow, y_shift 104 98 105 USE grid_variables, &99 USE grid_variables, & 106 100 ONLY: dx 107 101 #endif 108 109 USE indices, & 110 ONLY: nnx, nny, nnz, nx, nxl, nxl_mg, & 111 nxlu, nxr, nxr_mg, ny, nyn, nyn_mg, nys, nys_mg, & 112 nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2, & 113 wall_flags_3, wall_flags_4, wall_flags_5, wall_flags_6, & 114 wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10 102 103 USE indices, & 104 ONLY: nnx, nny, nnz, nx, nxl, nxl_mg, nxlu, nxr, nxr_mg, ny, nyn, nyn_mg, nys, nys_mg, & 105 nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2, wall_flags_3, wall_flags_4, & 106 wall_flags_5, wall_flags_6, wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10 115 107 116 108 #if defined( __parallel ) 117 USE indices, &109 USE indices, & 118 110 ONLY: mg_loc_ind, nbgp, nx_a, nx_o, ny_a, ny_o 119 111 #endif 120 112 121 113 USE kinds 122 114 123 115 USE pegrid 124 116 125 117 
#if defined( __parallel ) 126 USE pmc_interface, &118 USE pmc_interface, & 127 119 ONLY: nested_run 128 120 129 USE spectra_mod, &121 USE spectra_mod, & 130 122 ONLY: calculate_spectra 131 123 132 USE synthetic_turbulence_generator_mod, & 133 ONLY: id_stg_left, id_stg_north, id_stg_right, id_stg_south, & 134 use_syn_turb_gen 124 USE synthetic_turbulence_generator_mod, & 125 ONLY: id_stg_left, id_stg_north, id_stg_right, id_stg_south, use_syn_turb_gen 135 126 #endif 136 127 137 USE transpose_indices, & 138 ONLY: nxl_y, nxl_z, nxr_y, nxr_z, nyn_x, nyn_z, nys_x,& 139 nys_z, nzb_x, nzb_y, nzt_x, nzt_y 128 USE transpose_indices, & 129 ONLY: nxl_y, nxl_z, nxr_y, nxr_z, nyn_x, nyn_z, nys_x, nys_z, nzb_x, nzb_y, nzt_x, nzt_y 140 130 141 131 #if defined( __parallel ) 142 USE transpose_indices, &132 USE transpose_indices, & 143 133 ONLY: nxl_yd, nxr_yd, nzb_yd, nzt_yd 144 134 #endif … … 152 142 INTEGER(iwp) :: id_outflow_source_l !< local value of id_outflow_source 153 143 INTEGER(iwp) :: id_recycling_l !< ID indicating processors located at the recycling plane 154 INTEGER(iwp) :: id_stg_left_l !< left lateral boundary local core id in case of turbulence generator 155 INTEGER(iwp) :: id_stg_north_l !< north lateral boundary local core id in case of turbulence generator 156 INTEGER(iwp) :: id_stg_right_l !< right lateral boundary local core id in case of turbulence generator 157 INTEGER(iwp) :: id_stg_south_l !< south lateral boundary local core id in case of turbulence generator 144 INTEGER(iwp) :: id_stg_left_l !< left lateral boundary local core id in case of turbulence generator 145 INTEGER(iwp) :: id_stg_north_l !< north lateral boundary local core id in case of turbulence generator 146 INTEGER(iwp) :: id_stg_right_l !< right lateral boundary local core id in case of turbulence generator 147 INTEGER(iwp) :: id_stg_south_l !< south lateral boundary local core id in case of turbulence generator 158 148 INTEGER(iwp) :: ind(5) !< array containing the subdomain bounds 159 149 #endif … … 222 212 223 213 ! 224 !-- Prescribed by user. Number of processors on the prescribed topology 225 !-- must be equal to thenumber of PEs available to the job214 !-- Prescribed by user. Number of processors on the prescribed topology must be equal to the 215 !-- number of PEs available to the job 226 216 IF ( ( npex * npey ) /= numprocs ) THEN 227 WRITE( message_string, * ) 'number of PEs of the prescribed ', 228 'topology (', npex*npey,') does not match & the number of ',&229 'PEs available to the job (', numprocs, ')'217 WRITE( message_string, * ) 'number of PEs of the prescribed ', 'topology (', npex*npey, & 218 ') does not match & the number of ', & 219 'PEs available to the job (', numprocs, ')' 230 220 CALL message( 'init_pegrid', 'PA0221', 1, 2, 0, 6, 0 ) 231 221 ENDIF … … 237 227 !-- If the processor topology is prescribed by the user, the number of 238 228 !-- PEs must be given in both directions 239 message_string = 'if the processor topology is prescribed by th' // &240 'e user & both values of "npex" and "npey" must be given' //&241 ' in the &NAMELIST-parameter file'229 message_string = 'if the processor topology is prescribed by th' // & 230 'e user & both values of "npex" and "npey" must be given' // & 231 ' in the &NAMELIST-parameter file' 242 232 CALL message( 'init_pegrid', 'PA0222', 1, 2, 0, 6, 0 ) 243 233 … … 245 235 246 236 ! 247 !-- Create four default MPI communicators for the 2d virtual PE grid. 
One of them will be used 248 !-- asthe main communicator for this run, while others might be used for specific quantities like237 !-- Create four default MPI communicators for the 2d virtual PE grid. One of them will be used as 238 !-- the main communicator for this run, while others might be used for specific quantities like 249 239 !-- aerosol, chemical species, or passive scalars), if their horizontal boundary conditions shall 250 240 !-- be different from those of the other quantities (e.g. non-cyclic conditions for aerosols, and … … 297 287 298 288 ! 299 !-- In case of cyclic boundary conditions, a y-shift at the boundaries in 300 !-- x-direction can be introduced via parameter y_shift. The shift is done 301 !-- by modifying the processor grid in such a way that processors located 302 !-- at the x-boundary communicate across it to processors with y-coordinate 303 !-- shifted by y_shift relative to their own. This feature can not be used 304 !-- in combination with an fft pressure solver. It has been implemented to 305 !-- counter the effect of streak structures in case of cyclic boundary 306 !-- conditions. For a description of these see Munters 289 !-- In case of cyclic boundary conditions, a y-shift at the boundaries in x-direction can be 290 !-- introduced via parameter y_shift. The shift is done by modifying the processor grid in such a 291 !-- way that processors located at the x-boundary communicate across it to processors with 292 !-- y-coordinate shifted by y_shift relative to their own. This feature can not be used in 293 !-- combination with an fft pressure solver. It has been implemented to counter the effect of streak 294 !-- structures in case of cyclic boundary conditions. For a description of these see Munters 307 295 !-- (2016; dx.doi.org/10.1063/1.4941912) 308 296 !-- … … 310 298 IF ( y_shift /= 0 ) THEN 311 299 IF ( bc_lr == 'cyclic' ) THEN 312 IF ( TRIM( psolver ) /= 'multigrid' .AND. & 313 TRIM( psolver ) /= 'multigrid_noopt') & 314 THEN 300 IF ( TRIM( psolver ) /= 'multigrid' .AND. TRIM( psolver ) /= 'multigrid_noopt') THEN 315 301 message_string = 'y_shift /= 0 requires a multigrid pressure solver ' 316 302 CALL message( 'check_parameters', 'PA0468', 1, 2, 0, 6, 0 ) … … 320 306 CALL MPI_CART_COORDS( comm2d, pleft, ndim, lcoord, ierr ) 321 307 322 ! 323 !-- If the x(y)-coordinate of the right (left) neighbor is smaller (greater) 324 !-- than that of the calling process, then the calling process is located on 325 !-- the right (left) boundary of the processor grid. In that case, 326 !-- the y-coordinate of that neighbor is increased (decreased) by y_shift. 327 !-- The rank of the process with that coordinate is then inquired and the 328 !-- neighbor rank for MPI_SENDRECV, pright (pleft) is set to it. 329 !-- In this way, the calling process receives a new right (left) neighbor 330 !-- for all future MPI_SENDRECV calls. That neighbor has a y-coordinate 331 !-- of y+(-)y_shift, where y is the original right (left) neighbor's 332 !-- y-coordinate. The modulo-operation ensures that if the neighbor's 333 !-- y-coordinate exceeds the grid-boundary, it will be relocated to 334 !-- the opposite part of the grid cyclicly. 335 IF ( rcoord(1) < pcoord(1) ) THEN 308 ! 309 !-- If the x(y)-coordinate of the right (left) neighbor is smaller (greater) than that of the 310 !-- calling process, then the calling process is located on the right (left) boundary of the 311 !-- processor grid. In that case, the y-coordinate of that neighbor is increased (decreased) 312 !-- by y_shift. 
313 !-- The rank of the process with that coordinate is then inquired and the neighbor rank for 314 !-- MPI_SENDRECV, pright (pleft) is set to it. 315 !-- In this way, the calling process receives a new right (left) neighbor for all future 316 !-- MPI_SENDRECV calls. That neighbor has a y-coordinate of y+(-)y_shift, where y is the 317 !-- original right (left) neighbor's y-coordinate. The modulo-operation ensures that if the 318 !-- neighbor's y-coordinate exceeds the grid-boundary, it will be relocated to the opposite 319 !-- part of the grid cyclicly. 320 IF ( rcoord(1) < pcoord(1) ) THEN 336 321 rcoord(2) = MODULO( rcoord(2) + y_shift, pdims(2) ) 337 322 CALL MPI_CART_RANK( comm2d, rcoord, pright, ierr ) 338 323 ENDIF 339 324 340 IF ( lcoord(1) > pcoord(1) ) THEN325 IF ( lcoord(1) > pcoord(1) ) THEN 341 326 lcoord(2) = MODULO( lcoord(2) - y_shift, pdims(2) ) 342 327 CALL MPI_CART_RANK( comm2d, lcoord, pleft, ierr ) 343 328 ENDIF 344 329 345 330 ELSE 346 331 ! 347 !-- y-shift for non-cyclic boundary conditions is only implemented 332 !-- y-shift for non-cyclic boundary conditions is only implemented 348 333 !-- for the turbulence recycling method in inflow_turbulence.f90 349 334 IF ( .NOT. turbulent_inflow ) THEN 350 message_string = 'y_shift /= 0 is only allowed for cyclic ' // &351 'boundary conditions in both directions ' // &335 message_string = 'y_shift /= 0 is only allowed for cyclic ' // & 336 'boundary conditions in both directions ' // & 352 337 'or with turbulent_inflow == .TRUE.' 353 338 CALL message( 'check_parameters', 'PA0467', 1, 2, 0, 6, 0 ) … … 373 358 ! 374 359 !-- Calculate array bounds along x-direction for every PE. 375 ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1), & 376 nysf(0:pdims(2)-1) ) 360 ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1), nysf(0:pdims(2)-1) ) 377 361 378 362 IF ( MOD( nx+1 , pdims(1) ) /= 0 ) THEN 379 WRITE( message_string, * ) 'x-direction: gridpoint number (' ,nx+1,') ',&380 'is not an& integral multiple of the number', &381 ' of processors (', pdims(1),')'363 WRITE( message_string, * ) 'x-direction: gridpoint number (' ,nx+1, ') ', & 364 'is not an& integral multiple of the number', & 365 ' of processors (', pdims(1), ')' 382 366 CALL message( 'init_pegrid', 'PA0225', 1, 2, 0, 6, 0 ) 383 367 ELSE … … 395 379 !-- Calculate array bounds in y-direction for every PE. 396 380 IF ( MOD( ny+1 , pdims(2) ) /= 0 ) THEN 397 WRITE( message_string, * ) 'y-direction: gridpoint number (', ny+1,') ',&398 'is not an& integral multiple of the number', &399 ' of processors (', pdims(2),')'381 WRITE( message_string, * ) 'y-direction: gridpoint number (', ny+1, ') ', & 382 'is not an& integral multiple of the number', & 383 ' of processors (', pdims(2), ')' 400 384 CALL message( 'init_pegrid', 'PA0227', 1, 2, 0, 6, 0 ) 401 385 ELSE … … 421 405 422 406 ! 423 !-- Set switches to define if the PE is situated at the border of the virtual 424 !-- processor grid 407 !-- Set switches to define if the PE is situated at the border of the virtual processor grid 425 408 IF ( nxl == 0 ) left_border_pe = .TRUE. 426 409 IF ( nxr == nx ) right_border_pe = .TRUE. … … 429 412 430 413 ! 
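The y_shift handling above is hard to follow in diff form, so here is the idea once more: a PE on the right (left) x-boundary of the virtual PE grid replaces its cyclic right (left) neighbour by the PE whose y-coordinate is shifted by y_shift, simply by modifying the neighbour's coordinate pair and asking MPI_CART_RANK for the corresponding rank. The following self-contained sketch reproduces that coordinate arithmetic; it is not PALM code, and the decomposition returned by MPI_DIMS_CREATE as well as the shift value of 1 are assumptions made only for the demonstration.

    PROGRAM y_shift_demo
       USE mpi
       IMPLICIT NONE
       INTEGER ::  comm2d, ierr, myid, numprocs, pright, y_shift
       INTEGER ::  pdims(2), pcoord(2), rcoord(2)
       LOGICAL ::  cyclic(2) = (/ .TRUE., .TRUE. /), reorder = .FALSE.

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

       pdims = 0
       CALL MPI_DIMS_CREATE( numprocs, 2, pdims, ierr )   ! virtual 2d PE grid
       y_shift = 1                                        ! assumed shift, demo only

       CALL MPI_CART_CREATE( MPI_COMM_WORLD, 2, pdims, cyclic, reorder, comm2d, ierr )
       CALL MPI_COMM_RANK( comm2d, myid, ierr )
       CALL MPI_CART_COORDS( comm2d, myid, 2, pcoord, ierr )
    !
    !-- Coordinates of the cyclic right neighbour. If its x-coordinate wraps around,
    !-- i.e. becomes smaller than the own one, this PE sits on the right boundary and
    !-- the neighbour's y-coordinate is shifted cyclically by y_shift via MODULO.
       rcoord(1) = MODULO( pcoord(1) + 1, pdims(1) )
       rcoord(2) = pcoord(2)
       IF ( rcoord(1) < pcoord(1) )  rcoord(2) = MODULO( rcoord(2) + y_shift, pdims(2) )
       CALL MPI_CART_RANK( comm2d, rcoord, pright, ierr )

       PRINT*, 'rank', myid, ' coords', pcoord, ' shifted right neighbour', pright
       CALL MPI_FINALIZE( ierr )
    END PROGRAM y_shift_demo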
431 !-- Calculate array bounds and gridpoint numbers for the transposed arrays 432 !-- (needed in the pressuresolver)433 !-- For the transposed arrays, cyclic boundaries as well as top and bottom 434 !-- b oundaries are omitted, because they are obstructive to the transposition414 !-- Calculate array bounds and gridpoint numbers for the transposed arrays (needed in the pressure 415 !-- solver) 416 !-- For the transposed arrays, cyclic boundaries as well as top and bottom boundaries are omitted, 417 !-- because they are obstructive to the transposition 435 418 436 419 ! … … 441 424 IF ( pdims(2) /= 1 ) THEN 442 425 IF ( MOD( nz , pdims(1) ) /= 0 ) THEN 443 WRITE( message_string, * ) 'transposition z --> x:& ', &444 'nz=', nz,' is not an integral multiple ',&445 'of pdims(1)=', pdims(1)426 WRITE( message_string, * ) 'transposition z --> x:& ', & 427 'nz=', nz, ' is not an integral multiple ', & 428 'of pdims(1)=', pdims(1) 446 429 CALL message( 'init_pegrid', 'PA0230', 1, 2, 0, 6, 0 ) 447 430 ENDIF … … 463 446 !-- 2. transposition x --> y 464 447 IF ( MOD( nx+1 , pdims(2) ) /= 0 ) THEN 465 WRITE( message_string, * ) 'transposition x --> y:& ', &466 'nx+1=', nx+1,' is not an integral ',&467 'multiple of pdims(2)=', pdims(2)448 WRITE( message_string, * ) 'transposition x --> y:& ', & 449 'nx+1=', nx+1, ' is not an integral ', & 450 'multiple of pdims(2)=', pdims(2) 468 451 CALL message( 'init_pegrid', 'PA0231', 1, 2, 0, 6, 0 ) 469 452 ENDIF … … 492 475 !-- along x, except that the uptream-spline method is switched on 493 476 IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN 494 WRITE( message_string, * ) 'transposition y --> z:& ', &495 'ny+1=', ny+1,' is not an integral ',&496 'multiple of pdims(1)=', pdims(1)477 WRITE( message_string, * ) 'transposition y --> z:& ', & 478 'ny+1=', ny+1, ' is not an integral ', & 479 'multiple of pdims(1)=', pdims(1) 497 480 CALL message( 'init_pegrid', 'PA0232', 1, 2, 0, 6, 0 ) 498 481 ENDIF … … 503 486 !-- This condition must be fulfilled for a 1D-decomposition along x 504 487 IF ( MOD( ny+1 , pdims(1) ) /= 0 ) THEN 505 WRITE( message_string, * ) 'transposition x --> y:& ', &506 'ny+1=', ny+1,' is not an integral ',&507 'multiple of pdims(1)=', pdims(1)488 WRITE( message_string, * ) 'transposition x --> y:& ', & 489 'ny+1=', ny+1, ' is not an integral ', & 490 'multiple of pdims(1)=', pdims(1) 508 491 CALL message( 'init_pegrid', 'PA0233', 1, 2, 0, 6, 0 ) 509 492 ENDIF … … 517 500 IF ( calculate_spectra ) THEN 518 501 IF ( MOD( nz, pdims(2) ) /= 0 ) THEN 519 WRITE( message_string, * ) 'direct transposition z --> y (needed ', &520 'for spectra):& nz=', nz,' is not an ',&521 'integral multiple of pdims(2)=', pdims(2)502 WRITE( message_string, * ) 'direct transposition z --> y (needed ', & 503 'for spectra):& nz=', nz, ' is not an ', & 504 'integral multiple of pdims(2)=', pdims(2) 522 505 CALL message( 'init_pegrid', 'PA0234', 1, 2, 0, 6, 0 ) 523 506 ELSE … … 532 515 IF ( psolver == 'poisfft' .OR. calculate_spectra ) THEN 533 516 ! 534 !-- Indices for direct transpositions y --> x 517 !-- Indices for direct transpositions y --> x 535 518 !-- (they are only possible in case of a 1d-decomposition along x) 536 519 IF ( pdims(2) == 1 ) THEN … … 547 530 IF ( psolver == 'poisfft' ) THEN 548 531 ! 
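Both the domain decomposition further above and the transpositions in this part rest on the same even-division requirement: the respective number of grid points must be an integral multiple of the number of PEs along that direction, otherwise one of the PA0225/PA0227/PA023x errors is raised. A small stand-alone sketch of the underlying bound arithmetic, with freely chosen example values (they are not taken from any real setup):

    PROGRAM bounds_demo
       IMPLICIT NONE
       INTEGER, PARAMETER ::  nx = 239, npex = 4   ! example values only
       INTEGER ::  i, nnx, nxl, nxr

       IF ( MOD( nx+1, npex ) /= 0 )  STOP 'nx+1 is not an integral multiple of npex'

       nnx = ( nx + 1 ) / npex                     ! subdomain size along x
       DO  i = 0, npex-1
          nxl = i * nnx                            ! leftmost grid index of PE i
          nxr = ( i + 1 ) * nnx - 1                ! rightmost grid index of PE i
          PRINT*, 'PE', i, ':  nxl =', nxl, '  nxr =', nxr
       ENDDO
    END PROGRAM bounds_demo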
549 !-- Indices for direct transpositions x --> y 532 !-- Indices for direct transpositions x --> y 550 533 !-- (they are only possible in case of a 1d-decomposition along y) 551 534 IF ( pdims(1) == 1 ) THEN … … 579 562 !-- Receive data from all other PEs 580 563 DO i = 1, numprocs-1 581 CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, & 582 ierr ) 564 CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, ierr ) 583 565 hor_index_bounds(:,i) = ibuf(1:4) 584 566 ENDDO … … 602 584 PRINT*, '*** processor topology ***' 603 585 PRINT*, ' ' 604 PRINT*, 'myid pcoord left right south north idx idy nxl: nxr',& 605 &' nys: nyn' 606 PRINT*, '------------------------------------------------------------',& 607 &'-----------' 608 WRITE (*,1000) 0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, & 609 myidx, myidy, nxl, nxr, nys, nyn 610 1000 FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3, & 611 2(2X,I4,':',I4)) 586 PRINT*, 'myid pcoord left right south north idx idy nxl: nxr',' nys: nyn' 587 PRINT*, '------------------------------------------------------------','-----------' 588 WRITE (*,1000) 0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, myidx, myidy, nxl, & 589 nxr, nys, nyn 590 1000 FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3,2(2X,I4,':',I4)) 612 591 613 592 ! 614 593 !-- Receive data from the other PEs 615 594 DO i = 1,numprocs-1 616 CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, & 617 ierr ) 595 CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, ierr ) 618 596 WRITE (*,1000) i, ( ibuf(j) , j = 1,12 ) 619 597 ENDDO … … 626 604 ibuf(8) = myidy; ibuf(9) = nxl; ibuf(10) = nxr; ibuf(11) = nys 627 605 ibuf(12) = nyn 628 CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr ) 606 CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr ) 629 607 ENDIF 630 608 #endif 631 609 632 ! 610 ! 633 611 !-- Determine the number of ghost point layers 634 IF ( scalar_advec == 'ws-scheme' .OR. &612 IF ( scalar_advec == 'ws-scheme' .OR. & 635 613 momentum_advec == 'ws-scheme' .OR. nested_run ) THEN 636 614 nbgp = 3 637 615 ELSE 638 616 nbgp = 1 639 ENDIF 640 641 ! 642 !-- Check that the number of computational grid points is not smaller than the number of 643 !-- ghostpoints.617 ENDIF 618 619 ! 620 !-- Check that the number of computational grid points is not smaller than the number of ghost 621 !-- points. 644 622 IF ( nnx < nbgp ) THEN 645 623 WRITE( message_string, * ) 'number of subdomain grid points along x (', nnx, ') is smaller',& … … 654 632 655 633 ! 656 !-- Create a new MPI derived datatype for the exchange of surface (xy) data, 657 !-- which is needed forcoupled atmosphere-ocean runs.634 !-- Create a new MPI derived datatype for the exchange of surface (xy) data, which is needed for 635 !-- coupled atmosphere-ocean runs. 658 636 !-- First, calculate number of grid points of an xy-plane. 659 637 ngp_xy = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp ) … … 662 640 663 641 IF ( TRIM( coupling_mode ) /= 'uncoupled' ) THEN 664 642 665 643 ! 
666 644 !-- Pass the number of grid points of the atmosphere model to … … 673 651 IF ( myid == 0 ) THEN 674 652 675 CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, & 676 ierr ) 677 CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter, & 678 ierr ) 679 CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, & 680 ierr ) 681 CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, & 682 status, ierr ) 683 CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, & 684 status, ierr ) 685 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, & 686 comm_inter, status, ierr ) 653 CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, ierr ) 654 CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter, ierr ) 655 CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, ierr ) 656 CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, status, ierr ) 657 CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, status, ierr ) 658 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, comm_inter, status, ierr ) 687 659 ENDIF 688 660 689 661 CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 690 CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 662 CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 691 663 CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr ) 692 664 693 665 ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN 694 666 695 667 nx_o = nx 696 ny_o = ny 697 698 IF ( myid == 0 ) THEN 699 700 CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, & 701 ierr ) 702 CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, & 703 ierr ) 704 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, & 705 status, ierr ) 668 ny_o = ny 669 670 IF ( myid == 0 ) THEN 671 672 CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, ierr ) 673 CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, ierr ) 674 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, status, ierr ) 706 675 CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr ) 707 676 CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr ) … … 710 679 711 680 CALL MPI_BCAST( nx_a, 1, MPI_INTEGER, 0, comm2d, ierr) 712 CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr) 713 CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr) 714 715 ENDIF 716 681 CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr) 682 CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr) 683 684 ENDIF 685 717 686 ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp ) 718 687 ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp ) 719 688 720 689 ! 721 !-- Determine if the horizontal grid and the number of PEs in ocean and 722 !-- atmosphere is same or not 723 IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. & 724 pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) & 725 THEN 690 !-- Determine if the horizontal grid and the number of PEs in ocean and atmosphere is same or not. 691 IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. & 692 pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) THEN 726 693 coupling_topology = 0 727 694 ELSE 728 695 coupling_topology = 1 729 ENDIF 730 731 ! 732 !-- Determine the target PEs for the exchange between ocean and 733 !-- atmosphere (comm2d) 696 ENDIF 697 698 ! 699 !-- Determine the target PEs for the exchange between ocean and atmosphere (comm2d) 734 700 IF ( coupling_topology == 0 ) THEN 735 701 ! 
736 !-- In case of identical topologies, every atmosphere PE has exactly one 737 !-- ocean PE counterpartand vice versa738 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN702 !-- In case of identical topologies, every atmosphere PE has exactly one ocean PE counterpart 703 !-- and vice versa 704 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN 739 705 target_id = myid + numprocs 740 706 ELSE 741 target_id = myid 707 target_id = myid 742 708 ENDIF 743 709 744 710 ELSE 745 711 ! 746 !-- In case of nonequivalent topology in ocean and atmosphere only for 747 !-- PE0 in ocean and PE0 in atmosphere a target_id is needed, since 748 !-- data echxchange between ocean and atmosphere will be done only 749 !-- between these PEs. 712 !-- In case of nonequivalent topology in ocean and atmosphere only for PE0 in ocean and PE0 in 713 !-- atmosphere a target_id is needed, since data echxchange between ocean and atmosphere will 714 !-- be done only between these PEs. 750 715 IF ( myid == 0 ) THEN 751 716 752 717 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN 753 target_id = numprocs 718 target_id = numprocs 754 719 ELSE 755 720 target_id = 0 … … 765 730 766 731 ! 767 !-- Array bounds when running on a single PE (respectively a non-parallel 768 !-- machine) 732 !-- Array bounds when running on a single PE (respectively a non-parallel machine) 769 733 nxl = 0 770 734 nxr = nx … … 784 748 785 749 ! 786 !-- Array bounds for the pressure solver (in the parallel code, these bounds 787 !-- are the ones for thetransposed arrays)750 !-- Array bounds for the pressure solver (in the parallel code, these bounds are the ones for the 751 !-- transposed arrays) 788 752 nys_x = nys 789 753 nyn_x = nyn … … 804 768 805 769 ! 806 !-- Calculate number of grid levels necessary for the multigrid poisson solver 807 !-- as well as thegridpoint indices on each level770 !-- Calculate number of grid levels necessary for the multigrid poisson solver as well as the 771 !-- gridpoint indices on each level 808 772 IF ( psolver(1:9) == 'multigrid' ) THEN 809 773 … … 833 797 ENDDO 834 798 ! 835 !-- The optimized MG-solver does not allow odd values for nz at the coarsest 836 !-- grid level 799 !-- The optimized MG-solver does not allow odd values for nz at the coarsest grid level 837 800 IF ( TRIM( psolver ) /= 'multigrid_noopt' ) THEN 838 801 IF ( MOD( k, 2 ) /= 0 ) mg_levels_z = mg_levels_z - 1 839 802 ! 840 !-- An odd value of nz does not work. The finest level must have an even 841 !-- value. 803 !-- An odd value of nz does not work. The finest level must have an even value. 842 804 IF ( mg_levels_z == 0 ) THEN 843 805 message_string = 'optimized multigrid method requires nz to be even' … … 847 809 848 810 maximum_grid_level = MIN( mg_levels_x, mg_levels_y, mg_levels_z ) 849 ! 850 !-- Check if subdomain sizes prevents any coarsening. 851 !-- This case, the maximum number of grid levels is 1, i.e. effectively 852 !-- a Gauss-Seidel scheme is applied rather than a multigrid approach.811 ! 812 !-- Check if subdomain sizes prevents any coarsening. 813 !-- This case, the maximum number of grid levels is 1, i.e. effectively a Gauss-Seidel scheme is 814 !-- applied rather than a multigrid approach. 853 815 !-- Give a warning in this case. 854 816 IF ( maximum_grid_level == 1 .AND. 
mg_switch_to_pe0_level == -1 ) THEN 855 message_string = 'No grid coarsening possible, multigrid ' // &856 'approach effectively reduces to a Gauss-Seidel ' // &817 message_string = 'No grid coarsening possible, multigrid ' // & 818 'approach effectively reduces to a Gauss-Seidel ' // & 857 819 'scheme.' 858 820 859 821 CALL message( 'poismg', 'PA0648', 0, 1, 0, 6, 0 ) 860 822 ENDIF 861 823 862 824 ! 863 !-- Find out, if the total domain allows more levels. These additional 864 !-- levels are identicallyprocessed on all PEs.825 !-- Find out, if the total domain allows more levels. These additional levels are identically 826 !-- processed on all PEs. 865 827 IF ( numprocs > 1 .AND. mg_switch_to_pe0_level /= -1 ) THEN 866 828 … … 887 849 888 850 IF ( maximum_grid_level_l > mg_switch_to_pe0_level_l ) THEN 889 mg_switch_to_pe0_level_l = maximum_grid_level_l - & 890 mg_switch_to_pe0_level_l + 1 851 mg_switch_to_pe0_level_l = maximum_grid_level_l - mg_switch_to_pe0_level_l + 1 891 852 ELSE 892 853 mg_switch_to_pe0_level_l = 0 … … 901 862 902 863 ! 903 !-- Use switch level calculated above only if it is not pre-defined 904 !-- by user 864 !-- Use switch level calculated above only if it is not pre-defined by user 905 865 IF ( mg_switch_to_pe0_level == 0 ) THEN 906 866 IF ( mg_switch_to_pe0_level_l /= 0 ) THEN … … 912 872 ! 913 873 !-- Check pre-defined value and reset to default, if neccessary 914 IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l .OR. &874 IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l .OR. & 915 875 mg_switch_to_pe0_level >= maximum_grid_level_l ) THEN 916 message_string = 'mg_switch_to_pe0_level ' // &876 message_string = 'mg_switch_to_pe0_level ' // & 917 877 'out of range and reset to 0' 918 878 CALL message( 'init_pegrid', 'PA0235', 0, 1, 0, 6, 0 ) … … 920 880 ELSE 921 881 ! 922 !-- Use the largest number of possible levels anyway and recalculate 923 !-- th e switch level to this largest number of possible values882 !-- Use the largest number of possible levels anyway and recalculate the switch level to 883 !-- this largest number of possible values 924 884 maximum_grid_level = maximum_grid_level_l 925 885 … … 930 890 ENDIF 931 891 932 ALLOCATE( grid_level_count(maximum_grid_level), &933 nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level), &934 nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level), &892 ALLOCATE( grid_level_count(maximum_grid_level), & 893 nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level), & 894 nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level), & 935 895 nzt_mg(0:maximum_grid_level) ) 936 896 937 897 grid_level_count = 0 938 898 ! 939 !-- Index zero required as dummy due to definition of arrays f2 and p2 in 940 !-- recursive subroutinenext_mg_level899 !-- Index zero required as dummy due to definition of arrays f2 and p2 in recursive subroutine 900 !-- next_mg_level 941 901 nxl_mg(0) = 0; nxr_mg(0) = 0; nyn_mg(0) = 0; nys_mg(0) = 0; nzt_mg(0) = 0 942 902 … … 948 908 #if defined( __parallel ) 949 909 ! 950 !-- Save the grid size of the subdomain at the switch level, because 951 !-- it is needed in poismg. 910 !-- Save the grid size of the subdomain at the switch level, because it is needed in poismg. 952 911 ind(1) = nxl_l; ind(2) = nxr_l 953 912 ind(3) = nys_l; ind(4) = nyn_l … … 969 928 nys_l = 0 970 929 ! 971 !-- The size of this gathered array must not be larger than the 972 !-- array tend, which is used in the multigrid scheme as a temporary 973 !-- array. 
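The determination of mg_levels_x/y/z is scattered over several hunks here; the underlying idea is to count how often each subdomain dimension can be halved while the point count stays even, and to take the minimum over the three directions as maximum_grid_level. The sketch below reproduces only that counting for assumed subdomain sizes; the additional constraints visible in the diff (even nz for the optimized solver, the switch-to-PE0 levels) are deliberately left out.

    PROGRAM mg_levels_demo
       IMPLICIT NONE
       INTEGER, PARAMETER ::  nnx = 32, nny = 16, nz = 64   ! assumed subdomain dimensions
       INTEGER ::  maximum_grid_level

       maximum_grid_level = MIN( levels( nnx ), levels( nny ), levels( nz ) )
       PRINT*, 'possible grid levels:', maximum_grid_level
       IF ( maximum_grid_level == 1 )  THEN
          PRINT*, 'no coarsening possible, multigrid effectively reduces to Gauss-Seidel'
       ENDIF

    CONTAINS

       INTEGER FUNCTION levels( n )
          INTEGER, INTENT(IN) ::  n
          INTEGER ::  m
          m = n
          levels = 1
    !
    !--    Halve the dimension while it is even and still larger than 2
          DO WHILE ( MOD( m, 2 ) == 0  .AND.  m > 2 )
             m = m / 2
             levels = levels + 1
          ENDDO
       END FUNCTION levels

    END PROGRAM mg_levels_demo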
Therefore the subdomain size of an PE is calculated and 974 !-- the size of the gathered grid. These values are used in 975 !-- routines pres and poismg 976 subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) * & 930 !-- The size of this gathered array must not be larger than the array tend, which is used 931 !-- in the multigrid scheme as a temporary array. Therefore the subdomain size of an PE is 932 !-- calculated and the size of the gathered grid. These values are used in routines pres 933 !-- and poismg. 934 subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) * & 977 935 ( nyn - nys + 2 * nbgp + 1 ) * ( nzt - nzb + 2 ) 978 gathered_size = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) * & 979 ( nzt_l - nzb + 2 ) 936 gathered_size = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) * ( nzt_l - nzb + 2 ) 980 937 981 938 #else 982 message_string = 'multigrid gather/scatter impossible ' // &939 message_string = 'multigrid gather/scatter impossible ' // & 983 940 'in non parallel mode' 984 941 CALL message( 'init_pegrid', 'PA0237', 1, 2, 0, 6, 0 ) … … 992 949 nzt_mg(i) = nzt_l 993 950 994 nxl_l = nxl_l / 2995 nxr_l = nxr_l / 2996 nys_l = nys_l / 2997 nyn_l = nyn_l / 2998 nzt_l = nzt_l / 2999 1000 ENDDO1001 1002 !1003 !-- Temporary problem: Currently calculation of maxerror in routine poismg crashes1004 !-- if grid data are collected on PE0 already on the finest grid level.1005 !-- To be solved later.1006 IF ( maximum_grid_level == mg_switch_to_pe0_level ) THEN1007 message_string = 'grid coarsening on subdomain level cannot be performed'1008 CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )1009 ENDIF1010 1011 ELSE1012 1013 maximum_grid_level = 01014 1015 ENDIF1016 1017 !1018 !-- Default level 0 tells exchange_horiz that all ghost planes have to be1019 !-- exchanged. grid_level is adjusted in poismg, where only one ghost plane1020 !-- is required.1021 grid_level = 01022 1023 #if defined( __parallel )1024 !1025 !-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)1026 ngp_y = nyn - nys + 1 + 2 * nbgp1027 1028 !1029 !-- Define new MPI derived datatypes for the exchange of ghost points in1030 !-- x- and y-direction for 2D-arrays (line)1031 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, &1032 ierr )1033 CALL MPI_TYPE_COMMIT( type_x, ierr )1034 1035 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )1036 CALL MPI_TYPE_COMMIT( type_y, ierr )1037 !1038 !-- Define new MPI derived datatypes for the exchange of ghost points in1039 !-- x- and y-direction for 2D-INTEGER arrays (line) - on normal grid.1040 !-- Define types for 32-bit and 8-bit Integer. 
The 8-bit Integer are only1041 !-- required on normal grid, while 32-bit Integer may be also required on1042 !-- coarser grid level in case of multigrid solver.1043 !1044 !-- 8-bit Integer1045 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_BYTE, &1046 type_x_byte, ierr )1047 CALL MPI_TYPE_COMMIT( type_x_byte, ierr )1048 1049 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_BYTE, &1050 type_y_byte, ierr )1051 CALL MPI_TYPE_COMMIT( type_y_byte, ierr )1052 !1053 !-- 32-bit Integer1054 ALLOCATE( type_x_int(0:maximum_grid_level), &1055 type_y_int(0:maximum_grid_level) )1056 1057 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, &1058 type_x_int(0), ierr )1059 CALL MPI_TYPE_COMMIT( type_x_int(0), ierr )1060 1061 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int(0), ierr )1062 CALL MPI_TYPE_COMMIT( type_y_int(0), ierr )1063 !1064 !-- Calculate gridpoint numbers for the exchange of ghost points along x1065 !-- (yz-plane for 3D-arrays) and define MPI derived data type(s) for the1066 !-- exchange of ghost points in y-direction (xz-plane).1067 !-- Do these calculations for the model grid and (if necessary) also1068 !-- for the coarser grid levels used in the multigrid method1069 ALLOCATE ( ngp_xz(0:maximum_grid_level), &1070 ngp_xz_int(0:maximum_grid_level), &1071 ngp_yz(0:maximum_grid_level), &1072 ngp_yz_int(0:maximum_grid_level), &1073 type_xz(0:maximum_grid_level), &1074 type_xz_int(0:maximum_grid_level), &1075 type_yz(0:maximum_grid_level), &1076 type_yz_int(0:maximum_grid_level) )1077 1078 nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt1079 1080 !1081 !-- Discern between the model grid, which needs nbgp ghost points and1082 !-- grid levels for the multigrid scheme. In the latter case only one1083 !-- ghost point is necessary.1084 !-- First definition of MPI-datatypes for exchange of ghost layers on normal1085 !-- grid. 
The following loop is needed for data exchange in poismg.f90.1086 !1087 !-- Determine number of grid points of yz-layer for exchange1088 ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)1089 1090 !1091 !-- Define an MPI-datatype for the exchange of left/right boundaries.1092 !-- Although data are contiguous in physical memory (which does not1093 !-- necessarily require an MPI-derived datatype), the data exchange between1094 !-- left and right PE's using the MPI-derived type is 10% faster than without.1095 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), &1096 MPI_REAL, type_xz(0), ierr )1097 CALL MPI_TYPE_COMMIT( type_xz(0), ierr )1098 1099 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), &1100 ierr )1101 CALL MPI_TYPE_COMMIT( type_yz(0), ierr )1102 1103 !1104 !-- Define data types for exchange of 3D Integer arrays.1105 ngp_yz_int(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)1106 1107 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int(0), &1108 MPI_INTEGER, type_xz_int(0), ierr )1109 CALL MPI_TYPE_COMMIT( type_xz_int(0), ierr )1110 1111 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int(0), ngp_yz_int(0), MPI_INTEGER, &1112 type_yz_int(0), ierr )1113 CALL MPI_TYPE_COMMIT( type_yz_int(0), ierr )1114 1115 !1116 !-- Definition of MPI-datatypes for multigrid method (coarser level grids)1117 IF ( psolver(1:9) == 'multigrid' ) THEN1118 !1119 !-- Definition of MPI-datatyoe as above, but only 1 ghost level is used1120 DO i = maximum_grid_level, 1 , -11121 !1122 !-- For 3D-exchange on different multigrid level, one ghost point for1123 !-- REAL arrays, two ghost points for INTEGER arrays1124 ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)1125 ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)1126 1127 ngp_xz_int(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)1128 ngp_yz_int(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)1129 !1130 !-- MPI data type for REAL arrays, for xz-layers1131 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), &1132 MPI_REAL, type_xz(i), ierr )1133 CALL MPI_TYPE_COMMIT( type_xz(i), ierr )1134 1135 !1136 !-- MPI data type for INTEGER arrays, for xz-layers1137 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz_int(i), &1138 MPI_INTEGER, type_xz_int(i), ierr )1139 CALL MPI_TYPE_COMMIT( type_xz_int(i), ierr )1140 1141 !1142 !-- MPI data type for REAL arrays, for yz-layers1143 CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), &1144 ierr )1145 CALL MPI_TYPE_COMMIT( type_yz(i), ierr )1146 !1147 !-- MPI data type for INTEGER arrays, for yz-layers1148 CALL MPI_TYPE_VECTOR( 1, ngp_yz_int(i), ngp_yz_int(i), MPI_INTEGER, &1149 type_yz_int(i), ierr )1150 CALL MPI_TYPE_COMMIT( type_yz_int(i), ierr )1151 1152 1153 !-- For 2D-exchange of INTEGER arrays on coarser grid level, where 2 ghost1154 !-- points need to be exchanged. Only required for 32-bit Integer arrays.1155 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+5, 2, nyn_l-nys_l+5, MPI_INTEGER, &1156 type_x_int(i), ierr )1157 CALL MPI_TYPE_COMMIT( type_x_int(i), ierr )1158 1159 1160 CALL MPI_TYPE_VECTOR( 2, nyn_l-nys_l+5, nyn_l-nys_l+5, MPI_INTEGER, &1161 type_y_int(i), ierr )1162 CALL MPI_TYPE_COMMIT( type_y_int(i), ierr )1163 1164 951 nxl_l = nxl_l / 2 1165 952 nxr_l = nxr_l / 2 … … 1170 957 ENDDO 1171 958 959 ! 960 !-- Temporary problem: Currently calculation of maxerror in routine poismg crashes if grid data 961 !-- are collected on PE0 already on the finest grid level. 962 !-- To be solved later. 
963 IF ( maximum_grid_level == mg_switch_to_pe0_level ) THEN 964 message_string = 'grid coarsening on subdomain level cannot be performed' 965 CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 ) 966 ENDIF 967 968 ELSE 969 970 maximum_grid_level = 0 971 972 ENDIF 973 974 ! 975 !-- Default level 0 tells exchange_horiz that all ghost planes have to be exchanged. grid_level is 976 !-- adjusted in poismg, where only one ghost plane is required. 977 grid_level = 0 978 979 #if defined( __parallel ) 980 ! 981 !-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays) 982 ngp_y = nyn - nys + 1 + 2 * nbgp 983 984 ! 985 !-- Define new MPI derived datatypes for the exchange of ghost points in x- and y-direction for 986 !-- 2D-arrays (line) 987 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, ierr ) 988 CALL MPI_TYPE_COMMIT( type_x, ierr ) 989 990 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr ) 991 CALL MPI_TYPE_COMMIT( type_y, ierr ) 992 ! 993 !-- Define new MPI derived datatypes for the exchange of ghost points in x- and y-direction for 994 !-- 2D-INTEGER arrays (line) - on normal grid. 995 !-- Define types for 32-bit and 8-bit Integer. The 8-bit Integer are only required on normal grid, 996 !-- while 32-bit Integer may be also required on coarser grid level in case of multigrid solver. 997 ! 998 !-- 8-bit Integer 999 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_BYTE, type_x_byte, ierr ) 1000 CALL MPI_TYPE_COMMIT( type_x_byte, ierr ) 1001 1002 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_BYTE, type_y_byte, ierr ) 1003 CALL MPI_TYPE_COMMIT( type_y_byte, ierr ) 1004 ! 1005 !-- 32-bit Integer 1006 ALLOCATE( type_x_int(0:maximum_grid_level), type_y_int(0:maximum_grid_level) ) 1007 1008 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, type_x_int(0), ierr ) 1009 CALL MPI_TYPE_COMMIT( type_x_int(0), ierr ) 1010 1011 CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int(0), ierr ) 1012 CALL MPI_TYPE_COMMIT( type_y_int(0), ierr ) 1013 ! 1014 !-- Calculate gridpoint numbers for the exchange of ghost points along x (yz-plane for 3D-arrays) 1015 !-- and define MPI derived data type(s) for the exchange of ghost points in y-direction (xz-plane). 1016 !-- Do these calculations for the model grid and (if necessary) also for the coarser grid levels 1017 !-- used in the multigrid method 1018 ALLOCATE ( ngp_xz(0:maximum_grid_level), & 1019 ngp_xz_int(0:maximum_grid_level), & 1020 ngp_yz(0:maximum_grid_level), & 1021 ngp_yz_int(0:maximum_grid_level), & 1022 type_xz(0:maximum_grid_level), & 1023 type_xz_int(0:maximum_grid_level), & 1024 type_yz(0:maximum_grid_level), & 1025 type_yz_int(0:maximum_grid_level) ) 1026 1027 nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt 1028 1029 ! 1030 !-- Discern between the model grid, which needs nbgp ghost points and grid levels for the multigrid 1031 !-- scheme. In the latter case only one ghost point is necessary. 1032 !-- First definition of MPI-datatypes for exchange of ghost layers on normal grid. The following 1033 !-- loop is needed for data exchange in poismg.f90. 1034 ! 1035 !-- Determine number of grid points of yz-layer for exchange 1036 ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp) 1037 1038 ! 1039 !-- Define an MPI-datatype for the exchange of left/right boundaries. 
1040 !-- Although data are contiguous in physical memory (which does not necessarily require an 1041 !-- MPI-derived datatype), the data exchange between left and right PE's using the MPI-derived type 1042 !-- is 10% faster than without. 1043 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), MPI_REAL, type_xz(0), & 1044 ierr ) 1045 CALL MPI_TYPE_COMMIT( type_xz(0), ierr ) 1046 1047 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), ierr ) 1048 CALL MPI_TYPE_COMMIT( type_yz(0), ierr ) 1049 1050 ! 1051 !-- Define data types for exchange of 3D Integer arrays. 1052 ngp_yz_int(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp) 1053 1054 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int(0), MPI_INTEGER, & 1055 type_xz_int(0), ierr ) 1056 CALL MPI_TYPE_COMMIT( type_xz_int(0), ierr ) 1057 1058 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int(0), ngp_yz_int(0), MPI_INTEGER, type_yz_int(0), ierr ) 1059 CALL MPI_TYPE_COMMIT( type_yz_int(0), ierr ) 1060 1061 ! 1062 !-- Definition of MPI-datatypes for multigrid method (coarser level grids) 1063 IF ( psolver(1:9) == 'multigrid' ) THEN 1064 ! 1065 !-- Definition of MPI-datatyoe as above, but only 1 ghost level is used 1066 DO i = maximum_grid_level, 1 , -1 1067 ! 1068 !-- For 3D-exchange on different multigrid level, one ghost point for REAL arrays, two ghost 1069 !-- points for INTEGER arrays 1070 ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3) 1071 ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3) 1072 1073 ngp_xz_int(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3) 1074 ngp_yz_int(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3) 1075 ! 1076 !-- MPI data type for REAL arrays, for xz-layers 1077 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), MPI_REAL, type_xz(i), & 1078 ierr ) 1079 CALL MPI_TYPE_COMMIT( type_xz(i), ierr ) 1080 1081 ! 1082 !-- MPI data type for INTEGER arrays, for xz-layers 1083 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz_int(i), MPI_INTEGER, & 1084 type_xz_int(i), ierr ) 1085 CALL MPI_TYPE_COMMIT( type_xz_int(i), ierr ) 1086 1087 ! 1088 !-- MPI data type for REAL arrays, for yz-layers 1089 CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), ierr ) 1090 CALL MPI_TYPE_COMMIT( type_yz(i), ierr ) 1091 ! 1092 !-- MPI data type for INTEGER arrays, for yz-layers 1093 CALL MPI_TYPE_VECTOR( 1, ngp_yz_int(i), ngp_yz_int(i), MPI_INTEGER, type_yz_int(i), ierr ) 1094 CALL MPI_TYPE_COMMIT( type_yz_int(i), ierr ) 1095 1096 1097 !-- For 2D-exchange of INTEGER arrays on coarser grid level, where 2 ghost points need to be 1098 !-- exchanged. Only required for 32-bit Integer arrays. 1099 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+5, 2, nyn_l-nys_l+5, MPI_INTEGER, type_x_int(i), ierr ) 1100 CALL MPI_TYPE_COMMIT( type_x_int(i), ierr ) 1101 1102 1103 CALL MPI_TYPE_VECTOR( 2, nyn_l-nys_l+5, nyn_l-nys_l+5, MPI_INTEGER, type_y_int(i), ierr ) 1104 CALL MPI_TYPE_COMMIT( type_y_int(i), ierr ) 1105 1106 nxl_l = nxl_l / 2 1107 nxr_l = nxr_l / 2 1108 nys_l = nys_l / 2 1109 nyn_l = nyn_l / 2 1110 nzt_l = nzt_l / 2 1111 1112 ENDDO 1113 1172 1114 ENDIF 1173 1115 … … 1178 1120 !-- Setting of flags for inflow/outflow/nesting conditions. 1179 1121 IF ( pleft == MPI_PROC_NULL ) THEN 1180 IF ( bc_lr == 'dirichlet/radiation' .OR. bc_lr == 'nested' .OR. &1122 IF ( bc_lr == 'dirichlet/radiation' .OR. bc_lr == 'nested' .OR. & 1181 1123 bc_lr == 'nesting_offline' ) THEN 1182 1124 bc_dirichlet_l = .TRUE. 
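All MPI_TYPE_VECTOR calls above follow the same pattern: count blocks of blocklength contiguous array elements, separated by a constant stride, describe one ghost layer, so that a single committed derived datatype can later be handed to MPI_SENDRECV instead of packing the ghost points by hand. Below is a self-contained sketch of the 2-D line type, structurally analogous to type_x above but with assumed array sizes, and therefore not to be read as the PALM implementation:

    PROGRAM ghost_type_demo
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER ::  nbgp = 3, nnx = 8, nny = 8   ! assumed ghost width and subdomain size
       INTEGER ::  ierr, ngp_y, type_x

       CALL MPI_INIT( ierr )
    !
    !-- Number of points along one y-line of the 2-d array, including ghost points
       ngp_y = nny + 2 * nbgp
    !
    !-- nnx+2*nbgp blocks (one per x index), each nbgp elements long, separated by the
    !-- y-line length ngp_y: the same count/blocklength/stride triple as for type_x above
       CALL MPI_TYPE_VECTOR( nnx+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, ierr )
       CALL MPI_TYPE_COMMIT( type_x, ierr )

    !  ...the committed type could now be used in MPI_SENDRECV for the ghost exchange...

       CALL MPI_TYPE_FREE( type_x, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM ghost_type_demo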
… … 1185 1127 ENDIF 1186 1128 ENDIF 1187 1129 1188 1130 IF ( pright == MPI_PROC_NULL ) THEN 1189 1131 IF ( bc_lr == 'dirichlet/radiation' ) THEN 1190 1132 bc_radiation_r = .TRUE. 1191 ELSEIF ( bc_lr == 'radiation/dirichlet' .OR. bc_lr == 'nested' .OR. &1133 ELSEIF ( bc_lr == 'radiation/dirichlet' .OR. bc_lr == 'nested' .OR. & 1192 1134 bc_lr == 'nesting_offline' ) THEN 1193 1135 bc_dirichlet_r = .TRUE. … … 1198 1140 IF ( bc_ns == 'dirichlet/radiation' ) THEN 1199 1141 bc_radiation_s = .TRUE. 1200 ELSEIF ( bc_ns == 'radiation/dirichlet' .OR. bc_ns == 'nested' .OR. &1142 ELSEIF ( bc_ns == 'radiation/dirichlet' .OR. bc_ns == 'nested' .OR. & 1201 1143 bc_ns == 'nesting_offline' ) THEN 1202 1144 bc_dirichlet_s = .TRUE. … … 1205 1147 1206 1148 IF ( pnorth == MPI_PROC_NULL ) THEN 1207 IF ( bc_ns == 'dirichlet/radiation' .OR. bc_ns == 'nested' .OR. &1149 IF ( bc_ns == 'dirichlet/radiation' .OR. bc_ns == 'nested' .OR. & 1208 1150 bc_ns == 'nesting_offline' ) THEN 1209 1151 bc_dirichlet_n = .TRUE. … … 1213 1155 ENDIF 1214 1156 ! 1215 !-- In case of synthetic turbulence geneartor determine ids. 1216 !-- Please note, if no forcing or nesting is applied, the generator is applied 1217 !-- only at the leftlateral boundary.1157 !-- In case of synthetic turbulence geneartor determine ids. 1158 !-- Please note, if no forcing or nesting is applied, the generator is applied only at the left 1159 !-- lateral boundary. 1218 1160 IF ( use_syn_turb_gen ) THEN 1219 1161 IF ( bc_dirichlet_l ) THEN … … 1239 1181 1240 1182 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1241 CALL MPI_ALLREDUCE( id_stg_left_l, id_stg_left, 1, MPI_INTEGER, & 1242 MPI_SUM, comm1dx, ierr ) 1183 CALL MPI_ALLREDUCE( id_stg_left_l, id_stg_left, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr ) 1243 1184 1244 1185 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1245 CALL MPI_ALLREDUCE( id_stg_right_l, id_stg_right, 1, MPI_INTEGER, & 1246 MPI_SUM, comm1dx, ierr ) 1186 CALL MPI_ALLREDUCE( id_stg_right_l, id_stg_right, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr ) 1247 1187 1248 1188 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1249 CALL MPI_ALLREDUCE( id_stg_south_l, id_stg_south, 1, MPI_INTEGER, & 1250 MPI_SUM, comm1dy, ierr ) 1189 CALL MPI_ALLREDUCE( id_stg_south_l, id_stg_south, 1, MPI_INTEGER, MPI_SUM, comm1dy, ierr ) 1251 1190 1252 1191 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1253 CALL MPI_ALLREDUCE( id_stg_north_l, id_stg_north, 1, MPI_INTEGER, & 1254 MPI_SUM, comm1dy, ierr ) 1255 1256 ENDIF 1257 1192 CALL MPI_ALLREDUCE( id_stg_north_l, id_stg_north, 1, MPI_INTEGER, MPI_SUM, comm1dy, ierr ) 1193 1194 ENDIF 1195 1258 1196 ! 1259 1197 !-- Broadcast the id of the inflow PE … … 1264 1202 ENDIF 1265 1203 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1266 CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, & 1267 comm1dx, ierr ) 1204 CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr ) 1268 1205 1269 1206 ! … … 1271 1208 !-- WARNING: needs to be adjusted in case of inflows other than from left side! 1272 1209 IF ( turbulent_inflow ) THEN 1273 1210 1274 1211 IF ( NINT( recycling_width / dx, KIND=idp ) >= nxl .AND. 
& 1275 1212 NINT( recycling_width / dx, KIND=idp ) <= nxr ) THEN … … 1279 1216 ENDIF 1280 1217 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1281 CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, & 1282 comm1dx, ierr ) 1283 1218 CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr ) 1219 1284 1220 ENDIF 1285 1221 … … 1297 1233 comm1dx, ierr ) 1298 1234 1299 IF ( NINT( outflow_source_plane / dx ) >= nxl .AND. &1235 IF ( NINT( outflow_source_plane / dx ) >= nxl .AND. & 1300 1236 NINT( outflow_source_plane / dx ) <= nxr ) THEN 1301 1237 id_outflow_source_l = myidx … … 1304 1240 ENDIF 1305 1241 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1306 CALL MPI_ALLREDUCE( id_outflow_source_l, id_outflow_source, 1, &1307 MPI_INTEGER, MPI_SUM,comm1dx, ierr )1242 CALL MPI_ALLREDUCE( id_outflow_source_l, id_outflow_source, 1, MPI_INTEGER, MPI_SUM, & 1243 comm1dx, ierr ) 1308 1244 1309 1245 ENDIF … … 1330 1266 1331 1267 ! 1332 !-- At the inflow or outflow, u or v, respectively, have to be calculated for 1333 !-- one more grid point. 1268 !-- At the inflow or outflow, u or v, respectively, have to be calculated for one more grid point. 1334 1269 IF ( bc_dirichlet_l .OR. bc_radiation_l ) THEN 1335 1270 nxlu = nxl + 1 … … 1352 1287 1353 1288 CASE ( 1 ) 1354 ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1, &1355 nys_mg(i)-1:nyn_mg(i)+1, &1289 ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1, & 1290 nys_mg(i)-1:nyn_mg(i)+1, & 1356 1291 nxl_mg(i)-1:nxr_mg(i)+1) ) 1357 1292 1358 1293 CASE ( 2 ) 1359 ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1, &1360 nys_mg(i)-1:nyn_mg(i)+1, &1294 ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1, & 1295 nys_mg(i)-1:nyn_mg(i)+1, & 1361 1296 nxl_mg(i)-1:nxr_mg(i)+1) ) 1362 1297 1363 1298 CASE ( 3 ) 1364 ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1, &1365 nys_mg(i)-1:nyn_mg(i)+1, &1299 ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1, & 1300 nys_mg(i)-1:nyn_mg(i)+1, & 1366 1301 nxl_mg(i)-1:nxr_mg(i)+1) ) 1367 1302 1368 1303 CASE ( 4 ) 1369 ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1, &1370 nys_mg(i)-1:nyn_mg(i)+1, &1304 ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1, & 1305 nys_mg(i)-1:nyn_mg(i)+1, & 1371 1306 nxl_mg(i)-1:nxr_mg(i)+1) ) 1372 1307 1373 1308 CASE ( 5 ) 1374 ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1, &1375 nys_mg(i)-1:nyn_mg(i)+1, &1309 ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1, & 1310 nys_mg(i)-1:nyn_mg(i)+1, & 1376 1311 nxl_mg(i)-1:nxr_mg(i)+1) ) 1377 1312 1378 1313 CASE ( 6 ) 1379 ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1, &1380 nys_mg(i)-1:nyn_mg(i)+1, &1314 ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1, & 1315 nys_mg(i)-1:nyn_mg(i)+1, & 1381 1316 nxl_mg(i)-1:nxr_mg(i)+1) ) 1382 1317 1383 1318 CASE ( 7 ) 1384 ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1, &1385 nys_mg(i)-1:nyn_mg(i)+1, &1319 ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1, & 1320 nys_mg(i)-1:nyn_mg(i)+1, & 1386 1321 nxl_mg(i)-1:nxr_mg(i)+1) ) 1387 1322 1388 1323 CASE ( 8 ) 1389 ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1, &1390 nys_mg(i)-1:nyn_mg(i)+1, &1324 ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1, & 1325 nys_mg(i)-1:nyn_mg(i)+1, & 1391 1326 nxl_mg(i)-1:nxr_mg(i)+1) ) 1392 1327 1393 1328 CASE ( 9 ) 1394 ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1, &1395 nys_mg(i)-1:nyn_mg(i)+1, &1329 ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1, & 1330 nys_mg(i)-1:nyn_mg(i)+1, & 1396 1331 nxl_mg(i)-1:nxr_mg(i)+1) ) 1397 1332 1398 1333 CASE ( 10 ) 1399 ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1, &1400 nys_mg(i)-1:nyn_mg(i)+1, &1334 ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1, & 1335 nys_mg(i)-1:nyn_mg(i)+1, & 1401 1336 nxl_mg(i)-1:nxr_mg(i)+1) ) 1402 1337
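Several of the id determinations in the last part of the routine (inflow PE, recycling plane, outflow source plane, turbulence-generator boundaries) use the same idiom: only the PE whose subdomain contains the respective plane sets a local variable to its own rank, all other PEs leave it at zero, and an MPI_ALLREDUCE with MPI_SUM then makes that rank known to everyone. A minimal sketch of the idiom follows; the choice of the owning PE is arbitrary here and only serves the demonstration.

    PROGRAM owner_id_demo
       USE mpi
       IMPLICIT NONE
       INTEGER ::  ierr, myid, numprocs, id_plane, id_plane_l, plane_owner

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

       plane_owner = numprocs / 2    ! assumed: the plane lies inside this PE's subdomain
       id_plane_l  = 0
       IF ( myid == plane_owner )  id_plane_l = myid
    !
    !-- Only one PE contributes a non-zero value, so the sum equals that PE's rank
       CALL MPI_ALLREDUCE( id_plane_l, id_plane, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr )
       IF ( myid == 0 )  PRINT*, 'plane is owned by PE', id_plane
       CALL MPI_FINALIZE( ierr )
    END PROGRAM owner_id_demo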