Timestamp: Aug 25, 2020 7:52:08 AM (4 years ago)
Author: raasch
Message: files re-formatted to follow the PALM coding standard
File: 1 edited

  • palm/trunk/SOURCE/init_pegrid.f90

    r4564 r4648  
    11!> @file init_pegrid.f90
    2 !------------------------------------------------------------------------------!
     2!--------------------------------------------------------------------------------------------------!
    33! This file is part of the PALM model system.
    44!
    5 ! PALM is free software: you can redistribute it and/or modify it under the
    6 ! terms of the GNU General Public License as published by the Free Software
    7 ! Foundation, either version 3 of the License, or (at your option) any later
    8 ! version.
    9 !
    10 ! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
    11 ! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
    12 ! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
    13 !
    14 ! You should have received a copy of the GNU General Public License along with
    15 ! PALM. If not, see <http://www.gnu.org/licenses/>.
    16 !
    17 ! Copyright 1997-2020 Leibniz Universitaet Hannover
    18 !------------------------------------------------------------------------------!
     5! PALM is free software: you can redistribute it and/or modify it under the terms of the GNU General
     6! Public License as published by the Free Software Foundation, either version 3 of the License, or
     7! (at your option) any later version.
     8!
     9! PALM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
     10! implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
     11! Public License for more details.
     12!
     13! You should have received a copy of the GNU General Public License along with PALM. If not, see
     14! <http://www.gnu.org/licenses/>.
    1915!
    2016! Current revisions:
    2117! ------------------
    22 ! 
    23 ! 
     18!
     19!
    2420! Former revisions:
    2521! -----------------
    2622! $Id$
     23! file re-formatted to follow the PALM coding standard
     24!
     25! 4564 2020-06-12 14:03:36Z raasch
    2726! Vertical nesting method of Huq et al. (2019) removed
    28 ! 
     27!
    2928! 4461 2020-03-12 16:51:59Z raasch
    3029! communicator configurations for four virtual pe grids defined
    31 ! 
     30!
    3231! 4444 2020-03-05 15:59:50Z raasch
    3332! bugfix: cpp-directives for serial mode added
    34 ! 
     33!
    3534! 4360 2020-01-07 11:25:50Z suehring
    3635! changed message PA0467
    37 ! 
     36!
    3837! 4264 2019-10-15 16:00:23Z scharf
    3938! corrected error message string
    40 ! 
     39!
    4140! 4241 2019-09-27 06:32:47Z raasch
    42 ! Check added to ensure that subdomain grid has at least the size as given by the number
    43 ! of ghost points
    44 ! 
     41! Check added to ensure that subdomain grid has at least the size as given by the number of ghost
     42! points
     43!
    4544! 4182 2019-08-22 15:20:23Z scharf
    4645! Corrected "Former revisions" section
    47 ! 
     46!
    4847! 4045 2019-06-21 10:58:47Z raasch
    49 ! bugfix: kind attribute added to nint function to allow for large integers which may appear in
    50 ! case of default recycling width and small grid spacings
    51 ! 
     48! bugfix: kind attribute added to nint function to allow for large integers which may appear in case
     49!         of default recycling width and small grid spacings
     50!
    5251! 3999 2019-05-23 16:09:37Z suehring
    5352! Spend 3 ghost points also in case of pw-scheme when nesting is applied
    54 ! 
     53!
    5554! 3897 2019-04-15 11:51:14Z suehring
    5655! Minor revision of multigrid check; give warning instead of an abort.
    57 ! 
     56!
    5857! 3890 2019-04-12 15:59:20Z suehring
    59 ! Check if grid coarsening is possible on subdomain, in order to avoid that
    60 ! multigrid approach effectively reduces to a Gauss-Seidel scheme.
    61 ! 
     58! Check if grid coarsening is possible on subdomain, in order to avoid that multigrid approach
     59! effectively reduces to a Gauss-Seidel scheme.
     60!
    6261! 3885 2019-04-11 11:29:34Z kanani
    63 ! Changes related to global restructuring of location messages and introduction
    64 ! of additional debug messages
    65 ! 
     62! Changes related to global restructuring of location messages and introduction of additional debug
     63! messages
     64!
    6665! 3884 2019-04-10 13:31:55Z Giersch
    6766! id_recycling is only calculated in case of turbulent inflow
    68 ! 
     67!
    6968! 3761 2019-02-25 15:31:42Z raasch
    7069! unused variable removed
    71 ! 
     70!
    7271! 3655 2019-01-07 16:51:22Z knoop
    7372! variables documented
     
    7978! Description:
    8079! ------------
    81 !> Determination of the virtual processor topology (if not prescribed by the
    82 !> user)and computation of the grid point number and array bounds of the local
    83 !> domains.
    84 !> @todo: remove MPI-data types for 2D exchange on coarse multigrid level (not
    85 !>        used any more)
    86 !------------------------------------------------------------------------------!
     80!> Determination of the virtual processor topology (if not prescribed by the user) and computation
     81!> of the grid point number and array bounds of the local domains.
     82!> @todo: remove MPI-data types for 2D exchange on coarse multigrid level (not used any more)
     83!--------------------------------------------------------------------------------------------------!
    8784 SUBROUTINE init_pegrid
    88  
    89 
    90     USE control_parameters,                                                    &
    91         ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, &
    92                bc_lr, bc_ns, bc_radiation_l, bc_radiation_n, bc_radiation_r,   &
    93                bc_radiation_s, &
    94                grid_level, grid_level_count, maximum_grid_level,               &
    95                message_string, mg_switch_to_pe0_level,         &
    96                psolver
     85
     86
     87    USE control_parameters,                                                                        &
     88        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, bc_dirichlet_s, bc_lr, bc_ns,       &
     89               bc_radiation_l, bc_radiation_n, bc_radiation_r, bc_radiation_s, grid_level,         &
     90               grid_level_count, maximum_grid_level, message_string, mg_switch_to_pe0_level, psolver
    9791
    9892
    9993#if defined( __parallel )
    100     USE control_parameters,                                                    &
    101         ONLY:  coupling_mode, coupling_topology, gathered_size, momentum_advec, &
    102                outflow_source_plane, recycling_width, scalar_advec, subdomain_size, &
     94    USE control_parameters,                                                                        &
     95        ONLY:  coupling_mode, coupling_topology, gathered_size, momentum_advec,                    &
     96               outflow_source_plane, recycling_width, scalar_advec, subdomain_size,                &
    10397               turbulent_inflow, turbulent_outflow, y_shift
    10498
    105     USE grid_variables,                                                        &
     99    USE grid_variables,                                                                            &
    106100        ONLY:  dx
    107101#endif
    108        
    109     USE indices,                                                               &
    110         ONLY:  nnx, nny, nnz, nx, nxl, nxl_mg,   &
    111                nxlu, nxr, nxr_mg, ny, nyn, nyn_mg, nys, nys_mg,    &
    112                nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2,         &
    113                wall_flags_3, wall_flags_4, wall_flags_5, wall_flags_6,         &
    114                wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10
     102
     103    USE indices,                                                                                   &
     104        ONLY:  nnx, nny, nnz, nx, nxl, nxl_mg, nxlu, nxr, nxr_mg, ny, nyn, nyn_mg, nys, nys_mg,    &
     105               nysv, nz, nzb, nzt, nzt_mg, wall_flags_1, wall_flags_2, wall_flags_3, wall_flags_4, &
     106               wall_flags_5, wall_flags_6, wall_flags_7, wall_flags_8, wall_flags_9, wall_flags_10
    115107
    116108#if defined( __parallel )
    117     USE indices,                                                               &
     109    USE indices,                                                                                   &
    118110        ONLY:  mg_loc_ind, nbgp, nx_a, nx_o, ny_a, ny_o
    119111#endif
    120112
    121113    USE kinds
    122      
     114
    123115    USE pegrid
    124    
     116
    125117#if defined( __parallel )
    126     USE pmc_interface,                                                         &
     118    USE pmc_interface,                                                                             &
    127119        ONLY:  nested_run
    128120
    129     USE spectra_mod,                                                           &
     121    USE spectra_mod,                                                                               &
    130122        ONLY:  calculate_spectra
    131123
    132     USE synthetic_turbulence_generator_mod,                                    &
    133         ONLY:  id_stg_left, id_stg_north, id_stg_right, id_stg_south,          &
    134                use_syn_turb_gen
     124    USE synthetic_turbulence_generator_mod,                                                        &
     125        ONLY:  id_stg_left, id_stg_north, id_stg_right, id_stg_south, use_syn_turb_gen
    135126#endif
    136127
    137     USE transpose_indices,                                                     &
    138         ONLY:  nxl_y, nxl_z, nxr_y, nxr_z, nyn_x, nyn_z, nys_x,&
    139                nys_z, nzb_x, nzb_y, nzt_x, nzt_y
     128    USE transpose_indices,                                                                         &
     129        ONLY:  nxl_y, nxl_z, nxr_y, nxr_z, nyn_x, nyn_z, nys_x, nys_z, nzb_x, nzb_y, nzt_x, nzt_y
    140130
    141131#if defined( __parallel )
    142     USE transpose_indices,                                                     &
     132    USE transpose_indices,                                                                         &
    143133        ONLY:  nxl_yd, nxr_yd, nzb_yd, nzt_yd
    144134#endif
     
    152142    INTEGER(iwp) ::  id_outflow_source_l      !< local value of id_outflow_source
    153143    INTEGER(iwp) ::  id_recycling_l           !< ID indicating processors located at the recycling plane
    154     INTEGER(iwp) ::  id_stg_left_l            !< left lateral boundary local core id in case of turbulence generator 
    155     INTEGER(iwp) ::  id_stg_north_l           !< north lateral boundary local core id in case of turbulence generator 
    156     INTEGER(iwp) ::  id_stg_right_l           !< right lateral boundary local core id in case of turbulence generator 
    157     INTEGER(iwp) ::  id_stg_south_l           !< south lateral boundary local core id in case of turbulence generator 
     144    INTEGER(iwp) ::  id_stg_left_l            !< left lateral boundary local core id in case of turbulence generator
     145    INTEGER(iwp) ::  id_stg_north_l           !< north lateral boundary local core id in case of turbulence generator
     146    INTEGER(iwp) ::  id_stg_right_l           !< right lateral boundary local core id in case of turbulence generator
     147    INTEGER(iwp) ::  id_stg_south_l           !< south lateral boundary local core id in case of turbulence generator
    158148    INTEGER(iwp) ::  ind(5)                   !< array containing the subdomain bounds
    159149#endif
     
    222212
    223213!
    224 !--    Prescribed by user. Number of processors on the prescribed topology
    225 !--    must be equal to the number of PEs available to the job
     214!--    Prescribed by user. Number of processors on the prescribed topology must be equal to the
     215!--    number of PEs available to the job
    226216       IF ( ( npex * npey ) /= numprocs )  THEN
    227           WRITE( message_string, * ) 'number of PEs of the prescribed ',       &
    228               'topology (', npex*npey,') does not match & the number of ',     &
    229               'PEs available to the job (', numprocs, ')'
     217          WRITE( message_string, * ) 'number of PEs of the prescribed ', 'topology (', npex*npey,  &
     218                                     ') does not match & the number of ',                          &
     219                                     'PEs available to the job (', numprocs, ')'
    230220          CALL message( 'init_pegrid', 'PA0221', 1, 2, 0, 6, 0 )
    231221       ENDIF
     
    237227!--    If the processor topology is prescribed by the user, the number of
    238228!--    PEs must be given in both directions
    239        message_string = 'if the processor topology is prescribed by th' //     &
    240                 'e user & both values of "npex" and "npey" must be given' //   &
    241                 ' in the &NAMELIST-parameter file'
     229       message_string = 'if the processor topology is prescribed by th' //                         &
     230                        'e user & both values of "npex" and "npey" must be given' //               &
     231                        ' in the &NAMELIST-parameter file'
    242232       CALL message( 'init_pegrid', 'PA0222', 1, 2, 0, 6, 0 )
    243233
     
    245235
    246236!
    247 !-- Create four default MPI communicators for the 2d virtual PE grid. One of them will be used
    248 !-- as the main communicator for this run, while others might be used for specific quantities like
     237!-- Create four default MPI communicators for the 2d virtual PE grid. One of them will be used as
     238!-- the main communicator for this run, while others might be used for specific quantities like
    249239!-- aerosol, chemical species, or passive scalars), if their horizontal boundary conditions shall
    250240!-- be different from those of the other quantities (e.g. non-cyclic conditions for aerosols, and
     
    297287
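The communicator construction itself lies outside the hunks shown here. Purely as an illustration of the underlying MPI pattern (not PALM's actual code, which sets up four such communicators for the different boundary-condition combinations), a single cyclic 2d virtual PE grid with its four neighbour ranks could be created as follows:

    PROGRAM cart_demo
!
!--    Illustrative sketch only; pdims_demo and the 2 x 2 grid are hypothetical.
!--    Run with 4 MPI processes.
       USE mpi
       IMPLICIT NONE
       INTEGER ::  comm_cart, ierr, pleft, pright, psouth, pnorth
       INTEGER ::  pdims_demo(2)
       LOGICAL ::  periods(2)

       CALL MPI_INIT( ierr )
       pdims_demo = (/ 2, 2 /)            ! 2 x 2 virtual PE grid
       periods    = (/ .TRUE., .TRUE. /)  ! cyclic along both directions
       CALL MPI_CART_CREATE( MPI_COMM_WORLD, 2, pdims_demo, periods, .FALSE., comm_cart, ierr )
!
!--    Ranks of the direct neighbours along the first and second grid dimension
       CALL MPI_CART_SHIFT( comm_cart, 0, 1, pleft,  pright, ierr )
       CALL MPI_CART_SHIFT( comm_cart, 1, 1, psouth, pnorth, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM cart_demo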
    298288!
    299 !-- In case of cyclic boundary conditions, a y-shift at the boundaries in
    300 !-- x-direction can be introduced via parameter y_shift. The shift is done
    301 !-- by modifying the processor grid in such a way that processors located
    302 !-- at the x-boundary communicate across it to processors with y-coordinate
    303 !-- shifted by y_shift relative to their own. This feature can not be used
    304 !-- in combination with an fft pressure solver. It has been implemented to
    305 !-- counter the effect of streak structures in case of cyclic boundary
    306 !-- conditions. For a description of these see Munters
     289!-- In case of cyclic boundary conditions, a y-shift at the boundaries in x-direction can be
     290!-- introduced via parameter y_shift. The shift is done by modifying the processor grid in such a
     291!-- way that processors located at the x-boundary communicate across it to processors with
     292!-- y-coordinate shifted by y_shift relative to their own. This feature can not be used in
     293!-- combination with an fft pressure solver. It has been implemented to counter the effect of streak
     294!-- structures in case of cyclic boundary conditions. For a description of these see Munters
    307295!-- (2016; dx.doi.org/10.1063/1.4941912)
    308296!--
     
    310298    IF ( y_shift /= 0 ) THEN
    311299       IF ( bc_lr == 'cyclic' ) THEN
    312           IF ( TRIM( psolver ) /= 'multigrid' .AND.                            &
    313                 TRIM( psolver ) /= 'multigrid_noopt')                          &
    314           THEN
     300          IF ( TRIM( psolver ) /= 'multigrid' .AND.  TRIM( psolver ) /= 'multigrid_noopt')  THEN
    315301             message_string = 'y_shift /= 0 requires a multigrid pressure solver '
    316302             CALL message( 'check_parameters', 'PA0468', 1, 2, 0, 6, 0 )
     
    320306          CALL MPI_CART_COORDS( comm2d, pleft, ndim, lcoord, ierr )
    321307
    322 !   
    323 !--       If the x(y)-coordinate of the right (left) neighbor is smaller (greater)
    324 !--       than that of the calling process, then the calling process is located on
    325 !--       the right (left) boundary of the processor grid. In that case,
    326 !--       the y-coordinate of that neighbor is increased (decreased) by y_shift.
    327 !--       The rank of the process with that coordinate is then inquired and the
    328 !--       neighbor rank for MPI_SENDRECV, pright (pleft) is set to it.
    329 !--       In this way, the calling process receives a new right (left) neighbor
    330 !--       for all future MPI_SENDRECV calls. That neighbor has a y-coordinate
    331 !--       of y+(-)y_shift, where y is the original right (left) neighbor's
    332 !--       y-coordinate. The modulo-operation ensures that if the neighbor's
    333 !--       y-coordinate exceeds the grid-boundary, it will be relocated to
    334 !--       the opposite part of the grid cyclically.
    335           IF ( rcoord(1) < pcoord(1) ) THEN
     308!
     309!--       If the x(y)-coordinate of the right (left) neighbor is smaller (greater) than that of the
     310!--       calling process, then the calling process is located on the right (left) boundary of the
     311!--       processor grid. In that case, the y-coordinate of that neighbor is increased (decreased)
     312!--       by y_shift.
     313!--       The rank of the process with that coordinate is then inquired and the neighbor rank for
     314!--       MPI_SENDRECV, pright (pleft) is set to it.
     315!--       In this way, the calling process receives a new right (left) neighbor for all future
     316!--       MPI_SENDRECV calls. That neighbor has a y-coordinate of y+(-)y_shift, where y is the
     317!--       original right (left) neighbor's y-coordinate. The modulo-operation ensures that if the
     318!--       neighbor's y-coordinate exceeds the grid-boundary, it will be relocated to the opposite
     319!--       part of the grid cyclically.
     320          IF ( rcoord(1) < pcoord(1) )  THEN
    336321             rcoord(2) = MODULO( rcoord(2) + y_shift, pdims(2) )
    337322             CALL MPI_CART_RANK( comm2d, rcoord, pright, ierr )
    338323          ENDIF
    339324
    340           IF ( lcoord(1) > pcoord(1) ) THEN
     325          IF ( lcoord(1) > pcoord(1) )  THEN
    341326             lcoord(2) = MODULO( lcoord(2) - y_shift, pdims(2) )
    342327             CALL MPI_CART_RANK( comm2d, lcoord, pleft, ierr )
    343328          ENDIF
    344          
     329
    345330       ELSE
    346331!
    347 !--       y-shift for non-cyclic boundary conditions is only implemented 
     332!--       y-shift for non-cyclic boundary conditions is only implemented
    348333!--       for the turbulence recycling method in inflow_turbulence.f90
    349334          IF ( .NOT. turbulent_inflow )  THEN
    350              message_string = 'y_shift /= 0 is only allowed for cyclic ' //    &
    351                               'boundary conditions in both directions '  //    &
     335             message_string = 'y_shift /= 0 is only allowed for cyclic ' //                        &
     336                              'boundary conditions in both directions '  //                        &
    352337                              'or with turbulent_inflow == .TRUE.'
    353338             CALL message( 'check_parameters', 'PA0467', 1, 2, 0, 6, 0 )
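For illustration with hypothetical numbers: if pdims(2) = 4 and y_shift = 1, a PE on the right x-boundary whose right neighbour originally has y-coordinate 3 obtains MODULO( 3 + 1, 4 ) = 0, i.e. its new right neighbour is the PE in y-row 0, wrapped cyclically across the y-boundary of the virtual PE grid.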
     
    373358!
    374359!-- Calculate array bounds along x-direction for every PE.
    375     ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1),      &
    376               nysf(0:pdims(2)-1) )
     360    ALLOCATE( nxlf(0:pdims(1)-1), nxrf(0:pdims(1)-1), nynf(0:pdims(2)-1), nysf(0:pdims(2)-1) )
    377361
    378362    IF ( MOD( nx+1 , pdims(1) ) /= 0 )  THEN
    379        WRITE( message_string, * ) 'x-direction: gridpoint number (',nx+1,') ', &
    380                                   'is not an& integral multiple of the number',&
    381                                   ' of processors (',pdims(1),')'
     363       WRITE( message_string, * ) 'x-direction: gridpoint number (' ,nx+1, ') ',                  &
     364                                  'is not an& integral multiple of the number',                    &
     365                                  ' of processors (', pdims(1), ')'
    382366       CALL message( 'init_pegrid', 'PA0225', 1, 2, 0, 6, 0 )
    383367    ELSE
     
    395379!-- Calculate array bounds in y-direction for every PE.
    396380    IF ( MOD( ny+1 , pdims(2) ) /= 0 )  THEN
    397        WRITE( message_string, * ) 'y-direction: gridpoint number (',ny+1,') ', &
    398                                   'is not an& integral multiple of the number',&
    399                                   ' of processors (',pdims(2),')'
     381       WRITE( message_string, * ) 'y-direction: gridpoint number (', ny+1, ') ',                  &
     382                                  'is not an& integral multiple of the number',                    &
     383                                  ' of processors (', pdims(2), ')'
    400384       CALL message( 'init_pegrid', 'PA0227', 1, 2, 0, 6, 0 )
    401385    ELSE
     
    421405
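As an aside, a minimal sketch (hypothetical numbers, written as comments only) of how the subdomain bounds follow from the divisibility requirement checked above; the actual assignment loop is not part of the hunks shown here:

    !-- Assume nx = 127 (nx+1 = 128 grid points along x) and pdims(1) = 4:
    !-- every PE then holds nnx = 128 / 4 = 32 points, and the PE with
    !-- x-coordinate myidx covers
    !--    nxl = myidx * nnx                ! 0, 32, 64,  96
    !--    nxr = ( myidx + 1 ) * nnx - 1    ! 31, 63, 95, 127
    !-- If nx+1 were not an integral multiple of pdims(1), error PA0225 (or
    !-- PA0227 for the y-direction) above would abort the run instead.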
    422406!
    423 !-- Set switches to define if the PE is situated at the border of the virtual
    424 !-- processor grid
     407!-- Set switches to define if the PE is situated at the border of the virtual processor grid
    425408    IF ( nxl == 0 )   left_border_pe  = .TRUE.
    426409    IF ( nxr == nx )  right_border_pe = .TRUE.
     
    429412
    430413!
    431 !-- Calculate array bounds and gridpoint numbers for the transposed arrays
    432 !-- (needed in the pressure solver)
    433 !-- For the transposed arrays, cyclic boundaries as well as top and bottom
    434 !-- boundaries are omitted, because they are obstructive to the transposition
     414!-- Calculate array bounds and gridpoint numbers for the transposed arrays (needed in the pressure
     415!-- solver)
     416!-- For the transposed arrays, cyclic boundaries as well as top and bottom boundaries are omitted,
     417!-- because they are obstructive to the transposition
    435418
    436419!
     
    441424       IF ( pdims(2) /= 1 )  THEN
    442425          IF ( MOD( nz , pdims(1) ) /= 0 )  THEN
    443              WRITE( message_string, * ) 'transposition z --> x:& ',              &
    444                                         'nz=',nz,' is not an integral multiple ',&
    445                                         'of pdims(1)=',pdims(1)
     426             WRITE( message_string, * ) 'transposition z --> x:& ',                                &
     427                                        'nz=', nz, ' is not an integral multiple ',                &
     428                                        'of pdims(1)=', pdims(1)
    446429             CALL message( 'init_pegrid', 'PA0230', 1, 2, 0, 6, 0 )
    447430          ENDIF
     
    463446!--    2. transposition  x --> y
    464447       IF ( MOD( nx+1 , pdims(2) ) /= 0 )  THEN
    465           WRITE( message_string, * ) 'transposition x --> y:& ',              &
    466                                      'nx+1=',nx+1,' is not an integral ',     &
    467                                      'multiple of pdims(2)=',pdims(2)
     448          WRITE( message_string, * ) 'transposition x --> y:& ',                                   &
     449                                     'nx+1=', nx+1, ' is not an integral ',                        &
     450                                     'multiple of pdims(2)=', pdims(2)
    468451          CALL message( 'init_pegrid', 'PA0231', 1, 2, 0, 6, 0 )
    469452       ENDIF
     
    492475!--       along x, except that the upstream-spline method is switched on
    493476          IF ( MOD( ny+1 , pdims(1) ) /= 0 )  THEN
    494              WRITE( message_string, * ) 'transposition y --> z:& ',            &
    495                                         'ny+1=',ny+1,' is not an integral ',   &
    496                                         'multiple of pdims(1)=',pdims(1)
     477             WRITE( message_string, * ) 'transposition y --> z:& ',                                &
     478                                        'ny+1=', ny+1, ' is not an integral ',                     &
     479                                        'multiple of pdims(1)=', pdims(1)
    497480             CALL message( 'init_pegrid', 'PA0232', 1, 2, 0, 6, 0 )
    498481          ENDIF
     
    503486!--       This condition must be fulfilled for a 1D-decomposition along x
    504487          IF ( MOD( ny+1 , pdims(1) ) /= 0 )  THEN
    505              WRITE( message_string, * ) 'transposition x --> y:& ',            &
    506                                         'ny+1=',ny+1,' is not an integral ',   &
    507                                         'multiple of pdims(1)=',pdims(1)
     488             WRITE( message_string, * ) 'transposition x --> y:& ',                                &
     489                                        'ny+1=', ny+1, ' is not an integral ',                     &
     490                                        'multiple of pdims(1)=', pdims(1)
    508491             CALL message( 'init_pegrid', 'PA0233', 1, 2, 0, 6, 0 )
    509492          ENDIF
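Taken together, the transposition checks impose a set of divisibility constraints on the grid; a compact worked example with hypothetical numbers:

    !-- For a 2d decomposition with pdims = (/ 4, 2 /):
    !--    z --> x  requires  MOD( nz,   pdims(1) ) = 0,  e.g. nz   = 64   (64/4)
    !--    x --> y  requires  MOD( nx+1, pdims(2) ) = 0,  e.g. nx+1 = 128  (128/2)
    !--    y --> z  requires  MOD( ny+1, pdims(1) ) = 0,  e.g. ny+1 = 96   (96/4)
    !-- so that each PE receives equally sized, complete slabs in every
    !-- transposed direction of the pressure solver.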
     
    517500    IF ( calculate_spectra )  THEN
    518501       IF ( MOD( nz, pdims(2) ) /= 0 )  THEN
    519           WRITE( message_string, * ) 'direct transposition z --> y (needed ',  &
    520                                      'for spectra):& nz=',nz,' is not an ',    &
    521                                      'integral multiple of pdims(2)=',pdims(2)
     502          WRITE( message_string, * ) 'direct transposition z --> y (needed ',                      &
     503                                     'for spectra):& nz=', nz, ' is not an ',                      &
     504                                     'integral multiple of pdims(2)=', pdims(2)
    522505          CALL message( 'init_pegrid', 'PA0234', 1, 2, 0, 6, 0 )
    523506       ELSE
     
    532515    IF ( psolver == 'poisfft'  .OR.  calculate_spectra )  THEN
    533516!
    534 !--    Indices for direct transpositions y --> x 
     517!--    Indices for direct transpositions y --> x
    535518!--    (they are only possible in case of a 1d-decomposition along x)
    536519       IF ( pdims(2) == 1 )  THEN
     
    547530    IF ( psolver == 'poisfft' )  THEN
    548531!
    549 !--    Indices for direct transpositions x --> y 
     532!--    Indices for direct transpositions x --> y
    550533!--    (they are only possible in case of a 1d-decomposition along y)
    551534       IF ( pdims(1) == 1 )  THEN
     
    579562!--    Receive data from all other PEs
    580563       DO  i = 1, numprocs-1
    581           CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
    582                          ierr )
     564          CALL MPI_RECV( ibuf, 4, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, ierr )
    583565          hor_index_bounds(:,i) = ibuf(1:4)
    584566       ENDDO
     
    602584       PRINT*, '*** processor topology ***'
    603585       PRINT*, ' '
    604        PRINT*, 'myid   pcoord    left right  south north  idx idy   nxl: nxr',&
    605                &'   nys: nyn'
    606        PRINT*, '------------------------------------------------------------',&
    607                &'-----------'
    608        WRITE (*,1000)  0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, &
    609                        myidx, myidy, nxl, nxr, nys, nyn
    610 1000   FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3, &
    611                2(2X,I4,':',I4))
     586       PRINT*, 'myid   pcoord    left right  south north  idx idy   nxl: nxr','   nys: nyn'
     587       PRINT*, '------------------------------------------------------------','-----------'
     588       WRITE (*,1000)  0, pcoord(1), pcoord(2), pleft, pright, psouth, pnorth, myidx, myidy, nxl,  &
     589                       nxr, nys, nyn
     5901000   FORMAT (I4,2X,'(',I3,',',I3,')',3X,I4,2X,I4,3X,I4,2X,I4,2X,I3,1X,I3,2(2X,I4,':',I4))
    612591
    613592!
    614593!--    Receive data from the other PEs
    615594       DO  i = 1,numprocs-1
    616           CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, &
    617                          ierr )
     595          CALL MPI_RECV( ibuf, 12, MPI_INTEGER, i, MPI_ANY_TAG, comm2d, status, ierr )
    618596          WRITE (*,1000)  i, ( ibuf(j) , j = 1,12 )
    619597       ENDDO
     
    626604       ibuf(8) = myidy; ibuf(9) = nxl; ibuf(10) = nxr; ibuf(11) = nys
    627605       ibuf(12) = nyn
    628        CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr )       
     606       CALL MPI_SEND( ibuf, 12, MPI_INTEGER, 0, myid, comm2d, ierr )
    629607    ENDIF
    630608#endif
    631609
    632 ! 
     610!
    633611!-- Determine the number of ghost point layers
    634     IF ( scalar_advec   == 'ws-scheme'  .OR.                                   &
     612    IF ( scalar_advec   == 'ws-scheme'  .OR.                                                       &
    635613         momentum_advec == 'ws-scheme'  .OR.  nested_run )  THEN
    636614       nbgp = 3
    637615    ELSE
    638616       nbgp = 1
    639     ENDIF 
    640 
    641 !
    642 !-- Check that the number of computational grid points is not smaller than the number of
    643 !-- ghost points.
     617    ENDIF
     618
     619!
     620!-- Check that the number of computational grid points is not smaller than the number of ghost
     621!-- points.
    644622    IF ( nnx < nbgp )  THEN
    645623       WRITE( message_string, * ) 'number of subdomain grid points along x (', nnx, ') is smaller',&
     
    654632
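For illustration of the ghost-point check above, with hypothetical numbers: with the ws-scheme nbgp = 3, so a setup with nx+1 = 128 grid points along x and pdims(1) = 64 leaves only nnx = 2 points per subdomain and trips the check, while pdims(1) = 32 (nnx = 4) passes.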
    655633!
    656 !-- Create a new MPI derived datatype for the exchange of surface (xy) data,
    657 !-- which is needed for coupled atmosphere-ocean runs.
     634!-- Create a new MPI derived datatype for the exchange of surface (xy) data, which is needed for
     635!-- coupled atmosphere-ocean runs.
    658636!-- First, calculate number of grid points of an xy-plane.
    659637    ngp_xy  = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp )
     
    662640
    663641    IF ( TRIM( coupling_mode ) /= 'uncoupled' )  THEN
    664    
     642
    665643!
    666644!--    Pass the number of grid points of the atmosphere model to
     
    673651          IF ( myid == 0 )  THEN
    674652
    675              CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter,  &
    676                             ierr )
    677              CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter,  &
    678                             ierr )
    679              CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, &
    680                             ierr )
    681              CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter,  &
    682                             status, ierr )
    683              CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter,  &
    684                             status, ierr )
    685              CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6,      &
    686                             comm_inter, status, ierr )
     653             CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, ierr )
     654             CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter,  ierr )
     655             CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, ierr )
     656             CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, status, ierr )
     657             CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, status, ierr )
     658             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, comm_inter, status, ierr )
    687659          ENDIF
    688660
    689661          CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr )
    690           CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 
     662          CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr )
    691663          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr )
    692        
     664
    693665       ELSEIF ( coupling_mode == 'ocean_to_atmosphere' )  THEN
    694666
    695667          nx_o = nx
    696           ny_o = ny
    697 
    698           IF ( myid == 0 ) THEN
    699 
    700              CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, &
    701                             ierr )
    702              CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, &
    703                             ierr )
    704              CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, &
    705                             status, ierr )
     668          ny_o = ny
     669
     670          IF ( myid == 0 )  THEN
     671
     672             CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, ierr )
     673             CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, ierr )
     674             CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, status, ierr )
    706675             CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr )
    707676             CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr )
     
    710679
    711680          CALL MPI_BCAST( nx_a, 1, MPI_INTEGER, 0, comm2d, ierr)
    712           CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr) 
    713           CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr) 
    714 
    715        ENDIF
    716  
     681          CALL MPI_BCAST( ny_a, 1, MPI_INTEGER, 0, comm2d, ierr)
     682          CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr)
     683
     684       ENDIF
     685
    717686       ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp )
    718687       ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp )
    719688
    720689!
    721 !--    Determine if the horizontal grid and the number of PEs in ocean and
    722 !--    atmosphere is same or not
    723        IF ( nx_o == nx_a  .AND.  ny_o == ny_a  .AND.  &
    724             pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) &
    725        THEN
     690!--    Determine if the horizontal grid and the number of PEs in ocean and atmosphere is same or not.
     691       IF ( nx_o == nx_a  .AND.  ny_o == ny_a  .AND.                                               &
     692            pdims(1) == pdims_remote(1)  .AND.  pdims(2) == pdims_remote(2) )  THEN
    726693          coupling_topology = 0
    727694       ELSE
    728695          coupling_topology = 1
    729        ENDIF
    730 
    731 !
    732 !--    Determine the target PEs for the exchange between ocean and
    733 !--    atmosphere (comm2d)
     696       ENDIF
     697
     698!
     699!--    Determine the target PEs for the exchange between ocean and atmosphere (comm2d)
    734700       IF ( coupling_topology == 0 )  THEN
    735701!
    736 !--       In case of identical topologies, every atmosphere PE has exactly one
    737 !--       ocean PE counterpart and vice versa
    738           IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN
     702!--       In case of identical topologies, every atmosphere PE has exactly one ocean PE counterpart
     703!--       and vice versa
     704          IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' )  THEN
    739705             target_id = myid + numprocs
    740706          ELSE
    741              target_id = myid 
     707             target_id = myid
    742708          ENDIF
    743709
    744710       ELSE
    745711!
    746 !--       In case of nonequivalent topology in ocean and atmosphere only for
    747 !--       PE0 in ocean and PE0 in atmosphere a target_id is needed, since
    748 !--       data exchange between ocean and atmosphere will be done only
    749 !--       between these PEs.   
     712!--       In case of nonequivalent topology in ocean and atmosphere only for PE0 in ocean and PE0 in
     713!--       atmosphere a target_id is needed, since data exchange between ocean and atmosphere will
     714!--       be done only between these PEs.
    750715          IF ( myid == 0 )  THEN
    751716
    752717             IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' )  THEN
    753                 target_id = numprocs 
     718                target_id = numprocs
    754719             ELSE
    755720                target_id = 0
     
    765730
    766731!
    767 !-- Array bounds when running on a single PE (respectively a non-parallel
    768 !-- machine)
     732!-- Array bounds when running on a single PE (respectively a non-parallel machine)
    769733    nxl = 0
    770734    nxr = nx
     
    784748
    785749!
    786 !-- Array bounds for the pressure solver (in the parallel code, these bounds
    787 !-- are the ones for the transposed arrays)
     750!-- Array bounds for the pressure solver (in the parallel code, these bounds are the ones for the
     751!-- transposed arrays)
    788752    nys_x = nys
    789753    nyn_x = nyn
     
    804768
    805769!
    806 !-- Calculate number of grid levels necessary for the multigrid poisson solver
    807 !-- as well as the gridpoint indices on each level
     770!-- Calculate number of grid levels necessary for the multigrid poisson solver as well as the
     771!-- gridpoint indices on each level
    808772    IF ( psolver(1:9) == 'multigrid' )  THEN
    809773
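The loop that counts the possible grid levels is not contained in the hunks shown here. A minimal, hypothetical sketch of the underlying idea (halve a subdomain dimension until it becomes odd or reaches 2):

    PROGRAM mg_levels_demo
!
!--    Illustrative sketch only; the hard-coded size and variable names are hypothetical.
       IMPLICIT NONE
       INTEGER ::  i, mg_levels_x

       i = 32              ! assumed subdomain size along x (nnx)
       mg_levels_x = 1
       DO WHILE ( MOD( i, 2 ) == 0  .AND.  i /= 2 )
          i = i / 2
          mg_levels_x = mg_levels_x + 1
       ENDDO
       PRINT*, 'possible grid levels along x: ', mg_levels_x   ! 32,16,8,4,2 -> 5
    END PROGRAM mg_levels_demo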
     
    833797       ENDDO
    834798!
    835 !--    The optimized MG-solver does not allow odd values for nz at the coarsest
    836 !--    grid level
     799!--    The optimized MG-solver does not allow odd values for nz at the coarsest grid level
    837800       IF ( TRIM( psolver ) /= 'multigrid_noopt' )  THEN
    838801          IF ( MOD( k, 2 ) /= 0 )  mg_levels_z = mg_levels_z - 1
    839802!
    840 !--       An odd value of nz does not work. The finest level must have an even
    841 !--       value.
     803!--       An odd value of nz does not work. The finest level must have an even value.
    842804          IF (  mg_levels_z == 0 )  THEN
    843805             message_string = 'optimized multigrid method requires nz to be even'
     
    847809
    848810       maximum_grid_level = MIN( mg_levels_x, mg_levels_y, mg_levels_z )
    849 !       
    850 !--    Check if subdomain sizes prevents any coarsening. 
    851 !--    This case, the maximum number of grid levels is 1, i.e. effectively
    852 !--    a Gauss-Seidel scheme is applied rather than a multigrid approach.
     811!
     812!--    Check if subdomain sizes prevents any coarsening.
     813!--    This case, the maximum number of grid levels is 1, i.e. effectively a Gauss-Seidel scheme is
     814!--    applied rather than a multigrid approach.
    853815!--    Give a warning in this case.
    854816       IF ( maximum_grid_level == 1  .AND.  mg_switch_to_pe0_level == -1 )  THEN
    855           message_string = 'No grid coarsening possible, multigrid ' //        &
    856                            'approach effectively reduces to a Gauss-Seidel ' //&
     817          message_string = 'No grid coarsening possible, multigrid ' //                            &
     818                           'approach effectively reduces to a Gauss-Seidel ' //                    &
    857819                           'scheme.'
    858          
     820
    859821          CALL message( 'poismg', 'PA0648', 0, 1, 0, 6, 0 )
    860822       ENDIF
    861823
    862824!
    863 !--    Find out, if the total domain allows more levels. These additional
    864 !--    levels are identically processed on all PEs.
     825!--    Find out, if the total domain allows more levels. These additional levels are identically
     826!--    processed on all PEs.
    865827       IF ( numprocs > 1  .AND.  mg_switch_to_pe0_level /= -1 )  THEN
    866828
     
    887849
    888850             IF ( maximum_grid_level_l > mg_switch_to_pe0_level_l )  THEN
    889                 mg_switch_to_pe0_level_l = maximum_grid_level_l - &
    890                                            mg_switch_to_pe0_level_l + 1
     851                mg_switch_to_pe0_level_l = maximum_grid_level_l - mg_switch_to_pe0_level_l + 1
    891852             ELSE
    892853                mg_switch_to_pe0_level_l = 0
     
    901862
    902863!
    903 !--       Use switch level calculated above only if it is not pre-defined
    904 !--       by user
     864!--       Use switch level calculated above only if it is not pre-defined by user
    905865          IF ( mg_switch_to_pe0_level == 0 )  THEN
    906866             IF ( mg_switch_to_pe0_level_l /= 0 )  THEN
     
    912872!
    913873!--          Check pre-defined value and reset to default, if neccessary
    914              IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l  .OR.      &
     874             IF ( mg_switch_to_pe0_level < mg_switch_to_pe0_level_l  .OR.                          &
    915875                  mg_switch_to_pe0_level >= maximum_grid_level_l )  THEN
    916                 message_string = 'mg_switch_to_pe0_level ' //                  &
     876                message_string = 'mg_switch_to_pe0_level ' //                                      &
    917877                                 'out of range and reset to 0'
    918878                CALL message( 'init_pegrid', 'PA0235', 0, 1, 0, 6, 0 )
     
    920880             ELSE
    921881!
    922 !--             Use the largest number of possible levels anyway and recalculate
    923 !--             the switch level to this largest number of possible values
     882!--             Use the largest number of possible levels anyway and recalculate the switch level to
     883!--             this largest number of possible values
    924884                maximum_grid_level = maximum_grid_level_l
    925885
     
    930890       ENDIF
    931891
    932        ALLOCATE( grid_level_count(maximum_grid_level),                       &
    933                  nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level), &
    934                  nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level), &
     892       ALLOCATE( grid_level_count(maximum_grid_level),                                             &
     893                 nxl_mg(0:maximum_grid_level), nxr_mg(0:maximum_grid_level),                       &
     894                 nyn_mg(0:maximum_grid_level), nys_mg(0:maximum_grid_level),                       &
    935895                 nzt_mg(0:maximum_grid_level) )
    936896
    937897       grid_level_count = 0
    938898!
    939 !--    Index zero required as dummy due to definition of arrays f2 and p2 in
    940 !--    recursive subroutine next_mg_level
     899!--    Index zero required as dummy due to definition of arrays f2 and p2 in recursive subroutine
     900!--    next_mg_level
    941901       nxl_mg(0) = 0; nxr_mg(0) = 0; nyn_mg(0) = 0; nys_mg(0) = 0; nzt_mg(0) = 0
    942902
     
    948908#if defined( __parallel )
    949909!
    950 !--          Save the grid size of the subdomain at the switch level, because
    951 !--          it is needed in poismg.
     910!--          Save the grid size of the subdomain at the switch level, because it is needed in poismg.
    952911             ind(1) = nxl_l; ind(2) = nxr_l
    953912             ind(3) = nys_l; ind(4) = nyn_l
     
    969928             nys_l = 0
    970929!
    971 !--          The size of this gathered array must not be larger than the
    972 !--          array tend, which is used in the multigrid scheme as a temporary
    973 !--          array. Therefore the subdomain size of an PE is calculated and
    974 !--          the size of the gathered grid. These values are used in 
    975 !--          routines pres and poismg
    976              subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) * &
     930!--          The size of this gathered array must not be larger than the array tend, which is used
     931!--          in the multigrid scheme as a temporary array. Therefore the subdomain size of an PE is
     932!--          calculated and the size of the gathered grid. These values are used in routines pres
     933!--          and poismg.
     934             subdomain_size = ( nxr - nxl + 2 * nbgp + 1 ) *                                       &
    977935                              ( nyn - nys + 2 * nbgp + 1 ) * ( nzt - nzb + 2 )
    978              gathered_size  = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) *  &
    979                               ( nzt_l - nzb + 2 )
     936             gathered_size  = ( nxr_l - nxl_l + 3 ) * ( nyn_l - nys_l + 3 ) * ( nzt_l - nzb + 2 )
    980937
    981938#else
    982              message_string = 'multigrid gather/scatter impossible ' //        &
     939             message_string = 'multigrid gather/scatter impossible ' //                            &
    983940                          'in non parallel mode'
    984941             CALL message( 'init_pegrid', 'PA0237', 1, 2, 0, 6, 0 )
     
    992949          nzt_mg(i) = nzt_l
    993950
    994           nxl_l = nxl_l / 2
    995           nxr_l = nxr_l / 2
    996           nys_l = nys_l / 2
    997           nyn_l = nyn_l / 2
    998           nzt_l = nzt_l / 2
    999          
    1000        ENDDO
    1001 
    1002 !
    1003 !--    Temporary problem: Currently calculation of maxerror in routine poismg crashes
    1004 !--    if grid data are collected on PE0 already on the finest grid level.
    1005 !--    To be solved later.
    1006        IF ( maximum_grid_level == mg_switch_to_pe0_level )  THEN
    1007           message_string = 'grid coarsening on subdomain level cannot be performed'
    1008           CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )
    1009        ENDIF
    1010 
    1011     ELSE
    1012 
    1013        maximum_grid_level = 0
    1014 
    1015     ENDIF
    1016 
    1017 !
    1018 !-- Default level 0 tells exchange_horiz that all ghost planes have to be
    1019 !-- exchanged. grid_level is adjusted in poismg, where only one ghost plane
    1020 !-- is required.
    1021     grid_level = 0
    1022 
    1023 #if defined( __parallel )
    1024 !
    1025 !-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)
    1026     ngp_y  = nyn - nys + 1 + 2 * nbgp
    1027 
    1028 !
    1029 !-- Define new MPI derived datatypes for the exchange of ghost points in
    1030 !-- x- and y-direction for 2D-arrays (line)
    1031     CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x,     &
    1032                           ierr )
    1033     CALL MPI_TYPE_COMMIT( type_x, ierr )
    1034 
    1035     CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
    1036     CALL MPI_TYPE_COMMIT( type_y, ierr )
    1037 !
    1038 !-- Define new MPI derived datatypes for the exchange of ghost points in
    1039 !-- x- and y-direction for 2D-INTEGER arrays (line) - on normal grid.
    1040 !-- Define types for 32-bit and 8-bit Integer. The 8-bit Integer are only
    1041 !-- required on normal grid, while 32-bit Integer may be also required on
    1042 !-- coarser grid level in case of multigrid solver.
    1043 !
    1044 !-- 8-bit Integer
    1045     CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_BYTE,             &
    1046                           type_x_byte, ierr )
    1047     CALL MPI_TYPE_COMMIT( type_x_byte, ierr )
    1048 
    1049     CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_BYTE,                        &
    1050                           type_y_byte, ierr )
    1051     CALL MPI_TYPE_COMMIT( type_y_byte, ierr )
    1052 !
    1053 !-- 32-bit Integer
    1054     ALLOCATE( type_x_int(0:maximum_grid_level),                                &
    1055               type_y_int(0:maximum_grid_level) )
    1056              
    1057     CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER,          &
    1058                           type_x_int(0), ierr )
    1059     CALL MPI_TYPE_COMMIT( type_x_int(0), ierr )
    1060 
    1061     CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int(0), ierr )
    1062     CALL MPI_TYPE_COMMIT( type_y_int(0), ierr )
    1063 !
    1064 !-- Calculate gridpoint numbers for the exchange of ghost points along x
    1065 !-- (yz-plane for 3D-arrays) and define MPI derived data type(s) for the
    1066 !-- exchange of ghost points in y-direction (xz-plane).
    1067 !-- Do these calculations for the model grid and (if necessary) also
    1068 !-- for the coarser grid levels used in the multigrid method
    1069     ALLOCATE ( ngp_xz(0:maximum_grid_level),                                   &
    1070                ngp_xz_int(0:maximum_grid_level),                               &
    1071                ngp_yz(0:maximum_grid_level),                                   &
    1072                ngp_yz_int(0:maximum_grid_level),                               &
    1073                type_xz(0:maximum_grid_level),                                  &
    1074                type_xz_int(0:maximum_grid_level),                              &
    1075                type_yz(0:maximum_grid_level),                                  &
    1076                type_yz_int(0:maximum_grid_level) )
    1077 
    1078     nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt
    1079 
    1080 !
    1081 !-- Discern between the model grid, which needs nbgp ghost points and
    1082 !-- grid levels for the multigrid scheme. In the latter case only one
    1083 !-- ghost point is necessary.
    1084 !-- First definition of MPI-datatypes for exchange of ghost layers on normal
    1085 !-- grid. The following loop is needed for data exchange in poismg.f90.
    1086 !
    1087 !-- Determine number of grid points of yz-layer for exchange
    1088     ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)
    1089 
    1090 !
    1091 !-- Define an MPI-datatype for the exchange of left/right boundaries.
    1092 !-- Although data are contiguous in physical memory (which does not
    1093 !-- necessarily require an MPI-derived datatype), the data exchange between
    1094 !-- left and right PE's using the MPI-derived type is 10% faster than without.
    1095     CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), &
    1096                           MPI_REAL, type_xz(0), ierr )
    1097     CALL MPI_TYPE_COMMIT( type_xz(0), ierr )
    1098 
    1099     CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), &
    1100                           ierr )
    1101     CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
    1102 
    1103 !
    1104 !-- Define data types for exchange of 3D Integer arrays.
    1105     ngp_yz_int(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)
    1106 
    1107     CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int(0),   &
    1108                           MPI_INTEGER, type_xz_int(0), ierr )
    1109     CALL MPI_TYPE_COMMIT( type_xz_int(0), ierr )
    1110 
    1111     CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int(0), ngp_yz_int(0), MPI_INTEGER,     &
    1112                           type_yz_int(0), ierr )
    1113     CALL MPI_TYPE_COMMIT( type_yz_int(0), ierr )
    1114 
    1115 !
    1116 !-- Definition of MPI-datatypes for multigrid method (coarser level grids)
    1117     IF ( psolver(1:9) == 'multigrid' )  THEN
    1118 !   
    1119 !--    Definition of MPI-datatype as above, but only 1 ghost level is used
    1120        DO  i = maximum_grid_level, 1 , -1
    1121 !
    1122 !--       For 3D-exchange on different multigrid level, one ghost point for
    1123 !--       REAL arrays, two ghost points for INTEGER arrays
    1124           ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
    1125           ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)
    1126 
    1127           ngp_xz_int(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
    1128           ngp_yz_int(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)
    1129 !
    1130 !--       MPI data type for REAL arrays, for xz-layers
    1131           CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i),       &
    1132                                 MPI_REAL, type_xz(i), ierr )
    1133           CALL MPI_TYPE_COMMIT( type_xz(i), ierr )
    1134 
    1135 !
    1136 !--       MPI data type for INTEGER arrays, for xz-layers
    1137           CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz_int(i),   &
    1138                                 MPI_INTEGER, type_xz_int(i), ierr )
    1139           CALL MPI_TYPE_COMMIT( type_xz_int(i), ierr )
    1140 
    1141 !
    1142 !--       MPI data type for REAL arrays, for yz-layers
    1143           CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), &
    1144                                 ierr )
    1145           CALL MPI_TYPE_COMMIT( type_yz(i), ierr )
    1146 !
    1147 !--       MPI data type for INTEGER arrays, for yz-layers
    1148           CALL MPI_TYPE_VECTOR( 1, ngp_yz_int(i), ngp_yz_int(i), MPI_INTEGER,  &
    1149                                 type_yz_int(i), ierr )
    1150           CALL MPI_TYPE_COMMIT( type_yz_int(i), ierr )
    1151 
    1152 
    1153 !--       For 2D-exchange of INTEGER arrays on coarser grid level, where 2 ghost
    1154 !--       points need to be exchanged. Only required for 32-bit Integer arrays.
    1155           CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+5, 2, nyn_l-nys_l+5, MPI_INTEGER,  &
    1156                                 type_x_int(i), ierr )
    1157           CALL MPI_TYPE_COMMIT( type_x_int(i), ierr )
    1158 
    1159 
    1160           CALL MPI_TYPE_VECTOR( 2, nyn_l-nys_l+5, nyn_l-nys_l+5, MPI_INTEGER,  &
    1161                                 type_y_int(i), ierr )
    1162           CALL MPI_TYPE_COMMIT( type_y_int(i), ierr )
    1163 
    1164951          nxl_l = nxl_l / 2
    1165952          nxr_l = nxr_l / 2
     
    1170957       ENDDO
    1171958
     959!
     960!--    Temporary problem: Currently calculation of maxerror in routine poismg crashes if grid data
     961!--    are collected on PE0 already on the finest grid level.
     962!--    To be solved later.
     963       IF ( maximum_grid_level == mg_switch_to_pe0_level )  THEN
     964          message_string = 'grid coarsening on subdomain level cannot be performed'
     965          CALL message( 'poismg', 'PA0236', 1, 2, 0, 6, 0 )
     966       ENDIF
     967
     968    ELSE
     969
     970       maximum_grid_level = 0
     971
     972    ENDIF
     973
     974!
     975!-- Default level 0 tells exchange_horiz that all ghost planes have to be exchanged. grid_level is
     976!-- adjusted in poismg, where only one ghost plane is required.
     977    grid_level = 0
     978
     979#if defined( __parallel )
     980!
     981!-- Gridpoint number for the exchange of ghost points (y-line for 2D-arrays)
     982    ngp_y  = nyn - nys + 1 + 2 * nbgp
     983
     984!
     985!-- Define new MPI derived datatypes for the exchange of ghost points in x- and y-direction for
     986!-- 2D-arrays (line)
     987    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, ierr )
     988    CALL MPI_TYPE_COMMIT( type_x, ierr )
     989
     990    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_REAL, type_y, ierr )
     991    CALL MPI_TYPE_COMMIT( type_y, ierr )
     992!
     993!-- Define new MPI derived datatypes for the exchange of ghost points in x- and y-direction for
     994!-- 2D-INTEGER arrays (line) - on normal grid.
     995!-- Define types for 32-bit and 8-bit Integer. The 8-bit Integer are only required on normal grid,
     996!-- while 32-bit Integer may be also required on coarser grid level in case of multigrid solver.
     997!
     998!-- 8-bit Integer
     999    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_BYTE, type_x_byte, ierr )
     1000    CALL MPI_TYPE_COMMIT( type_x_byte, ierr )
     1001
     1002    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_BYTE, type_y_byte, ierr )
     1003    CALL MPI_TYPE_COMMIT( type_y_byte, ierr )
     1004!
     1005!-- 32-bit Integer
     1006    ALLOCATE( type_x_int(0:maximum_grid_level), type_y_int(0:maximum_grid_level) )
     1007
     1008    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, type_x_int(0), ierr )
     1009    CALL MPI_TYPE_COMMIT( type_x_int(0), ierr )
     1010
     1011    CALL MPI_TYPE_VECTOR( nbgp, ngp_y, ngp_y, MPI_INTEGER, type_y_int(0), ierr )
     1012    CALL MPI_TYPE_COMMIT( type_y_int(0), ierr )
     1013!
     1014!-- Calculate gridpoint numbers for the exchange of ghost points along x (yz-plane for 3D-arrays)
     1015!-- and define MPI derived data type(s) for the exchange of ghost points in y-direction (xz-plane).
     1016!-- Do these calculations for the model grid and (if necessary) also for the coarser grid levels
     1017!-- used in the multigrid method
     1018    ALLOCATE ( ngp_xz(0:maximum_grid_level),                                                       &
     1019               ngp_xz_int(0:maximum_grid_level),                                                   &
     1020               ngp_yz(0:maximum_grid_level),                                                       &
     1021               ngp_yz_int(0:maximum_grid_level),                                                   &
     1022               type_xz(0:maximum_grid_level),                                                      &
     1023               type_xz_int(0:maximum_grid_level),                                                  &
     1024               type_yz(0:maximum_grid_level),                                                      &
     1025               type_yz_int(0:maximum_grid_level) )
     1026
     1027    nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt
     1028
     1029!
      1030!-- Discern between the model grid, which needs nbgp ghost points, and the grid levels for the
      1031!-- multigrid scheme. In the latter case only one ghost point is necessary.
     1032!-- First definition of MPI-datatypes for exchange of ghost layers on normal grid. The following
     1033!-- loop is needed for data exchange in poismg.f90.
     1034!
     1035!-- Determine number of grid points of yz-layer for exchange
     1036    ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)
     1037
     1038!
     1039!-- Define an MPI-datatype for the exchange of left/right boundaries.
     1040!-- Although data are contiguous in physical memory (which does not necessarily require an
      1041!-- MPI-derived datatype), the data exchange between left and right PEs using the MPI-derived type
      1042!-- is 10% faster than without it.
     1043    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), MPI_REAL, type_xz(0),     &
     1044                          ierr )
     1045    CALL MPI_TYPE_COMMIT( type_xz(0), ierr )
     1046
     1047    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), ierr )
     1048    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
     1049
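To make the plane sizes used in the two calls above concrete, here is a short, self-contained sketch with invented subdomain bounds (not PALM code). With these numbers, type_xz(0) consists of nxr-nxl+1+2*nbgp blocks of nbgp*(nzt-nzb+2) contiguous REALs, one block every ngp_yz(0) elements.

! Invented bounds, for illustration only.
PROGRAM ghost_plane_size_sketch

   IMPLICIT NONE

   INTEGER, PARAMETER ::  nbgp = 3              !< assumed ghost layers
   INTEGER, PARAMETER ::  nzb = 0, nzt = 64     !< assumed vertical index bounds
   INTEGER, PARAMETER ::  nys = 0, nyn = 31     !< assumed y index bounds
   INTEGER, PARAMETER ::  nxl = 0, nxr = 31     !< assumed x index bounds

   INTEGER ::  ngp_yz_0   !< points of one yz-ghost column (all z-levels times all y-points incl. ghosts)

   ngp_yz_0 = ( nzt - nzb + 2 ) * ( nyn - nys + 1 + 2 * nbgp )

   PRINT*, 'ngp_yz(0)                  =', ngp_yz_0                    ! 66 * 38 = 2508
   PRINT*, 'blocks in type_xz(0)       =', nxr - nxl + 1 + 2 * nbgp    ! 38
   PRINT*, 'block length of type_xz(0) =', nbgp * ( nzt - nzb + 2 )    ! 198

END PROGRAM ghost_plane_size_sketch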
     1050!
     1051!-- Define data types for exchange of 3D Integer arrays.
     1052    ngp_yz_int(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp)
     1053
     1054    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz_int(0), MPI_INTEGER,          &
     1055                          type_xz_int(0), ierr )
     1056    CALL MPI_TYPE_COMMIT( type_xz_int(0), ierr )
     1057
     1058    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz_int(0), ngp_yz_int(0), MPI_INTEGER, type_yz_int(0), ierr )
     1059    CALL MPI_TYPE_COMMIT( type_yz_int(0), ierr )
     1060
     1061!
     1062!-- Definition of MPI-datatypes for multigrid method (coarser level grids)
     1063    IF ( psolver(1:9) == 'multigrid' )  THEN
     1064!
      1065!--    Definition of MPI-datatypes as above, but only one ghost layer is used
     1066       DO  i = maximum_grid_level, 1 , -1
     1067!
      1068!--       For 3D-exchange on the different multigrid levels: one ghost point for REAL arrays, two
      1069!--       ghost points for INTEGER arrays
     1070          ngp_xz(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
     1071          ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)
     1072
     1073          ngp_xz_int(i) = (nzt_l - nzb_l + 2) * (nxr_l - nxl_l + 3)
     1074          ngp_yz_int(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3)
     1075!
     1076!--       MPI data type for REAL arrays, for xz-layers
     1077          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), MPI_REAL, type_xz(i),     &
     1078                                ierr )
     1079          CALL MPI_TYPE_COMMIT( type_xz(i), ierr )
     1080
     1081!
     1082!--       MPI data type for INTEGER arrays, for xz-layers
     1083          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz_int(i), MPI_INTEGER,          &
     1084                                type_xz_int(i), ierr )
     1085          CALL MPI_TYPE_COMMIT( type_xz_int(i), ierr )
     1086
     1087!
     1088!--       MPI data type for REAL arrays, for yz-layers
     1089          CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), ierr )
     1090          CALL MPI_TYPE_COMMIT( type_yz(i), ierr )
     1091!
     1092!--       MPI data type for INTEGER arrays, for yz-layers
     1093          CALL MPI_TYPE_VECTOR( 1, ngp_yz_int(i), ngp_yz_int(i), MPI_INTEGER, type_yz_int(i), ierr )
     1094          CALL MPI_TYPE_COMMIT( type_yz_int(i), ierr )
     1095
     1096
     1097!--       For 2D-exchange of INTEGER arrays on coarser grid level, where 2 ghost points need to be
     1098!--       exchanged. Only required for 32-bit Integer arrays.
     1099          CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+5, 2, nyn_l-nys_l+5, MPI_INTEGER, type_x_int(i), ierr )
     1100          CALL MPI_TYPE_COMMIT( type_x_int(i), ierr )
     1101
     1102
     1103          CALL MPI_TYPE_VECTOR( 2, nyn_l-nys_l+5, nyn_l-nys_l+5, MPI_INTEGER, type_y_int(i), ierr )
     1104          CALL MPI_TYPE_COMMIT( type_y_int(i), ierr )
     1105
     1106          nxl_l = nxl_l / 2
     1107          nxr_l = nxr_l / 2
     1108          nys_l = nys_l / 2
     1109          nyn_l = nyn_l / 2
     1110          nzt_l = nzt_l / 2
     1111
     1112       ENDDO
     1113
    11721114    ENDIF
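The loop above halves the local index bounds once per coarser multigrid level. A small self-contained sketch (bounds and number of levels invented for illustration, not PALM code) of how the subdomain shrinks from level to level:

! Invented bounds and level count, for illustration only.
PROGRAM mg_coarsening_sketch

   IMPLICIT NONE

   INTEGER, PARAMETER ::  maximum_grid_level = 3   !< assumed number of coarser levels

   INTEGER ::  i
   INTEGER ::  nxl_l = 0, nxr_l = 31, nys_l = 0, nyn_l = 31, nzt_l = 64

   DO  i = maximum_grid_level, 1, -1
      PRINT*, 'level', i, ':  nx =', nxr_l - nxl_l + 1, '  ny =', nyn_l - nys_l + 1,               &
              '  nzt =', nzt_l
      nxl_l = nxl_l / 2
      nxr_l = nxr_l / 2
      nys_l = nys_l / 2
      nyn_l = nyn_l / 2
      nzt_l = nzt_l / 2
   ENDDO

END PROGRAM mg_coarsening_sketch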
    11731115
     
    11781120!-- Setting of flags for inflow/outflow/nesting conditions.
    11791121    IF ( pleft == MPI_PROC_NULL )  THEN
    1180        IF ( bc_lr == 'dirichlet/radiation'  .OR.  bc_lr == 'nested'  .OR.      &
     1122       IF ( bc_lr == 'dirichlet/radiation'  .OR.  bc_lr == 'nested'  .OR.                          &
    11811123            bc_lr == 'nesting_offline' )  THEN
    11821124          bc_dirichlet_l  = .TRUE.
     
    11851127       ENDIF
    11861128    ENDIF
    1187  
     1129
    11881130    IF ( pright == MPI_PROC_NULL )  THEN
    11891131       IF ( bc_lr == 'dirichlet/radiation' )  THEN
    11901132          bc_radiation_r = .TRUE.
    1191        ELSEIF ( bc_lr == 'radiation/dirichlet'  .OR.  bc_lr == 'nested'  .OR.  &
     1133       ELSEIF ( bc_lr == 'radiation/dirichlet'  .OR.  bc_lr == 'nested'  .OR.                      &
    11921134                bc_lr == 'nesting_offline' )  THEN
    11931135          bc_dirichlet_r  = .TRUE.
     
    11981140       IF ( bc_ns == 'dirichlet/radiation' )  THEN
    11991141          bc_radiation_s = .TRUE.
    1200        ELSEIF ( bc_ns == 'radiation/dirichlet'  .OR.  bc_ns == 'nested'  .OR.  &
     1142       ELSEIF ( bc_ns == 'radiation/dirichlet'  .OR.  bc_ns == 'nested'  .OR.                      &
    12011143                bc_ns == 'nesting_offline' )  THEN
    12021144          bc_dirichlet_s  = .TRUE.
     
    12051147
    12061148    IF ( pnorth == MPI_PROC_NULL )  THEN
    1207        IF ( bc_ns == 'dirichlet/radiation'  .OR.  bc_ns == 'nested'  .OR.      &
     1149       IF ( bc_ns == 'dirichlet/radiation'  .OR.  bc_ns == 'nested'  .OR.                          &
    12081150            bc_ns == 'nesting_offline' )  THEN
    12091151          bc_dirichlet_n  = .TRUE.
     
    12131155    ENDIF
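The tests against MPI_PROC_NULL above identify PEs that sit at a physical lateral boundary: in a Cartesian communicator that is non-periodic in a direction, MPI_CART_SHIFT returns MPI_PROC_NULL for the missing neighbour. A minimal, self-contained sketch of that behaviour for a hypothetical 1-d decomposition (not PALM code):

! Sketch of boundary detection via MPI_PROC_NULL; the 1-d process grid is invented.
PROGRAM boundary_rank_sketch

   USE mpi

   IMPLICIT NONE

   INTEGER ::  ierr, myid, numprocs, comm_cart
   INTEGER ::  pleft, pright
   INTEGER ::  dims(1)
   LOGICAL ::  periods(1) = (/ .FALSE. /)   !< non-cyclic, e.g. 'dirichlet/radiation'

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

   dims(1) = numprocs
   CALL MPI_CART_CREATE( MPI_COMM_WORLD, 1, dims, periods, .FALSE., comm_cart, ierr )
   CALL MPI_COMM_RANK( comm_cart, myid, ierr )
   CALL MPI_CART_SHIFT( comm_cart, 0, 1, pleft, pright, ierr )

   IF ( pleft  == MPI_PROC_NULL )  PRINT*, 'rank', myid, 'is at the left boundary'
   IF ( pright == MPI_PROC_NULL )  PRINT*, 'rank', myid, 'is at the right boundary'

   CALL MPI_FINALIZE( ierr )

END PROGRAM boundary_rank_sketch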
    12141156!
    1215 !-- In case of the synthetic turbulence generator, determine the ids.
    1216 !-- Please note, if no forcing or nesting is applied, the generator is applied
    1217 !-- only at the left lateral boundary.
     1157!-- In case of the synthetic turbulence generator, determine the ids.
     1158!-- Please note, if no forcing or nesting is applied, the generator is applied only at the left
     1159!-- lateral boundary.
    12181160    IF ( use_syn_turb_gen )  THEN
    12191161       IF ( bc_dirichlet_l )  THEN
     
    12391181
    12401182       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1241        CALL MPI_ALLREDUCE( id_stg_left_l, id_stg_left,   1, MPI_INTEGER,       &
    1242                            MPI_SUM, comm1dx, ierr )
     1183       CALL MPI_ALLREDUCE( id_stg_left_l, id_stg_left,   1, MPI_INTEGER, MPI_SUM, comm1dx, ierr )
    12431184
    12441185       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1245        CALL MPI_ALLREDUCE( id_stg_right_l, id_stg_right, 1, MPI_INTEGER,       &
    1246                            MPI_SUM, comm1dx, ierr )
     1186       CALL MPI_ALLREDUCE( id_stg_right_l, id_stg_right, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr )
    12471187
    12481188       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1249        CALL MPI_ALLREDUCE( id_stg_south_l, id_stg_south, 1, MPI_INTEGER,       &
    1250                            MPI_SUM, comm1dy, ierr )
     1189       CALL MPI_ALLREDUCE( id_stg_south_l, id_stg_south, 1, MPI_INTEGER, MPI_SUM, comm1dy, ierr )
    12511190
    12521191       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1253        CALL MPI_ALLREDUCE( id_stg_north_l, id_stg_north, 1, MPI_INTEGER,       &
    1254                            MPI_SUM, comm1dy, ierr )
    1255 
    1256     ENDIF
    1257  
     1192       CALL MPI_ALLREDUCE( id_stg_north_l, id_stg_north, 1, MPI_INTEGER, MPI_SUM, comm1dy, ierr )
     1193
     1194    ENDIF
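The MPI_ALLREDUCE calls above, and the similar ones for the inflow, recycling and outflow-source ids further below, all use the same pattern: only the PE(s) holding the feature write their process-grid coordinate into a local variable that is zero elsewhere, and the sum reduction then makes that id known on every PE of the communicator. A minimal sketch of the pattern with a hypothetical marked rank (not PALM code):

! Minimal sketch of the allreduce-by-sum id broadcast; the marked rank is invented.
PROGRAM id_by_allreduce_sketch

   USE mpi

   IMPLICIT NONE

   INTEGER ::  ierr, myid, numprocs
   INTEGER ::  id_marker_l   !< local contribution (zero on all but one rank)
   INTEGER ::  id_marker     !< globally known id after the reduction

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

!
!--Assume the feature (e.g. an inflow boundary) lives on the last rank.
   id_marker_l = 0
   IF ( myid == numprocs-1 )  id_marker_l = myid

   CALL MPI_ALLREDUCE( id_marker_l, id_marker, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr )

   IF ( myid == 0 )  PRINT*, 'id of the marked rank =', id_marker

   CALL MPI_FINALIZE( ierr )

END PROGRAM id_by_allreduce_sketch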
     1195
    12581196!
    12591197!-- Broadcast the id of the inflow PE
     
    12641202    ENDIF
    12651203    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1266     CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, &
    1267                         comm1dx, ierr )
     1204    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr )
    12681205
    12691206!
     
    12711208!-- WARNING: needs to be adjusted in case of inflows other than from left side!
    12721209    IF ( turbulent_inflow ) THEN
    1273    
     1210
    12741211       IF ( NINT( recycling_width / dx, KIND=idp ) >= nxl  .AND.                                   &
    12751212            NINT( recycling_width / dx, KIND=idp ) <= nxr )  THEN
     
    12791216       ENDIF
    12801217       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1281        CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, &
    1282                            comm1dx, ierr )
    1283                            
     1218       CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, comm1dx, ierr )
     1219
    12841220    ENDIF
    12851221
     
    12971233                           comm1dx, ierr )
    12981234
    1299        IF ( NINT( outflow_source_plane / dx ) >= nxl  .AND. &
     1235       IF ( NINT( outflow_source_plane / dx ) >= nxl  .AND.                                        &
    13001236            NINT( outflow_source_plane / dx ) <= nxr )  THEN
    13011237          id_outflow_source_l = myidx
     
    13041240       ENDIF
    13051241       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    1306        CALL MPI_ALLREDUCE( id_outflow_source_l, id_outflow_source, 1, &
    1307                            MPI_INTEGER, MPI_SUM, comm1dx, ierr )
     1242       CALL MPI_ALLREDUCE( id_outflow_source_l, id_outflow_source, 1, MPI_INTEGER, MPI_SUM,        &
     1243                           comm1dx, ierr )
    13081244
    13091245    ENDIF
     
    13301266
    13311267!
    1332 !-- At the inflow or outflow, u or v, respectively, have to be calculated for
    1333 !-- one more grid point.
     1268!-- At the inflow or outflow, u or v, respectively, have to be calculated for one more grid point.
    13341269    IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
    13351270       nxlu = nxl + 1
     
    13521287
    13531288              CASE ( 1 )
    1354                  ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1,         &
    1355                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1289                 ALLOCATE( wall_flags_1(nzb:nzt_mg(i)+1,                                           &
     1290                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13561291                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13571292
    13581293              CASE ( 2 )
    1359                  ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1,         &
    1360                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1294                 ALLOCATE( wall_flags_2(nzb:nzt_mg(i)+1,                                           &
     1295                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13611296                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13621297
    13631298              CASE ( 3 )
    1364                  ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1,         &
    1365                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1299                 ALLOCATE( wall_flags_3(nzb:nzt_mg(i)+1,                                           &
     1300                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13661301                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13671302
    13681303              CASE ( 4 )
    1369                  ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1,         &
    1370                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1304                 ALLOCATE( wall_flags_4(nzb:nzt_mg(i)+1,                                           &
     1305                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13711306                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13721307
    13731308              CASE ( 5 )
    1374                  ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1,         &
    1375                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1309                 ALLOCATE( wall_flags_5(nzb:nzt_mg(i)+1,                                           &
     1310                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13761311                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13771312
    13781313              CASE ( 6 )
    1379                  ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1,         &
    1380                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1314                 ALLOCATE( wall_flags_6(nzb:nzt_mg(i)+1,                                           &
     1315                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13811316                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13821317
    13831318              CASE ( 7 )
    1384                  ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1,         &
    1385                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1319                 ALLOCATE( wall_flags_7(nzb:nzt_mg(i)+1,                                           &
     1320                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13861321                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13871322
    13881323              CASE ( 8 )
    1389                  ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1,         &
    1390                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1324                 ALLOCATE( wall_flags_8(nzb:nzt_mg(i)+1,                                           &
     1325                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13911326                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13921327
    13931328              CASE ( 9 )
    1394                  ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1,         &
    1395                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1329                 ALLOCATE( wall_flags_9(nzb:nzt_mg(i)+1,                                           &
     1330                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    13961331                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    13971332
    13981333              CASE ( 10 )
    1399                  ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1,        &
    1400                                         nys_mg(i)-1:nyn_mg(i)+1, &
     1334                 ALLOCATE( wall_flags_10(nzb:nzt_mg(i)+1,                                          &
     1335                                        nys_mg(i)-1:nyn_mg(i)+1,                                   &
    14011336                                        nxl_mg(i)-1:nxr_mg(i)+1) )
    14021337