!> @file pmc_interface_mod.f90 !------------------------------------------------------------------------------! ! This file is part of the PALM model system. ! ! PALM is free software: you can redistribute it and/or modify it under the ! terms of the GNU General Public License as published by the Free Software ! Foundation, either version 3 of the License, or (at your option) any later ! version. ! ! PALM is distributed in the hope that it will be useful, but WITHOUT ANY ! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR ! A PARTICULAR PURPOSE. See the GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License along with ! PALM. If not, see . ! ! Copyright 1997-2019 Leibniz Universitaet Hannover !------------------------------------------------------------------------------! ! ! Current revisions: ! ------------------ ! ! ! Former revisions: ! ----------------- ! $Id: pmc_interface_mod.f90 3822 2019-03-27 13:10:23Z raasch $ ! Temporary increase of the vertical dimension of the parent-grid arrays and ! workarrc_t is cancelled as unnecessary. ! ! 3819 2019-03-27 11:01:36Z hellstea ! Adjustable anterpolation buffer introduced on all nest boundaries, it is controlled ! by the new nesting_parameters parameter anterpolation_buffer_width. ! ! 3804 2019-03-19 13:46:20Z hellstea ! Anterpolation domain is lowered from kct-1 to kct-3 to avoid exessive ! kinetic energy from building up in CBL flows. ! ! 3803 2019-03-19 13:44:40Z hellstea ! A bug fixed in lateral boundary interpolations. Dimension of val changed from ! 5 to 3 in pmci_setup_parent and pmci_setup_child. ! ! 3794 2019-03-15 09:36:33Z raasch ! two remaining unused variables removed ! ! 3792 2019-03-14 16:50:07Z hellstea ! Interpolations improved. Large number of obsolete subroutines removed. ! All unused variables removed. ! ! 3741 2019-02-13 16:24:49Z hellstea ! Interpolations and child initialization adjusted to handle set ups with child ! pe-subdomain dimension not integer divisible by the grid-spacing ratio in the ! respective direction. Set ups with pe-subdomain dimension smaller than the ! grid-spacing ratio in the respective direction are now forbidden. ! ! 3708 2019-01-30 12:58:13Z hellstea ! Checks for parent / child grid line matching introduced. ! Interpolation of nest-boundary-tangential velocity components revised. ! ! 3697 2019-01-24 17:16:13Z hellstea ! Bugfix: upper k-bound in the child initialization interpolation ! pmci_interp_1sto_all corrected. ! Copying of the nest boundary values into the redundant 2nd and 3rd ghost-node ! layers is added to the pmci_interp_1sto_*-routines. ! ! 3681 2019-01-18 15:06:05Z hellstea ! Linear interpolations are replaced by first order interpolations. The linear ! interpolation routines are still included but not called. In the child ! inititialization the interpolation is also changed to 1st order and the linear ! interpolation is not kept. ! Subroutine pmci_map_fine_to_coarse_grid is rewritten. ! Several changes in pmci_init_anterp_tophat. ! Child's parent-grid arrays (uc, vc,...) are made non-overlapping on the PE- ! subdomain boundaries in order to allow grid-spacing ratios higher than nbgp. ! Subroutine pmci_init_tkefactor is removed as unnecessary. ! ! 3655 2019-01-07 16:51:22Z knoop ! Remove unused variable simulated_time ! ! 3636 2018-12-19 13:48:34Z raasch ! nopointer option removed ! ! 3592 2018-12-03 12:38:40Z suehring ! 
Number of coupled arrays is determined dynamically (instead of a fixed value ! of 32) ! ! 3524 2018-11-14 13:36:44Z raasch ! declaration statements rearranged to avoid compile time errors ! ! 3484 2018-11-02 14:41:25Z hellstea ! Introduction of reversibility correction to the interpolation routines in order to ! guarantee mass and scalar conservation through the nest boundaries. Several errors ! are corrected and pmci_ensure_nest_mass_conservation is permanently removed. ! ! 3274 2018-09-24 15:42:55Z knoop ! Modularization of all bulk cloud physics code components ! ! 3241 2018-09-12 15:02:00Z raasch ! unused variables removed ! ! 3217 2018-08-29 12:53:59Z suehring ! Revise calculation of index bounds for array index_list, prevent compiler ! (cray) to delete the loop at high optimization level. ! ! 3215 2018-08-29 09:58:59Z suehring ! Apply an additional switch controlling the nesting of chemical species ! ! 3209 2018-08-27 16:58:37Z suehring ! Variable names for nest_bound_x replaced by bc_dirichlet_x. ! Remove commented prints into debug files. ! ! 3182 2018-07-27 13:36:03Z suehring ! dz was replaced by dz(1) ! ! 3049 2018-05-29 13:52:36Z Giersch ! Error messages revised ! ! 3045 2018-05-28 07:55:41Z Giersch ! Error messages revised ! ! 3022 2018-05-18 11:12:35Z suehring ! Minor fix - working precision added to real number ! ! 3021 2018-05-16 08:14:20Z maronga ! Bugfix: variable lcr was defined as INTENT(OUT) instead of INTENT(INOUT) ! ! 3020 2018-05-14 10:45:48Z hellstea ! Bugfix in pmci_define_loglaw_correction_parameters ! ! 3001 2018-04-20 12:27:13Z suehring ! Bugfix, replace MERGE function by an if-condition in the anterpolation (in ! order to avoid floating-point exceptions). ! ! 2997 2018-04-19 13:35:17Z suehring ! Mask topography grid points in anterpolation ! ! 2984 2018-04-18 11:51:30Z hellstea ! Bugfix in the log-law correction initialization. Pivot node cannot any more be ! selected from outside the subdomain in order to prevent array under/overflows. ! ! 2967 2018-04-13 11:22:08Z raasch ! bugfix: missing parallel cpp-directives added ! ! 2951 2018-04-06 09:05:08Z kanani ! Add log_point_s for pmci_model_configuration ! ! 2938 2018-03-27 15:52:42Z suehring ! - Nesting for RANS mode implemented ! - Interpolation of TKE onto child domain only if both parent and child are ! either in LES mode or in RANS mode ! - Interpolation of dissipation if both parent and child are in RANS mode ! and TKE-epsilon closure is applied ! - Enable anterpolation of TKE and dissipation rate in case parent and ! child operate in RANS mode ! ! - Some unused variables removed from ONLY list ! - Some formatting adjustments for particle nesting ! ! 2936 2018-03-27 14:49:27Z suehring ! Control logics improved to allow nesting also in cases with ! constant_flux_layer = .F. or constant_diffusion = .T. ! ! 2895 2018-03-15 10:26:12Z hellstea ! Change in the nest initialization (pmci_interp_tril_all). Bottom wall BC is no ! longer overwritten. ! ! 2868 2018-03-09 13:25:09Z hellstea ! Local conditional Neumann conditions for one-way coupling removed. ! ! 2853 2018-03-05 14:44:20Z suehring ! Bugfix in init log-law correction. ! ! 2841 2018-02-27 15:02:57Z knoop ! Bugfix: wrong placement of include 'mpif.h' corrected ! ! 2812 2018-02-16 13:40:25Z hellstea ! Bugfixes in computation of the interpolation loglaw-correction parameters ! ! 2809 2018-02-15 09:55:58Z schwenkel ! Bugfix for gfortran: Replace the function C_SIZEOF with STORAGE_SIZE ! ! 2806 2018-02-14 17:10:37Z thiele ! Bugfixing Write statements ! ! 
2804 2018-02-14 16:57:03Z thiele ! preprocessor directive for c_sizeof in case of __gfortran added ! ! 2803 2018-02-14 16:56:32Z thiele ! Introduce particle transfer in nested models. ! ! 2795 2018-02-07 14:48:48Z hellstea ! Bugfix in computation of the anterpolation under-relaxation functions. ! ! 2773 2018-01-30 14:12:54Z suehring ! - Nesting for chemical species ! - Bugfix in setting boundary condition at downward-facing walls for passive ! scalar ! - Some formatting adjustments ! ! 2718 2018-01-02 08:49:38Z maronga ! Corrected "Former revisions" section ! ! 2701 2017-12-15 15:40:50Z suehring ! Changes from last commit documented ! ! 2698 2017-12-14 18:46:24Z suehring ! Bugfix in get_topography_top_index ! ! 2696 2017-12-14 17:12:51Z kanani ! Change in file header (GPL part) ! - Bugfix in init_tke_factor (MS) ! ! 2669 2017-12-06 16:03:27Z raasch ! file extension for nested domains changed to "_N##", ! created flag file in order to enable combine_plot_fields to process nest data ! ! 2663 2017-12-04 17:40:50Z suehring ! Bugfix, wrong coarse-grid index in init_tkefactor used. ! ! 2602 2017-11-03 11:06:41Z hellstea ! Index-limit related bug (occurred with nesting_mode='vertical') fixed in ! pmci_interp_tril_t. Check for too high nest domain added in pmci_setup_parent. ! Some cleaning up made. ! ! 2582 2017-10-26 13:19:46Z hellstea ! Resetting of e within buildings / topography in pmci_parent_datatrans removed ! as unnecessary since e is not anterpolated, and as incorrect since it overran ! the default Neumann condition (bc_e_b). ! ! 2359 2017-08-21 07:50:30Z hellstea ! A minor indexing error in pmci_init_loglaw_correction is corrected. ! ! 2351 2017-08-15 12:03:06Z kanani ! Removed check (PA0420) for nopointer version ! ! 2350 2017-08-15 11:48:26Z kanani ! Bugfix and error message for nopointer version. ! ! 2318 2017-07-20 17:27:44Z suehring ! Get topography top index via Function call ! ! 2317 2017-07-20 17:27:19Z suehring ! Set bottom boundary condition after anterpolation. ! Some variable description added. ! ! 2293 2017-06-22 12:59:12Z suehring ! In anterpolation, exclude grid points which are used for interpolation. ! This avoids the accumulation of numerical errors leading to increased ! variances for shallow child domains. ! ! 2292 2017-06-20 09:51:42Z schwenkel ! Implementation of new microphysic scheme: cloud_scheme = 'morrison' ! includes two more prognostic equations for cloud drop concentration (nc) ! and cloud water content (qc). ! ! 2285 2017-06-15 13:15:41Z suehring ! Consider multiple pure-vertical nest domains in overlap check ! ! 2283 2017-06-14 10:17:34Z suehring ! Bugfixes in inititalization of log-law correction concerning ! new topography concept ! ! 2281 2017-06-13 11:34:50Z suehring ! Bugfix, add pre-preprocessor directives to enable non-parrallel mode ! ! 2241 2017-06-01 13:46:13Z hellstea ! A minor indexing error in pmci_init_loglaw_correction is corrected. ! ! 2240 2017-06-01 13:45:34Z hellstea ! ! 2232 2017-05-30 17:47:52Z suehring ! Adjustments to new topography concept ! ! 2229 2017-05-30 14:52:52Z hellstea ! A minor indexing error in pmci_init_anterp_tophat is corrected. ! ! 2174 2017-03-13 08:18:57Z maronga ! Added support for cloud physics quantities, syntax layout improvements. Data ! transfer of qc and nc is prepared but currently deactivated until both ! quantities become prognostic variables. ! Some bugfixes. ! ! 2019 2016-09-30 13:40:09Z hellstea ! Bugfixes mainly in determining the anterpolation index bounds. These errors ! 
were detected when first time tested using 3:1 grid-spacing. ! ! 2003 2016-08-24 10:22:32Z suehring ! Humidity and passive scalar also separated in nesting mode ! ! 2000 2016-08-20 18:09:15Z knoop ! Forced header and separation lines into 80 columns ! ! 1938 2016-06-13 15:26:05Z hellstea ! Minor clean-up. ! ! 1901 2016-05-04 15:39:38Z raasch ! Initial version of purely vertical nesting introduced. ! Code clean up. The words server/client changed to parent/child. ! ! 1900 2016-05-04 15:27:53Z raasch ! unused variables removed ! ! 1894 2016-04-27 09:01:48Z raasch ! bugfix: pt interpolations are omitted in case that the temperature equation is ! switched off ! ! 1892 2016-04-26 13:49:47Z raasch ! bugfix: pt is not set as a data array in case that the temperature equation is ! switched off with neutral = .TRUE. ! ! 1882 2016-04-20 15:24:46Z hellstea ! The factor ijfc for nfc used in anterpolation is redefined as 2-D array ! and precomputed in pmci_init_anterp_tophat. ! ! 1878 2016-04-19 12:30:36Z hellstea ! Synchronization rewritten, logc-array index order changed for cache ! optimization ! ! 1850 2016-04-08 13:29:27Z maronga ! Module renamed ! ! ! 1808 2016-04-05 19:44:00Z raasch ! MPI module used by default on all machines ! ! 1801 2016-04-05 13:12:47Z raasch ! bugfix for r1797: zero setting of temperature within topography does not work ! and has been disabled ! ! 1797 2016-03-21 16:50:28Z raasch ! introduction of different datatransfer modes, ! further formatting cleanup, parameter checks added (including mismatches ! between root and nest model settings), ! +routine pmci_check_setting_mismatches ! comm_world_nesting introduced ! ! 1791 2016-03-11 10:41:25Z raasch ! routine pmci_update_new removed, ! pmc_get_local_model_info renamed pmc_get_model_info, some keywords also ! renamed, ! filling up redundant ghost points introduced, ! some index bound variables renamed, ! further formatting cleanup ! ! 1783 2016-03-06 18:36:17Z raasch ! calculation of nest top area simplified, ! interpolation and anterpolation moved to seperate wrapper subroutines ! ! 1781 2016-03-03 15:12:23Z raasch ! _p arrays are set zero within buildings too, t.._m arrays and respective ! settings within buildings completely removed ! ! 1779 2016-03-03 08:01:28Z raasch ! only the total number of PEs is given for the domains, npe_x and npe_y ! replaced by npe_total, two unused elements removed from array ! parent_grid_info_real, ! array management changed from linked list to sequential loop ! ! 1766 2016-02-29 08:37:15Z raasch ! modifications to allow for using PALM's pointer version, ! +new routine pmci_set_swaplevel ! ! 1764 2016-02-28 12:45:19Z raasch ! +cpl_parent_id, ! cpp-statements for nesting replaced by __parallel statements, ! errors output with message-subroutine, ! index bugfixes in pmci_interp_tril_all, ! some adjustments to PALM style ! ! 1762 2016-02-25 12:31:13Z hellstea ! Initial revision by A. Hellsten ! ! Description: ! ------------ ! Domain nesting interface routines. The low-level inter-domain communication ! is conducted by the PMC-library routines. ! ! @todo Remove array_3d variables from USE statements thate not used in the ! routine ! @todo Data transfer of qc and nc is prepared but not activated !------------------------------------------------------------------------------! 
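!
!-- Overview (illustrative only, not the actual call sites): the PALM core calls
!-- pmci_init and pmci_modelconfiguration during model setup, then
!-- pmci_parent_initialize and pmci_child_initialize for the initial
!-- parent-to-child data transfer, and during time stepping pmci_datatrans
!-- (boundary-data exchange and anterpolation) and pmci_synchronize
!-- (time-step synchronization between the domains).
!------------------------------------------------------------------------------!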
MODULE pmc_interface USE ISO_C_BINDING USE arrays_3d, & ONLY: diss, diss_2, dzu, dzw, e, e_p, e_2, nc, nc_2, nc_p, nr, nr_2, & pt, pt_2, q, q_2, qc, qc_2, qr, qr_2, s, s_2, & u, u_p, u_2, v, v_p, v_2, w, w_p, w_2, zu, zw USE control_parameters, & ONLY: air_chemistry, bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r, & bc_dirichlet_s, child_domain, & constant_diffusion, constant_flux_layer, & coupling_char, dt_3d, dz, humidity, message_string, & neutral, passive_scalar, rans_mode, rans_tke_e, & roughness_length, topography, volume_flow USE chem_modules, & ONLY: nspec USE chemistry_model_mod, & ONLY: chem_species, nest_chemistry, spec_conc_2 USE cpulog, & ONLY: cpu_log, log_point_s USE grid_variables, & ONLY: dx, dy USE indices, & ONLY: nbgp, nx, nxl, nxlg, nxlu, nxr, nxrg, ny, nyn, nyng, nys, nysg, & nysv, nz, nzb, nzt, wall_flags_0 USE bulk_cloud_model_mod, & ONLY: bulk_cloud_model, microphysics_morrison, microphysics_seifert USE particle_attributes, & ONLY: particle_advection USE kinds #if defined( __parallel ) #if !defined( __mpifh ) USE MPI #endif USE pegrid, & ONLY: collective_wait, comm1dx, comm1dy, comm2d, myid, myidx, myidy, & numprocs, pdims, pleft, pnorth, pright, psouth, status USE pmc_child, & ONLY: pmc_childinit, pmc_c_clear_next_array_list, & pmc_c_getnextarray, pmc_c_get_2d_index_list, pmc_c_getbuffer, & pmc_c_putbuffer, pmc_c_setind_and_allocmem, & pmc_c_set_dataarray, pmc_set_dataarray_name USE pmc_general, & ONLY: da_namelen USE pmc_handle_communicator, & ONLY: pmc_get_model_info, pmc_init_model, pmc_is_rootmodel, & pmc_no_namelist_found, pmc_parent_for_child, m_couplers USE pmc_mpi_wrapper, & ONLY: pmc_bcast, pmc_recv_from_child, pmc_recv_from_parent, & pmc_send_to_child, pmc_send_to_parent USE pmc_parent, & ONLY: pmc_parentinit, pmc_s_clear_next_array_list, pmc_s_fillbuffer, & pmc_s_getdata_from_buffer, pmc_s_getnextarray, & pmc_s_setind_and_allocmem, pmc_s_set_active_data_array, & pmc_s_set_dataarray, pmc_s_set_2d_index_list #endif USE surface_mod, & ONLY: get_topography_top_index_ji, surf_def_h, surf_lsm_h, surf_usm_h IMPLICIT NONE #if defined( __parallel ) #if defined( __mpifh ) INCLUDE "mpif.h" #endif #endif PRIVATE ! !-- Constants INTEGER(iwp), PARAMETER :: child_to_parent = 2 !< INTEGER(iwp), PARAMETER :: parent_to_child = 1 !< INTEGER(iwp), PARAMETER :: interpolation_scheme_lrsn = 2 !< Interpolation scheme to be used on lateral boundaries (maybe to be made user parameter) INTEGER(iwp), PARAMETER :: interpolation_scheme_t = 3 !< Interpolation scheme to be used on top boundary (maybe to be made user parameter) ! !-- Coupler setup INTEGER(iwp), SAVE :: comm_world_nesting !< INTEGER(iwp), SAVE :: cpl_id = 1 !< CHARACTER(LEN=32), SAVE :: cpl_name !< INTEGER(iwp), SAVE :: cpl_npe_total !< INTEGER(iwp), SAVE :: cpl_parent_id !< ! !-- Control parameters CHARACTER(LEN=7), SAVE :: nesting_datatransfer_mode = 'mixed' !< steering parameter for data-transfer mode CHARACTER(LEN=8), SAVE :: nesting_mode = 'two-way' !< steering parameter for 1- or 2-way nesting INTEGER(iwp), SAVE :: anterpolation_buffer_width = 2 !< Boundary buffer width for anterpolation LOGICAL, SAVE :: nested_run = .FALSE. !< general switch LOGICAL :: rans_mode_parent = .FALSE. !< mode of parent model (.F. - LES mode, .T. - RANS mode) ! !-- Geometry REAL(wp), SAVE, DIMENSION(:), ALLOCATABLE, PUBLIC :: coord_x !< REAL(wp), SAVE, DIMENSION(:), ALLOCATABLE, PUBLIC :: coord_y !< REAL(wp), SAVE, PUBLIC :: lower_left_coord_x !< REAL(wp), SAVE, PUBLIC :: lower_left_coord_y !< ! 
!-- Children's parent-grid arrays INTEGER(iwp), SAVE, DIMENSION(5), PUBLIC :: coarse_bound !< subdomain index bounds for children's parent-grid arrays INTEGER(iwp), SAVE, DIMENSION(4), PUBLIC :: coarse_bound_aux !< subdomain index bounds for allocation of index-mapping and other auxiliary arrays INTEGER(iwp), SAVE, DIMENSION(4), PUBLIC :: coarse_bound_w !< subdomain index bounds for children's parent-grid work arrays REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: dissc !< Parent-grid array on child domain - dissipation rate REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: ec !< Parent-grid array on child domain - SGS TKE REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: ptc !< Parent-grid array on child domain - potential temperature REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: uc !< Parent-grid array on child domain - velocity component u REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: vc !< Parent-grid array on child domain - velocity component v REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: wc !< Parent-grid array on child domain - velocity component w REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: q_c !< Parent-grid array on child domain - REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: qcc !< Parent-grid array on child domain - REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: qrc !< Parent-grid array on child domain - REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: nrc !< Parent-grid array on child domain - REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: ncc !< Parent-grid array on child domain - REAL(wp), SAVE, DIMENSION(:,:,:), ALLOCATABLE, TARGET :: sc !< Parent-grid array on child domain - INTEGER(idp), SAVE, DIMENSION(:,:), ALLOCATABLE, TARGET, PUBLIC :: nr_partc !< INTEGER(idp), SAVE, DIMENSION(:,:), ALLOCATABLE, TARGET, PUBLIC :: part_adrc !< REAL(wp), SAVE, DIMENSION(:,:,:,:), ALLOCATABLE, TARGET :: chem_spec_c !< Parent-grid array on child domain - chemical species ! !-- Grid-spacing ratios. INTEGER(iwp), SAVE :: igsr !< Integer grid-spacing ratio in i-direction INTEGER(iwp), SAVE :: jgsr !< Integer grid-spacing ratio in j-direction INTEGER(iwp), SAVE :: kgsr !< Integer grid-spacing ratio in k-direction ! !-- Global parent-grid index bounds INTEGER(iwp), SAVE :: iplg !< Leftmost parent-grid array ip index of the whole child domain INTEGER(iwp), SAVE :: iprg !< Rightmost parent-grid array ip index of the whole child domain INTEGER(iwp), SAVE :: jpsg !< Southmost parent-grid array jp index of the whole child domain INTEGER(iwp), SAVE :: jpng !< Northmost parent-grid array jp index of the whole child domain ! !-- Local parent-grid index bounds (to be moved here from pmci_setup_child) !-- EXPLAIN WHY SEVERAL SETS OF PARENT-GRID INDEX BOUNDS ARE NEEDED. ! !-- Highest prognostic parent-grid k-indices. INTEGER(iwp), SAVE :: kcto !< Upper bound for k in anterpolation of variables other than w. INTEGER(iwp), SAVE :: kctw !< Upper bound for k in anterpolation of w. ! !-- Child-array indices to be precomputed and stored for anterpolation. 
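!-- For each parent-grid index, the ifl*/ifu*, jfl*/jfu* and kfl*/kfu* arrays
!-- below give the first and the last child-grid index lying within the
!-- respective parent-grid cell on the staggered grid in question. Together
!-- with the ijkfc_* arrays (number of contributing child-grid cells) they
!-- define the averaging stencils used in the anterpolation.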
INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: iflu !< child index indicating left bound of parent grid box on u-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: ifuu !< child index indicating right bound of parent grid box on u-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: iflo !< child index indicating left bound of parent grid box on scalar-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: ifuo !< child index indicating right bound of parent grid box on scalar-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: jflv !< child index indicating south bound of parent grid box on v-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: jfuv !< child index indicating north bound of parent grid box on v-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: jflo !< child index indicating south bound of parent grid box on scalar-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: jfuo !< child index indicating north bound of parent grid box on scalar-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: kflw !< child index indicating lower bound of parent grid box on w-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: kfuw !< child index indicating upper bound of parent grid box on w-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: kflo !< child index indicating lower bound of parent grid box on scalar-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:) :: kfuo !< child index indicating upper bound of parent grid box on scalar-grid ! !-- Number of fine-grid nodes inside coarse-grid ij-faces !-- to be precomputed for anterpolation. INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:,:,:) :: ijkfc_u !< number of child grid boxes contribution to a parent grid box, u-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:,:,:) :: ijkfc_v !< number of child grid boxes contribution to a parent grid box, v-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:,:,:) :: ijkfc_w !< number of child grid boxes contribution to a parent grid box, w-grid INTEGER(iwp), SAVE, ALLOCATABLE, DIMENSION(:,:,:) :: ijkfc_s !< number of child grid boxes contribution to a parent grid box, scalar-grid ! !-- Work arrays for interpolation and user-defined type definitions for horizontal work-array exchange REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: workarrc_lr REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: workarrc_sn REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: workarrc_t INTEGER(iwp) :: workarrc_lr_exchange_type INTEGER(iwp) :: workarrc_sn_exchange_type INTEGER(iwp) :: workarrc_t_exchange_type_x INTEGER(iwp) :: workarrc_t_exchange_type_y INTEGER(iwp), DIMENSION(3) :: parent_grid_info_int !< REAL(wp), DIMENSION(7) :: parent_grid_info_real !< REAL(wp), DIMENSION(2) :: zmax_coarse !< TYPE parentgrid_def INTEGER(iwp) :: nx !< INTEGER(iwp) :: ny !< INTEGER(iwp) :: nz !< REAL(wp) :: dx !< REAL(wp) :: dy !< REAL(wp) :: dz !< REAL(wp) :: lower_left_coord_x !< REAL(wp) :: lower_left_coord_y !< REAL(wp) :: xend !< REAL(wp) :: yend !< REAL(wp), DIMENSION(:), ALLOCATABLE :: coord_x !< REAL(wp), DIMENSION(:), ALLOCATABLE :: coord_y !< REAL(wp), DIMENSION(:), ALLOCATABLE :: dzu !< REAL(wp), DIMENSION(:), ALLOCATABLE :: dzw !< REAL(wp), DIMENSION(:), ALLOCATABLE :: zu !< REAL(wp), DIMENSION(:), ALLOCATABLE :: zw !< END TYPE parentgrid_def TYPE(parentgrid_def), SAVE, PUBLIC :: cg !< change to pg ! 
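!-- Note: cg holds the parent ("coarse") grid description received from the
!-- parent in pmci_setup_child. cg%nz is not the parent's full nz but only the
!-- number of parent-grid levels needed for the nesting (nz_cl determined in
!-- pmci_setup_parent).
!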
!-- Variables for particle coupling TYPE, PUBLIC :: childgrid_def INTEGER(iwp) :: nx !< INTEGER(iwp) :: ny !< INTEGER(iwp) :: nz !< REAL(wp) :: dx !< REAL(wp) :: dy !< REAL(wp) :: dz !< REAL(wp) :: lx_coord, lx_coord_b !< REAL(wp) :: rx_coord, rx_coord_b !< REAL(wp) :: sy_coord, sy_coord_b !< REAL(wp) :: ny_coord, ny_coord_b !< REAL(wp) :: uz_coord, uz_coord_b !< END TYPE childgrid_def TYPE(childgrid_def), SAVE, ALLOCATABLE, DIMENSION(:), PUBLIC :: childgrid !< INTEGER(idp),ALLOCATABLE,DIMENSION(:,:),PUBLIC,TARGET :: nr_part !< INTEGER(idp),ALLOCATABLE,DIMENSION(:,:),PUBLIC,TARGET :: part_adr !< INTERFACE pmci_boundary_conds MODULE PROCEDURE pmci_boundary_conds END INTERFACE pmci_boundary_conds INTERFACE pmci_check_setting_mismatches MODULE PROCEDURE pmci_check_setting_mismatches END INTERFACE INTERFACE pmci_child_initialize MODULE PROCEDURE pmci_child_initialize END INTERFACE INTERFACE pmci_synchronize MODULE PROCEDURE pmci_synchronize END INTERFACE INTERFACE pmci_datatrans MODULE PROCEDURE pmci_datatrans END INTERFACE pmci_datatrans INTERFACE pmci_init MODULE PROCEDURE pmci_init END INTERFACE INTERFACE pmci_modelconfiguration MODULE PROCEDURE pmci_modelconfiguration END INTERFACE INTERFACE pmci_parent_initialize MODULE PROCEDURE pmci_parent_initialize END INTERFACE INTERFACE get_number_of_childs MODULE PROCEDURE get_number_of_childs END INTERFACE get_number_of_childs INTERFACE get_childid MODULE PROCEDURE get_childid END INTERFACE get_childid INTERFACE get_child_edges MODULE PROCEDURE get_child_edges END INTERFACE get_child_edges INTERFACE get_child_gridspacing MODULE PROCEDURE get_child_gridspacing END INTERFACE get_child_gridspacing INTERFACE pmci_set_swaplevel MODULE PROCEDURE pmci_set_swaplevel END INTERFACE pmci_set_swaplevel PUBLIC child_to_parent, comm_world_nesting, cpl_id, nested_run, & nesting_datatransfer_mode, nesting_mode, parent_to_child, rans_mode_parent PUBLIC pmci_boundary_conds PUBLIC pmci_child_initialize PUBLIC pmci_datatrans PUBLIC pmci_init PUBLIC pmci_modelconfiguration PUBLIC pmci_parent_initialize PUBLIC pmci_synchronize PUBLIC pmci_set_swaplevel PUBLIC get_number_of_childs, get_childid, get_child_edges, get_child_gridspacing CONTAINS SUBROUTINE pmci_init( world_comm ) USE control_parameters, & ONLY: message_string IMPLICIT NONE INTEGER(iwp), INTENT(OUT) :: world_comm !< #if defined( __parallel ) INTEGER(iwp) :: pmc_status !< CALL pmc_init_model( world_comm, nesting_datatransfer_mode, nesting_mode, & anterpolation_buffer_width, pmc_status ) IF ( pmc_status == pmc_no_namelist_found ) THEN ! !-- This is not a nested run world_comm = MPI_COMM_WORLD cpl_id = 1 cpl_name = "" RETURN ENDIF ! !-- Check steering parameter values IF ( TRIM( nesting_mode ) /= 'one-way' .AND. & TRIM( nesting_mode ) /= 'two-way' .AND. & TRIM( nesting_mode ) /= 'vertical' ) & THEN message_string = 'illegal nesting mode: ' // TRIM( nesting_mode ) CALL message( 'pmci_init', 'PA0417', 3, 2, 0, 6, 0 ) ENDIF IF ( TRIM( nesting_datatransfer_mode ) /= 'cascade' .AND. & TRIM( nesting_datatransfer_mode ) /= 'mixed' .AND. & TRIM( nesting_datatransfer_mode ) /= 'overlap' ) & THEN message_string = 'illegal nesting datatransfer mode: ' & // TRIM( nesting_datatransfer_mode ) CALL message( 'pmci_init', 'PA0418', 3, 2, 0, 6, 0 ) ENDIF ! !-- Set the general steering switch which tells PALM that its a nested run nested_run = .TRUE. ! 
!-- Get some variables required by the pmc-interface (and in some cases in the !-- PALM code out of the pmci) out of the pmc-core CALL pmc_get_model_info( comm_world_nesting = comm_world_nesting, & cpl_id = cpl_id, cpl_parent_id = cpl_parent_id, & cpl_name = cpl_name, npe_total = cpl_npe_total, & lower_left_x = lower_left_coord_x, & lower_left_y = lower_left_coord_y ) ! !-- Set the steering switch which tells the models that they are nested (of !-- course the root domain (cpl_id = 1) is not nested) IF ( cpl_id >= 2 ) THEN child_domain = .TRUE. WRITE( coupling_char, '(A2,I2.2)') '_N', cpl_id ENDIF ! !-- Message that communicators for nesting are initialized. !-- Attention: myid has been set at the end of pmc_init_model in order to !-- guarantee that only PE0 of the root domain does the output. CALL location_message( 'finished', .TRUE. ) ! !-- Reset myid to its default value myid = 0 #else ! !-- Nesting cannot be used in serial mode. cpl_id is set to root domain (1) !-- because no location messages would be generated otherwise. !-- world_comm is given a dummy value to avoid compiler warnings (INTENT(OUT) !-- should get an explicit value) cpl_id = 1 nested_run = .FALSE. world_comm = 1 #endif END SUBROUTINE pmci_init SUBROUTINE pmci_modelconfiguration IMPLICIT NONE INTEGER(iwp) :: ncpl !< number of nest domains #if defined( __parallel ) CALL location_message( 'setup the nested model configuration', .FALSE. ) CALL cpu_log( log_point_s(79), 'pmci_model_config', 'start' ) ! !-- Compute absolute coordinates for all models CALL pmci_setup_coordinates ! !-- Determine the number of coupled arrays CALL pmci_num_arrays ! !-- Initialize the child (must be called before pmc_setup_parent) CALL pmci_setup_child ! !-- Initialize PMC parent CALL pmci_setup_parent ! !-- Check for mismatches between settings of master and child variables !-- (e.g., all children have to follow the end_time settings of the root master) CALL pmci_check_setting_mismatches ! !-- Set flag file for combine_plot_fields for precessing the nest output data OPEN( 90, FILE='3DNESTING', FORM='FORMATTED' ) CALL pmc_get_model_info( ncpl = ncpl ) WRITE( 90, '(I2)' ) ncpl CLOSE( 90 ) CALL cpu_log( log_point_s(79), 'pmci_model_config', 'stop' ) CALL location_message( 'finished', .TRUE. ) #endif END SUBROUTINE pmci_modelconfiguration SUBROUTINE pmci_setup_parent #if defined( __parallel ) IMPLICIT NONE CHARACTER(LEN=32) :: myname INTEGER(iwp) :: child_id !< INTEGER(iwp) :: ierr !< INTEGER(iwp) :: k !< INTEGER(iwp) :: m !< INTEGER(iwp) :: mid !< INTEGER(iwp) :: mm !< INTEGER(iwp) :: n = 1 !< Running index for chemical species INTEGER(iwp) :: nest_overlap !< INTEGER(iwp) :: nomatch !< INTEGER(iwp) :: nx_cl !< INTEGER(iwp) :: ny_cl !< INTEGER(iwp) :: nz_cl !< INTEGER(iwp), DIMENSION(3) :: val !< Array for receiving the child-grid dimensions REAL(wp), DIMENSION(:), ALLOCATABLE :: ch_xl !< REAL(wp), DIMENSION(:), ALLOCATABLE :: ch_xr !< REAL(wp), DIMENSION(:), ALLOCATABLE :: ch_ys !< REAL(wp), DIMENSION(:), ALLOCATABLE :: ch_yn !< REAL(wp) :: cl_height !< REAL(wp) :: dx_cl !< REAL(wp) :: dy_cl !< REAL(wp) :: dz_cl !< REAL(wp) :: left_limit !< REAL(wp) :: north_limit !< REAL(wp) :: right_limit !< REAL(wp) :: south_limit !< REAL(wp) :: xez !< REAL(wp) :: yez !< REAL(wp), DIMENSION(5) :: fval !< Array for receiving the child-grid spacings etc REAL(wp), DIMENSION(:), ALLOCATABLE :: cl_coord_x !< REAL(wp), DIMENSION(:), ALLOCATABLE :: cl_coord_y !< ! ! Initialize the pmc parent CALL pmc_parentinit ! 
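!-- In the loop over all children below, the parent receives each child's grid
!-- dimensions and grid spacings (val, fval) and its x/y coordinate vectors,
!-- checks that the child domain fits into the parent domain and that parallel
!-- child domains do not overlap, sends the parent-grid information back to
!-- the child, and finally registers the arrays to be coupled (see
!-- pmci_create_index_list and the pmc_s_* calls below).
!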
!-- Corners of all children of the present parent IF ( ( SIZE( pmc_parent_for_child ) - 1 > 0 ) .AND. myid == 0 ) THEN ALLOCATE( ch_xl(1:SIZE( pmc_parent_for_child ) - 1) ) ALLOCATE( ch_xr(1:SIZE( pmc_parent_for_child ) - 1) ) ALLOCATE( ch_ys(1:SIZE( pmc_parent_for_child ) - 1) ) ALLOCATE( ch_yn(1:SIZE( pmc_parent_for_child ) - 1) ) ENDIF IF ( ( SIZE( pmc_parent_for_child ) - 1 > 0 ) ) THEN ALLOCATE( childgrid(1:SIZE( pmc_parent_for_child ) - 1) ) ENDIF ! !-- Get coordinates from all children DO m = 1, SIZE( pmc_parent_for_child ) - 1 child_id = pmc_parent_for_child(m) IF ( myid == 0 ) THEN CALL pmc_recv_from_child( child_id, val, SIZE(val), 0, 123, ierr ) CALL pmc_recv_from_child( child_id, fval, SIZE(fval), 0, 124, ierr ) nx_cl = val(1) ny_cl = val(2) dx_cl = fval(3) dy_cl = fval(4) dz_cl = fval(5) cl_height = fval(1) nz_cl = nz ! !-- Find the highest nest level in the parent grid for the reduced z !-- transfer DO k = 1, nz IF ( zw(k) > fval(1) ) THEN nz_cl = k EXIT ENDIF ENDDO zmax_coarse = fval(1:2) cl_height = fval(1) ! !-- Get absolute coordinates from the child ALLOCATE( cl_coord_x(-nbgp:nx_cl+nbgp) ) ALLOCATE( cl_coord_y(-nbgp:ny_cl+nbgp) ) CALL pmc_recv_from_child( child_id, cl_coord_x, SIZE( cl_coord_x ), & 0, 11, ierr ) CALL pmc_recv_from_child( child_id, cl_coord_y, SIZE( cl_coord_y ), & 0, 12, ierr ) parent_grid_info_real(1) = lower_left_coord_x parent_grid_info_real(2) = lower_left_coord_y parent_grid_info_real(3) = dx parent_grid_info_real(4) = dy parent_grid_info_real(5) = lower_left_coord_x + ( nx + 1 ) * dx parent_grid_info_real(6) = lower_left_coord_y + ( ny + 1 ) * dy parent_grid_info_real(7) = dz(1) parent_grid_info_int(1) = nx parent_grid_info_int(2) = ny parent_grid_info_int(3) = nz_cl ! !-- Check that the child domain matches parent domain. nomatch = 0 IF ( nesting_mode == 'vertical' ) THEN right_limit = parent_grid_info_real(5) north_limit = parent_grid_info_real(6) IF ( ( cl_coord_x(nx_cl+1) /= right_limit ) .OR. & ( cl_coord_y(ny_cl+1) /= north_limit ) ) THEN nomatch = 1 ENDIF ELSE ! !-- Check that the child domain is completely inside the parent domain. xez = ( nbgp + 1 ) * dx yez = ( nbgp + 1 ) * dy left_limit = lower_left_coord_x + xez right_limit = parent_grid_info_real(5) - xez south_limit = lower_left_coord_y + yez north_limit = parent_grid_info_real(6) - yez IF ( ( cl_coord_x(0) < left_limit ) .OR. & ( cl_coord_x(nx_cl+1) > right_limit ) .OR. & ( cl_coord_y(0) < south_limit ) .OR. & ( cl_coord_y(ny_cl+1) > north_limit ) ) THEN nomatch = 1 ENDIF ENDIF ! !-- Child domain must be lower than the parent domain such !-- that the top ghost layer of the child grid does not exceed !-- the parent domain top boundary. IF ( cl_height > zw(nz) ) THEN nomatch = 1 ENDIF ! !-- Check that parallel nest domains, if any, do not overlap. nest_overlap = 0 IF ( SIZE( pmc_parent_for_child ) - 1 > 0 ) THEN ch_xl(m) = cl_coord_x(-nbgp) ch_xr(m) = cl_coord_x(nx_cl+nbgp) ch_ys(m) = cl_coord_y(-nbgp) ch_yn(m) = cl_coord_y(ny_cl+nbgp) IF ( m > 1 ) THEN DO mm = 1, m - 1 mid = pmc_parent_for_child(mm) ! !-- Check only different nest levels IF (m_couplers(child_id)%parent_id /= m_couplers(mid)%parent_id) THEN IF ( ( ch_xl(m) < ch_xr(mm) .OR. & ch_xr(m) > ch_xl(mm) ) .AND. & ( ch_ys(m) < ch_yn(mm) .OR. & ch_yn(m) > ch_ys(mm) ) ) THEN nest_overlap = 1 ENDIF ENDIF ENDDO ENDIF ENDIF CALL set_child_edge_coords DEALLOCATE( cl_coord_x ) DEALLOCATE( cl_coord_y ) ! !-- Send information about operating mode (LES or RANS) to child. 
This will be !-- used to control TKE nesting and setting boundary conditions properly. CALL pmc_send_to_child( child_id, rans_mode, 1, 0, 19, ierr ) ! !-- Send coarse grid information to child CALL pmc_send_to_child( child_id, parent_grid_info_real, & SIZE( parent_grid_info_real ), 0, 21, & ierr ) CALL pmc_send_to_child( child_id, parent_grid_info_int, 3, 0, & 22, ierr ) ! !-- Send local grid to child CALL pmc_send_to_child( child_id, coord_x, nx+1+2*nbgp, 0, 24, & ierr ) CALL pmc_send_to_child( child_id, coord_y, ny+1+2*nbgp, 0, 25, & ierr ) ! !-- Also send the dzu-, dzw-, zu- and zw-arrays here CALL pmc_send_to_child( child_id, dzu, nz_cl+1, 0, 26, ierr ) CALL pmc_send_to_child( child_id, dzw, nz_cl+1, 0, 27, ierr ) CALL pmc_send_to_child( child_id, zu, nz_cl+2, 0, 28, ierr ) CALL pmc_send_to_child( child_id, zw, nz_cl+2, 0, 29, ierr ) ENDIF CALL MPI_BCAST( nomatch, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( nomatch /= 0 ) THEN WRITE ( message_string, * ) 'nested child domain does ', & 'not fit into its parent domain' CALL message( 'pmci_setup_parent', 'PA0425', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( nest_overlap, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( nest_overlap /= 0 .AND. nesting_mode /= 'vertical' ) THEN WRITE ( message_string, * ) 'nested parallel child domains overlap' CALL message( 'pmci_setup_parent', 'PA0426', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( nz_cl, 1, MPI_INTEGER, 0, comm2d, ierr ) CALL MPI_BCAST( childgrid(m), STORAGE_SIZE(childgrid(1))/8, MPI_BYTE, 0, comm2d, ierr ) ! !-- TO_DO: Klaus: please give a comment what is done here CALL pmci_create_index_list ! !-- Include couple arrays into parent content !-- The adresses of the PALM 2D or 3D array (here server coarse grid) which are candidates !-- for coupling are stored once into the pmc context. While data transfer, the array do not !-- have to be specified again CALL pmc_s_clear_next_array_list DO WHILE ( pmc_s_getnextarray( child_id, myname ) ) IF ( INDEX( TRIM( myname ), 'chem_' ) /= 0 ) THEN CALL pmci_set_array_pointer( myname, child_id = child_id, & nz_cl = nz_cl, n = n ) n = n + 1 ELSE CALL pmci_set_array_pointer( myname, child_id = child_id, & nz_cl = nz_cl ) ENDIF ENDDO CALL pmc_s_setind_and_allocmem( child_id ) ENDDO IF ( ( SIZE( pmc_parent_for_child ) - 1 > 0 ) .AND. myid == 0 ) THEN DEALLOCATE( ch_xl ) DEALLOCATE( ch_xr ) DEALLOCATE( ch_ys ) DEALLOCATE( ch_yn ) ENDIF CONTAINS SUBROUTINE pmci_create_index_list IMPLICIT NONE INTEGER(iwp) :: i !< INTEGER(iwp) :: ic !< INTEGER(iwp) :: ierr !< INTEGER(iwp) :: j !< INTEGER(iwp) :: k !< INTEGER(iwp) :: npx !< INTEGER(iwp) :: npy !< INTEGER(iwp) :: nrx !< INTEGER(iwp) :: nry !< INTEGER(iwp) :: px !< INTEGER(iwp) :: py !< INTEGER(iwp) :: parent_pe !< INTEGER(iwp), DIMENSION(2) :: scoord !< INTEGER(iwp), DIMENSION(2) :: size_of_array !< INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: coarse_bound_all !< INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: index_list !< IF ( myid == 0 ) THEN ! !-- TO_DO: Klaus: give more specific comment what size_of_array stands for CALL pmc_recv_from_child( child_id, size_of_array, 2, 0, 40, ierr ) ALLOCATE( coarse_bound_all(size_of_array(1),size_of_array(2)) ) CALL pmc_recv_from_child( child_id, coarse_bound_all, & SIZE( coarse_bound_all ), 0, 41, ierr ) ! !-- Compute size of index_list. ic = 0 DO k = 1, size_of_array(2) ! 
Replace k by some other letter ic = ic + ( coarse_bound_all(4,k) - coarse_bound_all(3,k) + 1 ) * & ( coarse_bound_all(2,k) - coarse_bound_all(1,k) + 1 ) ENDDO ALLOCATE( index_list(6,ic) ) CALL MPI_COMM_SIZE( comm1dx, npx, ierr ) CALL MPI_COMM_SIZE( comm1dy, npy, ierr ) ! !-- Nrx is the same for all PEs and so is nry, thus there is no need to compute !-- them separately for each PE. nrx = nxr - nxl + 1 nry = nyn - nys + 1 ic = 0 ! !-- Loop over all children PEs DO k = 1, size_of_array(2) ! Replace k by some other letter ! !-- Area along y required by actual child PE DO j = coarse_bound_all(3,k), coarse_bound_all(4,k) !: j = jcs, jcn of PE# k ! !-- Area along x required by actual child PE DO i = coarse_bound_all(1,k), coarse_bound_all(2,k) !: i = icl, icr of PE# k px = i / nrx py = j / nry scoord(1) = px scoord(2) = py CALL MPI_CART_RANK( comm2d, scoord, parent_pe, ierr ) ic = ic + 1 ! !-- First index in parent array index_list(1,ic) = i - ( px * nrx ) + 1 + nbgp ! !-- Second index in parent array index_list(2,ic) = j - ( py * nry ) + 1 + nbgp ! !-- x index of child coarse grid index_list(3,ic) = i - coarse_bound_all(1,k) + 1 ! !-- y index of child coarse grid index_list(4,ic) = j - coarse_bound_all(3,k) + 1 ! !-- PE number of child index_list(5,ic) = k - 1 ! !-- PE number of parent index_list(6,ic) = parent_pe ENDDO ENDDO ENDDO ! !-- TO_DO: Klaus: comment what is done here CALL pmc_s_set_2d_index_list( child_id, index_list(:,1:ic) ) ELSE ! !-- TO_DO: Klaus: comment why this dummy allocation is required ALLOCATE( index_list(6,1) ) CALL pmc_s_set_2d_index_list( child_id, index_list ) ENDIF DEALLOCATE(index_list) END SUBROUTINE pmci_create_index_list SUBROUTINE set_child_edge_coords IMPLICIT NONE INTEGER(iwp) :: nbgp_lpm = 1 nbgp_lpm = min(nbgp_lpm, nbgp) childgrid(m)%nx = nx_cl childgrid(m)%ny = ny_cl childgrid(m)%nz = nz_cl childgrid(m)%dx = dx_cl childgrid(m)%dy = dy_cl childgrid(m)%dz = dz_cl childgrid(m)%lx_coord = cl_coord_x(0) childgrid(m)%lx_coord_b = cl_coord_x(-nbgp_lpm) childgrid(m)%rx_coord = cl_coord_x(nx_cl)+dx_cl childgrid(m)%rx_coord_b = cl_coord_x(nx_cl+nbgp_lpm)+dx_cl childgrid(m)%sy_coord = cl_coord_y(0) childgrid(m)%sy_coord_b = cl_coord_y(-nbgp_lpm) childgrid(m)%ny_coord = cl_coord_y(ny_cl)+dy_cl childgrid(m)%ny_coord_b = cl_coord_y(ny_cl+nbgp_lpm)+dy_cl childgrid(m)%uz_coord = zmax_coarse(2) childgrid(m)%uz_coord_b = zmax_coarse(1) END SUBROUTINE set_child_edge_coords #endif END SUBROUTINE pmci_setup_parent SUBROUTINE pmci_setup_child #if defined( __parallel ) IMPLICIT NONE CHARACTER(LEN=da_namelen) :: myname !< INTEGER(iwp) :: ierr !< MPI error code INTEGER(iwp) :: icl !< Left index limit for children's parent-grid arrays INTEGER(iwp) :: icla !< Left index limit for allocation of index-mapping and other auxiliary arrays INTEGER(iwp) :: iclw !< Left index limit for children's parent-grid work arrays INTEGER(iwp) :: icr !< Left index limit for children's parent-grid arrays INTEGER(iwp) :: icra !< Right index limit for allocation of index-mapping and other auxiliary arrays INTEGER(iwp) :: icrw !< Right index limit for children's parent-grid work arrays INTEGER(iwp) :: jcn !< North index limit for children's parent-grid arrays INTEGER(iwp) :: jcna !< North index limit for allocation of index-mapping and other auxiliary arrays INTEGER(iwp) :: jcnw !< North index limit for children's parent-grid work arrays INTEGER(iwp) :: jcs !< South index limit for children's parent-grid arrays INTEGER(iwp) :: jcsa !< South index limit for allocation of index-mapping and other auxiliary 
arrays INTEGER(iwp) :: jcsw !< South index limit for children's parent-grid work arrays INTEGER(iwp) :: n !< Running index for number of chemical species INTEGER(iwp), DIMENSION(3) :: val !< Array for sending the child-grid dimensions to parent REAL(wp) :: xcs !< REAL(wp) :: xce !< REAL(wp) :: ycs !< REAL(wp) :: yce !< REAL(wp), DIMENSION(5) :: fval !< Array for sending the child-grid spacings etc to parent ! !-- Child setup !-- Root model does not have a parent and is not a child, therefore no child setup on root model IF ( .NOT. pmc_is_rootmodel() ) THEN CALL pmc_childinit ! !-- The arrays, which actually will be exchanged between child and parent !-- are defined Here AND ONLY HERE. !-- If a variable is removed, it only has to be removed from here. !-- Please check, if the arrays are in the list of POSSIBLE exchange arrays !-- in subroutines: !-- pmci_set_array_pointer (for parent arrays) !-- pmci_create_child_arrays (for child arrays) CALL pmc_set_dataarray_name( 'coarse', 'u' ,'fine', 'u', ierr ) CALL pmc_set_dataarray_name( 'coarse', 'v' ,'fine', 'v', ierr ) CALL pmc_set_dataarray_name( 'coarse', 'w' ,'fine', 'w', ierr ) ! !-- Set data array name for TKE. Please note, nesting of TKE is actually !-- only done if both parent and child are in LES or in RANS mode. Due to !-- design of model coupler, however, data array names must be already !-- available at this point. CALL pmc_set_dataarray_name( 'coarse', 'e' ,'fine', 'e', ierr ) ! !-- Nesting of dissipation rate only if both parent and child are in RANS !-- mode and TKE-epsilo closure is applied. Please so also comment for TKE !-- above. CALL pmc_set_dataarray_name( 'coarse', 'diss' ,'fine', 'diss', ierr ) IF ( .NOT. neutral ) THEN CALL pmc_set_dataarray_name( 'coarse', 'pt' ,'fine', 'pt', ierr ) ENDIF IF ( humidity ) THEN CALL pmc_set_dataarray_name( 'coarse', 'q' ,'fine', 'q', ierr ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmc_set_dataarray_name( 'coarse', 'qc' ,'fine', 'qc', ierr ) CALL pmc_set_dataarray_name( 'coarse', 'nc' ,'fine', 'nc', ierr ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmc_set_dataarray_name( 'coarse', 'qr' ,'fine', 'qr', ierr ) CALL pmc_set_dataarray_name( 'coarse', 'nr' ,'fine', 'nr', ierr ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmc_set_dataarray_name( 'coarse', 's' ,'fine', 's', ierr ) ENDIF IF( particle_advection ) THEN CALL pmc_set_dataarray_name( 'coarse', 'nr_part' ,'fine', & 'nr_part', ierr ) CALL pmc_set_dataarray_name( 'coarse', 'part_adr' ,'fine', & 'part_adr', ierr ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmc_set_dataarray_name( 'coarse', & 'chem_' // & TRIM( chem_species(n)%name ), & 'fine', & 'chem_' // & TRIM( chem_species(n)%name ), & ierr ) ENDDO ENDIF CALL pmc_set_dataarray_name( lastentry = .TRUE. ) ! !-- Send grid to parent val(1) = nx val(2) = ny val(3) = nz fval(1) = zw(nzt+1) fval(2) = zw(nzt) fval(3) = dx fval(4) = dy fval(5) = dz(1) IF ( myid == 0 ) THEN CALL pmc_send_to_parent( val, SIZE( val ), 0, 123, ierr ) CALL pmc_send_to_parent( fval, SIZE( fval ), 0, 124, ierr ) CALL pmc_send_to_parent( coord_x, nx + 1 + 2 * nbgp, 0, 11, ierr ) CALL pmc_send_to_parent( coord_y, ny + 1 + 2 * nbgp, 0, 12, ierr ) CALL pmc_recv_from_parent( rans_mode_parent, 1, 0, 19, ierr ) ! !-- Receive Coarse grid information. 
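!-- parent_grid_info_real(1:7) holds the parent's lower-left x- and
!-- y-coordinates, dx, dy, the x- and y-extent of the parent domain, and dz(1);
!-- parent_grid_info_int(1:3) holds the parent nx, ny and the number of
!-- parent-grid levels needed for the nesting (nz_cl), as set in
!-- pmci_setup_parent.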
CALL pmc_recv_from_parent( parent_grid_info_real, & SIZE(parent_grid_info_real), 0, 21, ierr ) CALL pmc_recv_from_parent( parent_grid_info_int, 3, 0, 22, ierr ) ENDIF CALL MPI_BCAST( parent_grid_info_real, SIZE(parent_grid_info_real), & MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( parent_grid_info_int, 3, MPI_INTEGER, 0, comm2d, ierr ) cg%dx = parent_grid_info_real(3) cg%dy = parent_grid_info_real(4) cg%dz = parent_grid_info_real(7) cg%nx = parent_grid_info_int(1) cg%ny = parent_grid_info_int(2) cg%nz = parent_grid_info_int(3) ! !-- Get parent coordinates on coarse grid ALLOCATE( cg%coord_x(-nbgp:cg%nx+nbgp) ) ALLOCATE( cg%coord_y(-nbgp:cg%ny+nbgp) ) ALLOCATE( cg%dzu(1:cg%nz+1) ) ALLOCATE( cg%dzw(1:cg%nz+1) ) ALLOCATE( cg%zu(0:cg%nz+1) ) ALLOCATE( cg%zw(0:cg%nz+1) ) ! !-- Get coarse grid coordinates and values of the z-direction from the parent IF ( myid == 0) THEN CALL pmc_recv_from_parent( cg%coord_x, cg%nx+1+2*nbgp, 0, 24, ierr ) CALL pmc_recv_from_parent( cg%coord_y, cg%ny+1+2*nbgp, 0, 25, ierr ) CALL pmc_recv_from_parent( cg%dzu, cg%nz+1, 0, 26, ierr ) CALL pmc_recv_from_parent( cg%dzw, cg%nz+1, 0, 27, ierr ) CALL pmc_recv_from_parent( cg%zu, cg%nz+2, 0, 28, ierr ) CALL pmc_recv_from_parent( cg%zw, cg%nz+2, 0, 29, ierr ) ENDIF ! !-- Broadcast this information CALL MPI_BCAST( cg%coord_x, cg%nx+1+2*nbgp, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( cg%coord_y, cg%ny+1+2*nbgp, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( cg%dzu, cg%nz+1, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( cg%dzw, cg%nz+1, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( cg%zu, cg%nz+2, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( cg%zw, cg%nz+2, MPI_REAL, 0, comm2d, ierr ) CALL MPI_BCAST( rans_mode_parent, 1, MPI_LOGICAL, 0, comm2d, ierr ) ! !-- Find the index bounds for the nest domain in the coarse-grid index space CALL pmci_map_fine_to_coarse_grid ! !-- TO_DO: Klaus give a comment what is happening here CALL pmc_c_get_2d_index_list ! !-- Include couple arrays into child content !-- TO_DO: Klaus: better explain the above comment (what is child content?) CALL pmc_c_clear_next_array_list n = 1 DO WHILE ( pmc_c_getnextarray( myname ) ) !-- Note that cg%nz is not the original nz of parent, but the highest !-- parent-grid level needed for nesting. !-- Please note, in case of chemical species an additional parameter !-- need to be passed, which is required to set the pointer correctly !-- to the chemical-species data structure. Hence, first check if current !-- variable is a chemical species. If so, pass index id of respective !-- species and increment this subsequently. IF ( INDEX( TRIM( myname ), 'chem_' ) /= 0 ) THEN CALL pmci_create_child_arrays ( myname, icl, icr, jcs, jcn, cg%nz, n ) n = n + 1 ELSE CALL pmci_create_child_arrays ( myname, icl, icr, jcs, jcn, cg%nz ) ENDIF ENDDO CALL pmc_c_setind_and_allocmem ! !-- Precompute the index-mapping arrays CALL pmci_define_index_mapping ! !-- Check that the child and parent grid lines do match CALL pmci_check_grid_matching ENDIF CONTAINS SUBROUTINE pmci_map_fine_to_coarse_grid ! 
!-- Determine index bounds of interpolation/anterpolation area in the coarse !-- grid index space IMPLICIT NONE INTEGER(iwp), DIMENSION(5,numprocs) :: coarse_bound_all !< Transfer array for parent-grid index bounds INTEGER(iwp), DIMENSION(4) :: parent_bound_global !< Transfer array for global parent-grid index bounds INTEGER(iwp), DIMENSION(2) :: size_of_array !< INTEGER(iwp) :: i !< INTEGER(iwp) :: iauxl !< INTEGER(iwp) :: iauxr !< INTEGER(iwp) :: ijaux !< INTEGER(iwp) :: j !< INTEGER(iwp) :: jauxs !< INTEGER(iwp) :: jauxn !< REAL(wp) :: xexl !< Parent-grid array exceedance behind the left edge of the child PE subdomain REAL(wp) :: xexr !< Parent-grid array exceedance behind the right edge of the child PE subdomain REAL(wp) :: yexs !< Parent-grid array exceedance behind the south edge of the child PE subdomain REAL(wp) :: yexn !< Parent-grid array exceedance behind the north edge of the child PE subdomain ! !-- Determine the anterpolation index limits. If at least half of the !-- parent-grid cell is within the current child sub-domain, then it !-- is included in the current sub-domain's anterpolation domain. !-- Else the parent-grid cell is included in the neighbouring subdomain's !-- anterpolation domain, or not included at all if we are at the outer !-- edge of the child domain. ! !-- Left IF ( bc_dirichlet_l ) THEN xexl = 2 * cg%dx iauxl = 0 ELSE xexl = 0.0_wp iauxl = 1 ENDIF xcs = coord_x(nxl) - xexl DO i = 0, cg%nx IF ( cg%coord_x(i) + 0.5_wp * cg%dx >= xcs ) THEN icl = MAX( 0, i ) EXIT ENDIF ENDDO ! !-- Right IF ( bc_dirichlet_r ) THEN xexr = 2 * cg%dx iauxr = 0 ELSE xexr = 0.0_wp iauxr = 1 ENDIF xce = coord_x(nxr+1) + xexr DO i = cg%nx, 0 , -1 IF ( cg%coord_x(i) + 0.5_wp * cg%dx <= xce ) THEN icr = MIN( cg%nx, MAX( icl, i ) ) EXIT ENDIF ENDDO ! !-- South IF ( bc_dirichlet_s ) THEN yexs = 2 * cg%dy jauxs = 0 ELSE yexs = 0.0_wp jauxs = 1 ENDIF ycs = coord_y(nys) - yexs DO j = 0, cg%ny IF ( cg%coord_y(j) + 0.5_wp * cg%dy >= ycs ) THEN jcs = MAX( 0, j ) EXIT ENDIF ENDDO ! !-- North IF ( bc_dirichlet_n ) THEN yexn = 2 * cg%dy jauxn = 0 ELSE yexn = 0.0_wp jauxn = 1 ENDIF yce = coord_y(nyn+1) + yexn DO j = cg%ny, 0 , -1 IF ( cg%coord_y(j) + 0.5_wp * cg%dy <= yce ) THEN jcn = MIN( cg%ny, MAX( jcs, j ) ) EXIT ENDIF ENDDO ! !-- Make sure that the indexing is contiguous (no gaps, no overlaps) #if defined( __parallel ) IF ( nxl == 0 ) THEN CALL MPI_SEND( icr, 1, MPI_INTEGER, pright, 717, comm2d, ierr ) ELSE IF ( nxr == nx ) THEN CALL MPI_RECV( ijaux, 1, MPI_INTEGER, pleft, 717, comm2d, status, ierr ) icl = ijaux + 1 ELSE CALL MPI_SEND( icr, 1, MPI_INTEGER, pright, 717, comm2d, ierr ) CALL MPI_RECV( ijaux, 1, MPI_INTEGER, pleft, 717, comm2d, status, ierr ) icl = ijaux + 1 ENDIF IF ( nys == 0 ) THEN CALL MPI_SEND( jcn, 1, MPI_INTEGER, pnorth, 719, comm2d, ierr ) ELSE IF ( nyn == ny ) THEN CALL MPI_RECV( ijaux, 1, MPI_INTEGER, psouth, 719, comm2d, status, ierr ) jcs = ijaux + 1 ELSE CALL MPI_SEND( jcn, 1, MPI_INTEGER, pnorth, 719, comm2d, ierr ) CALL MPI_RECV( ijaux, 1, MPI_INTEGER, psouth, 719, comm2d, status, ierr ) jcs = ijaux + 1 ENDIF #endif WRITE(9,"('Pmci_map_fine_to_coarse_grid. Parent-grid array bounds: ',4(i4,2x))") icl, icr, jcs, jcn FLUSH(9) coarse_bound(1) = icl coarse_bound(2) = icr coarse_bound(3) = jcs coarse_bound(4) = jcn coarse_bound(5) = myid ! 
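!-- coarse_bound(1:5) = (/ icl, icr, jcs, jcn, myid /), i.e. this child PE's
!-- index window in the parent-grid index space plus its rank. It is gathered
!-- on PE0 below and sent to the parent, which uses it in
!-- pmci_create_index_list.
!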
!-- The following index bounds are used for allocating index mapping and
!-- some other auxiliary arrays
    coarse_bound_aux(1) = icl - iauxl
    coarse_bound_aux(2) = icr + iauxr
    coarse_bound_aux(3) = jcs - jauxs
    coarse_bound_aux(4) = jcn + jauxn
!
!-- Note that MPI_Gather receives data from all processes in rank order.
!-- This fact is exploited in creating the index list in pmci_create_index_list
    CALL MPI_GATHER( coarse_bound, 5, MPI_INTEGER, coarse_bound_all, 5,        &
                     MPI_INTEGER, 0, comm2d, ierr )

    IF ( myid == 0 )  THEN
       size_of_array(1) = SIZE( coarse_bound_all, 1 )
       size_of_array(2) = SIZE( coarse_bound_all, 2 )
       CALL pmc_send_to_parent( size_of_array, 2, 0, 40, ierr )
       CALL pmc_send_to_parent( coarse_bound_all, SIZE( coarse_bound_all ),    &
                                0, 41, ierr )
!
!--    Determine the global parent-grid index bounds
       parent_bound_global(1) = MINVAL( coarse_bound_all(1,:) )
       parent_bound_global(2) = MAXVAL( coarse_bound_all(2,:) )
       parent_bound_global(3) = MINVAL( coarse_bound_all(3,:) )
       parent_bound_global(4) = MAXVAL( coarse_bound_all(4,:) )
    ENDIF
!
!-- Broadcast the global parent-grid index bounds to all current child processes
    CALL MPI_BCAST( parent_bound_global, 4, MPI_INTEGER, 0, comm2d, ierr )
    iplg = parent_bound_global(1)
    iprg = parent_bound_global(2)
    jpsg = parent_bound_global(3)
    jpng = parent_bound_global(4)
    WRITE(9,"('Pmci_map_fine_to_coarse_grid. Global parent-grid index bounds iplg, iprg, jpsg, jpng: ',4(i4,2x))") iplg, iprg, jpsg, jpng
    FLUSH(9)

 END SUBROUTINE pmci_map_fine_to_coarse_grid



 SUBROUTINE pmci_define_index_mapping
!
!-- Precomputation of the mapping of the child- and parent-grid indices.

    IMPLICIT NONE

    INTEGER(iwp) ::  i         !< Child-grid index
    INTEGER(iwp) ::  ii        !< Parent-grid index
    INTEGER(iwp) ::  istart    !<
    INTEGER(iwp) ::  ir        !<
    INTEGER(iwp) ::  iw        !< Child-grid index limited to -1 <= iw <= nx+1
    INTEGER(iwp) ::  j         !< Child-grid index
    INTEGER(iwp) ::  jj        !< Parent-grid index
    INTEGER(iwp) ::  jstart    !<
    INTEGER(iwp) ::  jr        !<
    INTEGER(iwp) ::  jw        !< Child-grid index limited to -1 <= jw <= ny+1
    INTEGER(iwp) ::  k         !< Child-grid index
    INTEGER(iwp) ::  kk        !< Parent-grid index
    INTEGER(iwp) ::  kstart    !<
    INTEGER(iwp) ::  kw        !< Child-grid index limited to kw <= nzt+1
!
!-- Determine the integer grid-spacing ratios between the child and the parent grids
    igsr = NINT( cg%dx / dx, iwp )
    jgsr = NINT( cg%dy / dy, iwp )
    kgsr = NINT( cg%dzw(1) / dzw(1), iwp )
    WRITE(9,"('igsr, jgsr, kgsr: ',3(i3,2x))") igsr, jgsr, kgsr
    FLUSH(9)
!
!-- Determine index bounds for the parent-grid work arrays for
!-- interpolation and allocate them.
    CALL pmci_allocate_workarrays
!
!-- Define the MPI-datatypes for parent-grid work array
!-- exchange between the PE-subdomains.
    CALL pmci_create_workarray_exchange_datatypes
!
!-- First determine kcto and kctw which refer to the uppermost
!-- coarse-grid levels below the child top-boundary level.
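!-- kcto is the highest parent-grid zu-level at or below the child's zu(nzt)
!-- and serves as the upper k-bound in the anterpolation of all variables
!-- except w; kctw is the corresponding zw-level at or below zw(nzt-1),
!-- used for w.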
kk = 0 DO WHILE ( cg%zu(kk) <= zu(nzt) ) kk = kk + 1 ENDDO kcto = kk - 1 kk = 0 DO WHILE ( cg%zw(kk) <= zw(nzt-1) ) kk = kk + 1 ENDDO kctw = kk - 1 WRITE(9,"('kcto, kctw = ', 2(i3,2x))") kcto, kctw FLUSH(9) icla = coarse_bound_aux(1) icra = coarse_bound_aux(2) jcsa = coarse_bound_aux(3) jcna = coarse_bound_aux(4) ALLOCATE( iflu(icla:icra) ) ALLOCATE( iflo(icla:icra) ) ALLOCATE( ifuu(icla:icra) ) ALLOCATE( ifuo(icla:icra) ) ALLOCATE( jflv(jcsa:jcna) ) ALLOCATE( jflo(jcsa:jcna) ) ALLOCATE( jfuv(jcsa:jcna) ) ALLOCATE( jfuo(jcsa:jcna) ) ALLOCATE( kflw(0:cg%nz+1) ) ALLOCATE( kflo(0:cg%nz+1) ) ALLOCATE( kfuw(0:cg%nz+1) ) ALLOCATE( kfuo(0:cg%nz+1) ) ALLOCATE( ijkfc_u(0:cg%nz+1,jcsa:jcna,icla:icra) ) ALLOCATE( ijkfc_v(0:cg%nz+1,jcsa:jcna,icla:icra) ) ALLOCATE( ijkfc_w(0:cg%nz+1,jcsa:jcna,icla:icra) ) ALLOCATE( ijkfc_s(0:cg%nz+1,jcsa:jcna,icla:icra) ) ijkfc_u = 0 ijkfc_v = 0 ijkfc_w = 0 ijkfc_s = 0 ! !-- i-indices of u for each ii-index value istart = nxlg DO ii = icla, icra ! !-- The parent and child grid lines do always match in x, hence we !-- use only the local k,j-child-grid plane for the anterpolation. i = istart DO WHILE ( coord_x(i) < cg%coord_x(ii) .AND. i < nxrg ) i = i + 1 ENDDO iflu(ii) = MIN( MAX( i, nxlg ), nxrg ) ifuu(ii) = iflu(ii) istart = iflu(ii) ! !-- Print out the index bounds for checking and debugging purposes WRITE(9,"('pmci_define_index_mapping, ii, iflu, ifuu: ', 3(i4,2x))") & ii, iflu(ii), ifuu(ii) FLUSH(9) ENDDO WRITE(9,*) ! !-- i-indices of others for each ii-index value istart = nxlg DO ii = icla, icra i = istart DO WHILE ( ( coord_x(i) + 0.5_wp * dx < cg%coord_x(ii) ) .AND. & ( i < nxrg ) ) i = i + 1 ENDDO iflo(ii) = MIN( MAX( i, nxlg ), nxrg ) ir = i DO WHILE ( ( coord_x(ir) + 0.5_wp * dx <= cg%coord_x(ii) + cg%dx ) & .AND. ( i < nxrg+1 ) ) i = i + 1 ir = MIN( i, nxrg ) ENDDO ifuo(ii) = MIN( MAX( i-1, iflo(ii) ), nxrg ) istart = iflo(ii) ! !-- Print out the index bounds for checking and debugging purposes WRITE(9,"('pmci_define_index_mapping, ii, iflo, ifuo: ', 3(i4,2x))") & ii, iflo(ii), ifuo(ii) FLUSH(9) ENDDO WRITE(9,*) ! !-- j-indices of v for each jj-index value jstart = nysg DO jj = jcsa, jcna ! !-- The parent and child grid lines do always match in y, hence we !-- use only the local k,i-child-grid plane for the anterpolation. j = jstart DO WHILE ( coord_y(j) < cg%coord_y(jj) .AND. j < nyng ) j = j + 1 ENDDO jflv(jj) = MIN( MAX( j, nysg ), nyng ) jfuv(jj) = jflv(jj) jstart = jflv(jj) ! !-- Print out the index bounds for checking and debugging purposes WRITE(9,"('pmci_define_index_mapping, jj, jflv, jfuv: ', 3(i4,2x))") & jj, jflv(jj), jfuv(jj) FLUSH(9) ENDDO WRITE(9,*) ! !-- j-indices of others for each jj-index value jstart = nysg DO jj = jcsa, jcna j = jstart DO WHILE ( ( coord_y(j) + 0.5_wp * dy < cg%coord_y(jj) ) .AND. & ( j < nyng ) ) j = j + 1 ENDDO jflo(jj) = MIN( MAX( j, nysg ), nyng ) jr = j DO WHILE ( ( coord_y(jr) + 0.5_wp * dy <= cg%coord_y(jj) + cg%dy ) & .AND. ( j < nyng+1 ) ) j = j + 1 jr = MIN( j, nyng ) ENDDO jfuo(jj) = MIN( MAX( j-1, jflo(jj) ), nyng ) jstart = jflo(jj) ! !-- Print out the index bounds for checking and debugging purposes WRITE(9,"('pmci_define_index_mapping, jj, jflo, jfuo: ', 3(i4,2x))") & jj, jflo(jj), jfuo(jj) FLUSH(9) ENDDO WRITE(9,*) ! !-- k-indices of w for each kk-index value !-- Note that anterpolation index limits are needed also for the top boundary !-- ghost cell level because they are used also in the interpolation. kstart = 0 kflw(0) = 0 kfuw(0) = 0 DO kk = 1, cg%nz+1 ! 
!-- The parent and child grid lines do always match in z, hence we !-- use only the local j,i-child-grid plane for the anterpolation. k = kstart DO WHILE ( ( zw(k) < cg%zw(kk) ) .AND. ( k < nzt+1 ) ) k = k + 1 ENDDO kflw(kk) = MIN( MAX( k, 1 ), nzt + 1 ) kfuw(kk) = kflw(kk) kstart = kflw(kk) ! !-- Print out the index bounds for checking and debugging purposes WRITE(9,"('pmci_define_index_mapping, kk, kflw, kfuw: ', 4(i4,2x), 2(e12.5,2x))") & kk, kflw(kk), kfuw(kk), nzt, cg%zu(kk), cg%zw(kk) FLUSH(9) ENDDO WRITE(9,*) ! !-- k-indices of others for each kk-index value kstart = 0 kflo(0) = 0 kfuo(0) = 0 ! !-- Note that anterpolation index limits are needed also for the top boundary !-- ghost cell level because they are used also in the interpolation. DO kk = 1, cg%nz+1 k = kstart DO WHILE ( ( zu(k) < cg%zw(kk-1) ) .AND. ( k <= nzt ) ) k = k + 1 ENDDO kflo(kk) = MIN( MAX( k, 1 ), nzt + 1 ) DO WHILE ( ( zu(k) <= cg%zw(kk) ) .AND. ( k <= nzt+1 ) ) k = k + 1 IF ( k > nzt + 1 ) EXIT ! This EXIT is to prevent zu(k) from flowing over. ENDDO kfuo(kk) = MIN( MAX( k-1, kflo(kk) ), nzt + 1 ) kstart = kflo(kk) ENDDO ! !-- Set the k-index bounds separately for the parent-grid cells cg%nz and cg%nz+1 !-- although they are not actually needed. kflo(cg%nz) = nzt+1 kfuo(cg%nz) = nzt+kgsr kflo(cg%nz+1) = nzt+kgsr kfuo(cg%nz+1) = nzt+kgsr ! !-- Print out the index bounds for checking and debugging purposes DO kk = 1, cg%nz+1 WRITE(9,"('pmci_define_index_mapping, kk, kflo, kfuo: ', 4(i4,2x), 2(e12.5,2x))") & kk, kflo(kk), kfuo(kk), nzt, cg%zu(kk), cg%zw(kk) FLUSH(9) ENDDO WRITE(9,*) ! !-- Precomputation of number of fine-grid nodes inside parent-grid cells. !-- Note that ii, jj, and kk are parent-grid indices. !-- This information is needed in anterpolation and in reversibility !-- correction in interpolation. DO ii = icla, icra DO jj = jcsa, jcna DO kk = 0, cg%nz+1 ! !-- u-component DO i = iflu(ii), ifuu(ii) iw = MAX( MIN( i, nx+1 ), -1 ) DO j = jflo(jj), jfuo(jj) jw = MAX( MIN( j, ny+1 ), -1 ) DO k = kflo(kk), kfuo(kk) kw = MIN( k, nzt+1 ) ijkfc_u(kk,jj,ii) = ijkfc_u(kk,jj,ii) & + MERGE( 1, 0, BTEST( wall_flags_0(kw,jw,iw), 1 ) ) ENDDO ENDDO ENDDO ! !-- v-component DO i = iflo(ii), ifuo(ii) iw = MAX( MIN( i, nx+1 ), -1 ) DO j = jflv(jj), jfuv(jj) jw = MAX( MIN( j, ny+1 ), -1 ) DO k = kflo(kk), kfuo(kk) kw = MIN( k, nzt+1 ) ijkfc_v(kk,jj,ii) = ijkfc_v(kk,jj,ii) & + MERGE( 1, 0, BTEST( wall_flags_0(kw,jw,iw), 2 ) ) ENDDO ENDDO ENDDO ! !-- scalars DO i = iflo(ii), ifuo(ii) iw = MAX( MIN( i, nx+1 ), -1 ) DO j = jflo(jj), jfuo(jj) jw = MAX( MIN( j, ny+1 ), -1 ) DO k = kflo(kk), kfuo(kk) kw = MIN( k, nzt+1 ) ijkfc_s(kk,jj,ii) = ijkfc_s(kk,jj,ii) & + MERGE( 1, 0, BTEST( wall_flags_0(kw,jw,iw), 0 ) ) ENDDO ENDDO ENDDO ! !-- w-component DO i = iflo(ii), ifuo(ii) iw = MAX( MIN( i, nx+1 ), -1 ) DO j = jflo(jj), jfuo(jj) jw = MAX( MIN( j, ny+1 ), -1 ) DO k = kflw(kk), kfuw(kk) kw = MIN( k, nzt+1 ) ijkfc_w(kk,jj,ii) = ijkfc_w(kk,jj,ii) + MERGE( 1, 0, & BTEST( wall_flags_0(kw,jw,iw), 3 ) ) ENDDO ENDDO ENDDO ENDDO ! kk ENDDO ! jj ENDDO ! ii END SUBROUTINE pmci_define_index_mapping SUBROUTINE pmci_allocate_workarrays ! !-- Allocate parent-grid work-arrays for interpolation IMPLICIT NONE ! 
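!--    The work arrays allocated below hold only a three-cell-deep slab of
!--    parent-grid data next to the respective nest boundary (extent 0:2 in
!--    the boundary-normal direction); the index bounds iclw, icrw, jcsw and
!--    jcnw used for them are set next from the PE-subdomain bounds and the
!--    boundary conditions.
!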
!--    Determine and store the PE-subdomain dependent index bounds
       IF ( bc_dirichlet_l )  THEN
          iclw = icl + 1
       ELSE
          iclw = icl - 1
       ENDIF

       IF ( bc_dirichlet_r )  THEN
          icrw = icr - 1
       ELSE
          icrw = icr + 1
       ENDIF

       IF ( bc_dirichlet_s )  THEN
          jcsw = jcs + 1
       ELSE
          jcsw = jcs - 1
       ENDIF

       IF ( bc_dirichlet_n )  THEN
          jcnw = jcn - 1
       ELSE
          jcnw = jcn + 1
       ENDIF

       coarse_bound_w(1) = iclw
       coarse_bound_w(2) = icrw
       coarse_bound_w(3) = jcsw
       coarse_bound_w(4) = jcnw
!
!--    Left and right boundaries.
       ALLOCATE( workarrc_lr(0:cg%nz+1,jcsw:jcnw,0:2) )
!
!--    South and north boundaries.
       ALLOCATE( workarrc_sn(0:cg%nz+1,0:2,iclw:icrw) )
!
!--    Top boundary.
       ALLOCATE( workarrc_t(0:2,jcsw:jcnw,iclw:icrw) )

    END SUBROUTINE pmci_allocate_workarrays



    SUBROUTINE pmci_create_workarray_exchange_datatypes
!
!--    Define specific MPI types for the workarrc exchange.
       IMPLICIT NONE

#if defined( __parallel )
!
!--    For the left and right boundaries
       CALL MPI_TYPE_VECTOR( 3, cg%nz+2, (jcnw-jcsw+1)*(cg%nz+2), MPI_REAL,    &
                             workarrc_lr_exchange_type, ierr )
       CALL MPI_TYPE_COMMIT( workarrc_lr_exchange_type, ierr )
!
!--    For the south and north boundaries
       CALL MPI_TYPE_VECTOR( 1, 3*(cg%nz+2), 3*(cg%nz+2), MPI_REAL,            &
                             workarrc_sn_exchange_type, ierr )
       CALL MPI_TYPE_COMMIT( workarrc_sn_exchange_type, ierr )
!
!--    For the top-boundary x-slices
       CALL MPI_TYPE_VECTOR( icrw-iclw+1, 3, 3*(jcnw-jcsw+1), MPI_REAL,        &
                             workarrc_t_exchange_type_x, ierr )
       CALL MPI_TYPE_COMMIT( workarrc_t_exchange_type_x, ierr )
!
!--    For the top-boundary y-slices
       CALL MPI_TYPE_VECTOR( 1, 3*(jcnw-jcsw+1), 3*(jcnw-jcsw+1), MPI_REAL,    &
                             workarrc_t_exchange_type_y, ierr )
       CALL MPI_TYPE_COMMIT( workarrc_t_exchange_type_y, ierr )
#endif

    END SUBROUTINE pmci_create_workarray_exchange_datatypes



    SUBROUTINE pmci_check_grid_matching
!
!--    Check that the grid lines of child and parent do match.
!--    Also check that the child subdomain width is not smaller than
!--    the parent grid spacing in the respective direction.
       IMPLICIT NONE

       REAL(wp), PARAMETER ::  tolefac = 1.0E-6_wp  !< Relative tolerance for grid-line matching

       REAL(wp) ::  pesdwx  !< Child subdomain width in x-direction
       REAL(wp) ::  pesdwy  !< Child subdomain width in y-direction
       REAL(wp) ::  tolex   !< Tolerance for grid-line matching in x-direction
       REAL(wp) ::  toley   !< Tolerance for grid-line matching in y-direction
       REAL(wp) ::  tolez   !< Tolerance for grid-line matching in z-direction

       INTEGER(iwp) ::  non_matching_lower_left_corner  !< Flag for non-matching lower left corner
       INTEGER(iwp) ::  non_int_gsr_x                   !< Flag for non-integer grid-spacing ratio in x-direction
       INTEGER(iwp) ::  non_int_gsr_y                   !< Flag for non-integer grid-spacing ratio in y-direction
       INTEGER(iwp) ::  non_int_gsr_z                   !< Flag for non-integer grid-spacing ratio in z-direction
       INTEGER(iwp) ::  too_narrow_pesd_x               !< Flag for too narrow pe-subdomain in x-direction
       INTEGER(iwp) ::  too_narrow_pesd_y               !< Flag for too narrow pe-subdomain in y-direction

       non_matching_lower_left_corner = 0
       non_int_gsr_x = 0
       non_int_gsr_y = 0
       non_int_gsr_z = 0
       too_narrow_pesd_x = 0
       too_narrow_pesd_y = 0

       IF ( myid == 0 )  THEN

          tolex = tolefac * dx
          toley = tolefac * dy
          tolez = tolefac * MINVAL( dzw )
!
!--       First check that the child grid lower left corner matches the parent
!--       grid lines.
          IF ( MOD( lower_left_coord_x, cg%dx ) > tolex )  non_matching_lower_left_corner = 1
          IF ( MOD( lower_left_coord_y, cg%dy ) > toley )  non_matching_lower_left_corner = 1
!
!--       Then check that the grid-spacing ratios in each direction are integer valued.
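!
!--       For illustration only (example values): a parent spacing of
!--       cg%dx = 10.0 m combined with a child spacing of dx = 2.0 m passes
!--       the checks below, since MOD( 10.0, 2.0 ) = 0 within tolex, whereas
!--       dx = 3.0 m would raise non_int_gsr_x because 10/3 is not an integer.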
IF ( MOD( cg%dx, dx ) > tolex ) non_int_gsr_x = 1 IF ( MOD( cg%dy, dy ) > toley ) non_int_gsr_y = 1 DO n = 0, kctw+1 IF ( ABS( cg%zw(n) - zw(kflw(n)) ) > tolez ) non_int_gsr_z = 1 ENDDO pesdwx = REAL( nxr - nxl + 1, KIND=wp ) IF ( pesdwx / REAL( igsr, KIND=wp ) < 1.0_wp ) too_narrow_pesd_x = 1 pesdwy = REAL( nyn - nys + 1, KIND=wp ) IF ( pesdwy / REAL( jgsr, KIND=wp ) < 1.0_wp ) too_narrow_pesd_y = 1 ENDIF CALL MPI_BCAST( non_matching_lower_left_corner, 1, MPI_INTEGER, 0, & comm2d, ierr ) IF ( non_matching_lower_left_corner > 0 ) THEN WRITE ( message_string, * ) 'nested child domain lower left ', & 'corner must match its parent grid lines' CALL message( 'pmci_check_grid_matching', 'PA0414', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( non_int_gsr_x, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( non_int_gsr_x > 0 ) THEN WRITE ( message_string, * ) 'nesting grid-spacing ratio ', & '( parent dx / child dx ) must have an integer value' CALL message( 'pmci_check_grid_matching', 'PA0416', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( non_int_gsr_y, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( non_int_gsr_y > 0 ) THEN WRITE ( message_string, * ) 'nesting grid-spacing ratio ', & '( parent dy / child dy ) must have an integer value' CALL message( 'pmci_check_grid_matching', 'PA0416', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( non_int_gsr_z, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( non_int_gsr_z > 0 ) THEN WRITE ( message_string, * ) 'nesting grid-spacing ratio ', & '( parent dz / child dz ) must have an integer value for each z-level' CALL message( 'pmci_check_grid_matching', 'PA0416', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( too_narrow_pesd_x, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( too_narrow_pesd_x > 0 ) THEN WRITE ( message_string, * ) 'child subdomain width in x-direction ', & 'must not be smaller than its parent grid dx. Change the PE-grid ', & 'setting (npex, npey) to satisfy this requirement.' CALL message( 'pmci_check_grid_matching', 'PA0587', 3, 2, 0, 6, 0 ) ENDIF CALL MPI_BCAST( too_narrow_pesd_y, 1, MPI_INTEGER, 0, comm2d, ierr ) IF ( too_narrow_pesd_y > 0 ) THEN WRITE ( message_string, * ) 'child subdomain width in y-direction ', & 'must not be smaller than its parent grid dy. Change the PE-grid ', & 'setting (npex, npey) to satisfy this requirement.' CALL message( 'pmci_check_grid_matching', 'PA0587', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE pmci_check_grid_matching #endif END SUBROUTINE pmci_setup_child SUBROUTINE pmci_setup_coordinates #if defined( __parallel ) IMPLICIT NONE INTEGER(iwp) :: i !< INTEGER(iwp) :: j !< ! !-- Create coordinate arrays. ALLOCATE( coord_x(-nbgp:nx+nbgp) ) ALLOCATE( coord_y(-nbgp:ny+nbgp) ) DO i = -nbgp, nx + nbgp coord_x(i) = lower_left_coord_x + i * dx ENDDO DO j = -nbgp, ny + nbgp coord_y(j) = lower_left_coord_y + j * dy ENDDO #endif END SUBROUTINE pmci_setup_coordinates !------------------------------------------------------------------------------! ! Description: ! ------------ !> In this subroutine the number of coupled arrays is determined. !------------------------------------------------------------------------------! SUBROUTINE pmci_num_arrays #if defined( __parallel ) USE pmc_general, & ONLY: pmc_max_array IMPLICIT NONE ! !-- The number of coupled arrays depends on the model settings. At least !-- 5 arrays need to be coupled (u, v, w, e, diss). Please note, actually !-- e and diss (TKE and dissipation rate) are only required if RANS-RANS !-- nesting is applied, but memory is allocated nevertheless. 
This is because !-- the information whether they are needed or not is retrieved at a later !-- point in time. In case e and diss are not needed, they are also not !-- exchanged between parent and child. pmc_max_array = 5 ! !-- pt IF ( .NOT. neutral ) pmc_max_array = pmc_max_array + 1 IF ( humidity ) THEN ! !-- q pmc_max_array = pmc_max_array + 1 ! !-- qc, nc IF ( bulk_cloud_model .AND. microphysics_morrison ) & pmc_max_array = pmc_max_array + 2 ! !-- qr, nr IF ( bulk_cloud_model .AND. microphysics_seifert ) & pmc_max_array = pmc_max_array + 2 ENDIF ! !-- s IF ( passive_scalar ) pmc_max_array = pmc_max_array + 1 ! !-- nr_part, part_adr IF ( particle_advection ) pmc_max_array = pmc_max_array + 2 ! !-- Chemistry, depends on number of species IF ( air_chemistry .AND. nest_chemistry ) & pmc_max_array = pmc_max_array + nspec #endif END SUBROUTINE pmci_num_arrays SUBROUTINE pmci_set_array_pointer( name, child_id, nz_cl, n ) IMPLICIT NONE INTEGER(iwp), INTENT(IN) :: child_id !< INTEGER(iwp), INTENT(IN) :: nz_cl !< INTEGER(iwp), INTENT(IN),OPTIONAL :: n !< index of chemical species CHARACTER(LEN=*), INTENT(IN) :: name !< #if defined( __parallel ) INTEGER(iwp) :: ierr !< MPI error code REAL(wp), POINTER, DIMENSION(:,:) :: p_2d !< REAL(wp), POINTER, DIMENSION(:,:,:) :: p_3d !< REAL(wp), POINTER, DIMENSION(:,:,:) :: p_3d_sec !< INTEGER(idp), POINTER, DIMENSION(:,:) :: i_2d !< NULLIFY( p_3d ) NULLIFY( p_2d ) NULLIFY( i_2d ) ! !-- List of array names, which can be coupled. !-- In case of 3D please change also the second array for the pointer version IF ( TRIM(name) == "u" ) p_3d => u IF ( TRIM(name) == "v" ) p_3d => v IF ( TRIM(name) == "w" ) p_3d => w IF ( TRIM(name) == "e" ) p_3d => e IF ( TRIM(name) == "pt" ) p_3d => pt IF ( TRIM(name) == "q" ) p_3d => q IF ( TRIM(name) == "qc" ) p_3d => qc IF ( TRIM(name) == "qr" ) p_3d => qr IF ( TRIM(name) == "nr" ) p_3d => nr IF ( TRIM(name) == "nc" ) p_3d => nc IF ( TRIM(name) == "s" ) p_3d => s IF ( TRIM(name) == "diss" ) p_3d => diss IF ( TRIM(name) == "nr_part" ) i_2d => nr_part IF ( TRIM(name) == "part_adr" ) i_2d => part_adr IF ( INDEX( TRIM(name), "chem_" ) /= 0 ) p_3d => chem_species(n)%conc ! !-- Next line is just an example for a 2D array (not active for coupling!) !-- Please note, that z0 has to be declared as TARGET array in modules.f90 ! IF ( TRIM(name) == "z0" ) p_2d => z0 IF ( TRIM(name) == "u" ) p_3d_sec => u_2 IF ( TRIM(name) == "v" ) p_3d_sec => v_2 IF ( TRIM(name) == "w" ) p_3d_sec => w_2 IF ( TRIM(name) == "e" ) p_3d_sec => e_2 IF ( TRIM(name) == "pt" ) p_3d_sec => pt_2 IF ( TRIM(name) == "q" ) p_3d_sec => q_2 IF ( TRIM(name) == "qc" ) p_3d_sec => qc_2 IF ( TRIM(name) == "qr" ) p_3d_sec => qr_2 IF ( TRIM(name) == "nr" ) p_3d_sec => nr_2 IF ( TRIM(name) == "nc" ) p_3d_sec => nc_2 IF ( TRIM(name) == "s" ) p_3d_sec => s_2 IF ( TRIM(name) == "diss" ) p_3d_sec => diss_2 IF ( INDEX( TRIM(name), "chem_" ) /= 0 ) p_3d_sec => spec_conc_2(:,:,:,n) IF ( ASSOCIATED( p_3d ) ) THEN CALL pmc_s_set_dataarray( child_id, p_3d, nz_cl, nz, & array_2 = p_3d_sec ) ELSEIF ( ASSOCIATED( p_2d ) ) THEN CALL pmc_s_set_dataarray( child_id, p_2d ) ELSEIF ( ASSOCIATED( i_2d ) ) THEN CALL pmc_s_set_dataarray( child_id, i_2d ) ELSE ! !-- Give only one message for the root domain IF ( myid == 0 .AND. cpl_id == 1 ) THEN message_string = 'pointer for array "' // TRIM( name ) // & '" can''t be associated' CALL message( 'pmci_set_array_pointer', 'PA0117', 3, 2, 0, 6, 0 ) ELSE ! 
!-- Avoid others to continue CALL MPI_BARRIER( comm2d, ierr ) ENDIF ENDIF #endif END SUBROUTINE pmci_set_array_pointer INTEGER FUNCTION get_number_of_childs () ! Change the name to "get_number_of_children" IMPLICIT NONE #if defined( __parallel ) get_number_of_childs = SIZE( pmc_parent_for_child ) - 1 #else get_number_of_childs = 0 #endif RETURN END FUNCTION get_number_of_childs INTEGER FUNCTION get_childid (id_index) IMPLICIT NONE INTEGER,INTENT(IN) :: id_index #if defined( __parallel ) get_childid = pmc_parent_for_child(id_index) #else get_childid = 0 #endif RETURN END FUNCTION get_childid SUBROUTINE get_child_edges (m, lx_coord, lx_coord_b, rx_coord, rx_coord_b, & sy_coord, sy_coord_b, ny_coord, ny_coord_b, & uz_coord, uz_coord_b) IMPLICIT NONE INTEGER,INTENT(IN) :: m REAL(wp),INTENT(OUT) :: lx_coord, lx_coord_b REAL(wp),INTENT(OUT) :: rx_coord, rx_coord_b REAL(wp),INTENT(OUT) :: sy_coord, sy_coord_b REAL(wp),INTENT(OUT) :: ny_coord, ny_coord_b REAL(wp),INTENT(OUT) :: uz_coord, uz_coord_b lx_coord = childgrid(m)%lx_coord rx_coord = childgrid(m)%rx_coord sy_coord = childgrid(m)%sy_coord ny_coord = childgrid(m)%ny_coord uz_coord = childgrid(m)%uz_coord lx_coord_b = childgrid(m)%lx_coord_b rx_coord_b = childgrid(m)%rx_coord_b sy_coord_b = childgrid(m)%sy_coord_b ny_coord_b = childgrid(m)%ny_coord_b uz_coord_b = childgrid(m)%uz_coord_b END SUBROUTINE get_child_edges SUBROUTINE get_child_gridspacing( m, dx,dy,dz ) IMPLICIT NONE INTEGER,INTENT(IN) :: m REAL(wp),INTENT(OUT) :: dx,dy REAL(wp),INTENT(OUT),OPTIONAL :: dz dx = childgrid(m)%dx dy = childgrid(m)%dy IF(PRESENT(dz)) THEN dz = childgrid(m)%dz ENDIF END SUBROUTINE get_child_gridspacing SUBROUTINE pmci_create_child_arrays( name, is, ie, js, je, nzc, n ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name !< INTEGER(iwp), INTENT(IN) :: ie !< INTEGER(iwp), INTENT(IN) :: is !< INTEGER(iwp), INTENT(IN) :: je !< INTEGER(iwp), INTENT(IN) :: js !< INTEGER(iwp), INTENT(IN) :: nzc !< nzc is cg%nz, but note that cg%nz is not the original nz of parent, but the highest parent-grid level needed for nesting. INTEGER(iwp), INTENT(IN), OPTIONAL :: n !< number of chemical species #if defined( __parallel ) INTEGER(iwp) :: ierr !< REAL(wp), POINTER,DIMENSION(:,:) :: p_2d !< REAL(wp), POINTER,DIMENSION(:,:,:) :: p_3d !< INTEGER(idp), POINTER,DIMENSION(:,:) :: i_2d !< NULLIFY( p_3d ) NULLIFY( p_2d ) NULLIFY( i_2d ) ! !-- List of array names, which can be coupled IF ( TRIM( name ) == "u" ) THEN IF ( .NOT. ALLOCATED( uc ) ) ALLOCATE( uc(0:nzc+1,js:je,is:ie) ) p_3d => uc ELSEIF ( TRIM( name ) == "v" ) THEN IF ( .NOT. ALLOCATED( vc ) ) ALLOCATE( vc(0:nzc+1,js:je,is:ie) ) p_3d => vc ELSEIF ( TRIM( name ) == "w" ) THEN IF ( .NOT. ALLOCATED( wc ) ) ALLOCATE( wc(0:nzc+1,js:je,is:ie) ) p_3d => wc ELSEIF ( TRIM( name ) == "e" ) THEN IF ( .NOT. ALLOCATED( ec ) ) ALLOCATE( ec(0:nzc+1,js:je,is:ie) ) p_3d => ec ELSEIF ( TRIM( name ) == "diss" ) THEN IF ( .NOT. ALLOCATED( dissc ) ) ALLOCATE( dissc(0:nzc+1,js:je,is:ie) ) p_3d => dissc ELSEIF ( TRIM( name ) == "pt") THEN IF ( .NOT. ALLOCATED( ptc ) ) ALLOCATE( ptc(0:nzc+1,js:je,is:ie) ) p_3d => ptc ELSEIF ( TRIM( name ) == "q") THEN IF ( .NOT. ALLOCATED( q_c ) ) ALLOCATE( q_c(0:nzc+1,js:je,is:ie) ) p_3d => q_c ELSEIF ( TRIM( name ) == "qc") THEN IF ( .NOT. ALLOCATED( qcc ) ) ALLOCATE( qcc(0:nzc+1,js:je,is:ie) ) p_3d => qcc ELSEIF ( TRIM( name ) == "qr") THEN IF ( .NOT. ALLOCATED( qrc ) ) ALLOCATE( qrc(0:nzc+1,js:je,is:ie) ) p_3d => qrc ELSEIF ( TRIM( name ) == "nr") THEN IF ( .NOT. 
ALLOCATED( nrc ) ) ALLOCATE( nrc(0:nzc+1,js:je,is:ie) ) p_3d => nrc ELSEIF ( TRIM( name ) == "nc") THEN IF ( .NOT. ALLOCATED( ncc ) ) ALLOCATE( ncc(0:nzc+1,js:je,is:ie) ) p_3d => ncc ELSEIF ( TRIM( name ) == "s") THEN IF ( .NOT. ALLOCATED( sc ) ) ALLOCATE( sc(0:nzc+1,js:je,is:ie) ) p_3d => sc ELSEIF ( TRIM( name ) == "nr_part") THEN IF ( .NOT. ALLOCATED( nr_partc ) ) ALLOCATE( nr_partc(js:je,is:ie) ) i_2d => nr_partc ELSEIF ( TRIM( name ) == "part_adr") THEN IF ( .NOT. ALLOCATED( part_adrc ) ) ALLOCATE( part_adrc(js:je,is:ie) ) i_2d => part_adrc ELSEIF ( TRIM( name(1:5) ) == "chem_" ) THEN IF ( .NOT. ALLOCATED( chem_spec_c ) ) & ALLOCATE( chem_spec_c(0:nzc+1,js:je,is:ie,1:nspec) ) p_3d => chem_spec_c(:,:,:,n) !ELSEIF (trim(name) == "z0") then !IF (.not.allocated(z0c)) allocate(z0c(js:je, is:ie)) !p_2d => z0c ENDIF IF ( ASSOCIATED( p_3d ) ) THEN CALL pmc_c_set_dataarray( p_3d ) ELSEIF ( ASSOCIATED( p_2d ) ) THEN CALL pmc_c_set_dataarray( p_2d ) ELSEIF ( ASSOCIATED( i_2d ) ) THEN CALL pmc_c_set_dataarray( i_2d ) ELSE ! !-- Give only one message for the first child domain IF ( myid == 0 .AND. cpl_id == 2 ) THEN message_string = 'pointer for array "' // TRIM( name ) // & '" can''t be associated' CALL message( 'pmci_create_child_arrays', 'PA0170', 3, 2, 0, 6, 0 ) ELSE ! !-- Prevent others from continuing CALL MPI_BARRIER( comm2d, ierr ) ENDIF ENDIF #endif END SUBROUTINE pmci_create_child_arrays SUBROUTINE pmci_parent_initialize ! !-- Send data for the children in order to let them create initial !-- conditions by interpolating the parent-domain fields. #if defined( __parallel ) IMPLICIT NONE INTEGER(iwp) :: child_id !< INTEGER(iwp) :: m !< REAL(wp) :: waittime !< DO m = 1, SIZE( pmc_parent_for_child ) - 1 child_id = pmc_parent_for_child(m) CALL pmc_s_fillbuffer( child_id, waittime=waittime ) ENDDO #endif END SUBROUTINE pmci_parent_initialize SUBROUTINE pmci_child_initialize ! !-- Create initial conditions for the current child domain by interpolating !-- the parent-domain fields. #if defined( __parallel ) IMPLICIT NONE INTEGER(iwp) :: i !< INTEGER(iwp) :: icl !< INTEGER(iwp) :: icla !< INTEGER(iwp) :: iclw !< INTEGER(iwp) :: icr !< INTEGER(iwp) :: icra !< INTEGER(iwp) :: icrw !< INTEGER(iwp) :: j !< INTEGER(iwp) :: jcn !< INTEGER(iwp) :: jcna !< INTEGER(iwp) :: jcnw !< INTEGER(iwp) :: jcs !< INTEGER(iwp) :: jcsa !< INTEGER(iwp) :: jcsw !< INTEGER(iwp) :: k !< INTEGER(iwp) :: n !< running index for chemical species REAL(wp) :: waittime !< ! !-- Root model is never anyone's child IF ( cpl_id > 1 ) THEN ! !-- Child domain boundaries in the parent index space icl = coarse_bound(1) icr = coarse_bound(2) jcs = coarse_bound(3) jcn = coarse_bound(4) icla = coarse_bound_aux(1) icra = coarse_bound_aux(2) jcsa = coarse_bound_aux(3) jcna = coarse_bound_aux(4) iclw = coarse_bound_w(1) icrw = coarse_bound_w(2) jcsw = coarse_bound_w(3) jcnw = coarse_bound_w(4) ! !-- Get data from the parent CALL pmc_c_getbuffer( waittime = waittime ) ! !-- The interpolation. CALL pmci_interp_1sto_all ( u, uc, kcto, iflu, ifuu, jflo, jfuo, kflo, kfuo, 'u' ) CALL pmci_interp_1sto_all ( v, vc, kcto, iflo, ifuo, jflv, jfuv, kflo, kfuo, 'v' ) CALL pmci_interp_1sto_all ( w, wc, kctw, iflo, ifuo, jflo, jfuo, kflw, kfuw, 'w' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN CALL pmci_interp_1sto_all ( e, ec, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 'e' ) ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. 
rans_tke_e ) THEN CALL pmci_interp_1sto_all ( diss, dissc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDIF IF ( .NOT. neutral ) THEN CALL pmci_interp_1sto_all ( pt, ptc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_all ( q, q_c, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_all ( qc, qcc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) CALL pmci_interp_1sto_all ( nc, ncc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_all ( qr, qrc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) CALL pmci_interp_1sto_all ( nr, nrc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_all ( s, sc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_all ( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 's' ) ENDDO ENDIF IF ( topography /= 'flat' ) THEN ! !-- Inside buildings set velocities and TKE back to zero. !-- Other scalars (pt, q, s, km, kh, p, sa, ...) are ignored at present, !-- maybe revise later. DO i = nxlg, nxrg DO j = nysg, nyng DO k = nzb, nzt u(k,j,i) = MERGE( u(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 1 ) ) v(k,j,i) = MERGE( v(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 2 ) ) w(k,j,i) = MERGE( w(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 3 ) ) ! e(k,j,i) = MERGE( e(k,j,i), 0.0_wp, & ! BTEST( wall_flags_0(k,j,i), 0 ) ) u_p(k,j,i) = MERGE( u_p(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 1 ) ) v_p(k,j,i) = MERGE( v_p(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 2 ) ) w_p(k,j,i) = MERGE( w_p(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 3 ) ) ! e_p(k,j,i) = MERGE( e_p(k,j,i), 0.0_wp, & ! BTEST( wall_flags_0(k,j,i), 0 ) ) ENDDO ENDDO ENDDO ENDIF ENDIF CONTAINS SUBROUTINE pmci_interp_1sto_all( f, fc, kct, ifl, ifu, jfl, jfu, kfl, kfu, var ) ! !-- Interpolation of the internal values for the child-domain initialization IMPLICIT NONE REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg), INTENT(INOUT) :: f !< Child-grid array REAL(wp), DIMENSION(0:cg%nz+1,jcs:jcn,icl:icr), INTENT(IN) :: fc !< Parent-grid array INTEGER(iwp) :: kct !< The parent-grid index in z-direction just below the boundary value node INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifl !< Indicates start index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifu !< Indicates end index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfl !< Indicates start index of child cells belonging to certain parent cell - y direction INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfu !< Indicates end index of child cells belonging to certain parent cell - y direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfl !< Indicates start index of child cells belonging to certain parent cell - z direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfu !< Indicates end index of child cells belonging to certain parent cell - z direction CHARACTER(LEN=1), INTENT(IN) :: var !< Variable symbol: 'u', 'v', 'w' or 's' ! 
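!-- The index-mapping arrays ifl/ifu, jfl/jfu and kfl/kfu give, for each
!-- parent-grid cell, the first and last child-grid cell it contains. For
!-- illustration only (example value): with a grid-spacing ratio of igsr = 3,
!-- a scalar parent cell l covers the three child cells ifl(l) ... ifu(l) =
!-- ifl(l)+2, whereas for the velocity component staggered in that direction
!-- the two bounds coincide on the matching grid line.
!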
!-- Local variables: INTEGER(iwp) :: i !< INTEGER(iwp) :: ib !< INTEGER(iwp) :: ie !< INTEGER(iwp) :: ifirst !< INTEGER(iwp) :: ilast !< INTEGER(iwp) :: j !< INTEGER(iwp) :: jb !< INTEGER(iwp) :: je !< INTEGER(iwp) :: jfirst !< INTEGER(iwp) :: jlast !< INTEGER(iwp) :: k !< INTEGER(iwp) :: l !< INTEGER(iwp) :: lb !< INTEGER(iwp) :: le !< INTEGER(iwp) :: m !< INTEGER(iwp) :: mb !< INTEGER(iwp) :: me !< INTEGER(iwp) :: n !< lb = icl le = icr mb = jcs me = jcn ifirst = nxl ilast = nxr jfirst = nys jlast = nyn IF ( nesting_mode /= 'vertical' ) THEN IF ( bc_dirichlet_l ) THEN lb = icl + 1 ifirst = nxl - 1 ! !-- For u, nxl is a ghost node, but not for the other variables IF ( var == 'u' ) THEN lb = icl + 2 ifirst = nxl ENDIF ENDIF IF ( bc_dirichlet_s ) THEN mb = jcs + 1 jfirst = nys - 1 ! !-- For v, nys is a ghost node, but not for the other variables IF ( var == 'v' ) THEN mb = jcs + 2 jfirst = nys ENDIF ENDIF IF ( bc_dirichlet_r ) THEN le = icr - 1 ilast = nxr + 1 ENDIF IF ( bc_dirichlet_n ) THEN me = jcn - 1 jlast = nyn + 1 ENDIF ENDIF f(:,:,:) = 0.0_wp IF ( var == 'u' ) THEN ib = ifl(lb) ie = ifl(le+1) - 1 jb = jfl(mb) je = jfu(me) DO l = lb, le DO m = mb, me DO n = 0, kct + 1 DO i = ifl(l), ifl(l+1)-1 DO j = jfl(m), jfu(m) DO k = kfl(n), MIN( kfu(n), nzt+1 ) f(k,j,i) = fc(n,m,l) ENDDO ENDDO ENDDO ENDDO ENDDO ENDDO ELSE IF ( var == 'v' ) THEN ib = ifl(lb) ie = ifu(le) jb = jfl(mb) je = jfl(me+1) - 1 DO l = lb, le DO m = mb, me DO n = 0, kct + 1 DO i = ifl(l), ifu(l) DO j = jfl(m), jfl(m+1)-1 DO k = kfl(n), MIN( kfu(n), nzt+1 ) f(k,j,i) = fc(n,m,l) ENDDO ENDDO ENDDO ENDDO ENDDO ENDDO ELSE IF ( var == 'w' ) THEN ib = ifl(lb) ie = ifu(le) jb = jfl(mb) je = jfu(me) DO l = lb, le DO m = mb, me DO n = 1, kct + 1 DO i = ifl(l), ifu(l) DO j = jfl(m), jfu(m) f(nzb,j,i) = 0.0_wp ! Because the n-loop starts from n=1 instead of 0 DO k = kfu(n-1)+1, kfu(n) f(k,j,i) = fc(n,m,l) ENDDO ENDDO ENDDO ENDDO ENDDO ENDDO ELSE ! scalars ib = ifl(lb) ie = ifu(le) jb = jfl(mb) je = jfu(me) DO l = lb, le DO m = mb, me DO n = 0, kct + 1 DO i = ifl(l), ifu(l) DO j = jfl(m), jfu(m) DO k = kfl(n), MIN( kfu(n), nzt+1 ) f(k,j,i) = fc(n,m,l) ENDDO ENDDO ENDDO ENDDO ENDDO ENDDO ENDIF ! var ! !-- If the subdomain i- and/or j-dimension (nx/npex and/or ny/npey) is !-- not integer divisible by the grid spacing ratio in its direction, !-- the above loops will return with unfilled gaps in the initial fields. !-- These gaps, if present, are filled here. IF ( ib > ifirst ) THEN DO i = ifirst, ib-1 f(:,:,i) = f(:,:,ib) ENDDO ENDIF IF ( ie < ilast ) THEN DO i = ie+1, ilast f(:,:,i) = f(:,:,ie) ENDDO ENDIF IF ( jb > jfirst ) THEN DO j = jfirst, jb-1 f(:,j,:) = f(:,jb,:) ENDDO ENDIF IF ( je < jlast ) THEN DO j = je+1, jlast f(:,j,:) = f(:,je,:) ENDDO ENDIF END SUBROUTINE pmci_interp_1sto_all #endif END SUBROUTINE pmci_child_initialize SUBROUTINE pmci_check_setting_mismatches ! !-- Check for mismatches between settings of master and child variables !-- (e.g., all children have to follow the end_time settings of the root model). !-- The root model overwrites variables in the other models, so these variables !-- only need to be set once in file PARIN. #if defined( __parallel ) USE control_parameters, & ONLY: dt_restart, end_time, message_string, restart_time, time_restart IMPLICIT NONE INTEGER :: ierr REAL(wp) :: dt_restart_root REAL(wp) :: end_time_root REAL(wp) :: restart_time_root REAL(wp) :: time_restart_root ! !-- Check the time to be simulated. 
!-- Here, and in the following, the root process communicates the respective !-- variable to all others, and its value will then be compared with the local !-- values. IF ( pmc_is_rootmodel() ) end_time_root = end_time CALL MPI_BCAST( end_time_root, 1, MPI_REAL, 0, comm_world_nesting, ierr ) IF ( .NOT. pmc_is_rootmodel() ) THEN IF ( end_time /= end_time_root ) THEN WRITE( message_string, * ) 'mismatch between root model and ', & 'child settings:& end_time(root) = ', end_time_root, & '& end_time(child) = ', end_time, '& child value is set', & ' to root value' CALL message( 'pmci_check_setting_mismatches', 'PA0419', 0, 1, 0, 6, & 0 ) end_time = end_time_root ENDIF ENDIF ! !-- Same for restart time IF ( pmc_is_rootmodel() ) restart_time_root = restart_time CALL MPI_BCAST( restart_time_root, 1, MPI_REAL, 0, comm_world_nesting, ierr ) IF ( .NOT. pmc_is_rootmodel() ) THEN IF ( restart_time /= restart_time_root ) THEN WRITE( message_string, * ) 'mismatch between root model and ', & 'child settings: & restart_time(root) = ', restart_time_root, & '& restart_time(child) = ', restart_time, '& child ', & 'value is set to root value' CALL message( 'pmci_check_setting_mismatches', 'PA0419', 0, 1, 0, 6, & 0 ) restart_time = restart_time_root ENDIF ENDIF ! !-- Same for dt_restart IF ( pmc_is_rootmodel() ) dt_restart_root = dt_restart CALL MPI_BCAST( dt_restart_root, 1, MPI_REAL, 0, comm_world_nesting, ierr ) IF ( .NOT. pmc_is_rootmodel() ) THEN IF ( dt_restart /= dt_restart_root ) THEN WRITE( message_string, * ) 'mismatch between root model and ', & 'child settings: & dt_restart(root) = ', dt_restart_root, & '& dt_restart(child) = ', dt_restart, '& child ', & 'value is set to root value' CALL message( 'pmci_check_setting_mismatches', 'PA0419', 0, 1, 0, 6, & 0 ) dt_restart = dt_restart_root ENDIF ENDIF ! !-- Same for time_restart IF ( pmc_is_rootmodel() ) time_restart_root = time_restart CALL MPI_BCAST( time_restart_root, 1, MPI_REAL, 0, comm_world_nesting, ierr ) IF ( .NOT. pmc_is_rootmodel() ) THEN IF ( time_restart /= time_restart_root ) THEN WRITE( message_string, * ) 'mismatch between root model and ', & 'child settings: & time_restart(root) = ', time_restart_root, & '& time_restart(child) = ', time_restart, '& child ', & 'value is set to root value' CALL message( 'pmci_check_setting_mismatches', 'PA0419', 0, 1, 0, 6, & 0 ) time_restart = time_restart_root ENDIF ENDIF #endif END SUBROUTINE pmci_check_setting_mismatches SUBROUTINE pmci_synchronize #if defined( __parallel ) ! !-- Unify the time steps for each model and synchronize using !-- MPI_ALLREDUCE with the MPI_MIN operator over all processes using !-- the global communicator MPI_COMM_WORLD. IMPLICIT NONE INTEGER(iwp) :: ierr !< REAL(wp), DIMENSION(1) :: dtl !< REAL(wp), DIMENSION(1) :: dtg !< dtl(1) = dt_3d CALL MPI_ALLREDUCE( dtl, dtg, 1, MPI_REAL, MPI_MIN, MPI_COMM_WORLD, ierr ) dt_3d = dtg(1) #endif END SUBROUTINE pmci_synchronize SUBROUTINE pmci_set_swaplevel( swaplevel ) ! !-- After each Runge-Kutta sub-timestep, alternately set buffer one or buffer !-- two active IMPLICIT NONE INTEGER(iwp), INTENT(IN) :: swaplevel !< swaplevel (1 or 2) of PALM's !< timestep INTEGER(iwp) :: child_id !< INTEGER(iwp) :: m !< #if defined( __parallel ) DO m = 1, SIZE( pmc_parent_for_child )-1 child_id = pmc_parent_for_child(m) CALL pmc_s_set_active_data_array( child_id, swaplevel ) ENDDO #endif END SUBROUTINE pmci_set_swaplevel SUBROUTINE pmci_datatrans( local_nesting_mode ) ! 
!-- This subroutine controls the nesting according to the nestpar !-- parameter nesting_mode (two-way (default) or one-way) and the !-- order of anterpolations according to the nestpar parameter !-- nesting_datatransfer_mode (cascade, overlap or mixed (default)). !-- Although nesting_mode is a variable of this model, pass it as !-- an argument to allow for example to force one-way initialization !-- phase. !-- Note that interpolation ( parent_to_child ) must always be carried !-- out before anterpolation ( child_to_parent ). IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: local_nesting_mode IF ( TRIM( local_nesting_mode ) == 'one-way' ) THEN CALL pmci_child_datatrans( parent_to_child ) CALL pmci_parent_datatrans( parent_to_child ) ELSE IF( nesting_datatransfer_mode == 'cascade' ) THEN CALL pmci_child_datatrans( parent_to_child ) CALL pmci_parent_datatrans( parent_to_child ) CALL pmci_parent_datatrans( child_to_parent ) CALL pmci_child_datatrans( child_to_parent ) ELSEIF( nesting_datatransfer_mode == 'overlap') THEN CALL pmci_parent_datatrans( parent_to_child ) CALL pmci_child_datatrans( parent_to_child ) CALL pmci_child_datatrans( child_to_parent ) CALL pmci_parent_datatrans( child_to_parent ) ELSEIF( TRIM( nesting_datatransfer_mode ) == 'mixed' ) THEN CALL pmci_parent_datatrans( parent_to_child ) CALL pmci_child_datatrans( parent_to_child ) CALL pmci_parent_datatrans( child_to_parent ) CALL pmci_child_datatrans( child_to_parent ) ENDIF ENDIF END SUBROUTINE pmci_datatrans SUBROUTINE pmci_parent_datatrans( direction ) IMPLICIT NONE INTEGER(iwp), INTENT(IN) :: direction !< #if defined( __parallel ) INTEGER(iwp) :: child_id !< INTEGER(iwp) :: i !< INTEGER(iwp) :: j !< INTEGER(iwp) :: k !< INTEGER(iwp) :: m !< DO m = 1, SIZE( pmc_parent_for_child ) - 1 child_id = pmc_parent_for_child(m) IF ( direction == parent_to_child ) THEN CALL cpu_log( log_point_s(71), 'pmc parent send', 'start' ) CALL pmc_s_fillbuffer( child_id ) CALL cpu_log( log_point_s(71), 'pmc parent send', 'stop' ) ELSE ! !-- Communication from child to parent CALL cpu_log( log_point_s(72), 'pmc parent recv', 'start' ) child_id = pmc_parent_for_child(m) CALL pmc_s_getdata_from_buffer( child_id ) CALL cpu_log( log_point_s(72), 'pmc parent recv', 'stop' ) ! !-- The anterpolated data is now available in u etc IF ( topography /= 'flat' ) THEN ! !-- Inside buildings/topography reset velocities back to zero. !-- Scalars (pt, q, s, km, kh, p, sa, ...) are ignored at !-- present, maybe revise later. !-- Resetting of e is removed as unnecessary since e is not !-- anterpolated, and as incorrect since it overran the default !-- Neumann condition (bc_e_b). DO i = nxlg, nxrg DO j = nysg, nyng DO k = nzb, nzt+1 u(k,j,i) = MERGE( u(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 1 ) ) v(k,j,i) = MERGE( v(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 2 ) ) w(k,j,i) = MERGE( w(k,j,i), 0.0_wp, & BTEST( wall_flags_0(k,j,i), 3 ) ) ! !-- TO_DO: zero setting of temperature within topography creates !-- wrong results ! pt(nzb:nzb_s_inner(j,i),j,i) = 0.0_wp ! IF ( humidity .OR. passive_scalar ) THEN ! q(nzb:nzb_s_inner(j,i),j,i) = 0.0_wp ! 
ENDIF ENDDO ENDDO ENDDO ENDIF ENDIF ENDDO #endif END SUBROUTINE pmci_parent_datatrans SUBROUTINE pmci_child_datatrans( direction ) IMPLICIT NONE INTEGER(iwp), INTENT(IN) :: direction !< Transfer direction: parent_to_child or child_to_parent #if defined( __parallel ) INTEGER(iwp) :: icl !< Parent-grid array index bound, left INTEGER(iwp) :: icr !< Parent-grid array index bound, right INTEGER(iwp) :: jcn !< Parent-grid array index bound, north INTEGER(iwp) :: jcs !< Parent-grid array index bound, south INTEGER(iwp) :: icla !< Auxiliary-array (index-mapping etc) index bound, left INTEGER(iwp) :: icra !< Auxiliary-array (index-mapping etc) index bound, right INTEGER(iwp) :: jcna !< Auxiliary-array (index-mapping etc) index bound, north INTEGER(iwp) :: jcsa !< Auxiliary-array (index-mapping etc) index bound, south INTEGER(iwp) :: iclw !< Parent-grid work array index bound, left INTEGER(iwp) :: icrw !< Parent-grid work array index bound, right INTEGER(iwp) :: jcnw !< Parent-grid work array index bound, north INTEGER(iwp) :: jcsw !< Parent-grid work array index bound, south REAL(wp), DIMENSION(1) :: dtl !< Time step size dtl = dt_3d IF ( cpl_id > 1 ) THEN ! !-- Child domain boundaries in the parent indice space. icl = coarse_bound(1) icr = coarse_bound(2) jcs = coarse_bound(3) jcn = coarse_bound(4) icla = coarse_bound_aux(1) icra = coarse_bound_aux(2) jcsa = coarse_bound_aux(3) jcna = coarse_bound_aux(4) iclw = coarse_bound_w(1) icrw = coarse_bound_w(2) jcsw = coarse_bound_w(3) jcnw = coarse_bound_w(4) IF ( direction == parent_to_child ) THEN CALL cpu_log( log_point_s(73), 'pmc child recv', 'start' ) CALL pmc_c_getbuffer( ) CALL cpu_log( log_point_s(73), 'pmc child recv', 'stop' ) CALL cpu_log( log_point_s(75), 'pmc interpolation', 'start' ) CALL pmci_interpolation CALL cpu_log( log_point_s(75), 'pmc interpolation', 'stop' ) ELSE ! !-- direction == child_to_parent CALL cpu_log( log_point_s(76), 'pmc anterpolation', 'start' ) CALL pmci_anterpolation CALL cpu_log( log_point_s(76), 'pmc anterpolation', 'stop' ) CALL cpu_log( log_point_s(74), 'pmc child send', 'start' ) CALL pmc_c_putbuffer( ) CALL cpu_log( log_point_s(74), 'pmc child send', 'stop' ) ENDIF ENDIF CONTAINS SUBROUTINE pmci_interpolation ! !-- A wrapper routine for all interpolation actions IMPLICIT NONE INTEGER(iwp) :: ibgp !< index running over the nbgp boundary ghost points in i-direction INTEGER(iwp) :: jbgp !< index running over the nbgp boundary ghost points in j-direction INTEGER(iwp) :: n !< running index for number of chemical species ! !-- In case of vertical nesting no interpolation is needed for the !-- horizontal boundaries IF ( nesting_mode /= 'vertical' ) THEN ! !-- Left border pe: IF ( bc_dirichlet_l ) THEN CALL pmci_interp_1sto_lr( u, uc, kcto, jflo, jfuo, kflo, kfuo, 'l', 'u' ) CALL pmci_interp_1sto_lr( v, vc, kcto, jflv, jfuv, kflo, kfuo, 'l', 'v' ) CALL pmci_interp_1sto_lr( w, wc, kctw, jflo, jfuo, kflw, kfuw, 'l', 'w' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN ! CALL pmci_interp_1sto_lr( e, ec, kcto, jflo, jfuo, kflo, kfuo, 'l', 'e' ) ! !-- Interpolation of e is replaced by the Neumann condition. DO ibgp = -nbgp, -1 e(nzb:nzt,nys:nyn,ibgp) = e(nzb:nzt,nys:nyn,0) ENDDO ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. rans_tke_e ) THEN CALL pmci_interp_1sto_lr( diss, dissc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDIF IF ( .NOT. 
neutral ) THEN CALL pmci_interp_1sto_lr( pt, ptc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_lr( q, q_c, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_lr( qc, qcc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) CALL pmci_interp_1sto_lr( nc, ncc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_lr( qr, qrc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) CALL pmci_interp_1sto_lr( nr, nrc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_lr( s, sc, kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_lr( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, jflo, jfuo, kflo, kfuo, 'l', 's' ) ENDDO ENDIF ENDIF ! !-- Right border pe IF ( bc_dirichlet_r ) THEN CALL pmci_interp_1sto_lr( u, uc, kcto, jflo, jfuo, kflo, kfuo, 'r', 'u' ) CALL pmci_interp_1sto_lr( v, vc, kcto, jflv, jfuv, kflo, kfuo, 'r', 'v' ) CALL pmci_interp_1sto_lr( w, wc, kctw, jflo, jfuo, kflw, kfuw, 'r', 'w' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN ! CALL pmci_interp_1sto_lr( e, ec, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, 'r', 'e' ) ! !-- Interpolation of e is replaced by the Neumann condition. DO ibgp = nx+1, nx+nbgp e(nzb:nzt,nys:nyn,ibgp) = e(nzb:nzt,nys:nyn,nx) ENDDO ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. rans_tke_e ) THEN CALL pmci_interp_1sto_lr( diss, dissc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDIF IF ( .NOT. neutral ) THEN CALL pmci_interp_1sto_lr( pt, ptc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_lr( q, q_c, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_lr( qc, qcc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) CALL pmci_interp_1sto_lr( nc, ncc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_lr( qr, qrc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) CALL pmci_interp_1sto_lr( nr, nrc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_lr( s, sc, kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_lr( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, jflo, jfuo, kflo, kfuo, 'r', 's' ) ENDDO ENDIF ENDIF ! !-- South border pe IF ( bc_dirichlet_s ) THEN CALL pmci_interp_1sto_sn( v, vc, kcto, iflo, ifuo, kflo, kfuo, 's', 'v' ) CALL pmci_interp_1sto_sn( w, wc, kctw, iflo, ifuo, kflw, kfuw, 's', 'w' ) CALL pmci_interp_1sto_sn( u, uc, kcto, iflu, ifuu, kflo, kfuo, 's', 'u' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN ! CALL pmci_interp_1sto_sn( e, ec, kcto, iflo, ifuo, kflo, kfuo, 's', 'e' ) ! !-- Interpolation of e is replaced by the Neumann condition. DO jbgp = -nbgp, -1 e(nzb:nzt,jbgp,nxl:nxr) = e(nzb:nzt,0,nxl:nxr) ENDDO ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. rans_tke_e ) THEN CALL pmci_interp_1sto_sn( diss, dissc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDIF IF ( .NOT. 
neutral ) THEN CALL pmci_interp_1sto_sn( pt, ptc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_sn( q, q_c, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_sn( qc, qcc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) CALL pmci_interp_1sto_sn( nc, ncc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_sn( qr, qrc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) CALL pmci_interp_1sto_sn( nr, nrc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_sn( s, sc, kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_sn( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, iflo, ifuo, kflo, kfuo, 's', 's' ) ENDDO ENDIF ENDIF ! !-- North border pe IF ( bc_dirichlet_n ) THEN CALL pmci_interp_1sto_sn( v, vc, kcto, iflo, ifuo, kflo, kfuo, 'n', 'v' ) CALL pmci_interp_1sto_sn( w, wc, kctw, iflo, ifuo, kflw, kfuw, 'n', 'w' ) CALL pmci_interp_1sto_sn( u, uc, kcto, iflu, ifuu, kflo, kfuo, 'n', 'u' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN ! CALL pmci_interp_1sto_sn( e, ec, kcto, iflo, ifuo, kflo, kfuo, 'n', 'e' ) ! !-- Interpolation of e is replaced by the Neumann condition. DO jbgp = ny+1, ny+nbgp e(nzb:nzt,jbgp,nxl:nxr) = e(nzb:nzt,ny,nxl:nxr) ENDDO ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. rans_tke_e ) THEN CALL pmci_interp_1sto_sn( diss, dissc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDIF IF ( .NOT. neutral ) THEN CALL pmci_interp_1sto_sn( pt, ptc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_sn( q, q_c, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_sn( qc, qcc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) CALL pmci_interp_1sto_sn( nc, ncc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_sn( qr, qrc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) CALL pmci_interp_1sto_sn( nr, nrc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_sn( s, sc, kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_sn( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, iflo, ifuo, kflo, kfuo, 'n', 's' ) ENDDO ENDIF ENDIF ENDIF ! IF ( nesting_mode /= 'vertical' ) ! !-- All PEs are top-border PEs CALL pmci_interp_1sto_t( w, wc, kctw, iflo, ifuo, jflo, jfuo, 'w' ) CALL pmci_interp_1sto_t( u, uc, kcto, iflu, ifuu, jflo, jfuo, 'u' ) CALL pmci_interp_1sto_t( v, vc, kcto, iflo, ifuo, jflv, jfuv, 'v' ) IF ( ( rans_mode_parent .AND. rans_mode ) .OR. & ( .NOT. rans_mode_parent .AND. .NOT. rans_mode .AND. & .NOT. constant_diffusion ) ) THEN ! CALL pmci_interp_1sto_t( e, ec, kcto, iflo, ifuo, jflo, jfuo, 'e' ) ! !-- Interpolation of e is replaced by the Neumann condition. e(nzt+1,nys:nyn,nxl:nxr) = e(nzt,nys:nyn,nxl:nxr) ENDIF IF ( rans_mode_parent .AND. rans_mode .AND. rans_tke_e ) THEN CALL pmci_interp_1sto_t( diss, dissc, kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDIF IF ( .NOT. 
neutral ) THEN CALL pmci_interp_1sto_t( pt, ptc, kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDIF IF ( humidity ) THEN CALL pmci_interp_1sto_t( q, q_c, kcto, iflo, ifuo, jflo, jfuo, 's' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_interp_1sto_t( qc, qcc, kcto, iflo, ifuo, jflo, jfuo, 's' ) CALL pmci_interp_1sto_t( nc, ncc, kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_interp_1sto_t( qr, qrc, kcto, iflo, ifuo, jflo, jfuo, 's' ) CALL pmci_interp_1sto_t( nr, nrc, kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_interp_1sto_t( s, sc, kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_interp_1sto_t( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, iflo, ifuo, jflo, jfuo, 's' ) ENDDO ENDIF END SUBROUTINE pmci_interpolation SUBROUTINE pmci_anterpolation ! !-- A wrapper routine for all anterpolation actions. !-- Note that TKE is not anterpolated. IMPLICIT NONE INTEGER(iwp) :: n !< running index for number of chemical species CALL pmci_anterp_tophat( u, uc, kcto, iflu, ifuu, jflo, jfuo, kflo, kfuo, ijkfc_u, 'u' ) CALL pmci_anterp_tophat( v, vc, kcto, iflo, ifuo, jflv, jfuv, kflo, kfuo, ijkfc_v, 'v' ) CALL pmci_anterp_tophat( w, wc, kctw, iflo, ifuo, jflo, jfuo, kflw, kfuw, ijkfc_w, 'w' ) ! !-- Anterpolation of TKE and dissipation rate if parent and child are in !-- RANS mode. IF ( rans_mode_parent .AND. rans_mode ) THEN CALL pmci_anterp_tophat( e, ec, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, ijkfc_s, 'e' ) ! !-- Anterpolation of dissipation rate only if TKE-e closure is applied. IF ( rans_tke_e ) THEN CALL pmci_anterp_tophat( diss, dissc, kcto, iflo, ifuo, jflo, jfuo,& kflo, kfuo, ijkfc_s, 'diss' ) ENDIF ENDIF IF ( .NOT. neutral ) THEN CALL pmci_anterp_tophat( pt, ptc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, ijkfc_s, 'pt' ) ENDIF IF ( humidity ) THEN CALL pmci_anterp_tophat( q, q_c, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, ijkfc_s, 'q' ) IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN CALL pmci_anterp_tophat( qc, qcc, kcto, iflo, ifuo, jflo, jfuo, & kflo, kfuo, ijkfc_s, 'qc' ) CALL pmci_anterp_tophat( nc, ncc, kcto, iflo, ifuo, jflo, jfuo, & kflo, kfuo, ijkfc_s, 'nc' ) ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN CALL pmci_anterp_tophat( qr, qrc, kcto, iflo, ifuo, jflo, jfuo, & kflo, kfuo, ijkfc_s, 'qr' ) CALL pmci_anterp_tophat( nr, nrc, kcto, iflo, ifuo, jflo, jfuo, & kflo, kfuo, ijkfc_s, 'nr' ) ENDIF ENDIF IF ( passive_scalar ) THEN CALL pmci_anterp_tophat( s, sc, kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, ijkfc_s, 's' ) ENDIF IF ( air_chemistry .AND. nest_chemistry ) THEN DO n = 1, nspec CALL pmci_anterp_tophat( chem_species(n)%conc, chem_spec_c(:,:,:,n), & kcto, iflo, ifuo, jflo, jfuo, kflo, kfuo, ijkfc_s, 's' ) ENDDO ENDIF END SUBROUTINE pmci_anterpolation SUBROUTINE pmci_interp_1sto_lr( f, fc, kct, jfl, jfu, kfl, kfu, edge, var ) ! !-- Interpolation of ghost-node values used as the child-domain boundary !-- conditions. This subroutine handles the left and right boundaries. 
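!
!-- The boundary value is formed as a weighted sum
!--    f = cb * f(boundary ghost parent cell) + cp * f(first prognostic parent cell),
!-- where cb = 1, cp = 0 corresponds to the 1st-order upwind option and
!-- cb = cp = 0.5 to the 2nd-order central option selected further below via
!-- interpolation_scheme_lrsn.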
IMPLICIT NONE REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg), INTENT(INOUT) :: f !< Child-grid array REAL(wp), DIMENSION(0:cg%nz+1,jcs:jcn,icl:icr), INTENT(IN) :: fc !< Parent-grid array INTEGER(iwp) :: kct !< The parent-grid index in z-direction just below the boundary value node INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfl !< Indicates start index of child cells belonging to certain parent cell - y direction INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfu !< Indicates end index of child cells belonging to certain parent cell - y direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfl !< Indicates start index of child cells belonging to certain parent cell - z direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfu !< Indicates end index of child cells belonging to certain parent cell - z direction CHARACTER(LEN=1), INTENT(IN) :: edge !< Edge symbol: 'l' or 'r' CHARACTER(LEN=1), INTENT(IN) :: var !< Variable symbol: 'u', 'v', 'w' or 's' ! !-- Local variables: INTEGER(iwp) :: ib !< Fixed i-index pointing to the node just behind the boundary-value node INTEGER(iwp) :: ibc !< Fixed i-index pointing to the boundary-value nodes (either i or iend) INTEGER(iwp) :: ibgp !< Index running over the redundant boundary ghost points in i-direction INTEGER(iwp) :: ierr !< MPI error code INTEGER(iwp) :: j !< Running index in the y-direction INTEGER(iwp) :: k !< Running index in the z-direction INTEGER(iwp) :: lbeg !< l-index pointing to the starting point of workarrc_lr in the l-direction INTEGER(iwp) :: lw !< Reduced l-index for workarrc_lr pointing to the boundary ghost node INTEGER(iwp) :: lwp !< Reduced l-index for workarrc_lr pointing to the first prognostic node INTEGER(iwp) :: m !< Parent-grid running index in the y-direction INTEGER(iwp) :: n !< Parent-grid running index in the z-direction REAL(wp) :: cb !< Interpolation coefficient for the boundary ghost node REAL(wp) :: cp !< Interpolation coefficient for the first prognostic node REAL(wp) :: f_interp_1 !< Value interpolated in x direction from the parent-grid data REAL(wp) :: f_interp_2 !< Auxiliary value interpolated in x direction from the parent-grid data ! !-- Check which edge is to be handled IF ( edge == 'l' ) THEN ! !-- For u, nxl is a ghost node, but not for the other variables IF ( var == 'u' ) THEN ibc = nxl ib = ibc - 1 lw = 2 lwp = lw ! This is redundant when var == 'u' lbeg = icl ELSE ibc = nxl - 1 ib = ibc - 1 lw = 1 lwp = 2 lbeg = icl ENDIF ELSEIF ( edge == 'r' ) THEN IF ( var == 'u' ) THEN ibc = nxr + 1 ib = ibc + 1 lw = 1 lwp = lw ! This is redundant when var == 'u' lbeg = icr - 2 ELSE ibc = nxr + 1 ib = ibc + 1 lw = 1 lwp = 0 lbeg = icr - 2 ENDIF ENDIF ! !-- Interpolation coefficients IF ( interpolation_scheme_lrsn == 1 ) THEN cb = 1.0_wp ! 1st-order upwind ELSE IF ( interpolation_scheme_lrsn == 2 ) THEN cb = 0.5_wp ! 2nd-order central ELSE cb = 0.5_wp ! 2nd-order central (default) ENDIF cp = 1.0_wp - cb ! !-- Substitute the necessary parent-grid data to the work array workarrc_lr. workarrc_lr = 0.0_wp IF ( pdims(2) > 1 ) THEN #if defined( __parallel ) IF ( bc_dirichlet_s ) THEN workarrc_lr(0:cg%nz+1,jcsw:jcnw-1,0:2) & = fc(0:cg%nz+1,jcsw:jcnw-1,lbeg:lbeg+2) ELSE IF ( bc_dirichlet_n ) THEN workarrc_lr(0:cg%nz+1,jcsw+1:jcnw,0:2) & = fc(0:cg%nz+1,jcsw+1:jcnw,lbeg:lbeg+2) ELSE workarrc_lr(0:cg%nz+1,jcsw+1:jcnw-1,0:2) & = fc(0:cg%nz+1,jcsw+1:jcnw-1,lbeg:lbeg+2) ENDIF ! !-- South-north exchange if more than one PE subdomain in the y-direction. 
!-- Note that in case of 3-D nesting the south (psouth == MPI_PROC_NULL) !-- and north (pnorth == MPI_PROC_NULL) boundaries are not exchanged !-- because the nest domain is not cyclic. !-- From south to north CALL MPI_SENDRECV( workarrc_lr(0,jcsw+1,0), 1, & workarrc_lr_exchange_type, psouth, 0, & workarrc_lr(0,jcnw,0), 1, & workarrc_lr_exchange_type, pnorth, 0, & comm2d, status, ierr ) ! !-- From north to south CALL MPI_SENDRECV( workarrc_lr(0,jcnw-1,0), 1, & workarrc_lr_exchange_type, pnorth, 1, & workarrc_lr(0,jcsw,0), 1, & workarrc_lr_exchange_type, psouth, 1, & comm2d, status, ierr ) #endif ELSE workarrc_lr(0:cg%nz+1,jcsw:jcnw,0:2) & = fc(0:cg%nz+1,jcsw:jcnw,lbeg:lbeg+2) ENDIF IF ( var == 'u' ) THEN DO m = jcsw, jcnw DO n = 0, kct DO j = jfl(m), jfu(m) DO k = kfl(n), kfu(n) f(k,j,ibc) = workarrc_lr(n,m,lw) ENDDO ENDDO ENDDO ENDDO ELSE IF ( var == 'v' ) THEN DO m = jcsw, jcnw-1 DO n = 0, kct ! !-- First interpolate to the flux point f_interp_1 = cb * workarrc_lr(n,m,lw) + cp * workarrc_lr(n,m,lwp) f_interp_2 = cb * workarrc_lr(n,m+1,lw) + cp * workarrc_lr(n,m+1,lwp) ! !-- Use averages of the neighbouring matching grid-line values DO j = jfl(m), jfl(m+1) f(kfl(n):kfu(n),j,ibc) = 0.5_wp * ( f_interp_1 + f_interp_2 ) ENDDO ! !-- Then set the values along the matching grid-lines IF ( MOD( jfl(m), jgsr ) == 0 ) THEN f(kfl(n):kfu(n),jfl(m),ibc) = f_interp_1 ENDIF ENDDO ENDDO ! !-- Finally, set the values along the last matching grid-line IF ( MOD( jfl(jcnw), jgsr ) == 0 ) THEN DO n = 0, kct f_interp_1 = cb * workarrc_lr(n,jcnw,lw) + cp * workarrc_lr(n,jcnw,lwp) f(kfl(n):kfu(n),jfl(jcnw),ibc) = f_interp_1 ENDDO ENDIF ! !-- A gap may still remain in some cases if the subdomain size is not !-- divisible by the grid-spacing ratio. In such a case, fill the !-- gap. Note however, this operation may produce some additional !-- momentum conservation error. IF ( jfl(jcnw) < nyn ) THEN DO n = 0, kct DO j = jfl(jcnw)+1, nyn f(kfl(n):kfu(n),j,ibc) = f(kfl(n):kfu(n),jfl(jcnw),ibc) ENDDO ENDDO ENDIF ELSE IF ( var == 'w' ) THEN DO m = jcsw, jcnw DO n = 0, kct + 1 ! It is important to go up to kct+1 ! !-- Interpolate to the flux point f_interp_1 = cb * workarrc_lr(n,m,lw) + cp * workarrc_lr(n,m,lwp) ! !-- First substitute only the matching-node values f(kfu(n),jfl(m):jfu(m),ibc) = f_interp_1 ENDDO ENDDO DO m = jcsw, jcnw DO n = 1, kct + 1 ! It is important to go up to kct+1 ! !-- Then fill up the nodes in between with the averages DO k = kfu(n-1)+1, kfu(n)-1 f(k,jfl(m):jfu(m),ibc) = 0.5_wp * ( f(kfu(n-1),jfl(m):jfu(m),ibc) & + f(kfu(n),jfl(m):jfu(m),ibc) ) ENDDO ENDDO ENDDO ELSE ! any scalar DO m = jcsw, jcnw DO n = 0, kct ! !-- Interpolate to the flux point f_interp_1 = cb * workarrc_lr(n,m,lw) + cp * workarrc_lr(n,m,lwp) DO j = jfl(m), jfu(m) DO k = kfl(n), kfu(n) f(k,j,ibc) = f_interp_1 ENDDO ENDDO ENDDO ENDDO ENDIF ! var ! !-- Fill up also the redundant 2nd and 3rd ghost-node layers IF ( edge == 'l' ) THEN DO ibgp = -nbgp, ib f(0:nzt+1,nysg:nyng,ibgp) = f(0:nzt+1,nysg:nyng,ibc) ENDDO ELSEIF ( edge == 'r' ) THEN DO ibgp = ib, nx+nbgp f(0:nzt+1,nysg:nyng,ibgp) = f(0:nzt+1,nysg:nyng,ibc) ENDDO ENDIF END SUBROUTINE pmci_interp_1sto_lr SUBROUTINE pmci_interp_1sto_sn( f, fc, kct, ifl, ifu, kfl, kfu, edge, var ) ! !-- Interpolation of ghost-node values used as the child-domain boundary !-- conditions. This subroutine handles the south and north boundaries. 
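!
!-- This routine mirrors pmci_interp_1sto_lr with the roles of x and y
!-- interchanged: the required parent-grid data are first copied into the
!-- three-cell-deep work array workarrc_sn and, if the subdomain has
!-- left/right neighbours, completed by an MPI exchange before the
!-- interpolation loops are entered.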
IMPLICIT NONE REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg), INTENT(INOUT) :: f !< Child-grid array REAL(wp), DIMENSION(0:cg%nz+1,jcs:jcn,icl:icr), INTENT(IN) :: fc !< Parent-grid array INTEGER(iwp) :: kct !< The parent-grid index in z-direction just below the boundary value node INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifl !< Indicates start index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifu !< Indicates end index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfl !< Indicates start index of child cells belonging to certain parent cell - z direction INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) :: kfu !< Indicates end index of child cells belonging to certain parent cell - z direction CHARACTER(LEN=1), INTENT(IN) :: edge !< Edge symbol: 's' or 'n' CHARACTER(LEN=1), INTENT(IN) :: var !< Variable symbol: 'u', 'v', 'w' or 's' ! !-- Local variables: INTEGER(iwp) :: i !< Running index in the x-direction INTEGER(iwp) :: ierr !< MPI error code INTEGER(iwp) :: jb !< Fixed j-index pointing to the node just behind the boundary-value node INTEGER(iwp) :: jbc !< Fixed j-index pointing to the boundary-value nodes (either j or jend) INTEGER(iwp) :: jbgp !< Index running over the redundant boundary ghost points in j-direction INTEGER(iwp) :: k !< Running index in the z-direction INTEGER(iwp) :: l !< Parent-grid running index in the x-direction INTEGER(iwp) :: mbeg !< m-index pointing to the starting point of workarrc_sn in the m-direction INTEGER(iwp) :: mw !< Reduced m-index for workarrc_sn pointing to the boundary ghost node INTEGER(iwp) :: mwp !< Reduced m-index for workarrc_sn pointing to the first prognostic node INTEGER(iwp) :: n !< Parent-grid running index in the z-direction REAL(wp) :: cb !< Interpolation coefficient for the boundary ghost node REAL(wp) :: cp !< Interpolation coefficient for the first prognostic node REAL(wp) :: f_interp_1 !< Value interpolated in y direction from the parent-grid data REAL(wp) :: f_interp_2 !< Auxiliary value interpolated in y direction from the parent-grid data ! !-- Check which edge is to be handled: south or north IF ( edge == 's' ) THEN ! !-- For v, nys is a ghost node, but not for the other variables IF ( var == 'v' ) THEN jbc = nys jb = jbc - 1 mw = 2 mwp = 2 ! This is redundant when var == 'v' mbeg = jcs ELSE jbc = nys - 1 jb = jbc - 1 mw = 1 mwp = 2 mbeg = jcs ENDIF ELSEIF ( edge == 'n' ) THEN IF ( var == 'v' ) THEN jbc = nyn + 1 jb = jbc + 1 mw = 1 mwp = 0 ! This is redundant when var == 'v' mbeg = jcn - 2 ELSE jbc = nyn + 1 jb = jbc + 1 mw = 1 mwp = 0 mbeg = jcn - 2 ENDIF ENDIF ! !-- Interpolation coefficients IF ( interpolation_scheme_lrsn == 1 ) THEN cb = 1.0_wp ! 1st-order upwind ELSE IF ( interpolation_scheme_lrsn == 2 ) THEN cb = 0.5_wp ! 2nd-order central ELSE cb = 0.5_wp ! 2nd-order central (default) ENDIF cp = 1.0_wp - cb ! !-- Substitute the necessary parent-grid data to the work array workarrc_sn. workarrc_sn = 0.0_wp IF ( pdims(1) > 1 ) THEN #if defined( __parallel ) IF ( bc_dirichlet_l ) THEN workarrc_sn(0:cg%nz+1,0:2,iclw:icrw-1) & = fc(0:cg%nz+1,mbeg:mbeg+2,iclw:icrw-1) ELSE IF ( bc_dirichlet_r ) THEN workarrc_sn(0:cg%nz+1,0:2,iclw+1:icrw) & = fc(0:cg%nz+1,mbeg:mbeg+2,iclw+1:icrw) ELSE workarrc_sn(0:cg%nz+1,0:2,iclw+1:icrw-1) & = fc(0:cg%nz+1,mbeg:mbeg+2,iclw+1:icrw-1) ENDIF ! !-- Left-right exchange if more than one PE subdomain in the x-direction. 
!-- Note that in case of 3-D nesting the left (pleft == MPI_PROC_NULL) and !-- right (pright == MPI_PROC_NULL) boundaries are not exchanged because !-- the nest domain is not cyclic. !-- From left to right CALL MPI_SENDRECV( workarrc_sn(0,0,iclw+1), 1, & workarrc_sn_exchange_type, pleft, 0, & workarrc_sn(0,0,icrw), 1, & workarrc_sn_exchange_type, pright, 0, & comm2d, status, ierr ) ! !-- From right to left CALL MPI_SENDRECV( workarrc_sn(0,0,icrw-1), 1, & workarrc_sn_exchange_type, pright, 1, & workarrc_sn(0,0,iclw), 1, & workarrc_sn_exchange_type, pleft, 1, & comm2d, status, ierr ) #endif ELSE workarrc_sn(0:cg%nz+1,0:2,iclw+1:icrw-1) & = fc(0:cg%nz+1,mbeg:mbeg+2,iclw+1:icrw-1) ENDIF IF ( var == 'v' ) THEN DO l = iclw, icrw DO n = 0, kct DO i = ifl(l), ifu(l) DO k = kfl(n), kfu(n) f(k,jbc,i) = workarrc_sn(n,mw,l) ENDDO ENDDO ENDDO ENDDO ELSE IF ( var == 'u' ) THEN DO l = iclw, icrw-1 DO n = 0, kct ! !-- First interpolate to the flux point f_interp_1 = cb * workarrc_sn(n,mw,l) + cp * workarrc_sn(n,mwp,l) f_interp_2 = cb * workarrc_sn(n,mw,l+1) + cp * workarrc_sn(n,mwp,l+1) ! !-- Use averages of the neighbouring matching grid-line values DO i = ifl(l), ifl(l+1) f(kfl(n):kfu(n),jbc,i) = 0.5_wp * ( f_interp_1 + f_interp_2 ) ENDDO ! !-- Then set the values along the matching grid-lines IF ( MOD( ifl(l), igsr ) == 0 ) THEN f(kfl(n):kfu(n),jbc,ifl(l)) = f_interp_1 ENDIF ENDDO ENDDO ! !-- Finally, set the values along the last matching grid-line IF ( MOD( ifl(icrw), igsr ) == 0 ) THEN DO n = 0, kct f_interp_1 = cb * workarrc_sn(n,mw,icrw) + cp * workarrc_sn(n,mwp,icrw) f(kfl(n):kfu(n),jbc,ifl(icrw)) = f_interp_1 ENDDO ENDIF ! !-- A gap may still remain in some cases if the subdomain size is not !-- divisible by the grid-spacing ratio. In such a case, fill the !-- gap. Note however, this operation may produce some additional !-- momentum conservation error. IF ( ifl(icrw) < nxr ) THEN DO n = 0, kct DO i = ifl(icrw)+1, nxr f(kfl(n):kfu(n),jbc,i) = f(kfl(n):kfu(n),jbc,ifl(icrw)) ENDDO ENDDO ENDIF ELSE IF ( var == 'w' ) THEN DO l = iclw, icrw DO n = 0, kct + 1 ! It is important to go up to kct+1 ! !-- Interpolate to the flux point f_interp_1 = cb * workarrc_sn(n,mw,l) + cp * workarrc_sn(n,mwp,l) ! !-- First substitute only the matching-node values f(kfu(n),jbc,ifl(l):ifu(l)) = f_interp_1 ENDDO ENDDO DO l = iclw, icrw DO n = 1, kct + 1 ! It is important to go up to kct+1 ! !-- Then fill up the nodes in between with the averages DO k = kfu(n-1)+1, kfu(n)-1 f(k,jbc,ifl(l):ifu(l)) = 0.5_wp * ( f(kfu(n-1),jbc,ifl(l):ifu(l)) & + f(kfu(n),jbc,ifl(l):ifu(l)) ) ENDDO ENDDO ENDDO ELSE ! Any scalar DO l = iclw, icrw DO n = 0, kct ! !-- Interpolate to the flux point f_interp_1 = cb * workarrc_sn(n,mw,l) + cp * workarrc_sn(n,mwp,l) DO i = ifl(l), ifu(l) DO k = kfl(n), kfu(n) f(k,jbc,i) = f_interp_1 ENDDO ENDDO ENDDO ENDDO ENDIF ! var ! !-- Fill up also the redundant 2nd and 3rd ghost-node layers IF ( edge == 's' ) THEN DO jbgp = -nbgp, jb f(0:nzt+1,jbgp,nxlg:nxrg) = f(0:nzt+1,jbc,nxlg:nxrg) ENDDO ELSEIF ( edge == 'n' ) THEN DO jbgp = jb, ny+nbgp f(0:nzt+1,jbgp,nxlg:nxrg) = f(0:nzt+1,jbc,nxlg:nxrg) ENDDO ENDIF END SUBROUTINE pmci_interp_1sto_sn SUBROUTINE pmci_interp_1sto_t( f, fc, kct, ifl, ifu, jfl, jfu, var ) ! !-- Interpolation of ghost-node values used as the child-domain boundary !-- conditions. This subroutine handles the top boundary. 
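!--    The boundary value at the child-grid top (k = nzt for w, k = nzt+1 for
!--    all other quantities) is obtained from the parent-grid work array
!--    workarrc_t by a vertical interpolation over the levels nw-1, nw and
!--    nw+1,
!--       f_interp = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l)
!--                + c33 * workarrc_t(nw+1,m,l),
!--    with (c31,c32,c33) = (0, 1, 0) for 1st-order upwind, (0.5, 0.5, 0) for
!--    2nd-order central and (2/6, 5/6, -1/6) for the default 3rd-order WS
!--    upwind-biased scheme. The vertical velocity w is copied directly from
!--    workarrc_t at the matching level.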
IMPLICIT NONE REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg), INTENT(INOUT) :: f !< Child-grid array REAL(wp), DIMENSION(0:cg%nz+1,jcs:jcn,icl:icr), INTENT(IN) :: fc !< Parent-grid array INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifl !< Indicates start index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) :: ifu !< Indicates end index of child cells belonging to certain parent cell - x direction INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfl !< Indicates start index of child cells belonging to certain parent cell - y direction INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) :: jfu !< Indicates end index of child cells belonging to certain parent cell - y direction INTEGER(iwp) :: kct !< The parent-grid index in z-direction just below the boundary value node CHARACTER(LEN=1), INTENT(IN) :: var !< Variable symbol: 'u', 'v', 'w' or 's' ! !-- Local variables: REAL(wp) :: c31 !< Interpolation coefficient for the 3rd-order WS scheme REAL(wp) :: c32 !< Interpolation coefficient for the 3rd-order WS scheme REAL(wp) :: c33 !< Interpolation coefficient for the 3rd-order WS scheme REAL(wp) :: f_interp_1 !< Value interpolated in z direction from the parent-grid data REAL(wp) :: f_interp_2 !< Auxiliary value interpolated in z direction from the parent-grid data INTEGER(iwp) :: i !< Child-grid index in x-direction INTEGER(iwp) :: iclc !< Lower i-index limit for copying fc-data to workarrc_t INTEGER(iwp) :: icrc !< Upper i-index limit for copying fc-data to workarrc_t INTEGER(iwp) :: ierr !< MPI error code INTEGER(iwp) :: j !< Child-grid index in y-direction INTEGER(iwp) :: jcsc !< Lower j-index limit for copying fc-data to workarrc_t INTEGER(iwp) :: jcnc !< Upper j-index limit for copying fc-data to workarrc_t INTEGER(iwp) :: k !< Vertical child-grid index fixed to the boundary-value level INTEGER(iwp) :: l !< Parent-grid index in x-direction INTEGER(iwp) :: m !< Parent-grid index in y-direction INTEGER(iwp) :: nw !< Reduced n-index for workarrc_t pointing to the boundary ghost node IF ( var == 'w' ) THEN k = nzt ELSE k = nzt + 1 ENDIF nw = 1 ! !-- Interpolation coefficients IF ( interpolation_scheme_t == 1 ) THEN c31 = 0.0_wp ! 1st-order upwind c32 = 1.0_wp c33 = 0.0_wp ELSE IF ( interpolation_scheme_t == 2 ) THEN c31 = 0.5_wp ! 2nd-order central c32 = 0.5_wp c33 = 0.0_wp ELSE c31 = 2.0_wp / 6.0_wp ! 3rd-order WS upwind biased (default) c32 = 5.0_wp / 6.0_wp c33 = -1.0_wp / 6.0_wp ENDIF ! !-- Substitute the necessary parent-grid data to the work array. !-- Note that the dimension of workarrc_t is (0:2,jcsw:jcnw,iclw:icrw), !-- And the jc?w and ic?w-index bounds depend on the location of the PE- !-- subdomain relative to the side boundaries. iclc = iclw + 1 icrc = icrw - 1 jcsc = jcsw + 1 jcnc = jcnw - 1 IF ( bc_dirichlet_l ) THEN iclc = iclw ENDIF IF ( bc_dirichlet_r ) THEN icrc = icrw ENDIF IF ( bc_dirichlet_s ) THEN jcsc = jcsw ENDIF IF ( bc_dirichlet_n ) THEN jcnc = jcnw ENDIF workarrc_t = 0.0_wp ! workarrc_t(-2:3,jcsc:jcnc,iclc:icrc) = fc(kct-2:kct+3,jcsc:jcnc,iclc:icrc) workarrc_t(0:2,jcsc:jcnc,iclc:icrc) = fc(kct:kct+2,jcsc:jcnc,iclc:icrc) ! !-- Left-right exchange if more than one PE subdomain in the x-direction. !-- Note that in case of 3-D nesting the left and right boundaries are !-- not exchanged because the nest domain is not cyclic. #if defined( __parallel ) IF ( pdims(1) > 1 ) THEN ! 
!-- From left to right CALL MPI_SENDRECV( workarrc_t(0,jcsw,iclw+1), 1, & workarrc_t_exchange_type_y, pleft, 0, & workarrc_t(0,jcsw,icrw), 1, & workarrc_t_exchange_type_y, pright, 0, & comm2d, status, ierr ) ! !-- From right to left CALL MPI_SENDRECV( workarrc_t(0,jcsw,icrw-1), 1, & workarrc_t_exchange_type_y, pright, 1, & workarrc_t(0,jcsw,iclw), 1, & workarrc_t_exchange_type_y, pleft, 1, & comm2d, status, ierr ) ENDIF ! !-- South-north exchange if more than one PE subdomain in the y-direction. !-- Note that in case of 3-D nesting the south and north boundaries are !-- not exchanged because the nest domain is not cyclic. IF ( pdims(2) > 1 ) THEN ! !-- From south to north CALL MPI_SENDRECV( workarrc_t(0,jcsw+1,iclw), 1, & workarrc_t_exchange_type_x, psouth, 2, & workarrc_t(0,jcnw,iclw), 1, & workarrc_t_exchange_type_x, pnorth, 2, & comm2d, status, ierr ) ! !-- From north to south CALL MPI_SENDRECV( workarrc_t(0,jcnw-1,iclw), 1, & workarrc_t_exchange_type_x, pnorth, 3, & workarrc_t(0,jcsw,iclw), 1, & workarrc_t_exchange_type_x, psouth, 3, & comm2d, status, ierr ) ENDIF #endif IF ( var == 'w' ) THEN DO l = iclw, icrw DO m = jcsw, jcnw DO i = ifl(l), ifu(l) DO j = jfl(m), jfu(m) f(k,j,i) = workarrc_t(nw,m,l) ENDDO ENDDO ENDDO ENDDO ELSE IF ( var == 'u' ) THEN DO l = iclw, icrw-1 DO m = jcsw, jcnw ! !-- First interpolate to the flux point using the 3rd-order WS scheme f_interp_1 = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l) + c33 * workarrc_t(nw+1,m,l) f_interp_2 = c31 * workarrc_t(nw-1,m,l+1) + c32 * workarrc_t(nw,m,l+1) + c33 * workarrc_t(nw+1,m,l+1) ! !-- Use averages of the neighbouring matching grid-line values DO i = ifl(l), ifl(l+1) ! f(k,jfl(m):jfu(m),i) = 0.5_wp * ( workarrc_t(nw,m,l) & ! + workarrc_t(nw,m,l+1) ) f(k,jfl(m):jfu(m),i) = 0.5_wp * ( f_interp_1 + f_interp_2 ) ENDDO ! !-- Then set the values along the matching grid-lines IF ( MOD( ifl(l), igsr ) == 0 ) THEN ! !-- First interpolate to the flux point using the 3rd-order WS scheme f_interp_1 = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l) + c33 * workarrc_t(nw+1,m,l) ! f(k,jfl(m):jfu(m),ifl(l)) = workarrc_t(nw,m,l) f(k,jfl(m):jfu(m),ifl(l)) = f_interp_1 ENDIF ENDDO ENDDO ! !-- Finally, set the values along the last matching grid-line IF ( MOD( ifl(icrw), igsr ) == 0 ) THEN DO m = jcsw, jcnw ! !-- First interpolate to the flux point using the 3rd-order WS scheme f_interp_1 = c31 * workarrc_t(nw-1,m,icrw) + c32 * workarrc_t(nw,m,icrw) + c33 * workarrc_t(nw+1,m,icrw) ! f(k,jfl(m):jfu(m),ifl(icrw)) = workarrc_t(nw,m,icrw) f(k,jfl(m):jfu(m),ifl(icrw)) = f_interp_1 ENDDO ENDIF ! !-- A gap may still remain in some cases if the subdomain size is not !-- divisible by the grid-spacing ratio. In such a case, fill the !-- gap. Note however, this operation may produce some additional !-- momentum conservation error. IF ( ifl(icrw) < nxr ) THEN DO m = jcsw, jcnw DO i = ifl(icrw)+1, nxr f(k,jfl(m):jfu(m),i) = f(k,jfl(m):jfu(m),ifl(icrw)) ENDDO ENDDO ENDIF ELSE IF ( var == 'v' ) THEN DO l = iclw, icrw DO m = jcsw, jcnw-1 ! !-- First interpolate to the flux point using the 3rd-order WS scheme f_interp_1 = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l) + c33 * workarrc_t(nw+1,m,l) f_interp_2 = c31 * workarrc_t(nw-1,m+1,l) + c32 * workarrc_t(nw,m+1,l) + c33 * workarrc_t(nw+1,m+1,l) ! !-- Use averages of the neighbouring matching grid-line values DO j = jfl(m), jfl(m+1) ! f(k,j,ifl(l):ifu(l)) = 0.5_wp * ( workarrc_t(nw,m,l) & ! + workarrc_t(nw,m+1,l) ) f(k,j,ifl(l):ifu(l)) = 0.5_wp * ( f_interp_1 + f_interp_2 ) ENDDO ! 
!
!--             Then set the values along the matching grid-lines
                IF ( MOD( jfl(m), jgsr ) == 0 )  THEN
                   f_interp_1 = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l) + c33 * workarrc_t(nw+1,m,l)
                   f(k,jfl(m),ifl(l):ifu(l)) = f_interp_1
                ENDIF

             ENDDO
          ENDDO
!
!--       Finally, set the values along the last matching grid-line
          IF ( MOD( jfl(jcnw), jgsr ) == 0 )  THEN
             DO  l = iclw, icrw
!
!--             First interpolate to the flux point using the 3rd-order WS scheme
                f_interp_1 = c31 * workarrc_t(nw-1,jcnw,l) + c32 * workarrc_t(nw,jcnw,l) + c33 * workarrc_t(nw+1,jcnw,l)
                f(k,jfl(jcnw),ifl(l):ifu(l)) = f_interp_1
             ENDDO
          ENDIF
!
!--       A gap may still remain in some cases if the subdomain size is not
!--       divisible by the grid-spacing ratio. In such a case, fill the
!--       gap. Note, however, that this operation may produce some additional
!--       momentum conservation error.
          IF ( jfl(jcnw) < nyn )  THEN
             DO  l = iclw, icrw
                DO  j = jfl(jcnw)+1, nyn
                   f(k,j,ifl(l):ifu(l)) = f(k,jfl(jcnw),ifl(l):ifu(l))
                ENDDO
             ENDDO
          ENDIF

       ELSE  ! any scalar variable

          DO  l = iclw, icrw
             DO  m = jcsw, jcnw
!
!--             First interpolate to the flux point using the 3rd-order WS scheme
                f_interp_1 = c31 * workarrc_t(nw-1,m,l) + c32 * workarrc_t(nw,m,l) + c33 * workarrc_t(nw+1,m,l)
                DO  i = ifl(l), ifu(l)
                   DO  j = jfl(m), jfu(m)
                      f(k,j,i) = f_interp_1
                   ENDDO
                ENDDO
             ENDDO
          ENDDO

       ENDIF  ! var
!
!--    Just fill up the redundant second ghost-node layer in case of var == w.
       IF ( var == 'w' )  THEN
          f(nzt+1,:,:) = f(nzt,:,:)
       ENDIF

    END SUBROUTINE pmci_interp_1sto_t



    SUBROUTINE pmci_anterp_tophat( f, fc, kct, ifl, ifu, jfl, jfu, kfl, kfu, ijkfc, var )
!
!--    Anterpolation of internal-node values to be used as the parent-domain
!--    values. This subroutine is based on the first-order numerical
!--    integration of the child-grid values contained within the anterpolation
!--    cell.

       IMPLICIT NONE

       REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg), INTENT(IN) ::  f   !< Child-grid array
       REAL(wp), DIMENSION(0:cg%nz+1,jcs:jcn,icl:icr), INTENT(INOUT) ::  fc   !< Parent-grid array

       INTEGER(iwp), DIMENSION(0:cg%nz+1,jcsa:jcna,icla:icra), INTENT(IN) ::  ijkfc  !< number of child grid points contributing to a parent grid box

       INTEGER(iwp), INTENT(IN) ::  kct  !< Top boundary index for anterpolation along z

       INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) ::  ifl  !< Indicates start index of child cells belonging to certain parent cell - x direction
       INTEGER(iwp), DIMENSION(icla:icra), INTENT(IN) ::  ifu  !< Indicates end index of child cells belonging to certain parent cell - x direction
       INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) ::  jfl  !< Indicates start index of child cells belonging to certain parent cell - y direction
       INTEGER(iwp), DIMENSION(jcsa:jcna), INTENT(IN) ::  jfu  !< Indicates end index of child cells belonging to certain parent cell - y direction
       INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) ::  kfl  !< Indicates start index of child cells belonging to certain parent cell - z direction
       INTEGER(iwp), DIMENSION(0:cg%nz+1), INTENT(IN) ::  kfu  !< Indicates end index of child cells belonging to certain parent cell - z direction

       CHARACTER(LEN=*), INTENT(IN) ::  var  !< Variable symbol: 'u', 'v', 'w' or 's'
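!
!--    For each parent-grid cell (n,m,l) within the anterpolation region the
!--    parent value fc(n,m,l) is replaced by the arithmetic mean of all
!--    non-topography child-grid values f(k,j,i) contained in that cell,
!--    i.e. cellsum / ijkfc(n,m,l). Topography points are masked out by
!--    testing bit var_flag of wall_flags_0. If the whole cell lies inside
!--    topography (ijkfc(n,m,l) = 0), the parent value is left unchanged.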
!
!--    Local variables:
       REAL(wp) ::  cellsum     !< sum of respective child cells belonging to parent cell

       INTEGER(iwp) ::  i         !< Running index x-direction - child grid
       INTEGER(iwp) ::  iclant    !< Left boundary index for anterpolation along x
       INTEGER(iwp) ::  icrant    !< Right boundary index for anterpolation along x
       INTEGER(iwp) ::  j         !< Running index y-direction - child grid
       INTEGER(iwp) ::  jcnant    !< North boundary index for anterpolation along y
       INTEGER(iwp) ::  jcsant    !< South boundary index for anterpolation along y
       INTEGER(iwp) ::  k         !< Running index z-direction - child grid
       INTEGER(iwp) ::  kcb = 0   !< Bottom boundary index for anterpolation along z
       INTEGER(iwp) ::  kctant    !< Top boundary index for anterpolation along z
       INTEGER(iwp) ::  l         !< Running index x-direction - parent grid
       INTEGER(iwp) ::  m         !< Running index y-direction - parent grid
       INTEGER(iwp) ::  n         !< Running index z-direction - parent grid
       INTEGER(iwp) ::  var_flag  !< bit number used to flag topography on respective grid

!
!--    Define the index bounds iclant, icrant, jcsant and jcnant.
!--    Note that kcb is simply zero, whereas kct enters here as a parameter and
!--    is determined in pmci_define_index_mapping.
!--    Note that the grid points used also for interpolation (from parent to
!--    child) are excluded from anterpolation, i.e. anterpolation is only from
!--    nzb:kct-1, as kct is used for interpolation. An additional buffer is
!--    also applied (default value of anterpolation_buffer_width = 2) in order
!--    to avoid unphysical accumulation of kinetic energy.
       iclant = icl
       icrant = icr
       jcsant = jcs
       jcnant = jcn
       kctant = kct - 1 - anterpolation_buffer_width
       kcb    = 0
       IF ( nesting_mode /= 'vertical' )  THEN
!
!--       Set the anterpolation buffers on the lateral boundaries
          iclant = MAX( icl, iplg + 3 + anterpolation_buffer_width )
          icrant = MIN( icr, iprg - 3 - anterpolation_buffer_width )
          jcsant = MAX( jcs, jpsg + 3 + anterpolation_buffer_width )
          jcnant = MIN( jcn, jpng - 3 - anterpolation_buffer_width )
       ENDIF
!
!--    Set masking bit for topography flags
       IF ( var == 'u' )  THEN
          var_flag = 1
       ELSEIF ( var == 'v' )  THEN
          var_flag = 2
       ELSEIF ( var == 'w' )  THEN
          var_flag = 3
       ELSE
          var_flag = 0
       ENDIF
!
!--    Note that l, m, and n are parent-grid indices and i, j, and k
!--    are child-grid indices.
       DO  l = iclant, icrant
          DO  m = jcsant, jcnant
!
!--          For simplicity anterpolate within buildings and under elevated
!--          terrain too
             DO  n = kcb, kctant
                cellsum = 0.0_wp
                DO  i = ifl(l), ifu(l)
                   DO  j = jfl(m), jfu(m)
                      DO  k = kfl(n), kfu(n)
                         cellsum = cellsum + MERGE( f(k,j,i), 0.0_wp,          &
                                        BTEST( wall_flags_0(k,j,i), var_flag ) )
                      ENDDO
                   ENDDO
                ENDDO
!
!--             In case all child-grid points are inside topography, i.e.
!--             ijkfc and cellsum are zero, the parent solution would also
!--             obtain a zero value at that grid point, which may cause
!--             problems in particular for the temperature. Therefore, in
!--             case cellsum is zero, keep the parent solution at this point.
                IF ( ijkfc(n,m,l) /= 0 )  THEN
                   fc(n,m,l) = cellsum / REAL( ijkfc(n,m,l), KIND=wp )
                ENDIF

             ENDDO
          ENDDO
       ENDDO

    END SUBROUTINE pmci_anterp_tophat

#endif
 END SUBROUTINE pmci_child_datatrans

! Description:
! ------------
!> Set boundary conditions for the prognostic quantities after interpolation
!> and anterpolation at upward- and downward-facing surfaces.
!> @todo: add Dirichlet boundary conditions for pot. temperature, humidity and
!> passive scalar.
!------------------------------------------------------------------------------!
SUBROUTINE pmci_boundary_conds USE chem_modules, & ONLY: ibc_cs_b USE control_parameters, & ONLY: ibc_pt_b, ibc_q_b, ibc_s_b, ibc_uv_b USE surface_mod, & ONLY: bc_h IMPLICIT NONE INTEGER(iwp) :: i !< Index along x-direction INTEGER(iwp) :: j !< Index along y-direction INTEGER(iwp) :: k !< Index along z-direction INTEGER(iwp) :: m !< Running index for surface type INTEGER(iwp) :: n !< running index for number of chemical species ! !-- Set Dirichlet boundary conditions for horizontal velocity components IF ( ibc_uv_b == 0 ) THEN ! !-- Upward-facing surfaces DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) u(k-1,j,i) = 0.0_wp v(k-1,j,i) = 0.0_wp ENDDO ! !-- Downward-facing surfaces DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) u(k+1,j,i) = 0.0_wp v(k+1,j,i) = 0.0_wp ENDDO ENDIF ! !-- Set Dirichlet boundary conditions for vertical velocity component !-- Upward-facing surfaces DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) w(k-1,j,i) = 0.0_wp ENDDO ! !-- Downward-facing surfaces DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) w(k+1,j,i) = 0.0_wp ENDDO ! !-- Set Neumann boundary conditions for potential temperature IF ( .NOT. neutral ) THEN IF ( ibc_pt_b == 1 ) THEN DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) pt(k-1,j,i) = pt(k,j,i) ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) pt(k+1,j,i) = pt(k,j,i) ENDDO ENDIF ENDIF ! !-- Set Neumann boundary conditions for humidity and cloud-physical quantities IF ( humidity ) THEN IF ( ibc_q_b == 1 ) THEN DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) q(k-1,j,i) = q(k,j,i) ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) q(k+1,j,i) = q(k,j,i) ENDDO ENDIF IF ( bulk_cloud_model .AND. microphysics_morrison ) THEN DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) nc(k-1,j,i) = 0.0_wp qc(k-1,j,i) = 0.0_wp ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) nc(k+1,j,i) = 0.0_wp qc(k+1,j,i) = 0.0_wp ENDDO ENDIF IF ( bulk_cloud_model .AND. microphysics_seifert ) THEN DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) nr(k-1,j,i) = 0.0_wp qr(k-1,j,i) = 0.0_wp ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) nr(k+1,j,i) = 0.0_wp qr(k+1,j,i) = 0.0_wp ENDDO ENDIF ENDIF ! !-- Set Neumann boundary conditions for passive scalar IF ( passive_scalar ) THEN IF ( ibc_s_b == 1 ) THEN DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) s(k-1,j,i) = s(k,j,i) ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) s(k+1,j,i) = s(k,j,i) ENDDO ENDIF ENDIF ! !-- Set Neumann boundary conditions for chemical species IF ( air_chemistry .AND. nest_chemistry ) THEN IF ( ibc_cs_b == 1 ) THEN DO n = 1, nspec DO m = 1, bc_h(0)%ns i = bc_h(0)%i(m) j = bc_h(0)%j(m) k = bc_h(0)%k(m) chem_species(n)%conc(k-1,j,i) = chem_species(n)%conc(k,j,i) ENDDO DO m = 1, bc_h(1)%ns i = bc_h(1)%i(m) j = bc_h(1)%j(m) k = bc_h(1)%k(m) chem_species(n)%conc(k+1,j,i) = chem_species(n)%conc(k,j,i) ENDDO ENDDO ENDIF ENDIF END SUBROUTINE pmci_boundary_conds END MODULE pmc_interface