Changeset 4626
- Timestamp: Jul 26, 2020 9:49:48 AM
- Location: palm/trunk/SOURCE
- Files: 2 edited
palm/trunk/SOURCE/dynamics_mod.f90
r4517 -> r4626. The changeset is purely a re-formatting of the file to the PALM coding standard; no functional code changes are made.

- The GPL license header and the surrounding separator lines were re-wrapped from the old ~80-column layout to the wider 100-column comment boxes of the coding standard; the wording and the copyright line are unchanged.
- A new entry was added below $Id$ in the "Former revisions" list: "file re-formatted to follow the PALM coding standard", followed by the now explicit header line "4517 2020-05-03 14:29:30Z raasch" above the existing note "added restart with MPI-IO for reading local arrays".
- The remaining revision entries (4505 schwenkel, 4495 resler, 4360 and 4359 suehring, 4347 suehring, 4281 schwenkel, 4097 suehring "Avoid overlong lines - limit is 132 characters per line") only lose trailing blanks or are re-wrapped to the wider line length.
- In the USE statements of the module (arrays_3d, control_parameters, grid_variables, indices, pmc_interface, surface_mod), the continuation characters (&) were moved from directly behind the code to the common right-hand margin of the new coding standard.
- The ONLY list of control_parameters was re-sorted alphabetically: humidity, neutral, passive_scalar and restart_string now appear at their alphabetical positions instead of being appended near the end of the list.
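For illustration, the continuation-character change looks roughly like this (taken from the USE grid_variables block above; the exact target column is not visible in the flattened diff, so the alignment shown here is approximate):

    ! old style (r4517): ampersands placed close behind the code
    USE grid_variables,                                                        &
        ONLY:  ddx,                                                            &
               ddy,                                                            &
               dx,                                                             &
               dy

    ! new style (r4626): ampersands aligned at a common right-hand margin near column 100
    USE grid_variables,                                                                            &
        ONLY:  ddx,                                                                                &
               ddy,                                                                                &
               dx,                                                                                 &
               dy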
- The PUBLIC lists of the module (dynamics_parin through dynamics_last_actions, and dynamics_module_enabled) were re-indented with the ampersands at the new margin.
- dynamics_parin: the NAMELIST /dynamics_parameters/ line was re-wrapped and a blank line was added after it.
- dynamics_check_data_output_ts: the CHARACTER declarations of dots_label and dots_unit now precede the INTEGER declarations of dots_max and dots_num.
- dynamics_check_data_output_pr: the declaration of dopr_unit was moved before unit and variable.
- dynamics_init_masks: the separator comment lines of the subroutine header were widened to 100 columns.
- dynamics_init_checks: the comment block describing the check for a realistic initial mixing ratio (realistic physical range, no more than 2 percent above the saturation mixing ratio, performed for every grid point rather than only for a vertical profile so that three-dimensional initialization is covered, error for an initial run but only a warning in a restart run) was re-wrapped to the wider line length; the check loop itself and the comment on merging the logical flag across all MPI ranks only lose trailing blanks.
- dynamics_header: the continuation lines of the 110 FORMAT descriptor were re-aligned.
- dynamics_actions: the comment that no calls for single grid points are allowed before and after the timestep (these calls are not within an i,j-loop) was re-wrapped, and a missing comment-marker line was added before the "avoid compiler warning about unused variables" note in the 'u-tendency' case.
- dynamics_prognostic_equations (i,j version): the declaration of i_omp_start was moved before j.
- dynamics_boundary_conditions: the IF statement that excludes the vertically nested fine grid (coupling_mode /= 'vnested_fine') from setting w_p(nzt:nzt+1,:,:) = 0.0 was re-wrapped, as was the comment on the temperature boundary condition at bottom and top (sea surface temperature of the coupled ocean model for ibc_pt_b = 2).
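For context, the mixing-ratio check that these comments describe boils down to the following loop. This is a sketch reassembled from the hunks above: the initialization of realistic_q, the j/k loop bounds, the temperature variable t_l and the Magnus-type formula for the saturation vapour pressure e_s are not part of this changeset and are assumptions; only the q_s formula and the 2-percent test appear in the diff.

    realistic_q = .TRUE.                              ! assumed initialization, not shown in the diff
    DO  i = nxl, nxr
       DO  j = nys, nyn                               ! assumed loop bounds
          DO  k = nzb+1, nzt                          ! assumed loop bounds
             t_l = pt(k,j,i) * exner(k)               ! absolute temperature (assumption)
!
!--          Saturation vapour pressure (assumed Magnus-type formula, not part of this changeset)
             e_s = 611.2_wp * EXP( 17.62_wp * ( t_l - 273.15_wp ) / ( t_l - 29.65_wp ) )
!
!--          Saturation mixing ratio from e_s and the hydrostatic pressure (from the diff)
             q_s = rd_d_rv * e_s / ( hyp(k) - e_s )
!
!--          Flag values that exceed saturation by more than 2 percent (from the diff)
             IF ( q(k,j,i) > 1.02_wp * q_s )  realistic_q = .FALSE.
          ENDDO
       ENDDO
    ENDDO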
- The comments on the bottom and top boundary conditions for the total water content (q_p set at k-1 because in the wall datatype the k coordinate belongs to the atmospheric grid point) and for the passive scalar (s_p at k-1) were re-wrapped.
- The comments explaining why the boundary values of v at nys and of u at nxl have to be restored after the prognostic step at inflow or nest boundaries, why this restoration is skipped in pure vertical nesting mode (lateral boundaries are then cyclic), and that lateral boundary conditions for TKE and dissipation are set in tcm_boundary_conds, were re-wrapped, together with the introduction of the radiation boundary conditions for the velocities at the respective outflow.
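The conditions these comments refer to are plain array assignments; two of them appear unchanged as context around the hunks (south-boundary case shown, the surrounding IF/ENDIF structure abbreviated here):

    ! Dirichlet inflow/nest boundary: restore the prognostic level at nys
    IF ( bc_dirichlet_s )  THEN
       v_p(:,nys,:) = v_p(:,nys-1,:)
    ENDIF

    ! Radiation outflow boundary: zero-gradient condition for scalar quantities
    IF ( bc_radiation_s )  THEN
       pt_p(:,nys-1,:) = pt_p(:,nys,:)
    ENDIF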
- The comment introducing the radiation boundary conditions for the velocities (the phase velocity is either set to the maximum value that ensures numerical stability, i.e. the CFL condition, or calculated after Orlanski (1976) and averaged along the outflow boundary) was re-wrapped.
- In the bc_radiation_s and bc_radiation_n blocks, the comments on calculating the phase speeds for u, v and w (first locally, then averaged along the outflow boundary) were re-wrapped, the MPI_ALLREDUCE calls that sum c_u_m_l, c_v_m_l and c_w_m_l into c_u_m, c_v_m and c_w_m over comm1dx were re-wrapped to the new line length, and the continuation lines of the radiation updates of u_p, v_p and w_p at j = -1/0 (south) and j = ny+1 (north) were re-aligned.
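Functionally nothing changes here. For reference, the pattern at each outflow boundary, reassembled from the re-wrapped south-boundary hunks, is the reduction of the locally computed phase speeds followed by the Orlanski-type update; the subsequent normalization of c_u_m, c_v_m and c_w_m lies outside the changed hunks and is omitted:

    #if defined( __parallel )
        IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
        CALL MPI_ALLREDUCE( c_u_m_l(nzb+1), c_u_m(nzb+1), nzt-nzb, MPI_REAL, MPI_SUM, comm1dx, ierr )
        IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
        CALL MPI_ALLREDUCE( c_v_m_l(nzb+1), c_v_m(nzb+1), nzt-nzb, MPI_REAL, MPI_SUM, comm1dx, ierr )
        IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
        CALL MPI_ALLREDUCE( c_w_m_l(nzb+1), c_w_m(nzb+1), nzt-nzb, MPI_REAL, MPI_SUM, comm1dx, ierr )
    #else
        c_u_m = c_u_m_l
        c_v_m = c_v_m_l
        c_w_m = c_w_m_l
    #endif

        ! Advect the boundary values with the averaged phase speed (south boundary, j = -1 / 0)
        DO  k = nzb+1, nzt+1
           DO  i = nxlg, nxrg
              u_p(k,-1,i) = u(k,-1,i) - dt_3d * tsc(2) * c_u_m(k) * ( u(k,-1,i) - u(k,0,i) ) * ddy
              v_p(k,0,i)  = v(k,0,i)  - dt_3d * tsc(2) * c_v_m(k) * ( v(k,0,i)  - v(k,1,i) ) * ddy
              w_p(k,-1,i) = w(k,-1,i) - dt_3d * tsc(2) * c_w_m(k) * ( w(k,-1,i) - w(k,0,i) ) * ddy
           ENDDO
        ENDDO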
- The bc_radiation_l and bc_radiation_r blocks received the same treatment: re-wrapped phase-speed comments, MPI_ALLREDUCE calls over comm1dy re-wrapped to the new line length, and re-aligned continuation lines in the radiation updates of u_p, v_p and w_p at i = 0/-1 (left) and i = nx+1 (right), which use ddx instead of ddy.
- dynamics_swap_timelevel: the separator lines of the subroutine header were widened to 100 columns.
- dynamics_3d_data_averaging and dynamics_data_output_2d: the descriptive header comments ("Sum up and time-average module-specific output quantities as well as allocate the array necessary for storing the average"; "Resorts the module-specific output quantity with indices (k,j,i) to a temporary array with indices (i,j,k) and sets the grid on which it is defined. Allowed values for grid are 'zu' and 'zw'.") were re-wrapped.
- dynamics_data_output_2d: the argument list (av, variable, found, grid, mode, local_pf, two_d, nzb_do, nzt_do, fill_value) was re-wrapped over two lines and the declarations of grid and variable were re-aligned.
- dynamics_data_output_3d: the descriptive header comment was re-wrapped.
- dynamics_rrd_global_ftn and dynamics_rrd_global_mpi: superfluous blank lines were removed.
- dynamics_rrd_local_ftn: the long argument list (k, the nx/ny index bounds on file and on the current subdomain, tmp_2d, tmp_3d, found) was re-wrapped over three lines, and the IF statement that only exists to avoid compiler warnings about unused variables was re-wrapped as well.
palm/trunk/SOURCE/exchange_horiz_mod.f90
r4474 -> r4626. As for dynamics_mod.f90, this is a pure re-formatting to the PALM coding standard.

- The GPL license header and the separator lines were re-wrapped to the 100-column layout; the copyright line is unchanged.
- A new entry "file re-formatted to follow the PALM coding standard" was added below $Id$ in the "Former revisions" list, followed by the now explicit header line "4474 2020-03-26 09:32:18Z raasch" above the existing note on the bugfix for the correct usage of alternative communicators with 1d-decompositions and in non-parallel mode. The older entries (4461, 4457, 4429 raasch, 4360 suehring, 3761 raasch, 3657 knoop) only lose trailing blanks.
- The module description ("Exchange of ghost point layers for subdomains (in parallel mode) and setting of cyclic lateral boundary conditions for the total domain") and the header comment of SUBROUTINE exchange_horiz (same text, plus the note that this routine is for REAL 3d-arrays) were re-wrapped.
- exchange_horiz: the USE statements for control_parameters (bc_lr_cyc, bc_ns_cyc; in parallel mode also grid_level, mg_switch_to_pe0, synchronous_exchange), cpulog and indices were re-aligned and trailing blanks were removed.
- A blank line was inserted after the declaration of the optional argument alternative_communicator, and the declaration of the three-dimensional dummy array ar was re-wrapped.
- The comments on the one-dimensional decompositions along y and along x (boundary values can be exchanged within the PE memory) and on the non-parallel case (explicit loops as a workaround because in GPU mode not all arrays are on the device yet and PGI compiler 12.5 has problems with array syntax) were re-wrapped.
- exchange_horiz_int: the header separator lines were widened, the declaration of nbgp_local was moved to the top of the integer declarations, the declaration of the dummy array ar was re-wrapped, the comments on the one-dimensional decompositions were re-wrapped, and the MPI_SENDRECV calls using type_yz_int(grid_level) and type_xz_int(grid_level) were re-wrapped so that the argument lists fill the wider lines.
- exchange_horiz_2d: the descriptive header ("Exchange of lateral (ghost) boundaries (parallel computers) and cyclic boundary conditions, respectively, for 2D-arrays") was re-wrapped, the ONLY list of control_parameters (bc_dirichlet_* and bc_radiation_*) was compacted onto fewer lines, and the MPI_SENDRECV calls using type_y (left/right) and type_x (south/north) were re-wrapped.
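The re-wrapped calls implement the usual ghost-layer swap: each PE sends its outermost interior columns to one neighbour while receiving that neighbour's columns into its own ghost cells, and for a one-dimensional decomposition the cyclic copy is done within the PE memory. An excerpt of the x-direction swap in exchange_horiz_2d, as it reads after this changeset (the surrounding IF logic that selects between the two cases is omitted here):

    ! One-dimensional decomposition along y: cyclic boundary values copied within the PE memory
    ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
    ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ! Otherwise: send left boundary, receive right one ...
    CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft, 0,                                          &
                       ar(nysg,nxr+1), 1, type_y, pright, 0,                                       &
                       comm2d, status, ierr )
    ! ... and send right boundary, receive left one
    CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,                                  &
                       ar(nysg,nxlg), 1, type_y, pleft, 1,                                         &
                       comm2d, status, ierr )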
- exchange_horiz_2d_byte and exchange_horiz_2d_int (for 2D 8-bit and 32-bit integer arrays, respectively) were changed in the same way: descriptive headers and comments re-wrapped to the 100-column boxes, the redundant second bc_radiation_* continuation line in the ONLY list of control_parameters dropped, the declaration of nbgp_local moved to the top of the integer declarations, the declarations of the dummy array ar re-wrapped, and the MPI_SENDRECV calls using type_y_byte/type_x_byte and type_y_int(grid_level)/type_x_int(grid_level) re-wrapped to the new line length.