!> @file pres.f90
!--------------------------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the terms of the GNU General
! Public License as published by the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
! implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
! Public License for more details.
!
! You should have received a copy of the GNU General Public License along with PALM. If not, see
! <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2021 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------------------------!
!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: pres.f90 4828 2021-01-05 11:21:41Z Giersch $
! Bugfix in OpenMP directives - intel compiler do not allow reduction operations on array elements
!
! 4719 2020-10-01 11:28:33Z pavelkrc
! Bugfix of previous commit (div_old discrepancy)
!
! 4717 2020-09-30 22:27:40Z pavelkrc
! Fixes and optimizations of OpenMP parallelization, formatting of OpenMP
! directives (J. Resler)
!
! 4651 2020-08-27 07:17:45Z raasch
! preprocessor branch for ibm removed
!
! 4649 2020-08-25 12:11:17Z raasch
! File re-formatted to follow the PALM coding standard
!
! 4457 2020-03-11 14:20:43Z raasch
! use statement for exchange horiz added
!
! 4360 2020-01-07 11:25:50Z suehring
! Introduction of wall_flags_total_0, which currently sets bits based on static
! topography information used in wall_flags_static_0
!
! 4329 2019-12-10 15:46:36Z motisi
! Renamed wall_flags_0 to wall_flags_static_0
!
! 4182 2019-08-22 15:20:23Z scharf
! Corrected "Former revisions" section
!
! 4015 2019-06-05 13:25:35Z raasch
! variable child_domain_nvn eliminated
!
! 3849 2019-04-01 16:35:16Z knoop
! OpenACC port for SPEC
!
! Revision 1.1  1997/07/24 11:24:44  raasch
! Initial revision
!
!
!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Compute the divergence of the provisional velocity field. Solve the Poisson equation for the
!> perturbation pressure. Compute the final velocities using this perturbation pressure. Compute the
!> remaining divergence.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE pres


    USE arrays_3d, &
        ONLY:  d, &
               ddzu, &
               ddzu_pres, &
               ddzw, &
               dzw, &
               p, &
               p_loc, &
               rho_air, &
               rho_air_zw, &
               tend, &
               u, &
               v, &
               w

    USE control_parameters, &
        ONLY:  bc_lr_cyc, &
               bc_ns_cyc, &
               bc_radiation_l, &
               bc_radiation_n, &
               bc_radiation_r, &
               bc_radiation_s, &
               child_domain, &
               conserve_volume_flow, &
               coupling_mode, &
               dt_3d, &
               gathered_size, &
               ibc_p_b, &
               ibc_p_t, &
               intermediate_timestep_count, &
               intermediate_timestep_count_max, &
               mg_switch_to_pe0_level, &
               nesting_offline, &
               psolver, &
               subdomain_size, &
               topography, &
               volume_flow, &
               volume_flow_area, &
               volume_flow_initial

    USE cpulog, &
        ONLY:  cpu_log, &
               log_point, &
               log_point_s

    USE exchange_horiz_mod, &
        ONLY:  exchange_horiz

    USE grid_variables, &
        ONLY:  ddx, &
               ddy

    USE indices, &
        ONLY:  nbgp, &
               ngp_2dh_outer, &
               nx, &
               nxl, &
               nxlg, &
               nxl_mg, &
               nxr, &
               nxrg, &
               nxr_mg, &
               ny, &
               nys, &
               nysg, &
               nys_mg, &
               nyn, &
               nyng, &
               nyn_mg, &
               nzb, &
               nzt, &
               nzt_mg, &
               wall_flags_total_0

    USE kinds

    USE pegrid

    USE pmc_interface, &
        ONLY:  nesting_mode

    USE poisfft_mod, &
        ONLY:  poisfft

    USE poismg_mod

    USE poismg_noopt_mod

    USE statistics, &
        ONLY:  statistic_regions, &
               sums_divnew_l, &
               sums_divold_l, &
               weight_pres, &
               weight_substep

    USE surface_mod, &
        ONLY:  bc_h

    IMPLICIT NONE

    INTEGER(iwp) ::  i  !< grid index in x-direction
    INTEGER(iwp) ::  j  !< grid index in y-direction
    INTEGER(iwp) ::  k  !< grid index in z-direction
    INTEGER(iwp) ::  m  !< running index over surface elements

    REAL(wp) ::  ddt_3d            !< inverse of the timestep dt_3d
    REAL(wp) ::  d_weight_pres     !< inverse of the pressure-solver weighting factor
    REAL(wp) ::  localsum          !< PE-local sum of absolute divergences
    REAL(wp) ::  threadsum         !< partial sum of the current OpenMP thread
    REAL(wp) ::  weight_pres_l     !< pressure-solver weighting factor of the current substep
    REAL(wp) ::  weight_substep_l  !< substep weighting factor of the current substep

    REAL(wp), DIMENSION(1:3) ::  volume_flow_l       !< subdomain contribution to the volume flow
    REAL(wp), DIMENSION(1:3) ::  volume_flow_offset  !< velocity offset restoring the initial volume flow
    REAL(wp), DIMENSION(1:nzt) ::  w_l    !< horizontal average of w at each height level
    REAL(wp), DIMENSION(1:nzt) ::  w_l_l  !< subdomain sum of w at each height level


    CALL cpu_log( log_point(8), 'pres', 'start' )

!
!-- Calculate quantities to be used locally
    ddt_3d = 1.0_wp / dt_3d
    IF ( intermediate_timestep_count == 0 )  THEN
!
!--    If pres is called before initial time step
       weight_pres_l    = 1.0_wp
       d_weight_pres    = 1.0_wp
       weight_substep_l = 1.0_wp
    ELSE
       weight_pres_l    = weight_pres(intermediate_timestep_count)
       d_weight_pres    = 1.0_wp / weight_pres(intermediate_timestep_count)
       weight_substep_l = weight_substep(intermediate_timestep_count)
    ENDIF

!
!-- Multigrid method expects array d to have one ghost layer.
!--
    IF ( psolver(1:9) == 'multigrid' )  THEN

       DEALLOCATE( d )
       ALLOCATE( d(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )

!
!--    Since p is later used to hold the weighted average of the substeps, it cannot be used in the
!--    iterative solver. Therefore, its initial value is stored on p_loc, which is then iteratively
!--    advanced in every substep.
       IF ( intermediate_timestep_count <= 1 )  THEN
          DO  i = nxl-1, nxr+1
             DO  j = nys-1, nyn+1
                DO  k = nzb, nzt+1
                   p_loc(k,j,i) = p(k,j,i)
                ENDDO
             ENDDO
          ENDDO
       ENDIF

    ELSEIF ( psolver == 'sor'  .AND.  intermediate_timestep_count <= 1 )  THEN

!
!--    Since p is later used to hold the weighted average of the substeps, it cannot be used in the
!--    iterative solver. Therefore, its initial value is stored on p_loc, which is then iteratively
!--    advanced in every substep.
       p_loc = p

    ENDIF

!
!-- Conserve the volume flow at the outflow in case of non-cyclic lateral boundary conditions
!-- WARNING: so far, this conservation does not work at the left/south boundary if the topography
!--          at the inflow differs from that at the outflow! For this case, volume_flow_area needs
!--          adjustment!
!
!-- Left/right
    IF ( conserve_volume_flow  .AND.  ( bc_radiation_l  .OR.  bc_radiation_r ) )  THEN

       volume_flow(1)   = 0.0_wp
       volume_flow_l(1) = 0.0_wp

       IF ( bc_radiation_l )  THEN
          i = 0
       ELSEIF ( bc_radiation_r )  THEN
          i = nx+1
       ENDIF

       DO  j = nys, nyn
!
!--       Sum up the volume flow through the left/right boundary
          DO  k = nzb+1, nzt
             volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)                               &
                                * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 1 ) )
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dy, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, MPI_SUM, comm1dy, ierr )
#else
       volume_flow = volume_flow_l
#endif
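
!
!--    The offset velocity, added uniformly over the outflow face, restores the initially
!--    prescribed volume flow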
       volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) / volume_flow_area(1)

       DO  j = nysg, nyng
          DO  k = nzb+1, nzt
             u(k,j,i) = u(k,j,i) + volume_flow_offset(1)                                           &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 1 ) )
          ENDDO
       ENDDO

    ENDIF

!
!-- South/north
    IF ( conserve_volume_flow  .AND.  ( bc_radiation_n  .OR.  bc_radiation_s ) )  THEN

       volume_flow(2)   = 0.0_wp
       volume_flow_l(2) = 0.0_wp

       IF ( bc_radiation_s )  THEN
          j = 0
       ELSEIF ( bc_radiation_n )  THEN
          j = ny+1
       ENDIF

       DO  i = nxl, nxr
!
!--       Sum up the volume flow through the south/north boundary
          DO  k = nzb+1, nzt
             volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)                               &
                                * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 2 ) )
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, MPI_SUM, comm1dx, ierr )
#else
       volume_flow = volume_flow_l
#endif
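
!
!--    As for the left/right boundary, add a uniform offset velocity that restores the initially
!--    prescribed volume flow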
       volume_flow_offset(2) = ( volume_flow_initial(2) - volume_flow(2) ) / volume_flow_area(2)

       DO  i = nxlg, nxrg
          DO  k = nzb+1, nzt
             v(k,j,i) = v(k,j,i) + volume_flow_offset(2)                                           &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 2 ) )
          ENDDO
       ENDDO

    ENDIF

!
!-- Remove mean vertical velocity in case Neumann conditions are used at both the bottom and the
!-- top boundary. With Neumann conditions at both vertical boundaries, the solver cannot remove
!-- mean vertical velocities. They should be removed, because incompressibility requires that the
!-- vertical gradient of vertical velocity is zero. Since w=0 at the solid surface, it must be
!-- zero everywhere.
!-- This must not be done in case of a 3d-nesting child domain, because a mean vertical velocity
!-- can physically exist in such a domain.
!-- Also in case of offline nesting, mean vertical velocities may exist (and must not be removed),
!-- caused by horizontal divergence/convergence of the large scale flow that is prescribed at the
!-- side boundaries.
!-- The removal cannot be done before the first initial time step because ngp_2dh_outer is not yet
!-- known then.
    IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1  .AND.  .NOT. nesting_offline                           &
         .AND.  .NOT. ( child_domain  .AND.  nesting_mode /= 'vertical' )                          &
         .AND.  intermediate_timestep_count /= 0 )  THEN
       w_l = 0.0_wp;  w_l_l = 0.0_wp
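
!
!--    Sum up w level by level over the horizontal grid points of this subdomain, skipping grid
!--    points inside topography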
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                w_l_l(k) = w_l_l(k) + w(k,j,i)                                                     &
                           * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 3 ) )
             ENDDO
          ENDDO
       ENDDO
#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, comm2d, ierr )
#else
       w_l = w_l_l
#endif
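
!
!--    Normalize by the number of horizontal grid points lying outside of obstacles to obtain the
!--    horizontal mean of w at each height level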
       DO  k = 1, nzt
          w_l(k) = w_l(k) / ngp_2dh_outer(k,0)
       ENDDO
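
!
!--    Subtract the horizontal mean from w at all grid points outside of topography, including the
!--    ghost layers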
       DO  i = nxlg, nxrg
          DO  j = nysg, nyng
             DO  k = nzb+1, nzt
                w(k,j,i) = w(k,j,i) - w_l(k)                                                       &
                           * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 3 ) )
             ENDDO
          ENDDO
       ENDDO
    ENDIF

!
!-- Compute the divergence of the provisional velocity field.
    CALL cpu_log( log_point_s(1), 'divergence', 'start' )

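!
!-- Initialize d with zero. For the multigrid solver this includes the ghost layer, for all other
!-- solvers only the prognostic part of the subdomain.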
    IF ( psolver(1:9) == 'multigrid' )  THEN
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                d(k,j,i) = 0.0_wp
             ENDDO
          ENDDO
       ENDDO
    ELSE
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(d)
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                d(k,j,i) = 0.0_wp
             ENDDO
          ENDDO
       ENDDO
    ENDIF

    localsum  = 0.0_wp
    threadsum = 0.0_wp

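!
!-- Compute the density-weighted divergence of the provisional velocity field, scaled with the
!-- inverse of the timestep and of the pressure-solver weighting factor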
!$OMP PARALLEL PRIVATE (i,j,k)
!$OMP DO SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(u, v, w, rho_air, rho_air_zw, ddzw, wall_flags_total_0) &
!$ACC PRESENT(d)
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = 1, nzt
             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * rho_air(k) * ddx +                           &
                          ( v(k,j+1,i) - v(k,j,i) ) * rho_air(k) * ddy +                           &
                          ( w(k,j,i) * rho_air_zw(k) - w(k-1,j,i) * rho_air_zw(k-1) )              &
                          * ddzw(k) ) * ddt_3d * d_weight_pres                                     &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 0 ) )
          ENDDO
       ENDDO
    ENDDO
!$OMP END PARALLEL

!
!-- Compute possible PE-sum of divergences for flow_statistics. Carry out computation only at last
!-- Runge-Kutta substep.
    IF ( intermediate_timestep_count == intermediate_timestep_count_max  .OR.                      &
         intermediate_timestep_count == 0 )  THEN
!$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
!$OMP DO SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i,j,k) &
!$ACC REDUCTION(+:threadsum) COPY(threadsum) &
!$ACC PRESENT(d)
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                threadsum = threadsum + ABS( d(k,j,i) )
             ENDDO
          ENDDO
       ENDDO
       localsum = localsum + threadsum * dt_3d * weight_pres_l
!$OMP END PARALLEL
    ENDIF

!
!-- For completeness, set the divergence sum of all statistic regions to those of the total domain
    IF ( intermediate_timestep_count == intermediate_timestep_count_max  .OR.                      &
         intermediate_timestep_count == 0 )  THEN
       sums_divold_l(0:statistic_regions) = localsum
    ENDIF

    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )

!
!-- Compute the pressure perturbation solving the Poisson equation
    IF ( psolver == 'poisfft' )  THEN

!
!--    Solve Poisson equation via FFT and solution of tridiagonal matrices
       CALL poisfft( d )

!
!--    Store computed perturbation pressure and set boundary condition in z-direction
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(d, tend)
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                tend(k,j,i) = d(k,j,i)
             ENDDO
          ENDDO
       ENDDO

!
!--    Bottom boundary:
!--    This condition is only required for internal output. The pressure gradient
!--    (dp(nzb+1)-dp(nzb))/dz is not used anywhere else.
       IF ( ibc_p_b == 1 )  THEN
!
!--       Neumann (dp/dz = 0). Using surface data type, first for non-natural surfaces, then for
!--       natural and urban surfaces
!--       Upward facing
!$OMP PARALLEL DO PRIVATE( m, i, j, k ) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP PRIVATE(i, j, k) &
!$ACC PRESENT(bc_h, tend)
          DO  m = 1, bc_h(0)%ns
             i = bc_h(0)%i(m)
             j = bc_h(0)%j(m)
             k = bc_h(0)%k(m)
             tend(k-1,j,i) = tend(k,j,i)
          ENDDO
!
!--       Downward facing
!$OMP PARALLEL DO PRIVATE( m, i, j, k ) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP PRIVATE(i, j, k) &
!$ACC PRESENT(bc_h, tend)
          DO  m = 1, bc_h(1)%ns
             i = bc_h(1)%i(m)
             j = bc_h(1)%j(m)
             k = bc_h(1)%k(m)
             tend(k+1,j,i) = tend(k,j,i)
          ENDDO

       ELSE
!
!--       Dirichlet. Using surface data type, first for non-natural surfaces, then for natural and
!--       urban surfaces
!--       Upward facing
!$OMP PARALLEL DO PRIVATE( m, i, j, k ) SCHEDULE( STATIC )
          DO  m = 1, bc_h(0)%ns
             i = bc_h(0)%i(m)
             j = bc_h(0)%j(m)
             k = bc_h(0)%k(m)
             tend(k-1,j,i) = 0.0_wp
          ENDDO
!
!--       Downward facing
!$OMP PARALLEL DO PRIVATE( m, i, j, k ) SCHEDULE( STATIC )
          DO  m = 1, bc_h(1)%ns
             i = bc_h(1)%i(m)
             j = bc_h(1)%j(m)
             k = bc_h(1)%k(m)
             tend(k+1,j,i) = 0.0_wp
          ENDDO

       ENDIF

!
!--    Top boundary
       IF ( ibc_p_t == 1 )  THEN
!
!--       Neumann
!$OMP PARALLEL DO PRIVATE (i,j) SCHEDULE( STATIC )
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = tend(nzt,j,i)
             ENDDO
          ENDDO

       ELSE
!
!--       Dirichlet
!$OMP PARALLEL DO PRIVATE (i,j) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(2) PRIVATE(i, j) &
!$ACC PRESENT(tend)
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = 0.0_wp
             ENDDO
          ENDDO

       ENDIF

!
!--    Exchange boundaries for p
       CALL exchange_horiz( tend, nbgp )

    ELSEIF ( psolver == 'sor' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using SOR-Red/Black scheme
       CALL sor( d, ddzu_pres, ddzw, p_loc )
       tend = p_loc

    ELSEIF ( psolver(1:9) == 'multigrid' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using Multigrid scheme, array tend is used
!--    to store the residuals.

!--    If the number of grid points of the gathered grid, which is collected on PE0, is larger
!--    than the number of grid points of a PE, then array tend will be enlarged.
       IF ( gathered_size > subdomain_size )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(nzb:nzt_mg(mg_switch_to_pe0_level)+1,nys_mg(                              &
                    mg_switch_to_pe0_level)-1:nyn_mg(mg_switch_to_pe0_level)+1,                    &
                    nxl_mg(mg_switch_to_pe0_level)-1:nxr_mg(mg_switch_to_pe0_level)+1) )
       ENDIF

       IF ( psolver == 'multigrid' )  THEN
          CALL poismg( tend )
       ELSE
          CALL poismg_noopt( tend )
       ENDIF

       IF ( gathered_size > subdomain_size )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
       ENDIF

!
!--    Restore perturbation pressure on tend because this array is used further below to correct
!--    the velocity fields
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                tend(k,j,i) = p_loc(k,j,i)
             ENDDO
          ENDDO
       ENDDO

    ENDIF

!
!-- Store perturbation pressure on array p, used for pressure data output.
!-- Ghost layers are added in the output routines (except sor-method: see below)
    IF ( intermediate_timestep_count <= 1 )  THEN
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(p, tend)
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                p(k,j,i) = tend(k,j,i) * weight_substep_l
             ENDDO
          ENDDO
       ENDDO

    ELSEIF ( intermediate_timestep_count > 1 )  THEN
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(p, tend)
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                p(k,j,i) = p(k,j,i) + tend(k,j,i) * weight_substep_l
             ENDDO
          ENDDO
       ENDDO

    ENDIF

!
!-- SOR-method needs ghost layers for the next timestep
    IF ( psolver == 'sor' )  CALL exchange_horiz( p, nbgp )

!
!-- Correction of the provisional velocities with the current perturbation pressure just computed
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .OR.  bc_ns_cyc ) )  THEN
       volume_flow_l(1) = 0.0_wp
       volume_flow_l(2) = 0.0_wp
    ENDIF
!
!-- Add pressure gradients to the velocity components. Note, the loops are running over the entire
!-- model domain, even though, in case of non-cyclic boundaries, the u- and v-component are not
!-- prognostic at i=0 and j=0, respectively. However, in case of Dirichlet boundary conditions for
!-- the velocities, zero-gradient conditions for the pressure are set, so that no modification is
!-- imposed at the boundaries.
!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(2) PRIVATE(i, j, k) &
!$ACC PRESENT(u, v, w, tend, ddzu, wall_flags_total_0)
    DO  i = nxl, nxr
       DO  j = nys, nyn

          DO  k = nzb+1, nzt
             w(k,j,i) = w(k,j,i) - dt_3d * ( tend(k+1,j,i) - tend(k,j,i) ) * ddzu(k+1)             &
                        * weight_pres_l                                                            &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 3 ) )
          ENDDO

          DO  k = nzb+1, nzt
             u(k,j,i) = u(k,j,i) - dt_3d * ( tend(k,j,i) - tend(k,j,i-1) ) * ddx * weight_pres_l   &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 1 ) )
          ENDDO

          DO  k = nzb+1, nzt
             v(k,j,i) = v(k,j,i) - dt_3d * ( tend(k,j,i) - tend(k,j-1,i) ) * ddy * weight_pres_l   &
                        * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 2 ) )
          ENDDO

       ENDDO
    ENDDO

!
!-- The vertical velocity is not set to zero at nzt + 1 for nested domains. Instead it is set to
!-- the values of nzt (see routine vnest_boundary_conds or pmci_interp_tril_t) BEFORE calling the
!-- pressure solver. To avoid jumps while plotting profiles, w at the top has to be set to the
!-- values at height nzt after the above modifications. Hint: w level nzt+1 does not impact
!-- results.
    IF ( child_domain  .OR.  coupling_mode == 'vnested_fine' )  THEN
       w(nzt+1,:,:) = w(nzt,:,:)
    ENDIF

!
!-- Sum up the volume flow through the right and north boundary
    IF ( conserve_volume_flow  .AND.  bc_lr_cyc  .AND.  bc_ns_cyc  .AND.  nxr == nx )  THEN
       threadsum = 0.0_wp
!
!--    Summation of the volume flow is done on threadsum rather than on volume_flow itself.
!--    This is because the Intel compiler, when OpenMP is used, does not allow reduction
!--    operations on array elements.
!$OMP PARALLEL DO PRIVATE (j,k) REDUCTION (+:threadsum)
       DO  j = nys, nyn
          DO  k = nzb+1, nzt
             threadsum = threadsum + u(k,j,nxr) * dzw(k)                                           &
                         * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,nxr), 1 ) )
          ENDDO
       ENDDO
       volume_flow_l(1) = threadsum

    ENDIF

    IF ( conserve_volume_flow  .AND.  bc_ns_cyc  .AND.  bc_lr_cyc  .AND.  nyn == ny )  THEN
       threadsum = 0.0_wp
!$OMP PARALLEL DO PRIVATE (j,k) REDUCTION (+:threadsum)
       DO  i = nxl, nxr
          DO  k = nzb+1, nzt
             threadsum = threadsum + v(k,nyn,i) * dzw(k)                                           &
                         * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,nyn,i), 2 ) )
          ENDDO
       ENDDO
       volume_flow_l(2) = threadsum

    ENDIF

!
!-- Conserve the volume flow
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .AND.  bc_ns_cyc ) )  THEN

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, MPI_SUM, comm2d, ierr )
#else
       volume_flow = volume_flow_l
#endif
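
!
!--    Compute the offset velocities from the deviation of the current volume flow from its
!--    initial value and add them uniformly to u and v outside of topography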
       volume_flow_offset(1:2) = ( volume_flow_initial(1:2) - volume_flow(1:2) )                   &
                                 / volume_flow_area(1:2)

!$OMP PARALLEL DO PRIVATE (i,j,k) SCHEDULE( STATIC )
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                u(k,j,i) = u(k,j,i) + volume_flow_offset(1)                                        &
                           * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 1 ) )
             ENDDO
             DO  k = nzb+1, nzt
                v(k,j,i) = v(k,j,i) + volume_flow_offset(2)                                        &
                           * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 2 ) )
             ENDDO
          ENDDO
       ENDDO

    ENDIF

!
!-- Exchange of boundaries for the velocities
    CALL exchange_horiz( u, nbgp )
    CALL exchange_horiz( v, nbgp )
    CALL exchange_horiz( w, nbgp )

!
!-- Compute the divergence of the corrected velocity field.
!-- A possible PE-sum is computed in flow_statistics. Carry out computation only at last
!-- Runge-Kutta step.
    IF ( intermediate_timestep_count == intermediate_timestep_count_max  .OR.                      &
         intermediate_timestep_count == 0 )  THEN
       CALL cpu_log( log_point_s(1), 'divergence', 'start' )
       sums_divnew_l = 0.0_wp

!
!--    d must be reset to zero because it can contain nonzero values below the topography
       IF ( topography /= 'flat' )  d = 0.0_wp

       localsum  = 0.0_wp
       threadsum = 0.0_wp

!$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
!$OMP DO SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC PRESENT(u, v, w, rho_air, rho_air_zw, ddzw, wall_flags_total_0) &
!$ACC PRESENT(d)
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * rho_air(k) * ddx +                        &
                             ( v(k,j+1,i) - v(k,j,i) ) * rho_air(k) * ddy +                        &
                             ( w(k,j,i) * rho_air_zw(k) - w(k-1,j,i) * rho_air_zw(k-1) )           &
                             * ddzw(k) )                                                           &
                           * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_total_0(k,j,i), 0 ) )
             ENDDO
          ENDDO
       ENDDO
!
!--    Compute possible PE-sum of divergences for flow_statistics
!$OMP DO SCHEDULE( STATIC )
!$ACC PARALLEL LOOP COLLAPSE(3) PRIVATE(i, j, k) &
!$ACC REDUCTION(+:threadsum) COPY(threadsum) &
!$ACC PRESENT(d)
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                threadsum = threadsum + ABS( d(k,j,i) )
             ENDDO
          ENDDO
       ENDDO

       localsum = localsum + threadsum
!$OMP END PARALLEL

!
!--    For completeness, set the divergence sum of all statistic regions to those of the total
!--    domain
       sums_divnew_l(0:statistic_regions) = localsum

       CALL cpu_log( log_point_s(1), 'divergence', 'stop' )

    ENDIF

    CALL cpu_log( log_point(8), 'pres', 'stop' )

 END SUBROUTINE pres