source: palm/trunk/SOURCE/pres.f90 @ 1112

1 SUBROUTINE pres
2
3!--------------------------------------------------------------------------------!
4! This file is part of PALM.
5!
6! PALM is free software: you can redistribute it and/or modify it under the terms
7! of the GNU General Public License as published by the Free Software Foundation,
8! either version 3 of the License, or (at your option) any later version.
9!
10! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
11! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
12! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
13!
14! You should have received a copy of the GNU General Public License along with
15! PALM. If not, see <http://www.gnu.org/licenses/>.
16!
17! Copyright 1997-2012  Leibniz University Hannover
18!--------------------------------------------------------------------------------!
19!
20! Current revisions:
21! -----------------
22!
23!
24! Former revisions:
25! -----------------
26! $Id: pres.f90 1112 2013-03-09 00:34:37Z raasch $
27!
28! 1111 2013-03-08 23:54:10Z
29! OpenACC statements added,
30! ibc_p_b = 2 removed
31!
32! 1092 2013-02-02 11:24:22Z raasch
33! unused variables removed
34!
35! 1036 2012-10-22 13:43:42Z raasch
36! code put under GPL (PALM 3.9)
37!
38! 1003 2012-09-14 14:35:53Z raasch
39! adjustment of array tend for cases with unequal subdomain sizes removed
40!
41! 778 2011-11-07 14:18:25Z fricke
42! New allocation of tend when multigrid is used and the collected field on PE0
43! has more grid points than the subdomain of a PE.
44!
45! 719 2011-04-06 13:05:23Z gryschka
46! Bugfix in volume flow control for double cyclic boundary conditions
47!
48! 709 2011-03-30 09:31:40Z raasch
49! formatting adjustments
50!
51! 707 2011-03-29 11:39:40Z raasch
52! Calculation of weighted average of p is now handled in the same way
53! regardless of the number of ghost layers (advection scheme),
54! multigrid and sor method are using p_loc for iterative advancements of
55! pressure,
56! localsum calculation modified for proper OpenMP reduction,
57! bc_lr/ns replaced by bc_lr/ns_cyc
58!
59! 693 2011-03-08 09:..:..Z raasch
60! bugfix: weighting coefficient added to ibm branch
61!
62! 680 2011-02-04 23:16:06Z gryschka
63! bugfix: collective_wait
64!
65! 675 2011-01-19 10:56:55Z suehring
66! Removed bugfix while copying tend.
67!
68! 673 2011-01-18 16:19:48Z suehring
69! Weighting coefficients added for right computation of the pressure during
70! Runge-Kutta substeps.
71!
72! 667 2010-12-23 12:06:00Z suehring/gryschka
73! New allocation of tend when ws-scheme and multigrid is used. This is due to
74! reasons of perforance of the data_exchange. The same is done with p after
75! poismg is called.
76! nxl-1, nxr+1, nys-1, nyn+1 replaced by nxlg, nxrg, nysg, nyng when no
77! multigrid is used. Calls of exchange_horiz are modified.
78! bugfix: After pressure correction no volume flow correction in case of
79! non-cyclic boundary conditions
80! (has to be done only before pressure correction)
81! Call of SOR routine is referenced with ddzu_pres.
82!
83! 622 2010-12-10 08:08:13Z raasch
84! optional barriers included in order to speed up collective operations
85!
86! 151 2008-03-07 13:42:18Z raasch
87! Bugfix in volume flow control for non-cyclic boundary conditions
88!
89! 106 2007-08-16 14:30:26Z raasch
90! Volume flow conservation added for the remaining three outflow boundaries
91!
92! 85 2007-05-11 09:35:14Z raasch
93! Division by dt_3d replaced by multiplication with the inverse.
94! For performance optimisation, this is done in the loop calculating the
95! divergence instead of using a separate loop.
96!
97! 75 2007-03-22 09:54:05Z raasch
98! Volume flow control for non-cyclic boundary conditions added (currently only
99! for the north boundary!!), 2nd+3rd argument removed from exchange_horiz,
100! mean vertical velocity is removed in case of Neumann boundary conditions
101! both at the bottom and the top
102!
103! RCS Log replaced by Id keyword, revision history cleaned up
104!
105! Revision 1.25  2006/04/26 13:26:12  raasch
106! OpenMP optimization (+localsum, threadsum)
107!
108! Revision 1.1  1997/07/24 11:24:44  raasch
109! Initial revision
110!
111!
112! Description:
113! ------------
114! Compute the divergence of the provisional velocity field. Solve the Poisson
115! equation for the perturbation pressure. Compute the final velocities using
116! this perturbation pressure. Compute the remaining divergence.
117!------------------------------------------------------------------------------!
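! In schematic form (a condensed sketch of the steps coded below, using the
! variable names of this routine):
!
!    d  = ( du/dx + dv/dy + dw/dz ) / ( dt_3d * weight_pres )   (provisional u,v,w)
!    solve   Laplace(p') = d      for the perturbation pressure p'
!    u  = u - dt_3d * weight_pres * dp'/dx    (v and w analogously)
!
! which removes the divergence of the provisional velocity field up to the
! accuracy of the chosen pressure solver.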
118
119    USE arrays_3d
120    USE constants
121    USE control_parameters
122    USE cpulog
123    USE grid_variables
124    USE indices
125    USE interfaces
126    USE pegrid
127    USE poisfft_mod
128    USE poisfft_hybrid_mod
129    USE statistics
130
131    IMPLICIT NONE
132
133    INTEGER ::  i, j, k
134
135    REAL    ::  ddt_3d, localsum, threadsum, d_weight_pres
136
137    REAL, DIMENSION(1:2) ::  volume_flow_l, volume_flow_offset
138    REAL, DIMENSION(1:nzt) ::  w_l, w_l_l
139
140
141    CALL cpu_log( log_point(8), 'pres', 'start' )
142
143
144    ddt_3d = 1.0 / dt_3d
145    d_weight_pres = 1.0 / weight_pres(intermediate_timestep_count)
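!
!-- ddt_3d and d_weight_pres are the inverses of the timestep and of the
!-- current Runge-Kutta substep weight. The divergence computed below is
!-- multiplied by these inverses instead of being divided (see the r85 log
!-- entry above); the velocity correction at the end of this routine
!-- multiplies by dt_3d * weight_pres again, so both factors cancel in the
!-- corrected field.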
146
147!
148!-- Multigrid method expects array d to have one ghost layer.
149!--
150    IF ( psolver == 'multigrid' )  THEN
151     
152       DEALLOCATE( d )
153       ALLOCATE( d(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) ) 
154
155!
156!--    Since p is later used to hold the weighted average of the substeps, it
157!--    cannot be used in the iterative solver. Therefore, its initial value is
158!--    stored on p_loc, which is then iteratively advanced in every substep.
159       IF ( intermediate_timestep_count == 1 )  THEN
160          DO  i = nxl-1, nxr+1
161             DO  j = nys-1, nyn+1
162                DO  k = nzb, nzt+1
163                   p_loc(k,j,i) = p(k,j,i)
164                ENDDO
165             ENDDO
166          ENDDO
167       ENDIF
168       
169    ELSEIF ( psolver == 'sor'  .AND.  intermediate_timestep_count == 1 )  THEN
170
171!
172!--    Since p is later used to hold the weighted average of the substeps, it
173!--    cannot be used in the iterative solver. Therefore, its initial value is
174!--    stored on p_loc, which is then iteratively advanced in every substep.
175       p_loc = p
176
177    ENDIF
178
179!
180!-- Conserve the volume flow at the outflow in case of non-cyclic lateral
181!-- boundary conditions
182!-- WARNING: so far, this conservation does not work at the left/south
183!--          boundary if the topography at the inflow differs from that at the
184!--          outflow! For this case, volume_flow_area needs adjustment!
185!
186!-- Left/right
187    IF ( conserve_volume_flow  .AND.  ( outflow_l .OR. outflow_r ) )  THEN
188
189       volume_flow(1)   = 0.0
190       volume_flow_l(1) = 0.0
191
192       IF ( outflow_l )  THEN
193          i = 0
194       ELSEIF ( outflow_r )  THEN
195          i = nx+1
196       ENDIF
197
198       DO  j = nys, nyn
199!
200!--       Sum up the volume flow through the left/right boundary
201          DO  k = nzb_2d(j,i)+1, nzt
202             volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)
203          ENDDO
204       ENDDO
205
206#if defined( __parallel )   
207       IF ( collective_wait )  CALL MPI_BARRIER( comm1dy, ierr )
208       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
209                           MPI_SUM, comm1dy, ierr )   
210#else
211       volume_flow = volume_flow_l 
212#endif
213       volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) &
214                               / volume_flow_area(1)
215
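!
!--    volume_flow_offset(1) is the volume flow still missing per unit of
!--    boundary area, i.e. a uniform velocity increment which, added to u
!--    below, restores the prescribed volume flow volume_flow_initial(1).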
216       DO  j = nysg, nyng
217          DO  k = nzb_2d(j,i)+1, nzt
218             u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
219          ENDDO
220       ENDDO
221
222    ENDIF
223
224!
225!-- South/north
226    IF ( conserve_volume_flow  .AND.  ( outflow_n .OR. outflow_s ) )  THEN
227
228       volume_flow(2)   = 0.0
229       volume_flow_l(2) = 0.0
230
231       IF ( outflow_s )  THEN
232          j = 0
233       ELSEIF ( outflow_n )  THEN
234          j = ny+1
235       ENDIF
236
237       DO  i = nxl, nxr
238!
239!--       Sum up the volume flow through the south/north boundary
240          DO  k = nzb_2d(j,i)+1, nzt
241             volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)
242          ENDDO
243       ENDDO
244
245#if defined( __parallel )   
246       IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
247       CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, &
248                           MPI_SUM, comm1dx, ierr )   
249#else
250       volume_flow = volume_flow_l 
251#endif
252       volume_flow_offset(2) = ( volume_flow_initial(2) - volume_flow(2) )    &
253                               / volume_flow_area(2)
254
255       DO  i = nxlg, nxrg
256          DO  k = nzb_v_inner(j,i)+1, nzt
257             v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
258          ENDDO
259       ENDDO
260
261    ENDIF
262
263!
264!-- Remove mean vertical velocity
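!-- (With cyclic lateral boundaries the horizontally averaged divergence
!-- reduces to d<w>/dz; Neumann conditions for the perturbation pressure at
!-- both bottom and top make the averaged Poisson problem solvable only if
!-- this term vanishes, hence the horizontal mean of w is removed here.)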
265    IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1 )  THEN
266       IF ( simulated_time > 0.0 )  THEN ! otherwise nzb_w_inner not yet known
267          w_l = 0.0;  w_l_l = 0.0
268          DO  i = nxl, nxr
269             DO  j = nys, nyn
270                DO  k = nzb_w_inner(j,i)+1, nzt
271                   w_l_l(k) = w_l_l(k) + w(k,j,i)
272                ENDDO
273             ENDDO
274          ENDDO
275#if defined( __parallel )   
276          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
277          CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, &
278                              comm2d, ierr )
279#else
280          w_l = w_l_l 
281#endif
282          DO  k = 1, nzt
283             w_l(k) = w_l(k) / ngp_2dh_outer(k,0)
284          ENDDO
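!
!--       w_l now holds the horizontal mean of w: ngp_2dh_outer(k,0) is the
!--       number of horizontal grid points above the topography at level k
!--       for statistic region 0, i.e. the total domain.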
285          DO  i = nxlg, nxrg
286             DO  j = nysg, nyng
287                DO  k = nzb_w_inner(j,i)+1, nzt
288                   w(k,j,i) = w(k,j,i) - w_l(k)
289                ENDDO
290             ENDDO
291          ENDDO
292       ENDIF
293    ENDIF
294
295!
296!-- Compute the divergence of the provisional velocity field.
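!-- The divergence is built from the staggered (Arakawa-C) velocity
!-- components,
!--    d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx
!--             + ( v(k,j+1,i) - v(k,j,i) ) * ddy
!--             + ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k),
!-- multiplied by ddt_3d * d_weight_pres (see above) and evaluated only above
!-- the topography, i.e. for k > nzb_s_inner(j,i).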
297    CALL cpu_log( log_point_s(1), 'divergence', 'start' )
298
299    IF ( psolver == 'multigrid' )  THEN
300       !$OMP PARALLEL DO SCHEDULE( STATIC )
301       DO  i = nxl-1, nxr+1
302          DO  j = nys-1, nyn+1
303             DO  k = nzb, nzt+1
304                d(k,j,i) = 0.0
305             ENDDO
306          ENDDO
307       ENDDO
308    ELSE
309       !$OMP PARALLEL DO SCHEDULE( STATIC )
310       !$acc kernels present( d )
311       !$acc loop
312       DO  i = nxl, nxr
313          DO  j = nys, nyn
314             !$acc loop vector(32)
315             DO  k = nzb+1, nzt
316                d(k,j,i) = 0.0
317             ENDDO
318          ENDDO
319       ENDDO
320       !$acc end kernels
321    ENDIF
322
323    localsum  = 0.0
324    threadsum = 0.0
325
326#if defined( __ibm )
327    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
328    !$OMP DO SCHEDULE( STATIC )
329    DO  i = nxl, nxr
330       DO  j = nys, nyn
331          DO  k = nzb_s_inner(j,i)+1, nzt
332             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
333                          ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
334                          ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d      &
335                                                                * d_weight_pres 
336          ENDDO
337!
338!--       Compute possible PE-sum of divergences for flow_statistics
339          DO  k = nzb_s_inner(j,i)+1, nzt
340             threadsum = threadsum + ABS( d(k,j,i) )
341          ENDDO
342
343       ENDDO
344    ENDDO
345
346    localsum = localsum + threadsum * dt_3d * &
347                          weight_pres(intermediate_timestep_count)
348
349    !$OMP END PARALLEL
350#else
351
352    !$OMP PARALLEL PRIVATE (i,j,k)
353    !$OMP DO SCHEDULE( STATIC )
354    !$acc kernels present( d, ddzw, nzb_s_inner, u, v, w )
355    !$acc loop
356    DO  i = nxl, nxr
357       DO  j = nys, nyn
358          !$acc loop vector(32)
359          DO  k = 1, nzt
360             IF ( k > nzb_s_inner(j,i) )  THEN
361                d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
362                           ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
363                           ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d      &
364                           * d_weight_pres
365             ENDIF
366          ENDDO
367       ENDDO
368    ENDDO
369    !$acc end kernels
370    !$OMP END PARALLEL
371
372!
373!-- Compute possible PE-sum of divergences for flow_statistics
374    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
375    !$OMP DO SCHEDULE( STATIC )
376    DO  i = nxl, nxr
377       DO  j = nys, nyn
378          DO  k = nzb+1, nzt
379             threadsum = threadsum + ABS( d(k,j,i) )
380          ENDDO
381       ENDDO
382    ENDDO
383    localsum = localsum + threadsum * dt_3d * &
384                          weight_pres(intermediate_timestep_count)
385    !$OMP END PARALLEL
386#endif
387
388!
389!-- For completeness, set the divergence sum of all statistic regions to that
390!-- of the total domain
391    sums_divold_l(0:statistic_regions) = localsum
392
393    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )
394
395!
396!-- Compute the pressure perturbation solving the Poisson equation
397    IF ( psolver(1:7) == 'poisfft' )  THEN
398
399!
400!--    Solve Poisson equation via FFT and solution of tridiagonal matrices
401       IF ( psolver == 'poisfft' )  THEN
402!
403!--       Solver for 2d-decomposition
404          CALL poisfft( d, tend )
405          !$acc update host( d )
406       ELSEIF ( psolver == 'poisfft_hybrid' )  THEN 
407!
408!--       Solver for 1d-decomposition (using MPI and OpenMP).
409!--       The old hybrid-solver is still included here, as long as there
410!--       are some optimization problems in poisfft
411          CALL poisfft_hybrid( d )
412       ENDIF
413
414!
415!--    Store computed perturbation pressure and set boundary condition in
416!--    z-direction
417       !$OMP PARALLEL DO
418       DO  i = nxl, nxr
419          DO  j = nys, nyn
420             DO  k = nzb+1, nzt
421                tend(k,j,i) = d(k,j,i)
422             ENDDO
423          ENDDO
424       ENDDO
425
426!
427!--    Bottom boundary:
428!--    This condition is only required for internal output. The pressure
429!--    gradient (dp(nzb+1)-dp(nzb))/dz is not used anywhere else.
430       IF ( ibc_p_b == 1 )  THEN
431!
432!--       Neumann (dp/dz = 0)
433          !$OMP PARALLEL DO
434          DO  i = nxlg, nxrg
435             DO  j = nysg, nyng
436                tend(nzb_s_inner(j,i),j,i) = tend(nzb_s_inner(j,i)+1,j,i)
437             ENDDO
438          ENDDO
439
440       ELSE
441!
442!--       Dirichlet
443          !$OMP PARALLEL DO
444          DO  i = nxlg, nxrg
445             DO  j = nysg, nyng
446                tend(nzb_s_inner(j,i),j,i) = 0.0
447             ENDDO
448          ENDDO
449
450       ENDIF
451
452!
453!--    Top boundary
454       IF ( ibc_p_t == 1 )  THEN
455!
456!--       Neumann
457          !$OMP PARALLEL DO
458          DO  i = nxlg, nxrg
459             DO  j = nysg, nyng
460                tend(nzt+1,j,i) = tend(nzt,j,i)
461             ENDDO
462          ENDDO
463
464       ELSE
465!
466!--       Dirichlet
467          !$OMP PARALLEL DO
468          DO  i = nxlg, nxrg
469             DO  j = nysg, nyng
470                tend(nzt+1,j,i) = 0.0
471             ENDDO
472          ENDDO
473
474       ENDIF
475
476!
477!--    Exchange boundaries for p
478       CALL exchange_horiz( tend, nbgp )
479     
480    ELSEIF ( psolver == 'sor' )  THEN
481
482!
483!--    Solve Poisson equation for perturbation pressure using SOR-Red/Black
484!--    scheme
485       CALL sor( d, ddzu_pres, ddzw, p_loc )
486       tend = p_loc
487
488    ELSEIF ( psolver == 'multigrid' )  THEN
489
490!
491!--    Solve Poisson equation for perturbation pressure using Multigrid scheme,
492!--    array tend is used to store the residuals, logical exchange_mg is used
493!--    to discern data exchange in multigrid ( 1 ghost point ) and normal grid
494!--    ( nbgp ghost points ).
495
496!--    If the number of grid points of the gathered grid, which is collected
497!--    on PE0, is larger than the number of grid points of a PE, then array
498!--    tend will be enlarged.
499       IF ( gathered_size > subdomain_size )  THEN
500          DEALLOCATE( tend )
501          ALLOCATE( tend(nzb:nzt_mg(mg_switch_to_pe0_level)+1,nys_mg(          &
502                    mg_switch_to_pe0_level)-1:nyn_mg(mg_switch_to_pe0_level)+1,&
503                    nxl_mg(mg_switch_to_pe0_level)-1:nxr_mg(                   &
504                    mg_switch_to_pe0_level)+1) )
505       ENDIF
506
507       CALL poismg( tend )
508
509       IF ( gathered_size > subdomain_size )  THEN
510          DEALLOCATE( tend )
511          ALLOCATE( tend(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
512       ENDIF
513
514!
515!--    Restore perturbation pressure on tend because this array is used
516!--    further below to correct the velocity fields
517       DO  i = nxl-1, nxr+1
518          DO  j = nys-1, nyn+1
519             DO  k = nzb, nzt+1
520                tend(k,j,i) = p_loc(k,j,i)
521             ENDDO
522          ENDDO
523       ENDDO
524
525    ENDIF
526
527!
528!-- Store perturbation pressure on array p, used for pressure data output.
529!-- Ghost layers are added in the output routines (except sor-method: see below)
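!-- Over the substeps of one timestep p thus accumulates
!--    p = sum over substeps of  weight_substep * (perturbation pressure),
!-- i.e. the weighted average of the substep pressures mentioned in the r707
!-- log entry above.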
530    IF ( intermediate_timestep_count == 1 )  THEN
531       !$OMP PARALLEL PRIVATE (i,j,k)
532       !$OMP DO
533       DO  i = nxl-1, nxr+1
534          DO  j = nys-1, nyn+1
535             DO  k = nzb, nzt+1
536                p(k,j,i) = tend(k,j,i) * &
537                           weight_substep(intermediate_timestep_count)
538             ENDDO
539          ENDDO
540       ENDDO
541       !$OMP END PARALLEL
542
543    ELSE 
544       !$OMP PARALLEL PRIVATE (i,j,k)
545       !$OMP DO
546       DO  i = nxl-1, nxr+1
547          DO  j = nys-1, nyn+1
548             DO  k = nzb, nzt+1
549                p(k,j,i) = p(k,j,i) + tend(k,j,i) * &
550                           weight_substep(intermediate_timestep_count)
551             ENDDO
552          ENDDO
553       ENDDO
554       !$OMP END PARALLEL
555
556    ENDIF
557       
558!
559!-- SOR-method needs ghost layers for the next timestep
560    IF ( psolver == 'sor' )  CALL exchange_horiz( p, nbgp )
561
562!
563!-- Correction of the provisional velocities with the current perturbation
564!-- pressure just computed
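!-- Each velocity component is corrected with the pressure-gradient component
!-- at its own staggered position (w with ddzu(k+1), u with ddx, v with ddy),
!-- multiplied by dt_3d * weight_pres, and only above the respective
!-- topography height (nzb_w/u/v_inner).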
565    !$acc update host( u, v, w )
566    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc .OR. bc_ns_cyc ) )  THEN
567       volume_flow_l(1) = 0.0
568       volume_flow_l(2) = 0.0
569    ENDIF
570
571    !$OMP PARALLEL PRIVATE (i,j,k)
572    !$OMP DO
573    DO  i = nxl, nxr   
574       DO  j = nys, nyn
575          DO  k = nzb_w_inner(j,i)+1, nzt
576             w(k,j,i) = w(k,j,i) - dt_3d *                                 &
577                        ( tend(k+1,j,i) - tend(k,j,i) ) * ddzu(k+1) *      &
578                        weight_pres(intermediate_timestep_count)
579          ENDDO
580          DO  k = nzb_u_inner(j,i)+1, nzt
581             u(k,j,i) = u(k,j,i) - dt_3d *                                 &
582                        ( tend(k,j,i) - tend(k,j,i-1) ) * ddx *            &
583                        weight_pres(intermediate_timestep_count)
584          ENDDO
585          DO  k = nzb_v_inner(j,i)+1, nzt
586             v(k,j,i) = v(k,j,i) - dt_3d *                                 &
587                        ( tend(k,j,i) - tend(k,j-1,i) ) * ddy *            &
588                        weight_pres(intermediate_timestep_count)
589          ENDDO                                                         
590!
591!--       Sum up the volume flow through the right and north boundary
592          IF ( conserve_volume_flow  .AND.  bc_lr_cyc  .AND.  bc_ns_cyc  .AND. &
593               i == nx )  THEN
594             !$OMP CRITICAL
595             DO  k = nzb_2d(j,i) + 1, nzt
596                volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)
597             ENDDO
598             !$OMP END CRITICAL
599          ENDIF
600          IF ( conserve_volume_flow  .AND.  bc_ns_cyc  .AND.  bc_lr_cyc  .AND. &
601               j == ny )  THEN
602             !$OMP CRITICAL
603             DO  k = nzb_2d(j,i) + 1, nzt
604                volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)
605             ENDDO
606             !$OMP END CRITICAL
607          ENDIF
608
609       ENDDO
610    ENDDO
611    !$OMP END PARALLEL
612   
613!
614!-- Conserve the volume flow
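!-- (With cyclic boundaries in both directions the pressure correction leaves
!-- the domain-averaged horizontal velocities unchanged, so u and v are
!-- nudged back to the prescribed volume flow volume_flow_initial here.)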
615    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .AND.  bc_ns_cyc ) )  THEN
616
617#if defined( __parallel )   
618       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
619       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, &
620                           MPI_SUM, comm2d, ierr ) 
621#else
622       volume_flow = volume_flow_l 
623#endif   
624
625       volume_flow_offset = ( volume_flow_initial - volume_flow ) / &
626                            volume_flow_area
627
628       !$OMP PARALLEL PRIVATE (i,j,k)
629       !$OMP DO
630       DO  i = nxl, nxr
631          DO  j = nys, nyn
632             DO  k = nzb_u_inner(j,i) + 1, nzt
633                u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
634             ENDDO
635             DO k = nzb_v_inner(j,i) + 1, nzt
636                v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
637             ENDDO
638          ENDDO
639       ENDDO
640
641       !$OMP END PARALLEL
642
643    ENDIF
644
645!
646!-- Exchange of boundaries for the velocities
647    CALL exchange_horiz( u, nbgp )
648    CALL exchange_horiz( v, nbgp )
649    CALL exchange_horiz( w, nbgp )
650
651!
652!-- Compute the divergence of the corrected velocity field,
653!-- a possible PE-sum is computed in flow_statistics
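!-- This second divergence calculation is diagnostic only: after a successful
!-- pressure correction the remaining divergence (sums_divnew_l) should be
!-- small compared to the value before the correction (sums_divold_l).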
654    CALL cpu_log( log_point_s(1), 'divergence', 'start' )
655    sums_divnew_l = 0.0
656
657!
658!-- d must be reset to zero because it can contain nonzero values below the
659!-- topography
660    IF ( topography /= 'flat' )  d = 0.0
661
662    localsum  = 0.0
663    threadsum = 0.0
664
665    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
666    !$OMP DO SCHEDULE( STATIC )
667#if defined( __ibm )
668    DO  i = nxl, nxr
669       DO  j = nys, nyn
670          DO  k = nzb_s_inner(j,i)+1, nzt
671             d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
672                        ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
673                        ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k)
674          ENDDO
675          DO  k = nzb+1, nzt
676             threadsum = threadsum + ABS( d(k,j,i) )
677          ENDDO
678       ENDDO
679    ENDDO
680#else
681    DO  i = nxl, nxr
682       DO  j = nys, nyn
683          DO  k = nzb_s_inner(j,i)+1, nzt
684             d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
685                        ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
686                        ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k)
687             threadsum = threadsum + ABS( d(k,j,i) )
688          ENDDO
689       ENDDO
690    ENDDO
691#endif
692
693    localsum = localsum + threadsum
694    !$OMP END PARALLEL
695
696!
697!-- For completeness, set the divergence sum of all statistic regions to that
698!-- of the total domain
699    sums_divnew_l(0:statistic_regions) = localsum
700
701    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )
702
703    !$acc update device( u, v, w )
704
705    CALL cpu_log( log_point(8), 'pres', 'stop' )
706   
707
708
709 END SUBROUTINE pres