SUBROUTINE pres

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2014 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
!
!
! Former revisions:
! -----------------
! $Id: pres.f90 1310 2014-03-14 08:01:56Z fricke $
!
! (the complete revision history of this routine is kept in the PALM
!  repository; latest notable changes: 1306 second argument removed from
!  poisfft, 1257/1221 openACC porting of loops and reductions, 1117 OpenMP
!  bugfix, 1113 GPU porting, 1036 code put under GPL (PALM 3.9))
!
! Description:
! ------------
! Compute the divergence of the provisional velocity field. Solve the Poisson
! equation for the perturbation pressure. Compute the final velocities using
! this perturbation pressure. Compute the remaining divergence.
!------------------------------------------------------------------------------!

    USE arrays_3d
    USE constants
    USE control_parameters
    USE cpulog
    USE grid_variables
    USE indices
    USE interfaces
    USE pegrid
    USE poisfft_mod
    USE statistics

    IMPLICIT NONE

    INTEGER ::  i, j, k

    REAL    ::  ddt_3d, localsum, threadsum, d_weight_pres

    REAL, DIMENSION(1:2)   ::  volume_flow_l, volume_flow_offset
    REAL, DIMENSION(1:nzt) ::  w_l, w_l_l


    CALL cpu_log( log_point(8), 'pres', 'start' )

!
!-- Multiplication by the inverse is cheaper than repeated division by dt_3d
!-- inside the divergence loops
    ddt_3d        = 1.0 / dt_3d
    d_weight_pres = 1.0 / weight_pres(intermediate_timestep_count)

!
!-- Multigrid method expects array d to have one ghost layer.
!--
    IF ( psolver == 'multigrid' )  THEN

       DEALLOCATE( d )
       ALLOCATE( d(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )

!
!--    Since p is later used to hold the weighted average of the substeps, it
!--    cannot be used in the iterative solver. Therefore, its initial value is
!--    stored on p_loc, which is then iteratively advanced in every substep.
       IF ( intermediate_timestep_count == 1 )  THEN
          DO  i = nxl-1, nxr+1
             DO  j = nys-1, nyn+1
                DO  k = nzb, nzt+1
                   p_loc(k,j,i) = p(k,j,i)
                ENDDO
             ENDDO
          ENDDO
       ENDIF

    ELSEIF ( psolver == 'sor'  .AND.  intermediate_timestep_count == 1 )  THEN

!
!--    Since p is later used to hold the weighted average of the substeps, it
!--    cannot be used in the iterative solver. Therefore, its initial value is
!--    stored on p_loc, which is then iteratively advanced in every substep.
       p_loc = p

    ENDIF

!
!-- Conserve the volume flow at the outflow in case of non-cyclic lateral
!-- boundary conditions
!-- WARNING: so far, this conservation does not work at the left/south
!--          boundary if the topography at the inflow differs from that at the
!--          outflow! For this case, volume_flow_area needs adjustment!
!
!-- Left/right
    IF ( conserve_volume_flow  .AND.  ( outflow_l  .OR.  outflow_r ) )  THEN

       volume_flow(1)   = 0.0
       volume_flow_l(1) = 0.0

       IF ( outflow_l )  THEN
          i = 0
       ELSEIF ( outflow_r )  THEN
          i = nx+1
       ENDIF

       DO  j = nys, nyn
!
!--       Sum up the volume flow through the south/north boundary
          DO  k = nzb_2d(j,i)+1, nzt
             volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dy, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
                           MPI_SUM, comm1dy, ierr )
#else
       volume_flow = volume_flow_l
#endif

       volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) &
                               / volume_flow_area(1)

       DO  j = nysg, nyng
          DO  k = nzb_2d(j,i)+1, nzt
             u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
          ENDDO
       ENDDO

    ENDIF

!
!-- South/north
    IF ( conserve_volume_flow  .AND.  ( outflow_n  .OR.  outflow_s ) )  THEN

       volume_flow(2)   = 0.0
       volume_flow_l(2) = 0.0

       IF ( outflow_s )  THEN
          j = 0
       ELSEIF ( outflow_n )  THEN
          j = ny+1
       ENDIF

       DO  i = nxl, nxr
!
!--       Sum up the volume flow through the south/north boundary
          DO  k = nzb_2d(j,i)+1, nzt
             volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, &
                           MPI_SUM, comm1dx, ierr )
#else
       volume_flow = volume_flow_l
#endif

       volume_flow_offset(2) = ( volume_flow_initial(2) - volume_flow(2) ) &
                               / volume_flow_area(2)

       DO  i = nxlg, nxrg
          DO  k = nzb_v_inner(j,i)+1, nzt
             v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
          ENDDO
       ENDDO

    ENDIF

!
!-- Remove mean vertical velocity in case of Neumann boundary conditions both
!-- at the bottom and the top
    IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1 )  THEN
       IF ( simulated_time > 0.0 )  THEN   ! otherwise nzb_w_inner not yet known

          w_l = 0.0;  w_l_l = 0.0
          DO  i = nxl, nxr
             DO  j = nys, nyn
                DO  k = nzb_w_inner(j,i)+1, nzt
                   w_l_l(k) = w_l_l(k) + w(k,j,i)
                ENDDO
             ENDDO
          ENDDO

#if defined( __parallel )
          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
          CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, &
                              comm2d, ierr )
#else
          w_l = w_l_l
#endif

          DO  k = 1, nzt
             w_l(k) = w_l(k) / ngp_2dh_outer(k,0)
          ENDDO

          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                DO  k = nzb_w_inner(j,i)+1, nzt
                   w(k,j,i) = w(k,j,i) - w_l(k)
                ENDDO
             ENDDO
          ENDDO

       ENDIF
    ENDIF

!
!-- Compute the divergence of the provisional velocity field.
    CALL cpu_log( log_point_s(1), 'divergence', 'start' )

    IF ( psolver == 'multigrid' )  THEN
       !$OMP PARALLEL DO SCHEDULE( STATIC )
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                d(k,j,i) = 0.0
             ENDDO
          ENDDO
       ENDDO
    ELSE
       !$OMP PARALLEL DO SCHEDULE( STATIC )
       !$acc kernels present( d )
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                d(k,j,i) = 0.0
             ENDDO
          ENDDO
       ENDDO
       !$acc end kernels
    ENDIF

    localsum  = 0.0
    threadsum = 0.0

#if defined( __ibm )
    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb_s_inner(j,i)+1, nzt
             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +                  &
                          ( v(k,j+1,i) - v(k,j,i) ) * ddy +                  &
                          ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d     &
                        * d_weight_pres
          ENDDO
!
!--       Compute possible PE-sum of divergences for flow_statistics
          DO  k = nzb_s_inner(j,i)+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO

    localsum = localsum + threadsum * dt_3d * &
                          weight_pres(intermediate_timestep_count)

    !$OMP END PARALLEL
#else

    !$OMP PARALLEL PRIVATE (i,j,k)
    !$OMP DO SCHEDULE( STATIC )
    !$acc kernels present( d, ddzw, rflags_s_inner, u, v, w )
    !$acc loop collapse( 3 )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = 1, nzt
             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +                  &
                          ( v(k,j+1,i) - v(k,j,i) ) * ddy +                  &
                          ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d     &
                        * d_weight_pres * rflags_s_inner(k,j,i)
          ENDDO
       ENDDO
    ENDDO
    !$acc end kernels
    !$OMP END PARALLEL

!
!-- Compute possible PE-sum of divergences for flow_statistics
    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
    !$acc parallel loop collapse(3) present( d ) reduction(+:threadsum)
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
    !$acc end parallel loop

    localsum = localsum + threadsum * dt_3d * &
                          weight_pres(intermediate_timestep_count)

    !$OMP END PARALLEL
#endif

!
!-- For completeness, set the divergence sum of all statistic regions to those
!-- of the total domain
    sums_divold_l(0:statistic_regions) = localsum

    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )

!
!-- Compute the pressure perturbation solving the Poisson equation
    IF ( psolver(1:7) == 'poisfft' )  THEN

!
!--    Solve Poisson equation via FFT and solution of tridiagonal matrices
       IF ( psolver == 'poisfft' )  THEN

          CALL poisfft( d )

       ENDIF

!
!--    Store computed perturbation pressure and set boundary condition in
!--    z-direction
       !$OMP PARALLEL DO
       !$acc kernels present( d, tend )
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                tend(k,j,i) = d(k,j,i)
             ENDDO
          ENDDO
       ENDDO
       !$acc end kernels

!
!--    Bottom boundary:
!--    This condition is only required for internal output. The pressure
!--    gradient (dp(nzb+1)-dp(nzb))/dz is not used anywhere else.
       IF ( ibc_p_b == 1 )  THEN
!
!--       Neumann (dp/dz = 0)
          !$OMP PARALLEL DO
          !$acc kernels present( nzb_s_inner, tend )
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzb_s_inner(j,i),j,i) = tend(nzb_s_inner(j,i)+1,j,i)
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
!
!--       Dirichlet
          !$OMP PARALLEL DO
          !$acc kernels present( tend )
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzb_s_inner(j,i),j,i) = 0.0
             ENDDO
          ENDDO
          !$acc end kernels
       ENDIF

!
!--    Top boundary
       IF ( ibc_p_t == 1 )  THEN
!
!--       Neumann
          !$OMP PARALLEL DO
          !$acc kernels present( tend )
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = tend(nzt,j,i)
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
!
!--       Dirichlet
          !$OMP PARALLEL DO
          !$acc kernels present( tend )
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = 0.0
             ENDDO
          ENDDO
          !$acc end kernels
       ENDIF

!
!--    Exchange boundaries for p
       IF ( numprocs == 1 )  THEN    ! workaround for single-core GPU runs
          on_device = .TRUE.         ! to be removed after complete porting
       ELSE                          ! of ghost point exchange
          !$acc update host( tend )
       ENDIF
       CALL exchange_horiz( tend, nbgp )
       IF ( numprocs == 1 )  THEN    ! workaround for single-core GPU runs
          on_device = .FALSE.        ! to be removed after complete porting
       ELSE                          ! of ghost point exchange
          !$acc update device( tend )
       ENDIF

    ELSEIF ( psolver == 'sor' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using SOR-Red/Black
!--    scheme
       CALL sor( d, ddzu_pres, ddzw, p_loc )
       tend = p_loc

    ELSEIF ( psolver == 'multigrid' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using Multigrid scheme,
!--    array tend is used to store the residuals, logical exchange_mg is used
!--    to discern data exchange in multigrid ( 1 ghostpoint ) and normal grid
!--    ( nbgp ghost points ).

!--    If the number of grid points of the gathered grid, which is collected
!--    on PE0, is larger than the number of grid points of an PE, than array
!--    tend will be enlarged.
       IF ( gathered_size > subdomain_size )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(nzb:nzt_mg(mg_switch_to_pe0_level)+1,nys_mg(          &
                    mg_switch_to_pe0_level)-1:nyn_mg(mg_switch_to_pe0_level)+1,&
                    nxl_mg(mg_switch_to_pe0_level)-1:nxr_mg(                   &
                    mg_switch_to_pe0_level)+1) )
       ENDIF

       CALL poismg( tend )

       IF ( gathered_size > subdomain_size )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
       ENDIF

!
!--    Restore perturbation pressure on tend because this array is used
!--    further below to correct the velocity fields
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                tend(k,j,i) = p_loc(k,j,i)
             ENDDO
          ENDDO
       ENDDO

    ENDIF

!
!-- Store perturbation pressure on array p, used for pressure data output.
!-- Ghost layers are added in the output routines (except sor-method: see below)
    IF ( intermediate_timestep_count == 1 )  THEN
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       !$acc kernels present( p, tend, weight_substep )
       !$acc loop independent
       DO  i = nxl-1, nxr+1
          !$acc loop independent
          DO  j = nys-1, nyn+1
             !$acc loop independent
             DO  k = nzb, nzt+1
                p(k,j,i) = tend(k,j,i) * &
                           weight_substep(intermediate_timestep_count)
             ENDDO
          ENDDO
       ENDDO
       !$acc end kernels
       !$OMP END PARALLEL

    ELSE
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       !$acc kernels present( p, tend, weight_substep )
       !$acc loop independent
       DO  i = nxl-1, nxr+1
          !$acc loop independent
          DO  j = nys-1, nyn+1
             !$acc loop independent
             DO  k = nzb, nzt+1
                p(k,j,i) = p(k,j,i) + tend(k,j,i) * &
                           weight_substep(intermediate_timestep_count)
             ENDDO
          ENDDO
       ENDDO
       !$acc end kernels
       !$OMP END PARALLEL

    ENDIF

!
!-- SOR-method needs ghost layers for the next timestep
    IF ( psolver == 'sor' )  CALL exchange_horiz( p, nbgp )

!
!-- Correction of the provisional velocities with the current perturbation
!-- pressure just computed
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .OR.  bc_ns_cyc ) )  THEN
       volume_flow_l(1) = 0.0
       volume_flow_l(2) = 0.0
    ENDIF

    !$OMP PARALLEL PRIVATE (i,j,k)
    !$OMP DO
    !$acc kernels present( ddzu, nzb_u_inner, nzb_v_inner, nzb_w_inner, tend, u, v, w, weight_pres )
    !$acc loop independent
    DO  i = nxl, nxr
       !$acc loop independent
       DO  j = nys, nyn
          !$acc loop independent
          DO  k = 1, nzt
             IF ( k > nzb_w_inner(j,i) )  THEN
                w(k,j,i) = w(k,j,i) - dt_3d *                                 &
                           ( tend(k+1,j,i) - tend(k,j,i) ) * ddzu(k+1) *      &
                           weight_pres(intermediate_timestep_count)
             ENDIF
          ENDDO
          !$acc loop independent
          DO  k = 1, nzt
             IF ( k > nzb_u_inner(j,i) )  THEN
                u(k,j,i) = u(k,j,i) - dt_3d *                                 &
                           ( tend(k,j,i) - tend(k,j,i-1) ) * ddx *            &
                           weight_pres(intermediate_timestep_count)
             ENDIF
          ENDDO
          !$acc loop independent
          DO  k = 1, nzt
             IF ( k > nzb_v_inner(j,i) )  THEN
                v(k,j,i) = v(k,j,i) - dt_3d *                                 &
                           ( tend(k,j,i) - tend(k,j-1,i) ) * ddy *            &
                           weight_pres(intermediate_timestep_count)
             ENDIF
          ENDDO
       ENDDO
    ENDDO
    !$acc end kernels
    !$OMP END PARALLEL

!
!-- Sum up the volume flow through the right and north boundary
    IF ( conserve_volume_flow  .AND.  bc_lr_cyc  .AND.  bc_ns_cyc  .AND. &
         nxr == nx )  THEN

       !$OMP PARALLEL PRIVATE (j,k)
       !$OMP DO
       DO  j = nys, nyn
          !$OMP CRITICAL
          DO  k = nzb_2d(j,nx) + 1, nzt
             volume_flow_l(1) = volume_flow_l(1) + u(k,j,nx) * dzw(k)
          ENDDO
          !$OMP END CRITICAL
       ENDDO
       !$OMP END PARALLEL

    ENDIF

    IF ( conserve_volume_flow  .AND.  bc_ns_cyc  .AND.  bc_lr_cyc  .AND. &
         nyn == ny )  THEN

       !$OMP PARALLEL PRIVATE (i,k)
       !$OMP DO
       DO  i = nxl, nxr
          !$OMP CRITICAL
          DO  k = nzb_2d(ny,i) + 1, nzt
             volume_flow_l(2) = volume_flow_l(2) + v(k,ny,i) * dzw(k)
          ENDDO
          !$OMP END CRITICAL
       ENDDO
       !$OMP END PARALLEL

    ENDIF

!
!-- Conserve the volume flow
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .AND.  bc_ns_cyc ) )  THEN

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, &
                           MPI_SUM, comm2d, ierr )
#else
       volume_flow = volume_flow_l
#endif

       volume_flow_offset = ( volume_flow_initial - volume_flow ) / &
                            volume_flow_area

       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb_u_inner(j,i) + 1, nzt
                u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
             ENDDO
             DO  k = nzb_v_inner(j,i) + 1, nzt
                v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
             ENDDO
          ENDDO
       ENDDO
       !$OMP END PARALLEL

    ENDIF

!
!-- Exchange of boundaries for the velocities
    IF ( numprocs == 1 )  THEN    ! workaround for single-core GPU runs
       on_device = .TRUE.         ! to be removed after complete porting
    ELSE                          ! of ghost point exchange
       !$acc update host( u, v, w )
    ENDIF
    CALL exchange_horiz( u, nbgp )
    CALL exchange_horiz( v, nbgp )
    CALL exchange_horiz( w, nbgp )
    IF ( numprocs == 1 )  THEN    ! workaround for single-core GPU runs
       on_device = .FALSE.        ! to be removed after complete porting
    ELSE                          ! of ghost point exchange
       !$acc update device( u, v, w )
    ENDIF

!
!-- Compute the divergence of the corrected velocity field,
!-- a possible PE-sum is computed in flow_statistics
    CALL cpu_log( log_point_s(1), 'divergence', 'start' )

    sums_divnew_l = 0.0

!
!-- d must be reset to zero because it can contain nonzero values below the
!-- topography
    IF ( topography /= 'flat' )  d = 0.0

    localsum  = 0.0
    threadsum = 0.0

    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
#if defined( __ibm )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb_s_inner(j,i)+1, nzt
             d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
                        ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
                        ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k)
          ENDDO
          DO  k = nzb+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
#else
    !$acc kernels present( d, ddzw, rflags_s_inner, u, v, w )
    !$acc loop collapse( 3 )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = 1, nzt
             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +   &
                          ( v(k,j+1,i) - v(k,j,i) ) * ddy +   &
                          ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) &
                        ) * rflags_s_inner(k,j,i)
          ENDDO
       ENDDO
    ENDDO
    !$acc end kernels

!
!-- Compute possible PE-sum of divergences for flow_statistics
    !$acc parallel loop collapse(3) present( d ) reduction(+:threadsum)
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
    !$acc end parallel loop
#endif

    localsum = localsum + threadsum
    !$OMP END PARALLEL

!
!-- For completeness, set the divergence sum of all statistic regions to those
!-- of the total domain
    sums_divnew_l(0:statistic_regions) = localsum

    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )

    CALL cpu_log( log_point(8), 'pres', 'stop' )


 END SUBROUTINE pres