 SUBROUTINE pres

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: pres.f90 710 2011-03-30 09:45:27Z banzhafs $
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! Calculation of weighted average of p is now handled in the same way
! regardless of the number of ghost layers (advection scheme),
! multigrid and sor method are using p_loc for iterative advancements of
! pressure,
! localsum calculation modified for proper OpenMP reduction,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 693 2011-03-08 09:..:..Z raasch
! bugfix: weighting coefficient added to ibm branch
!
! 680 2011-02-04 23:16:06Z gryschka
! bugfix: collective_wait
!
! 675 2011-01-19 10:56:55Z suehring
! Removed bugfix while copying tend.
!
! 673 2011-01-18 16:19:48Z suehring
! Weighting coefficients added for the correct computation of the pressure
! during Runge-Kutta substeps.
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! New allocation of tend when the ws-scheme and multigrid are used, for
! performance reasons of the data exchange. The same is done with p after
! poismg is called.
! nxl-1, nxr+1, nys-1, nyn+1 replaced by nxlg, nxrg, nysg, nyng when no
! multigrid is used. Calls of exchange_horiz are modified.
! bugfix: no volume flow correction after the pressure correction in case of
! non-cyclic boundary conditions (it has to be done only before the pressure
! correction)
! Call of the SOR routine is referenced with ddzu_pres.
!
! 622 2010-12-10 08:08:13Z raasch
! optional barriers included in order to speed up collective operations
!
! 151 2008-03-07 13:42:18Z raasch
! Bugfix in volume flow control for non-cyclic boundary conditions
!
! 106 2007-08-16 14:30:26Z raasch
! Volume flow conservation added for the remaining three outflow boundaries
!
! 85 2007-05-11 09:35:14Z raasch
! Division by dt_3d replaced by multiplication with its inverse.
! For performance optimisation, this is done in the loop calculating the
! divergence instead of using a separate loop.
!
! 75 2007-03-22 09:54:05Z raasch
! Volume flow control for non-cyclic boundary conditions added (currently only
! for the north boundary!!), 2nd+3rd argument removed from exchange_horiz,
! mean vertical velocity is removed in case of Neumann boundary conditions
! both at the bottom and the top
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.25 2006/04/26 13:26:12 raasch
! OpenMP optimization (+localsum, threadsum)
!
! Revision 1.1 1997/07/24 11:24:44 raasch
! Initial revision
!
!
! Description:
! ------------
! Compute the divergence of the provisional velocity field. Solve the Poisson
! equation for the perturbation pressure. Compute the final velocities using
! this perturbation pressure. Compute the remaining divergence.
!------------------------------------------------------------------------------!

    USE arrays_3d
    USE constants
    USE control_parameters
    USE cpulog
    USE grid_variables
    USE indices
    USE interfaces
    USE pegrid
    USE poisfft_mod
    USE poisfft_hybrid_mod
    USE statistics

    IMPLICIT NONE

    INTEGER ::  i, j, k, sr

    REAL    ::  ddt_3d, localsum, threadsum, d_weight_pres

    REAL, DIMENSION(1:2)   ::  volume_flow_l, volume_flow_offset
    REAL, DIMENSION(1:nzt) ::  w_l, w_l_l

    CALL cpu_log( log_point(8), 'pres', 'start' )

    ddt_3d = 1.0 / dt_3d
    d_weight_pres = 1.0 / weight_pres(intermediate_timestep_count)
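
!
!-- ddt_3d and d_weight_pres hold the inverses of dt_3d and of the current
!-- substep weight; the divergence loops below multiply by them instead of
!-- dividing (performance optimisation, cf. revision 85 above).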

!
!-- Multigrid method expects array d to have one ghost layer.
!--
    IF ( psolver == 'multigrid' )  THEN

       DEALLOCATE( d )
       ALLOCATE( d(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )

!
!--    Since p is later used to hold the weighted average of the substeps, it
!--    cannot be used in the iterative solver. Therefore, its initial value is
!--    stored on p_loc, which is then iteratively advanced in every substep.
       IF ( intermediate_timestep_count == 1 )  THEN
          DO  i = nxl-1, nxr+1
             DO  j = nys-1, nyn+1
                DO  k = nzb, nzt+1
                   p_loc(k,j,i) = p(k,j,i)
                ENDDO
             ENDDO
          ENDDO
       ENDIF

    ELSEIF ( psolver == 'sor'  .AND.  intermediate_timestep_count == 1 )  THEN

!
!--    Since p is later used to hold the weighted average of the substeps, it
!--    cannot be used in the iterative solver. Therefore, its initial value is
!--    stored on p_loc, which is then iteratively advanced in every substep.
       p_loc = p

    ENDIF

!
!-- Conserve the volume flow at the outflow in case of non-cyclic lateral
!-- boundary conditions
!-- WARNING: so far, this conservation does not work at the left/south
!--          boundary if the topography at the inflow differs from that at the
!--          outflow! For this case, volume_flow_area needs adjustment!
!
!-- Left/right
    IF ( conserve_volume_flow  .AND.  ( outflow_l .OR. outflow_r ) )  THEN

       volume_flow(1)   = 0.0
       volume_flow_l(1) = 0.0

       IF ( outflow_l )  THEN
          i = 0
       ELSEIF ( outflow_r )  THEN
          i = nx+1
       ENDIF

       DO  j = nys, nyn
!
!--       Sum up the volume flow through the left/right boundary
          DO  k = nzb_2d(j,i)+1, nzt
             volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dy, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
                           MPI_SUM, comm1dy, ierr )
#else
       volume_flow = volume_flow_l
#endif
       volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) &
                               / volume_flow_area(1)

       DO  j = nysg, nyng
          DO  k = nzb_2d(j,i)+1, nzt
             u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
          ENDDO
       ENDDO

    ENDIF

!
!-- South/north
    IF ( conserve_volume_flow  .AND.  ( outflow_n .OR. outflow_s ) )  THEN

       volume_flow(2)   = 0.0
       volume_flow_l(2) = 0.0

       IF ( outflow_s )  THEN
          j = 0
       ELSEIF ( outflow_n )  THEN
          j = ny+1
       ENDIF

       DO  i = nxl, nxr
!
!--       Sum up the volume flow through the south/north boundary
          DO  k = nzb_2d(j,i)+1, nzt
             volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)
          ENDDO
       ENDDO

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm1dx, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, &
                           MPI_SUM, comm1dx, ierr )
#else
       volume_flow = volume_flow_l
#endif
       volume_flow_offset(2) = ( volume_flow_initial(2) - volume_flow(2) ) &
                               / volume_flow_area(2)

       DO  i = nxlg, nxrg
          DO  k = nzb_v_inner(j,i)+1, nzt
             v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
          ENDDO
       ENDDO

    ENDIF

!
!-- Remove mean vertical velocity
    IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1 )  THEN
       IF ( simulated_time > 0.0 )  THEN ! otherwise nzb_w_inner not yet known
          w_l = 0.0;  w_l_l = 0.0
          DO  i = nxl, nxr
             DO  j = nys, nyn
                DO  k = nzb_w_inner(j,i)+1, nzt
                   w_l_l(k) = w_l_l(k) + w(k,j,i)
                ENDDO
             ENDDO
          ENDDO
#if defined( __parallel )
          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
          CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, &
                              comm2d, ierr )
#else
          w_l = w_l_l
#endif
          DO  k = 1, nzt
             w_l(k) = w_l(k) / ngp_2dh_outer(k,0)
          ENDDO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                DO  k = nzb_w_inner(j,i)+1, nzt
                   w(k,j,i) = w(k,j,i) - w_l(k)
                ENDDO
             ENDDO
          ENDDO
       ENDIF
    ENDIF

!
!-- Compute the divergence of the provisional velocity field.
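!-- Scaled by the inverse timestep and the inverse substep weight, this
!-- divergence serves as the right-hand side of the Poisson equation for
!-- the perturbation pressure that is solved below.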
    CALL cpu_log( log_point_s(1), 'divergence', 'start' )

    IF ( psolver == 'multigrid' )  THEN
       !$OMP PARALLEL DO SCHEDULE( STATIC )
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                d(k,j,i) = 0.0
             ENDDO
          ENDDO
       ENDDO
    ELSE
       !$OMP PARALLEL DO SCHEDULE( STATIC )
       DO  i = nxl, nxra
          DO  j = nys, nyna
             DO  k = nzb+1, nzta
                d(k,j,i) = 0.0
             ENDDO
          ENDDO
       ENDDO
    ENDIF

    localsum  = 0.0
    threadsum = 0.0

#if defined( __ibm )
    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb_s_inner(j,i)+1, nzt
             d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +              &
                          ( v(k,j+1,i) - v(k,j,i) ) * ddy +              &
                          ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d &
                        * d_weight_pres
          ENDDO
!
!--       Additional pressure boundary condition at the bottom boundary for
!--       inhomogeneous Prandtl layer heat fluxes and temperatures, respectively
!--       dp/dz = -(dtau13/dx + dtau23/dy) + g*pt'/pt0.
!--       This condition must not be applied at the start of a run, because
!--       then flow_statistics has not yet been called and thus sums = 0.
          IF ( ibc_p_b == 2  .AND.  sums(nzb+1,4) /= 0.0 )  THEN
             k = nzb_s_inner(j,i)
             d(k+1,j,i) = d(k+1,j,i) + (                                     &
                                         ( usws(j,i+1) - usws(j,i) ) * ddx   &
                                       + ( vsws(j+1,i) - vsws(j,i) ) * ddy   &
                                       - g * ( pt(k+1,j,i) - sums(k+1,4) ) / &
                                         sums(k+1,4)                         &
                                       ) * ddzw(k+1) * ddt_3d * d_weight_pres
          ENDIF

!
!--       Compute possible PE-sum of divergences for flow_statistics
          DO  k = nzb_s_inner(j,i)+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO

       ENDDO
    ENDDO

    localsum = localsum + threadsum * dt_3d * &
                          weight_pres(intermediate_timestep_count)
    !$OMP END PARALLEL
#else
    IF ( ibc_p_b == 2  .AND.  sums(nzb+1,4) /= 0.0 )  THEN
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO SCHEDULE( STATIC )
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb_s_inner(j,i)+1, nzt
                d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +              &
                             ( v(k,j+1,i) - v(k,j,i) ) * ddy +              &
                             ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d &
                           * d_weight_pres
             ENDDO
          ENDDO
!
!--       Additional pressure boundary condition at the bottom boundary for
!--       inhomogeneous Prandtl layer heat fluxes and temperatures, respectively
!--       dp/dz = -(dtau13/dx + dtau23/dy) + g*pt'/pt0.
!--       This condition must not be applied at the start of a run, because
!--       then flow_statistics has not yet been called and thus sums = 0.
          DO  j = nys, nyn
             k = nzb_s_inner(j,i)
             d(k+1,j,i) = d(k+1,j,i) + (                                     &
                                         ( usws(j,i+1) - usws(j,i) ) * ddx   &
                                       + ( vsws(j+1,i) - vsws(j,i) ) * ddy   &
                                       - g * ( pt(k+1,j,i) - sums(k+1,4) ) / &
                                         sums(k+1,4)                         &
                                       ) * ddzw(k+1) * ddt_3d                &
                                         * d_weight_pres
          ENDDO
       ENDDO
       !$OMP END PARALLEL
    ELSE
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO SCHEDULE( STATIC )
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb_s_inner(j,i)+1, nzt
                d(k,j,i) = ( ( u(k,j,i+1) - u(k,j,i) ) * ddx +              &
                             ( v(k,j+1,i) - v(k,j,i) ) * ddy +              &
                             ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k) ) * ddt_3d &
                           * d_weight_pres
             ENDDO
          ENDDO
       ENDDO
       !$OMP END PARALLEL
    ENDIF

!
!-- Compute possible PE-sum of divergences for flow_statistics
    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO

    localsum = localsum + threadsum * dt_3d * &
                          weight_pres(intermediate_timestep_count)
    !$OMP END PARALLEL
#endif

!
!-- For completeness, set the divergence sum of all statistic regions to those
!-- of the total domain
    sums_divold_l(0:statistic_regions) = localsum

    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )
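
!
!-- Depending on psolver, the Poisson equation is solved either directly,
!-- via FFT and tridiagonal matrices (poisfft, poisfft_hybrid), or
!-- iteratively (sor, multigrid), in which case p_loc is advanced instead
!-- of p.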

!
!-- Compute the pressure perturbation solving the Poisson equation
    IF ( psolver(1:7) == 'poisfft' )  THEN

!
!--    Enlarge the size of tend, used as a working array for the transpositions
       IF ( nxra > nxr  .OR.  nyna > nyn  .OR.  nza > nz )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(1:nza,nys:nyna,nxl:nxra) )
       ENDIF

!
!--    Solve Poisson equation via FFT and solution of tridiagonal matrices
       IF ( psolver == 'poisfft' )  THEN
!
!--       Solver for 2d-decomposition
          CALL poisfft( d, tend )
       ELSEIF ( psolver == 'poisfft_hybrid' )  THEN
!
!--       Solver for 1d-decomposition (using MPI and OpenMP).
!--       The old hybrid-solver is still included here, as long as there
!--       are some optimization problems in poisfft
          CALL poisfft_hybrid( d )
       ENDIF

!
!--    Resize tend to its normal size
       IF ( nxra > nxr  .OR.  nyna > nyn  .OR.  nza > nz )  THEN
          DEALLOCATE( tend )
          ALLOCATE( tend(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
       ENDIF

!
!--    Store computed perturbation pressure and set boundary condition in
!--    z-direction
       !$OMP PARALLEL DO
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb+1, nzt
                tend(k,j,i) = d(k,j,i)
             ENDDO
          ENDDO
       ENDDO

!
!--    Bottom boundary:
!--    This condition is only required for internal output. The pressure
!--    gradient (dp(nzb+1)-dp(nzb))/dz is not used anywhere else.
       IF ( ibc_p_b == 1 )  THEN
!
!--       Neumann (dp/dz = 0)
          !$OMP PARALLEL DO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzb_s_inner(j,i),j,i) = tend(nzb_s_inner(j,i)+1,j,i)
             ENDDO
          ENDDO

       ELSEIF ( ibc_p_b == 2 )  THEN
!
!--       Neumann condition for inhomogeneous surfaces,
!--       here currently still in the form of a zero gradient. Actually
!--       dp/dz = -(dtau13/dx + dtau23/dy) + g*pt'/pt0 would have to be used
!--       for the computation (cf. above: computation of divergences).
          !$OMP PARALLEL DO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzb_s_inner(j,i),j,i) = tend(nzb_s_inner(j,i)+1,j,i)
             ENDDO
          ENDDO

       ELSE
!
!--       Dirichlet
          !$OMP PARALLEL DO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzb_s_inner(j,i),j,i) = 0.0
             ENDDO
          ENDDO

       ENDIF

!
!--    Top boundary
       IF ( ibc_p_t == 1 )  THEN
!
!--       Neumann
          !$OMP PARALLEL DO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = tend(nzt,j,i)
             ENDDO
          ENDDO

       ELSE
!
!--       Dirichlet
          !$OMP PARALLEL DO
          DO  i = nxlg, nxrg
             DO  j = nysg, nyng
                tend(nzt+1,j,i) = 0.0
             ENDDO
          ENDDO

       ENDIF

!
!--    Exchange boundaries for p
       CALL exchange_horiz( tend, nbgp )

    ELSEIF ( psolver == 'sor' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using SOR-Red/Black
!--    scheme
       CALL sor( d, ddzu_pres, ddzw, p_loc )
       tend = p_loc

    ELSEIF ( psolver == 'multigrid' )  THEN

!
!--    Solve Poisson equation for perturbation pressure using Multigrid scheme,
!--    array tend is used to store the residuals, logical exchange_mg is used
!--    to discern data exchange in multigrid ( 1 ghost point ) and normal grid
!--    ( nbgp ghost points ).
       CALL poismg( tend )

!
!--    Restore perturbation pressure on tend because this array is used
!--    further below to correct the velocity fields
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                tend(k,j,i) = p_loc(k,j,i)
             ENDDO
          ENDDO
       ENDDO

    ENDIF
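
!
!-- On the first substep p is initialised with the weighted solution; on
!-- later substeps the weighted contributions are added, so that p finally
!-- holds the weighted average over all Runge-Kutta substeps.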

!
!-- Store perturbation pressure on array p, used for pressure data output.
!-- Ghost layers are added in the output routines (except sor-method: see
!-- below)
    IF ( intermediate_timestep_count == 1 )  THEN
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                p(k,j,i) = tend(k,j,i) * &
                           weight_substep(intermediate_timestep_count)
             ENDDO
          ENDDO
       ENDDO
       !$OMP END PARALLEL

    ELSE
       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       DO  i = nxl-1, nxr+1
          DO  j = nys-1, nyn+1
             DO  k = nzb, nzt+1
                p(k,j,i) = p(k,j,i) + tend(k,j,i) * &
                           weight_substep(intermediate_timestep_count)
             ENDDO
          ENDDO
       ENDDO
       !$OMP END PARALLEL

    ENDIF

!
!-- SOR-method needs ghost layers for the next timestep
    IF ( psolver == 'sor' )  CALL exchange_horiz( p, nbgp )

!
!-- Correction of the provisional velocities with the current perturbation
!-- pressure just computed
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc .OR. bc_ns_cyc ) )  THEN
       volume_flow_l(1) = 0.0
       volume_flow_l(2) = 0.0
    ENDIF

    !$OMP PARALLEL PRIVATE (i,j,k)
    !$OMP DO
    DO  i = nxl, nxr
       DO  j = nys, nyn

          DO  k = nzb_w_inner(j,i)+1, nzt
             w(k,j,i) = w(k,j,i) - dt_3d *                            &
                        ( tend(k+1,j,i) - tend(k,j,i) ) * ddzu(k+1) * &
                        weight_pres(intermediate_timestep_count)
          ENDDO

          DO  k = nzb_u_inner(j,i)+1, nzt
             u(k,j,i) = u(k,j,i) - dt_3d *                      &
                        ( tend(k,j,i) - tend(k,j,i-1) ) * ddx * &
                        weight_pres(intermediate_timestep_count)
          ENDDO

          DO  k = nzb_v_inner(j,i)+1, nzt
             v(k,j,i) = v(k,j,i) - dt_3d *                      &
                        ( tend(k,j,i) - tend(k,j-1,i) ) * ddy * &
                        weight_pres(intermediate_timestep_count)
          ENDDO

!
!--       Sum up the volume flow through the right and north boundary
          IF ( conserve_volume_flow  .AND.  bc_lr_cyc  .AND.  bc_ns_cyc  .AND. &
               i == nx )  THEN
             !$OMP CRITICAL
             DO  k = nzb_2d(j,i) + 1, nzt
                volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k)
             ENDDO
             !$OMP END CRITICAL
          ENDIF
          IF ( conserve_volume_flow  .AND.  bc_ns_cyc  .AND.  bc_lr_cyc  .AND. &
               j == ny )  THEN
             !$OMP CRITICAL
             DO  k = nzb_2d(j,i) + 1, nzt
                volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k)
             ENDDO
             !$OMP END CRITICAL
          ENDIF

       ENDDO
    ENDDO
    !$OMP END PARALLEL

!
!-- Conserve the volume flow
    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .AND.  bc_ns_cyc ) )  THEN

#if defined( __parallel )
       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, &
                           MPI_SUM, comm2d, ierr )
#else
       volume_flow = volume_flow_l
#endif
       volume_flow_offset = ( volume_flow_initial - volume_flow ) / &
                            volume_flow_area

       !$OMP PARALLEL PRIVATE (i,j,k)
       !$OMP DO
       DO  i = nxl, nxr
          DO  j = nys, nyn
             DO  k = nzb_u_inner(j,i) + 1, nzt
                u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
                v(k,j,i) = v(k,j,i) + volume_flow_offset(2)
             ENDDO
          ENDDO
       ENDDO
       !$OMP END PARALLEL

    ENDIF

!
!-- Exchange of boundaries for the velocities
    CALL exchange_horiz( u, nbgp )
    CALL exchange_horiz( v, nbgp )
    CALL exchange_horiz( w, nbgp )

!
!-- Compute the divergence of the corrected velocity field,
!-- a possible PE-sum is computed in flow_statistics
    CALL cpu_log( log_point_s(1), 'divergence', 'start' )
    sums_divnew_l = 0.0
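
!
!-- After the correction the velocity field should be (nearly) divergence
!-- free; the divergence computed below therefore only serves as a check,
!-- its magnitude being limited by the accuracy of the pressure solver.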

!
!-- d must be reset to zero because it can contain nonzero values below the
!-- topography
    IF ( topography /= 'flat' )  d = 0.0

    localsum  = 0.0
    threadsum = 0.0

    !$OMP PARALLEL PRIVATE (i,j,k) FIRSTPRIVATE(threadsum) REDUCTION(+:localsum)
    !$OMP DO SCHEDULE( STATIC )
#if defined( __ibm )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb_s_inner(j,i)+1, nzt
             d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
                        ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
                        ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k)
          ENDDO
          DO  k = nzb+1, nzt
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
#else
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb_s_inner(j,i)+1, nzt
             d(k,j,i) = ( u(k,j,i+1) - u(k,j,i) ) * ddx + &
                        ( v(k,j+1,i) - v(k,j,i) ) * ddy + &
                        ( w(k,j,i) - w(k-1,j,i) ) * ddzw(k)
             threadsum = threadsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
#endif

    localsum = localsum + threadsum
    !$OMP END PARALLEL

!
!-- For completeness, set the divergence sum of all statistic regions to those
!-- of the total domain
    sums_divnew_l(0:statistic_regions) = localsum

    CALL cpu_log( log_point_s(1), 'divergence', 'stop' )

    CALL cpu_log( log_point(8), 'pres', 'stop' )

 END SUBROUTINE pres