source: palm/trunk/SOURCE/inflow_turbulence.f90 @ 622

Last change on this file: r622, checked in by raasch on 2010-12-10

New:
---

Optional barriers included in order to speed up the collective operations
MPI_ALLTOALL and MPI_ALLREDUCE. This feature is controlled by the new initial
parameter collective_wait. Default is .FALSE., but .TRUE. on SGI-type
systems. (advec_particles, advec_s_bc, buoyancy, check_for_restart,
cpu_statistics, data_output_2d, data_output_ptseries, flow_statistics,
global_min_max, inflow_turbulence, init_3d_model, init_particles, init_pegrid,
init_slope, parin, pres, poismg, set_particle_attributes, timestep,
read_var_list, user_statistics, write_compressed, write_var_list)
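
The pattern is identical at every call site; a minimal sketch (local_sum,
global_sum and the count n are placeholders, the actual communicator and
reduction arguments differ per routine):

    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( local_sum, global_sum, n, MPI_REAL, MPI_SUM, &
                        comm2d, ierr )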

Adjustments for Kyushu Univ. (lcrte, ibmku). Concerning hybrid
(MPI/OpenMP) runs, the number of OpenMP threads per MPI task can now
be given as an argument to the mrun option -O. (mbuild, mrun, subjob)
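
For example, a hybrid run using eight OpenMP threads per MPI task might be
requested as follows (hypothetical invocation, all other mrun options elided):

    mrun ... -O 8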

Changed:


Initialization of the module command changed for SGI-ICE/lcsgi (mbuild, subjob)

Errors:


  • Property svn:keywords set to Id
File size: 6.4 KB
 SUBROUTINE inflow_turbulence

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! optional barriers included in order to speed up collective operations
!
! Former revisions:
! -----------------
! $Id: inflow_turbulence.f90 622 2010-12-10 08:08:13Z raasch $
!
! 222 2009-01-12 16:04:16Z letzel
! Bugfix for nonparallel execution
!
! Initial version (2008/03/07)
!
! Description:
! ------------
! Imposing turbulence at the respective inflow using the turbulence
! recycling method of Kataoka and Mizuno (2002).
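! Deviations of u, v, w, pt and e from their horizontal average at the
! recycling plane are scaled with a height-dependent damping factor and
! imposed at the inflow boundary.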
!------------------------------------------------------------------------------!

    USE arrays_3d
    USE control_parameters
    USE cpulog
    USE grid_variables
    USE indices
    USE interfaces
    USE pegrid


    IMPLICIT NONE

    INTEGER ::  i, imax, j, k, ngp_ifd, ngp_pr

    REAL, DIMENSION(1:2) ::  volume_flow_l, volume_flow_offset
    REAL, DIMENSION(nzb:nzt+1,5) ::  avpr, avpr_l
    REAL, DIMENSION(nzb:nzt+1,nys-1:nyn+1,5) ::  inflow_dist
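!
!-- The second index of avpr(_l) and the third index of inflow_dist denote
!-- the quantity: 1 = u, 2 = v, 3 = w, 4 = pt, 5 = e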

    CALL cpu_log( log_point(40), 'inflow_turbulence', 'start' )

!
!-- Carry out horizontal averaging in the recycling plane
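!-- (ngp_pr = number of elements in one set of five vertical profiles,
!-- ngp_ifd = number of elements of the local inflow_dist array)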
    avpr_l = 0.0
    ngp_pr = ( nzt - nzb + 2 ) * 5
    ngp_ifd = ngp_pr * ( nyn - nys + 3 )

!
!-- First, local averaging within the recycling domain
    IF ( recycling_plane >= nxl )  THEN

       imax = MIN( nxr, recycling_plane )

       DO  i = nxl, imax
          DO  j = nys, nyn
             DO  k = nzb, nzt+1

                avpr_l(k,1) = avpr_l(k,1) + u(k,j,i)
                avpr_l(k,2) = avpr_l(k,2) + v(k,j,i)
                avpr_l(k,3) = avpr_l(k,3) + w(k,j,i)
                avpr_l(k,4) = avpr_l(k,4) + pt(k,j,i)
                avpr_l(k,5) = avpr_l(k,5) + e(k,j,i)

             ENDDO
          ENDDO
       ENDDO

    ENDIF

!    WRITE (9,*) '*** averaged profiles avpr_l'
!    DO  k = nzb, nzt+1
!       WRITE (9,'(F5.1,1X,F5.1,1X,F5.1,1X,F6.1,1X,F7.2)') avpr_l(k,1),avpr_l(k,2),avpr_l(k,3),avpr_l(k,4),avpr_l(k,5)
!    ENDDO
!    WRITE (9,*) ' '

#if defined( __parallel )
!
!-- Now, averaging over all PEs
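!-- (an optional MPI_BARRIER synchronizes the PEs first, which can speed up
!-- the collective operation; see initial parameter collective_wait)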
    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( avpr_l(nzb,1), avpr(nzb,1), ngp_pr, MPI_REAL, MPI_SUM, &
                        comm2d, ierr )
#else
    avpr = avpr_l
#endif

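!
!-- Normalize with the total number of horizontal grid points that entered
!-- the sum: ny+1 points in y times recycling_plane+1 points in x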
    avpr = avpr / ( ( ny + 1 ) * ( recycling_plane + 1 ) )

!    WRITE (9,*) '*** averaged profiles'
!    DO  k = nzb, nzt+1
!       WRITE (9,'(F5.1,1X,F5.1,1X,F5.1,1X,F6.1,1X,F7.2)') avpr(k,1),avpr(k,2),avpr(k,3),avpr(k,4),avpr(k,5)
!    ENDDO
!    WRITE (9,*) ' '

!
!-- Calculate the disturbances at the recycling plane
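!-- (deviations from the averaged profiles; u is taken one grid index
!-- further downstream, matching the inflow boundary, where u is imposed at
!-- i = 0 while v, w, pt and e are imposed at the ghost point i = -1)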
    i = recycling_plane

#if defined( __parallel )
    IF ( myidx == id_recycling )  THEN

       DO  j = nys-1, nyn+1
          DO  k = nzb, nzt+1

             inflow_dist(k,j,1) = u(k,j,i+1) - avpr(k,1)
             inflow_dist(k,j,2) = v(k,j,i)   - avpr(k,2)
             inflow_dist(k,j,3) = w(k,j,i)   - avpr(k,3)
             inflow_dist(k,j,4) = pt(k,j,i)  - avpr(k,4)
             inflow_dist(k,j,5) = e(k,j,i)   - avpr(k,5)

          ENDDO
       ENDDO

    ENDIF
#else
    DO  j = nys-1, nyn+1
       DO  k = nzb, nzt+1

          inflow_dist(k,j,1) = u(k,j,i+1) - avpr(k,1)
          inflow_dist(k,j,2) = v(k,j,i)   - avpr(k,2)
          inflow_dist(k,j,3) = w(k,j,i)   - avpr(k,3)
          inflow_dist(k,j,4) = pt(k,j,i)  - avpr(k,4)
          inflow_dist(k,j,5) = e(k,j,i)   - avpr(k,5)

       ENDDO
    ENDDO
#endif

!
!-- For parallel runs, send the disturbances to the respective inflow PE
#if defined( __parallel )
    IF ( myidx == id_recycling  .AND.  myidx /= id_inflow )  THEN

       CALL MPI_SEND( inflow_dist(nzb,nys-1,1), ngp_ifd, MPI_REAL, &
                      id_inflow, 1, comm1dx, ierr )

    ELSEIF ( myidx /= id_recycling  .AND.  myidx == id_inflow )  THEN

       inflow_dist = 0.0
       CALL MPI_RECV( inflow_dist(nzb,nys-1,1), ngp_ifd, MPI_REAL, &
                      id_recycling, 1, comm1dx, status, ierr )

    ENDIF
#endif

!
!-- Add the disturbance at the inflow
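!-- (left boundary PEs only; disturbances are scaled with the
!-- height-dependent inflow_damping_factor, and e is limited to
!-- non-negative values)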
    IF ( nxl == 0 )  THEN

       DO  j = nys-1, nyn+1
          DO  k = nzb, nzt+1

!             WRITE (9,*) 'j=',j,' k=',k
!             WRITE (9,*) 'mean_u = ', mean_inflow_profiles(k,1), ' dist_u = ',&
!                         inflow_dist(k,j,1)
!             WRITE (9,*) 'mean_v = ', mean_inflow_profiles(k,2), ' dist_v = ',&
!                         inflow_dist(k,j,2)
!             WRITE (9,*) 'mean_w = 0.0', ' dist_w = ',&
!                         inflow_dist(k,j,3)
!             WRITE (9,*) 'mean_pt = ', mean_inflow_profiles(k,4), ' dist_pt = ',&
!                         inflow_dist(k,j,4)
!             WRITE (9,*) 'mean_e = ', mean_inflow_profiles(k,5), ' dist_e = ',&
!                         inflow_dist(k,j,5)
             u(k,j,0)   = mean_inflow_profiles(k,1) + &
                          inflow_dist(k,j,1) * inflow_damping_factor(k)
             v(k,j,-1)  = mean_inflow_profiles(k,2) + &
                          inflow_dist(k,j,2) * inflow_damping_factor(k)
             w(k,j,-1)  = inflow_dist(k,j,3) * inflow_damping_factor(k)
             pt(k,j,-1) = mean_inflow_profiles(k,4) + &
                          inflow_dist(k,j,4) * inflow_damping_factor(k)
             e(k,j,-1)  = mean_inflow_profiles(k,5) + &
                          inflow_dist(k,j,5) * inflow_damping_factor(k)
             e(k,j,-1)  = MAX( e(k,j,-1), 0.0 )

          ENDDO
       ENDDO

    ENDIF

!
!-- Conserve the volume flow at the inflow in order to avoid generation of
!-- waves in the stable layer
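!-- NOTE: this correction is currently deactivated (code commented out)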
!    IF ( conserve_volume_flow  .AND.  inflow_l )  THEN

!       volume_flow(1)   = 0.0
!       volume_flow_l(1) = 0.0

!       i = 0

!       DO  j = nys, nyn
!
!--       Sum up the volume flow through the left (inflow) boundary
!          DO  k = nzb_2d(j,i) + 1, nzt
!             volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzu(k)
!          ENDDO
!       ENDDO

#if defined( __parallel )
!       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
!       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
!                           MPI_SUM, comm1dy, ierr )
#else
!       volume_flow = volume_flow_l
#endif
!       volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) )    &
!                               / volume_flow_area(1)

!       DO  j = nys-1, nyn+1
!          DO  k = nzb_v_inner(j,i) + 1, nzt
!             u(k,j,i) = u(k,j,i) + volume_flow_offset(1)
!          ENDDO
!       ENDDO

!    ENDIF

    CALL cpu_log( log_point(40), 'inflow_turbulence', 'stop' )


 END SUBROUTINE inflow_turbulence