source: palm/trunk/SOURCE/palm.f90 @ 1017

 PROGRAM palm

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: palm.f90 1017 2012-09-27 11:28:50Z raasch $
!
! 1015 2012-09-27 09:23:24Z raasch
! Version number changed from 3.8 to 3.8a.
! OpenACC statements added + code changes required for GPU optimization
!
! 849 2012-03-15 10:35:09Z raasch
! write_particles renamed lpm_write_restart_file
!
! 759 2011-09-15 13:58:31Z raasch
! Splitting of parallel I/O, cpu measurement for write_3d_binary and opening
! of unit 14 moved to here
!
! 495 2010-03-02 00:40:15Z raasch
! Particle data for restart runs are only written if write_binary=.T..
!
! 215 2008-11-18 09:54:31Z raasch
! Initialization of coupled runs modified for MPI-1 and moved to external
! subroutine init_coupling
!
! 197 2008-09-16 15:29:03Z raasch
! Workaround for getting information about the coupling mode
!
! 108 2007-08-24 15:10:38Z letzel
! Get coupling mode from environment variable, change location of debug output
!
! 75 2007-03-22 09:54:05Z raasch
! __vtk directives removed, write_particles is called only in case of particle
! advection switched on, open unit 9 for debug output,
! setting of palm version moved from modules to here
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.10  2006/08/04 14:53:12  raasch
! Distribution of run description header removed, call of header moved behind
! init_3d_model
!
! Revision 1.2  2001/01/25 07:15:06  raasch
! Program name changed to PALM, module test_variables removed.
! Initialization of dvrp logging as well as exit of dvrp moved to new
! subroutines init_dvrp_logging and close_dvrp (file init_dvrp.f90)
!
! Revision 1.1  1997/07/24 11:23:35  raasch
! Initial revision
!
!
! Description:
! ------------
! Large-Eddy Simulation (LES) model for the convective boundary layer,
! optimized for use on parallel machines (implementation realized using the
! Message Passing Interface (MPI)). The model can also be run on vector machines
! (less well optimized) and workstations. Versions for the different types of
! machines are controlled via cpp-directives.
! Model runs are only feasible using the ksh-script mrun.
!------------------------------------------------------------------------------!


    USE arrays_3d
    USE constants
    USE control_parameters
    USE cpulog
    USE dvrp_variables
    USE grid_variables
    USE indices
    USE interfaces
    USE model_1d
    USE particle_attributes
    USE pegrid
    USE spectrum
    USE statistics

#if defined( __openacc )
    USE OPENACC
#endif

    IMPLICIT NONE

!
!-- Local variables
    CHARACTER (LEN=9) ::  time_to_string
    CHARACTER (LEN=1) ::  cdum
    INTEGER           ::  i, run_description_header_i(80)
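!
!-- Dummy array, only used to make OpenACC attach the accelerator devices via
!-- the data region created below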
#if defined( __openacc )
    REAL, DIMENSION(100) ::  acc_dum
#endif

    version = 'PALM 3.8a'

#if defined( __parallel )
!
!-- MPI initialisation. comm2d is preliminarily set here, because it will be
!-- defined in init_pegrid but is already used before that in cpu_log.
    CALL MPI_INIT( ierr )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
    comm_palm = MPI_COMM_WORLD
    comm2d    = MPI_COMM_WORLD

!
!-- Initialize PE topology in case of coupled runs
    CALL init_coupling
#endif

#if defined( __openacc )
!
!-- Get the number of accelerator boards per node and assign the MPI processes
!-- to these boards
    num_acc_per_node  = ACC_GET_NUM_DEVICES( ACC_DEVICE_NVIDIA )
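!
!-- Each MPI process selects a device based on its rank modulo the number of
!-- devices per node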
    acc_rank = MOD( myid, num_acc_per_node )
    CALL ACC_SET_DEVICE_NUM ( acc_rank, ACC_DEVICE_NVIDIA )
!
!-- Test output (to be removed later)
    WRITE (*,'(A,I4,A,I3,A,I3,A,I3)') '*** Connect MPI-Task ', myid,' to CPU ',&
                                      acc_rank, ' Devices: ', num_acc_per_node,&
                                      ' connected to:',                        &
                                      ACC_GET_DEVICE_NUM( ACC_DEVICE_NVIDIA )
#endif
!
!-- Ensure that OpenACC first attaches the GPU devices by copying a dummy
!-- array within a data region
    !$acc data copyin( acc_dum )

!
!-- Initialize measuring of the CPU-time remaining for the run
    CALL local_tremain_ini

!
!-- Start of total CPU time measuring.
    CALL cpu_log( log_point(1), 'total', 'start' )
    CALL cpu_log( log_point(2), 'initialisation', 'start' )

!
!-- Open a file for debug output
    WRITE (myid_char,'(''_'',I4.4)')  myid
    OPEN( 9, FILE='DEBUG'//TRIM( coupling_char )//myid_char, FORM='FORMATTED' )

!
!-- Initialize dvrp logging. Also, one PE may be split off from the global
!-- communicator to do the dvrp output. In that case, the number of PEs
!-- available for PALM is reduced by one and the communicator comm_palm is
!-- changed accordingly.
#if defined( __parallel )
    CALL MPI_COMM_RANK( comm_palm, myid, ierr )
!
!-- TEST OUTPUT (TO BE REMOVED)
    WRITE(9,*) '*** coupling_mode = "', TRIM( coupling_mode ), '"'
    CALL LOCAL_FLUSH( 9 )
    IF ( TRIM( coupling_mode ) /= 'uncoupled' )  THEN
       PRINT*, '*** PE', myid, ' Global target PE:', target_id, &
               TRIM( coupling_mode )
    ENDIF
#endif

    CALL init_dvrp_logging

!
!-- Read control parameters from NAMELIST files and from environment variables
    CALL parin

!
!-- Determine processor topology and local array indices
    CALL init_pegrid

!
!-- Generate grid parameters
    CALL init_grid

!
!-- Check control parameters and deduce further quantities
    CALL check_parameters


!
!-- Initialize all necessary variables
    CALL init_3d_model

!
!-- Output of program header
    IF ( myid == 0 )  CALL header

    CALL cpu_log( log_point(2), 'initialisation', 'stop' )

!
!-- Set start time in format hh:mm:ss
    simulated_time_chr = time_to_string( simulated_time )

!
!-- If required, output of initial arrays
    IF ( do2d_at_begin )  THEN
       CALL data_output_2d( 'xy', 0 )
       CALL data_output_2d( 'xz', 0 )
       CALL data_output_2d( 'yz', 0 )
    ENDIF
    IF ( do3d_at_begin )  THEN
       CALL data_output_3d( 0 )
    ENDIF

!
!-- Declare and initialize variables in the accelerator memory with their
!-- host values
    !$acc  data copyin( diss, e, e_p, kh, km, pt, pt_p, q, ql, tend, te_m, tpt_m, tu_m, tv_m, tw_m, u, u_p, v, vpt, v_p, w, w_p )          &
    !$acc       copyin( ddzu, ddzw, dd2zu, l_grid, l_wall, ptdf_x, ptdf_y, pt_init, rdf, rdf_sc, ug, vg, zu, zw )   &
    !$acc       copyin( hom, qs, qsws, qswst, rif, rif_wall, shf, ts, tswst, us, usws, uswst, vsws, vswst, z0, z0h )      &
    !$acc       copyin( fxm, fxp, fym, fyp, fwxm, fwxp, fwym, fwyp, nzb_diff_s_inner, nzb_diff_s_outer, nzb_diff_u )       &
    !$acc       copyin( nzb_diff_v, nzb_s_inner, nzb_s_outer, nzb_u_inner )    &
    !$acc       copyin( nzb_u_outer, nzb_v_inner, nzb_v_outer, nzb_w_inner )   &
    !$acc       copyin( nzb_w_outer, wall_heatflux, wall_e_x, wall_e_y, wall_u, wall_v, wall_w_x, wall_w_y, wall_flags_0 )
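!
!-- This data region is closed again by the matching "!$acc end data"
!-- directive near the end of the program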
!
!-- Integration of the model equations using the timestep scheme
    CALL time_integration

!
!-- If required, write binary data for restart runs
    IF ( write_binary(1:4) == 'true' )  THEN

       CALL cpu_log( log_point(22), 'write_3d_binary', 'start' )

       CALL check_open( 14 )

       DO  i = 0, io_blocks-1
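!
!--       In each pass of the loop, only the PEs of the current I/O group
!--       write their data; all PEs synchronize at the barrier below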
          IF ( i == io_group )  THEN
!
!--          Write flow field data
             CALL write_3d_binary
          ENDIF
#if defined( __parallel )
          CALL MPI_BARRIER( comm2d, ierr )
#endif
       ENDDO

       CALL cpu_log( log_point(22), 'write_3d_binary', 'stop' )

!
!--    If required, write particle data
       IF ( particle_advection )  CALL lpm_write_restart_file
    ENDIF

!
!-- If required, repeat output of header including the required CPU-time
    IF ( myid == 0 )  CALL header
!
!-- If required, carry out final user-defined actions and last actions on the
!-- open files, then close the files. Unit 14 was opened in write_3d_binary
!-- but is closed here, to allow writing on this unit in routine
!-- user_last_actions.
    CALL cpu_log( log_point(4), 'last actions', 'start' )
    DO  i = 0, io_blocks-1
       IF ( i == io_group )  THEN
          CALL user_last_actions
          IF ( write_binary(1:4) == 'true' )  CALL close_file( 14 )
       ENDIF
#if defined( __parallel )
       CALL MPI_BARRIER( comm2d, ierr )
#endif
    ENDDO
    CALL close_file( 0 )
    CALL close_dvrp
    CALL cpu_log( log_point(4), 'last actions', 'stop' )

#if defined( __mpi2 )
!
!-- Test the exchange via the intercommunicator in case of an MPI-2 coupling
    IF ( coupling_mode == 'atmosphere_to_ocean' )  THEN
       i = 12345 + myid
       CALL MPI_SEND( i, 1, MPI_INTEGER, myid, 11, comm_inter, ierr )
    ELSEIF ( coupling_mode == 'ocean_to_atmosphere' )  THEN
       CALL MPI_RECV( i, 1, MPI_INTEGER, myid, 11, comm_inter, status, ierr )
       PRINT*, '### myid: ', myid, '   received from atmosphere:  i = ', i
    ENDIF
#endif

!
!-- Close the OpenACC data regions (model variables and the dummy region)
    !$acc end data
    !$acc end data

!
!-- Take final CPU-time for CPU-time analysis
    CALL cpu_log( log_point(1), 'total', 'stop' )
    CALL cpu_statistics


#if defined( __parallel )
    CALL MPI_FINALIZE( ierr )
#endif
 END PROGRAM palm