!> @file restart_data_mpi_io_mod.f90
!--------------------------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the terms of the GNU General
! Public License as published by the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
! implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
! Public License for more details.
!
! You should have received a copy of the GNU General Public License along with PALM. If not, see
! <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2020 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------------------------!
!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: restart_data_mpi_io_mod.f90 4628 2020-07-29 07:23:03Z raasch $
! extensions required for MPI-I/O of particle data to restart files
!
! 4622 2020-07-23 09:02:23Z raasch
! unused variables removed
!
! 4621 2020-07-23 08:15:59Z raasch
! bugfixes for serial (non-parallel) mode
!
! 4619 2020-07-22 13:21:28Z raasch
! unused variable removed
!
! 4617 2020-07-22 09:48:50Z raasch
! Cyclic fill mode implemented
!
! 4598 2020-07-10 10:13:23Z suehring
! Bugfix in treatment of 3D soil arrays
!
! 4591 2020-07-06 15:56:08Z raasch
! File re-formatted to follow the PALM coding standard
!
! 4539 2020-05-18 14:05:17Z raasch
! Checks added, if index limits in header are exceeded
! Bugfix in rrd_mpi_io_int_2d
!
! 4536 2020-05-17 17:24:13Z raasch
! Messages and debug output converted to PALM routines
!
! 4534 2020-05-14 18:35:22Z raasch
! I/O on reduced number of cores added (using shared memory MPI)
!
! 4500 2020-04-17 10:12:45Z suehring
! Fix too long lines
!
! 4498 2020-04-15 14:26:31Z raasch
! Bugfix for creation of filetypes, argument removed from rd_mpi_io_open
!
! 4497 2020-04-15 10:20:51Z raasch
! Last bugfix deactivated because of compile problems
!
! 4496 2020-04-15 08:37:26Z raasch
! Problem with posix read arguments for surface data fixed
!
! 4495 2020-04-13 20:11:20Z raasch
! Initial version (K. Ketelsen), adjusted to PALM formatting standards (S. Raasch)
!
!
! Description:
! ------------
!> Routines for restart data handling using MPI-IO.
!--------------------------------------------------------------------------------------------------!
 MODULE restart_data_mpi_io_mod

#if defined( __parallel )
#if defined( __mpifh )
    INCLUDE "mpif.h"
#else
    USE MPI
#endif
#else
    USE posix_interface,                                                                           &
        ONLY:  posix_close,                                                                       &
               posix_lseek,                                                                       &
               posix_open,                                                                        &
               posix_read,                                                                        &
               posix_write
#endif
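!
!-- Note on the two build modes: if the __parallel preprocessor switch is set, all file access in
!-- this module goes through MPI-IO (using the MPI module, or mpif.h if __mpifh is set). Without
!-- __parallel, the same read/write interfaces fall back to plain POSIX I/O via posix_interface.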

    USE, INTRINSIC ::  ISO_C_BINDING

    USE control_parameters,                                                                        &
        ONLY:  debug_output,                                                                      &
               debug_string,                                                                      &
               include_total_domain_boundaries,                                                   &
               message_string,                                                                    &
               restart_data_format_input,                                                         &
               restart_data_format_output,                                                        &
               restart_file_size

    USE exchange_horiz_mod,                                                                       &
        ONLY:  exchange_horiz,                                                                    &
               exchange_horiz_2d

    USE indices,                                                                                  &
        ONLY:  nbgp,                                                                              &
               nnx,                                                                               &
               nny,                                                                               &
               nx,                                                                                &
               nxl,                                                                               &
               nxlg,                                                                              &
               nx_on_file,                                                                        &
               nxr,                                                                               &
               nxrg,                                                                              &
               ny,                                                                                &
               nyn,                                                                               &
               nyng,                                                                              &
               ny_on_file,                                                                        &
               nys,                                                                               &
               nysg,                                                                              &
               nz,                                                                                &
               nzb,                                                                               &
               nzt

    USE kinds

    USE particle_attributes,                                                                      &
        ONLY:  grid_particles,                                                                    &
               particles,                                                                         &
               particle_type,                                                                     &
               prt_count,                                                                         &
               zero_particle

    USE pegrid,                                                                                   &
        ONLY:  comm1dx,                                                                           &
               comm1dy,                                                                           &
               comm2d,                                                                            &
               communicator_configurations,                                                       &
               myid,                                                                              &
               myidx,                                                                             &
               myidy,                                                                             &
               npex,                                                                              &
               npey,                                                                              &
               numprocs,                                                                          &
               pdims

    USE shared_memory_io_mod,                                                                     &
        ONLY:  domain_decomposition_grid_features,                                                &
               sm_class


    IMPLICIT NONE

    CHARACTER(LEN=128) ::  io_file_name  !< internal variable to communicate filename between different subroutines

#if defined( __parallel )
    INTEGER(iwp)            ::  ierr                              !< error status of MPI-calls
    INTEGER(iwp), PARAMETER ::  rd_offset_kind = MPI_OFFSET_KIND  !< address or offset kind
    INTEGER(iwp), PARAMETER ::  rd_status_size = MPI_STATUS_SIZE  !<
#else
    INTEGER(iwp), PARAMETER ::  rd_offset_kind = C_SIZE_T  !<
    INTEGER(iwp), PARAMETER ::  rd_status_size = 1         !< not required in sequential mode
#endif

    INTEGER(iwp) ::  debug_level = 1  !< TODO: replace with standard debug output steering

    INTEGER(iwp) ::  comm_io  !< communicator for MPI-IO
    INTEGER(iwp) ::  fh = -1  !< MPI-IO file handle
#if defined( __parallel )
    INTEGER(iwp) ::  fhs = -1  !< MPI-IO file handle to open file with comm2d always
#endif
    INTEGER(iwp) ::  ft_surf = -1  !< MPI filetype surface data
#if defined( __parallel )
    INTEGER(iwp) ::  ft_2di_nb     !< MPI filetype 2D array INTEGER, no outer boundary
    INTEGER(iwp) ::  ft_2d         !< MPI filetype 2D array REAL with outer boundaries
    INTEGER(iwp) ::  ft_3d         !< MPI filetype 3D array REAL with outer boundaries
    INTEGER(iwp) ::  ft_3di4 = -1  !< MPI filetype 3D array INTEGER*4
    INTEGER(iwp) ::  ft_3di8 = -1  !< MPI filetype 3D array INTEGER*8
    INTEGER(iwp) ::  ft_3dsoil     !< MPI filetype for 3d-soil array
#endif
    INTEGER(iwp) ::  glo_start  !< global start index on this PE
#if defined( __parallel )
    INTEGER(iwp) ::  local_start  !<
#endif
    INTEGER(iwp) ::  nr_iope  !<
    INTEGER(iwp) ::  nr_val   !< local number of values in x and y direction
#if defined( __parallel )
    INTEGER(iwp) ::  win_2di       !<
    INTEGER(iwp) ::  win_2dr       !<
    INTEGER(iwp) ::  win_3di4 = -1 !<
    INTEGER(iwp) ::  win_3di8 = -1 !<
    INTEGER(iwp) ::  win_3dr       !<
    INTEGER(iwp) ::  win_3ds       !<
    INTEGER(iwp) ::  win_surf = -1 !<
#endif
    INTEGER(iwp) ::  total_number_of_surface_values  !< total number of values for one variable

    INTEGER(KIND=rd_offset_kind) ::  array_position   !<
    INTEGER(KIND=rd_offset_kind) ::  header_position  !<

    INTEGER(iwp), DIMENSION(:,:), POINTER, CONTIGUOUS ::  array_2di  !<

    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_end_index     !<
    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_global_start  !<
    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_start_index   !<

    INTEGER(isp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3di4  !<
    INTEGER(idp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3di8  !<

    LOGICAL ::  all_pes_write                 !< all PEs have data to write
    LOGICAL ::  filetypes_created             !<
    LOGICAL ::  io_on_limited_cores_per_node  !< switch to shared memory MPI-IO
    LOGICAL ::  rd_flag                       !< file is opened for read
    LOGICAL ::  wr_flag                       !< file is opened for write

#if defined( __parallel )
    REAL(wp), DIMENSION(:), POINTER, CONTIGUOUS     ::  array_1d       !<
#endif
    REAL(wp), DIMENSION(:,:), POINTER, CONTIGUOUS   ::  array_2d       !<
    REAL(wp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3d       !<
    REAL(wp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3d_soil  !<

!
!-- Variable to store the grid features (index bounds) of the temporary arrays that are used
!-- to read and write the restart data. They differ depending on whether the outer boundary of
!-- the total domain is contained in the restart data or not. iog stands for IO-grid.
    TYPE(domain_decomposition_grid_features) ::  iog  !<

!
!-- General header (first 32 bytes in restart file)
    TYPE general_header
       INTEGER(iwp) ::  endian         !< little endian (1) or big endian (2) internal format
       INTEGER(iwp) ::  i_outer_bound  !< if 1, outer boundaries are stored in restart file
       INTEGER(iwp) ::  nr_arrays      !< number of arrays in restart files
       INTEGER(iwp) ::  nr_char        !< number of text string entries in header
       INTEGER(iwp) ::  nr_int         !< number of INTEGER entries in header
       INTEGER(iwp) ::  nr_real        !< number of REAL entries in header
       INTEGER(iwp) ::  total_nx       !< total number of points in x-direction
       INTEGER(iwp) ::  total_ny       !< total number of points in y-direction
    END TYPE general_header
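
!
!-- On-disk layout implied by the read/write code below: the general header starts at offset 0,
!-- followed by the fixed-size header tables int_names/int_values, text_lines,
!-- real_names/real_values, and array_names/array_offset. The data of the 2d/3d arrays
!-- themselves start at the fixed offset 65536 (see array_position in rd_mpi_io_open).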

    TYPE(general_header), TARGET ::  tgh  !<

    TYPE(sm_class) ::  sm_io  !<

!
!-- Declaration of variables for file header section
    INTEGER(KIND=rd_offset_kind) ::  header_int_index
    INTEGER, PARAMETER           ::  max_nr_int = 256
    CHARACTER(LEN=32), DIMENSION(max_nr_int) ::  int_names
    INTEGER(KIND=iwp), DIMENSION(max_nr_int) ::  int_values

    INTEGER(KIND=rd_offset_kind) ::  header_char_index
    INTEGER, PARAMETER           ::  max_nr_char = 128
    CHARACTER(LEN=128), DIMENSION(max_nr_char) ::  text_lines

    INTEGER(KIND=rd_offset_kind) ::  header_real_index
    INTEGER, PARAMETER           ::  max_nr_real = 256
    CHARACTER(LEN=32), DIMENSION(max_nr_real) ::  real_names
    REAL(KIND=wp), DIMENSION(max_nr_real)     ::  real_values

    INTEGER(KIND=rd_offset_kind) ::  header_array_index
    INTEGER, PARAMETER           ::  max_nr_arrays = 600
    CHARACTER(LEN=32), DIMENSION(max_nr_arrays)            ::  array_names
    INTEGER(KIND=rd_offset_kind), DIMENSION(max_nr_arrays) ::  array_offset

!
!-- Variables to handle the cyclic fill initialization mode
    INTEGER ::  comm_cyclic_fill  !< communicator for cyclic fill PEs
#if defined( __parallel )
    INTEGER ::  rmawin_2di  !< RMA window 2d INTEGER
    INTEGER ::  rmawin_2d   !< RMA window 2d REAL
    INTEGER ::  rmawin_3d   !< RMA window 3d
#endif

    INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) ::  remote_pe
    INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) ::  remote_pe_s
    INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) ::  rma_offset
    INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) ::  rma_offset_s
    INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) ::  rmabuf_2di

    LOGICAL ::  cyclic_fill_mode             !< arrays are filled cyclically with data from prerun
    LOGICAL ::  pe_active_for_read = .TRUE.  !< this PE is active for reading data from prerun or
                                             !< restart run. For restarts all PEs are active.

    REAL(wp), ALLOCATABLE, DIMENSION(:,:)   ::  rmabuf_2d
    REAL(wp), ALLOCATABLE, DIMENSION(:,:,:) ::  rmabuf_3d

    TYPE(domain_decomposition_grid_features) ::  mainrun_grid  !< grid variables for the main run
    TYPE(domain_decomposition_grid_features) ::  prerun_grid   !< grid variables for the prerun

!
!-- MPI_INTEGER8 is not standard MPI, but is supported on most MPI distributions.
!-- If not supported, a workaround could be enabled with the following preprocessor directive:
!#if defined( __NO_INTEGER8 )
!    INTEGER ::  MPI_INTEGER8  !< MPI data type INTEGER8
!#endif

    SAVE

    PRIVATE

    PUBLIC  restart_file_size, total_number_of_surface_values

!
!-- PALM interfaces
    INTERFACE rd_mpi_io_check_array
       MODULE PROCEDURE rd_mpi_io_check_array
    END INTERFACE rd_mpi_io_check_array

    INTERFACE rd_mpi_io_close
       MODULE PROCEDURE rd_mpi_io_close
    END INTERFACE rd_mpi_io_close

    INTERFACE rd_mpi_io_check_open
       MODULE PROCEDURE rd_mpi_io_check_open
    END INTERFACE rd_mpi_io_check_open

    INTERFACE rd_mpi_io_open
       MODULE PROCEDURE rd_mpi_io_open
    END INTERFACE rd_mpi_io_open

    INTERFACE rrd_mpi_io
       MODULE PROCEDURE rrd_mpi_io_char
       MODULE PROCEDURE rrd_mpi_io_int
       MODULE PROCEDURE rrd_mpi_io_int_2d
       MODULE PROCEDURE rrd_mpi_io_int4_3d
       MODULE PROCEDURE rrd_mpi_io_int8_3d
       MODULE PROCEDURE rrd_mpi_io_logical
       MODULE PROCEDURE rrd_mpi_io_real
       MODULE PROCEDURE rrd_mpi_io_real_2d
       MODULE PROCEDURE rrd_mpi_io_real_3d
       MODULE PROCEDURE rrd_mpi_io_real_3d_soil
    END INTERFACE rrd_mpi_io

    INTERFACE rrd_mpi_io_global_array
       MODULE PROCEDURE rrd_mpi_io_global_array_int_1d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_1d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_2d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_3d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_4d
    END INTERFACE rrd_mpi_io_global_array

    INTERFACE rrd_mpi_io_surface
       MODULE PROCEDURE rrd_mpi_io_surface
       MODULE PROCEDURE rrd_mpi_io_surface_2d
    END INTERFACE rrd_mpi_io_surface

    INTERFACE rrd_mpi_io_particles
       MODULE PROCEDURE rrd_mpi_io_particles
    END INTERFACE rrd_mpi_io_particles

    INTERFACE rd_mpi_io_particle_filetypes
       MODULE PROCEDURE rd_mpi_io_particle_filetypes
    END INTERFACE rd_mpi_io_particle_filetypes

    INTERFACE rd_mpi_io_surface_filetypes
       MODULE PROCEDURE rd_mpi_io_surface_filetypes
    END INTERFACE rd_mpi_io_surface_filetypes

    INTERFACE wrd_mpi_io
       MODULE PROCEDURE wrd_mpi_io_char
       MODULE PROCEDURE wrd_mpi_io_int
       MODULE PROCEDURE wrd_mpi_io_int_2d
       MODULE PROCEDURE wrd_mpi_io_int4_3d
       MODULE PROCEDURE wrd_mpi_io_int8_3d
       MODULE PROCEDURE wrd_mpi_io_logical
       MODULE PROCEDURE wrd_mpi_io_real
       MODULE PROCEDURE wrd_mpi_io_real_2d
       MODULE PROCEDURE wrd_mpi_io_real_3d
       MODULE PROCEDURE wrd_mpi_io_real_3d_soil
    END INTERFACE wrd_mpi_io

    INTERFACE wrd_mpi_io_global_array
       MODULE PROCEDURE wrd_mpi_io_global_array_int_1d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_1d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_2d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_3d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_4d
    END INTERFACE wrd_mpi_io_global_array

    INTERFACE wrd_mpi_io_particles
       MODULE PROCEDURE wrd_mpi_io_particles
    END INTERFACE wrd_mpi_io_particles

    INTERFACE wrd_mpi_io_surface
       MODULE PROCEDURE wrd_mpi_io_surface
       MODULE PROCEDURE wrd_mpi_io_surface_2d
    END INTERFACE wrd_mpi_io_surface

    PUBLIC  rd_mpi_io_check_array,                                                                &
            rd_mpi_io_check_open,                                                                 &
            rd_mpi_io_close,                                                                      &
            rd_mpi_io_open,                                                                       &
            rd_mpi_io_particle_filetypes,                                                         &
            rd_mpi_io_surface_filetypes,                                                          &
            rrd_mpi_io,                                                                           &
            rrd_mpi_io_global_array,                                                              &
            rrd_mpi_io_particles,                                                                 &
            rrd_mpi_io_surface,                                                                   &
            wrd_mpi_io,                                                                           &
            wrd_mpi_io_global_array,                                                              &
            wrd_mpi_io_particles,                                                                 &
            wrd_mpi_io_surface


 CONTAINS


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Open restart file for read or write with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_open( action, file_name, open_for_global_io_only )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  action     !<
    CHARACTER(LEN=*), INTENT(IN) ::  file_name  !<

    INTEGER(iwp) ::  i        !<
    INTEGER(iwp) ::  gh_size  !<

    INTEGER(KIND=rd_offset_kind) ::  offset  !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#endif

    LOGICAL, INTENT(IN), OPTIONAL ::  open_for_global_io_only  !<
    LOGICAL                       ::  set_filetype             !<

#if ! defined( __parallel )
    TYPE(C_PTR) ::  buf_ptr  !<
#endif

    offset = 0
    io_on_limited_cores_per_node = .FALSE.

    rd_flag = ( TRIM( action ) == 'READ'  .OR.  TRIM( action ) == 'read'  )
    wr_flag = ( TRIM( action ) == 'WRITE' .OR.  TRIM( action ) == 'write' )

    IF ( .NOT. ( rd_flag .OR. wr_flag ) )  THEN
       message_string = 'illegal action "' // TRIM( action ) // '" for opening restart files'
       CALL message( 'restart_data_mpi_io_mod', 'PA0720', 1, 2, 0, 6, 0 )
    ENDIF
!
!-- Store name of I/O file to communicate it internally within this module.
    io_file_name = file_name
!
!-- Setup for I/O on a limited number of cores per node (using shared memory MPI)
    IF ( rd_flag )  THEN
       set_filetype = .TRUE.
       IF ( TRIM( restart_data_format_input ) == 'mpi_shared_memory' )  THEN
          io_on_limited_cores_per_node = .TRUE.
       ENDIF
    ENDIF

    IF ( TRIM( restart_data_format_output ) == 'mpi_shared_memory'  .AND.  wr_flag )  THEN
       io_on_limited_cores_per_node = .TRUE.
    ENDIF
!
!-- Shared memory MPI is not used for reading of global data
    IF ( PRESENT( open_for_global_io_only )  .AND.  rd_flag )  THEN
       IF ( open_for_global_io_only )  THEN
          io_on_limited_cores_per_node = .FALSE.
          set_filetype                 = .FALSE.
       ENDIF
    ENDIF

!
!-- Determine whether prerun data shall be read and mapped cyclically to the mainrun arrays.
!-- In cyclic fill mode only a subset of the PEs will read.
    cyclic_fill_mode   = .FALSE.
    pe_active_for_read = .TRUE.

    IF ( rd_flag  .AND.  .NOT. PRESENT( open_for_global_io_only )  .AND.                          &
         nx_on_file < nx  .AND.  ny_on_file < ny )  THEN
       cyclic_fill_mode = .TRUE.
       CALL setup_cyclic_fill
!
!--    Shared memory IO on limited cores is not allowed for cyclic fill mode
       CALL sm_io%sm_init_comm( .FALSE. )
    ELSE
       CALL sm_io%sm_init_comm( io_on_limited_cores_per_node )
    ENDIF

!
!-- setup_cyclic_fill has activated the (smaller) prerun grid. PEs that hold no prerun data do
!-- not take part in opening and reading the file at all: they switch the index bounds back to
!-- the mainrun grid and return; their arrays are filled later via remote memory access.
    IF ( cyclic_fill_mode  .AND.  .NOT. pe_active_for_read )  THEN
       CALL mainrun_grid%activate_grid_from_this_class()
       RETURN
    ENDIF


!
!-- Set communicator to be used. If all cores are doing I/O, comm2d is used as usual.
    IF ( sm_io%is_sm_active() )  THEN
       comm_io = sm_io%comm_io
    ELSEIF ( cyclic_fill_mode )  THEN
       comm_io = comm_cyclic_fill
    ELSE
       comm_io = comm2d
    ENDIF

!
!-- Create subarrays and file types
    filetypes_created = .FALSE.

!
!-- In case of read it is not known yet if data include total domain. Filetypes will be created
!-- further below.
    IF ( wr_flag )  THEN
       CALL rd_mpi_io_create_filetypes
       filetypes_created = .TRUE.
    ENDIF

!
!-- Open file for MPI-IO
#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN

       IF ( rd_flag )  THEN

          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //     &
                                       '" for read with MPI-IO'
             CALL debug_message( debug_string, 'start' )
          ENDIF

          CALL MPI_FILE_OPEN( comm_io, TRIM( io_file_name ), MPI_MODE_RDONLY, MPI_INFO_NULL, fh,  &
                              ierr )

          IF ( ierr /= 0 )  THEN
             message_string = 'error opening restart file "' // TRIM( io_file_name ) //           &
                              '" for reading with MPI-IO'
             CALL message( 'rd_mpi_io_open', 'PA0727', 3, 2, 0, 6, 0 )
          ENDIF

          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //     &
                                       '" for read with MPI-IO'
             CALL debug_message( debug_string, 'end' )
          ENDIF

       ELSEIF ( wr_flag )  THEN

          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //     &
                                       '" for write with MPI-IO'
             CALL debug_message( debug_string, 'start' )
          ENDIF

          CALL MPI_FILE_OPEN( comm_io, TRIM( io_file_name ), MPI_MODE_CREATE+MPI_MODE_WRONLY,     &
                              MPI_INFO_NULL, fh, ierr )

          IF ( ierr /= 0 )  THEN
             message_string = 'error opening restart file "' // TRIM( io_file_name ) //           &
                              '" for writing with MPI-IO'
             CALL message( 'rd_mpi_io_open', 'PA0728', 3, 2, 0, 6, 0 )
          ENDIF

          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //     &
                                       '" for write with MPI-IO'
             CALL debug_message( debug_string, 'end' )
          ENDIF

       ENDIF

    ENDIF
#else
    IF ( rd_flag )  THEN

       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //              &
                                    '" for read in serial mode (posix)'
          CALL debug_message( debug_string, 'start' )
       ENDIF

       fh = posix_open( TRIM( io_file_name ), .TRUE. )

       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //              &
                                    '" for read in serial mode (posix)'
          CALL debug_message( debug_string, 'end' )
       ENDIF

    ELSEIF ( wr_flag )  THEN

       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //              &
                                    '" for write in serial mode (posix)'
          CALL debug_message( debug_string, 'start' )
       ENDIF

       fh = posix_open( TRIM( io_file_name ), .FALSE. )

       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //              &
                                    '" for write in serial mode (posix)'
          CALL debug_message( debug_string, 'end' )
       ENDIF

    ENDIF

    IF ( fh < 0 )  THEN
       message_string = 'error opening restart file for posix I/O'
       CALL message( 'restart_data_mpi_io_mod', 'PA0721', 1, 2, 0, 6, 0 )
    ENDIF
#endif

    array_position  = 65536  !< start offset for writing 2-D and 3-D arrays at 64 k
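!
!-- Everything before this fixed offset belongs to the header section: the 32-byte general
!-- header followed by the name/value tables declared above. With the default table sizes
!-- (max_nr_int, max_nr_char, max_nr_real, max_nr_arrays) these tables occupy less than
!-- 64 kByte, so the array data can safely start at offset 65536.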
    header_position = 0

    header_int_index   = 1
    header_char_index  = 1
    header_real_index  = 1
    header_array_index = 1

    int_names    = ' '
    int_values   = 0
    text_lines   = ' '
    real_names   = ' '
    real_values  = 0.0
    array_names  = ' '
    array_offset = 0

    int_names(1)  = 'nx'
    int_values(1) = nx
    int_names(2)  = 'ny'
    int_values(2) = ny
    int_names(3)  = 'nz'
    int_values(3) = nz
    header_int_index = header_int_index + 3
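!
!-- nx, ny and nz are stored in the header of every restart file, so that a subsequent read can
!-- check whether the file matches the index limits of the current run (see also revision 4539).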

    DO  i = 1, max_nr_arrays
       array_offset(i) = 0
       array_names(i)  = ' '
    ENDDO

    gh_size = STORAGE_SIZE( tgh ) / 8

    IF ( rd_flag )  THEN
       IF ( sm_io%iam_io_pe )  THEN
!
!--       File is open for reading
#if defined( __parallel )
!--       Set the default view
          CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
!
!--       Read the file header size
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, tgh, gh_size, MPI_BYTE, status, ierr )
#else
          CALL posix_lseek( fh, header_position )
          buf_ptr = C_LOC( tgh )
          CALL posix_read( fh, buf_ptr, gh_size )
#endif
       ENDIF
#if defined( __parallel )
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_BCAST( tgh, gh_size, MPI_BYTE, 0, sm_io%comm_shared, ierr )
       ENDIF
#endif
       header_position = header_position + gh_size

       include_total_domain_boundaries = ( tgh%i_outer_bound == 1 )

!
!--    File types depend on boundaries of the total domain being included in data. This has been
!--    checked with the previous statement.
       IF ( set_filetype )  THEN
          CALL rd_mpi_io_create_filetypes
          filetypes_created = .TRUE.
       ENDIF

       IF ( sm_io%iam_io_pe )  THEN
#if defined( __parallel )
!
!--       Read INTEGER values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, int_names, SIZE( int_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( int_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, int_values, SIZE( int_values ), MPI_INT, status, ierr )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, text_lines, SIZE( text_lines ) * 128, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, real_names, SIZE( real_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( real_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, real_values, SIZE( real_values ), MPI_REAL, status, ierr )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-array headers
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, array_names, SIZE( array_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( array_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, array_offset, SIZE( array_offset ) * MPI_OFFSET_KIND, MPI_BYTE, &
                              status, ierr )  ! no standard MPI datatype for INTEGER*8
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#else
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, int_names )
          header_position = header_position + SIZE( int_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, int_values, SIZE( int_values ) )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, text_lines )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, real_names )
          header_position = header_position + SIZE( real_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, real_values, SIZE( real_values ) )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-array headers
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, array_names )
          header_position = header_position + SIZE( array_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, array_offset, SIZE( array_offset ) )  ! no standard I*8 datatype
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#endif
          IF ( debug_output )  CALL rd_mpi_io_print_header

       ENDIF

#if defined( __parallel )
!
!--    Broadcast header to all remaining cores that are not involved in I/O
       IF ( sm_io%is_sm_active() )  THEN
!
!--       Broadcasting a CHARACTER array in a single MPI_BCAST call may not be possible,
!--       therefore the string arrays are broadcast element by element.
          DO  i = 1, SIZE( int_names )
             CALL MPI_BCAST( int_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
          ENDDO
          CALL MPI_BCAST( int_values, SIZE( int_values ), MPI_INTEGER, 0, sm_io%comm_shared, ierr )

          DO  i = 1, SIZE( text_lines )
             CALL MPI_BCAST( text_lines(i), 128, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
          ENDDO

          DO  i = 1, SIZE( real_names )
             CALL MPI_BCAST( real_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
          ENDDO
          CALL MPI_BCAST( real_values, SIZE( real_values ), MPI_REAL, 0, sm_io%comm_shared, ierr )

          DO  i = 1, SIZE( array_names )
             CALL MPI_BCAST( array_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
          ENDDO
          CALL MPI_BCAST( array_offset, SIZE( array_offset )*8, MPI_BYTE, 0, sm_io%comm_shared,   &
                          ierr )  ! no standard MPI datatype for INTEGER*8 (array_offset is I*8!)

          CALL MPI_BCAST( header_position, rd_offset_kind, MPI_BYTE, 0, sm_io%comm_shared, ierr )

       ENDIF
#endif


    ENDIF

!
!-- In cyclic fill mode the file is processed on the prerun grid; switch the index bounds back
!-- to the mainrun grid before returning to the caller.
    IF ( cyclic_fill_mode )  CALL mainrun_grid%activate_grid_from_this_class()

 CONTAINS

    SUBROUTINE setup_cyclic_fill

       IMPLICIT NONE

       INTEGER      ::  color  !< used to set the IO PEs for MPI_COMM_SPLIT
       INTEGER(iwp) ::  i      !<
       INTEGER(iwp) ::  j      !<
#if defined( __parallel )
       INTEGER ::  ierr  !<
       INTEGER(KIND=MPI_ADDRESS_KIND) ::  winsize  !< size of RMA window
#else
       INTEGER(idp) ::  winsize
#endif

!
!--    Save the index bounds of the main run, because they are overwritten below with the
!--    (smaller) bounds of the prerun grid contained in the restart file. They are restored
!--    via mainrun_grid%activate_grid_from_this_class() when the file handling is done.
       CALL mainrun_grid%save_grid_into_this_class()

       ALLOCATE( remote_pe(0:nx_on_file,0:ny_on_file) )
       ALLOCATE( remote_pe_s(0:nx_on_file,0:ny_on_file) )
       ALLOCATE( rma_offset(0:nx_on_file,0:ny_on_file) )
       ALLOCATE( rma_offset_s(0:nx_on_file,0:ny_on_file) )

       remote_pe_s  = 0
       rma_offset_s = 0
!
!--    Determine whether grid points of the prerun are located on this PE.
!--    Set the (cyclic) prerun grid.
       nxr = MIN( nxr, nx_on_file )
       IF ( nxl > nx_on_file )  THEN
          nxl = -99
          nxr = -99
          nnx = 0
       ELSE
          nnx = nxr - nxl + 1
       ENDIF

       nyn = MIN( nyn, ny_on_file )
       IF ( nys > ny_on_file )  THEN
          nys = -99
          nyn = -99
          nny = 0
       ELSE
          nny = nyn - nys + 1
       ENDIF

       nx = nx_on_file
       ny = ny_on_file
!
!--    Determine whether this PE is doing I/O
       IF ( nnx > 0  .AND.  nny > 0 )  THEN
          color = 1
          pe_active_for_read = .TRUE.
          remote_pe_s(nxl:nxr,nys:nyn) = myid   ! myid from comm2d
          DO  j = nys, nyn
             DO  i = nxl, nxr
                rma_offset_s(i,j) = ( j-nys ) + ( i-nxl ) * nny
             ENDDO
          ENDDO
       ELSE
#if defined( __parallel )
          color = MPI_UNDEFINED
#endif
          pe_active_for_read = .FALSE.
       ENDIF

#if defined( __parallel )
       CALL MPI_ALLREDUCE( remote_pe_s, remote_pe, SIZE( remote_pe_s ), MPI_INTEGER, MPI_SUM,     &
                           comm2d, ierr )
       CALL MPI_ALLREDUCE( rma_offset_s, rma_offset, SIZE( rma_offset_s ), MPI_INTEGER, MPI_SUM,  &
                           comm2d, ierr )
       CALL MPI_COMM_SPLIT( comm2d, color, 0, comm_cyclic_fill, ierr )

       IF ( pe_active_for_read )  THEN
          CALL MPI_COMM_SIZE( comm_cyclic_fill, numprocs, ierr )
          CALL MPI_COMM_RANK( comm_cyclic_fill, myid, ierr )
       ENDIF
#else
       remote_pe  = remote_pe_s
       rma_offset = rma_offset_s
       myid       = 0
       numprocs   = 1
#endif
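!
!--    After the ALLREDUCE, remote_pe(i,j) holds for every grid point of the file the rank (in
!--    comm2d) of the PE that reads it, and rma_offset(i,j) its position within that PE's RMA
!--    buffer. Mainrun points outside the file domain are later mapped back cyclically with
!--    MOD( i, nx_on_file+1 ): e.g. for nx_on_file = 31, mainrun index i = 40 fetches file
!--    index 8.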
!
!--    Allocate 2d buffers as RMA windows, accessible from all PEs
       IF ( pe_active_for_read )  THEN
          ALLOCATE( rmabuf_2di(nys:nyn,nxl:nxr) )
       ELSE
          ALLOCATE( rmabuf_2di(1,1) )
       ENDIF
       winsize = SIZE( rmabuf_2di ) * iwp

#if defined( __parallel )
       CALL MPI_WIN_CREATE( rmabuf_2di, winsize, iwp, MPI_INFO_NULL, comm2d, rmawin_2di, ierr )
       CALL MPI_WIN_FENCE( 0, rmawin_2di, ierr )
#endif

       IF ( pe_active_for_read )  THEN
          ALLOCATE( rmabuf_2d(nys:nyn,nxl:nxr) )
       ELSE
          ALLOCATE( rmabuf_2d(1,1) )
       ENDIF
       winsize = SIZE( rmabuf_2d ) * wp

#if defined( __parallel )
       CALL MPI_WIN_CREATE( rmabuf_2d, winsize, wp, MPI_INFO_NULL, comm2d, rmawin_2d, ierr )
       CALL MPI_WIN_FENCE( 0, rmawin_2d, ierr )
#endif

!
!--    Allocate 3d buffer as RMA window, accessible from all PEs
       IF ( pe_active_for_read )  THEN
          ALLOCATE( rmabuf_3d(nzb:nzt+1,nys:nyn,nxl:nxr) )
       ELSE
          ALLOCATE( rmabuf_3d(1,1,1) )
       ENDIF
       winsize = SIZE( rmabuf_3d ) * wp

#if defined( __parallel )
       CALL MPI_WIN_CREATE( rmabuf_3d, winsize, wp, MPI_INFO_NULL, comm2d, rmawin_3d, ierr )
       CALL MPI_WIN_FENCE( 0, rmawin_3d, ierr )
#endif
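
!
!--    The three windows (2d INTEGER, 2d REAL, 3d REAL) stay open for the lifetime of the file.
!--    Each cyclic-fill read routine below brackets its remote accesses with a pair of
!--    MPI_WIN_FENCE calls: the first closes the local fill epoch, the second re-opens it.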

!
!--    Save the modified index bounds as the prerun grid, so that the read routines can switch
!--    between the prerun and the mainrun view of the domain decomposition.
       CALL prerun_grid%save_grid_into_this_class()
       prerun_grid%comm2d = comm_cyclic_fill

       DEALLOCATE( remote_pe_s, rma_offset_s )

    END SUBROUTINE setup_cyclic_fill

 END SUBROUTINE rd_mpi_io_open


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Check if array exists in restart file
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_check_array( name, found )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

    INTEGER(iwp) ::  i  !<

    LOGICAL ::  found  !<


    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          RETURN
       ENDIF
    ENDDO

    found = .FALSE.

 END SUBROUTINE rd_mpi_io_check_array



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read INTEGER with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_int( name, value )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

    INTEGER(iwp)                   ::  i      !<
    INTEGER(KIND=iwp), INTENT(OUT) ::  value  !<

    LOGICAL ::  found  !<


    found = .FALSE.
    value = 0

    DO  i = 1, tgh%nr_int
       IF ( TRIM( int_names(i) ) == TRIM( name ) )  THEN
          value = int_values(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( .NOT. found )  THEN
       message_string = 'INTEGER variable "' // TRIM( name ) // '" not found in restart file'
       CALL message( 'rrd_mpi_io_int', 'PA0722', 3, 2, 0, 6, 0 )
    ENDIF

 END SUBROUTINE rrd_mpi_io_int



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read REAL with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_real( name, value )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

    INTEGER(iwp) ::  i  !<

    LOGICAL ::  found  !<

    REAL(KIND=wp), INTENT(OUT) ::  value  !<


    found = .FALSE.
    value = 0.0

    DO  i = 1, tgh%nr_real
       IF ( TRIM( real_names(i) ) == TRIM( name ) )  THEN
          value = real_values(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( .NOT. found )  THEN
       message_string = 'REAL variable "' // TRIM( name ) // '" not found in restart file'
       CALL message( 'rrd_mpi_io_real', 'PA0722', 3, 2, 0, 6, 0 )
    ENDIF

 END SUBROUTINE rrd_mpi_io_real



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 2d-real array with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_real_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#endif
    INTEGER(iwp) ::  i  !<

    LOGICAL ::  found  !<

    REAL(wp), INTENT(INOUT), DIMENSION(nysg:nyng,nxlg:nxrg) ::  data  !<


    found = .FALSE.

    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN

       IF ( cyclic_fill_mode )  THEN

          CALL rrd_mpi_io_real_2d_cyclic_fill

       ELSE

#if defined( __parallel )
          CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
          IF ( sm_io%iam_io_pe )  THEN
             CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_2d, 'native',               &
                                     MPI_INFO_NULL, ierr )
             CALL MPI_FILE_READ_ALL( fh, array_2d, SIZE( array_2d ), MPI_REAL, status, ierr )
          ENDIF
          CALL sm_io%sm_node_barrier()
#else
          CALL posix_lseek( fh, array_position )
          CALL posix_read( fh, array_2d, SIZE( array_2d ) )
#endif

          IF ( include_total_domain_boundaries )  THEN
             DO  i = iog%nxl, iog%nxr
                data(iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_2d(i,iog%nys:iog%nyn)
             ENDDO
             IF ( debug_level >= 2 )  THEN
                WRITE( 9, * )  'r2f_ob ', TRIM( name ), ' ', SUM( data(nys:nyn,nxl:nxr) )
             ENDIF
          ELSE
             DO  i = nxl, nxr
                data(nys:nyn,i) = array_2d(i,nys:nyn)
             ENDDO
             IF ( debug_level >= 2 )  THEN
                WRITE( 9, * )  'r2f ', TRIM( name ), ' ', SUM( data(nys:nyn,nxl:nxr) )
             ENDIF
          ENDIF

       ENDIF

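!
!--    Ghost layers are not contained in the restart file; they are filled here by a regular
!--    ghost point exchange.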
       CALL exchange_horiz_2d( data )

    ELSE
       message_string = '2d-REAL array "' // TRIM( name ) // '" not found in restart file'
       CALL message( 'rrd_mpi_io_real_2d', 'PA0722', 3, 2, 0, 6, 0 )
    ENDIF


 CONTAINS

    SUBROUTINE rrd_mpi_io_real_2d_cyclic_fill

       IMPLICIT NONE

       INTEGER(iwp) ::  i         !<
       INTEGER(iwp) ::  ie        !<
       INTEGER(iwp) ::  is        !<
       INTEGER(iwp) ::  i_remote  !<
       INTEGER(iwp) ::  j         !<
       INTEGER(iwp) ::  je        !<
       INTEGER(iwp) ::  js        !<
       INTEGER(iwp) ::  j_remote  !<
       INTEGER(iwp) ::  nval      !<
       INTEGER(iwp) ::  rem_pe    !<

#if defined( __parallel )
       INTEGER(iwp) ::  ierr  !<
       INTEGER(KIND=MPI_ADDRESS_KIND) ::  rem_offs  !<
#else
       INTEGER(idp) ::  rem_offs
#endif


!kk    write(9,*) 'Here is rma_cylic_fill_real_2d ',nxl,nxr,nys,nyn; FLUSH(9)

!
!--    Read 2d real array on the prerun grid
       CALL prerun_grid%activate_grid_from_this_class()

       IF ( pe_active_for_read )  THEN
#if defined( __parallel )
          CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_2d, 'native',                  &
                                  MPI_INFO_NULL, ierr )
          CALL MPI_FILE_READ_ALL( fh, array_2d, SIZE( array_2d ), MPI_REAL, status, ierr )
#endif
          DO  i = nxl, nxr
             rmabuf_2d(nys:nyn,i) = array_2d(i,nys:nyn)
          ENDDO
          data(nys:nyn,nxl:nxr) = rmabuf_2d  ! copy prerun data directly into output array data
       ENDIF

       CALL mainrun_grid%activate_grid_from_this_class()

#if defined( __parallel )
!
!--    Close RMA window to allow remote access
       CALL MPI_WIN_FENCE( 0, rmawin_2d, ierr )
#endif

!
!--    PEs without own prerun data must fetch their complete subdomain via MPI_GET. PEs that
!--    hold prerun data have already copied that part directly above and only fetch the
!--    remaining stripes (north and right of the prerun data, see below).
       IF ( .NOT. pe_active_for_read )  THEN

          is = nxl
          ie = nxr
          js = nys
          je = nyn

       ELSE
!
!--       Extra get for cyclic data north of prerun data
          is = nxl
          ie = nxr
          js = prerun_grid%nys+1
          je = nyn
          DO  i = is, ie
             DO  j = js, je
                i_remote = MOD( i, nx_on_file+1 )
                j_remote = MOD( j, ny_on_file+1 )
                rem_pe   = remote_pe(i_remote,j_remote)
                rem_offs = rma_offset(i_remote,j_remote)
                nval     = 1

#if defined( __parallel )
                IF ( rem_pe /= myid )  THEN
                   CALL MPI_GET( data(j,i), nval, MPI_REAL, rem_pe, rem_offs, nval, MPI_REAL,     &
                                 rmawin_2d, ierr )
                ELSE
                   data(j,i) = rmabuf_2d(j_remote,i_remote)
                ENDIF
#else
                data(j,i) = array_2d(i_remote,j_remote)
#endif
             ENDDO
          ENDDO
!
!--       Prepare setup for stripe right of prerun data
          is = prerun_grid%nxr+1
          ie = nxr
          js = nys
          je = nyn

       ENDIF

       DO  i = is, ie
          DO  j = js, je
             i_remote = MOD( i, nx_on_file+1 )
             j_remote = MOD( j, ny_on_file+1 )
             rem_pe   = remote_pe(i_remote,j_remote)
             rem_offs = rma_offset(i_remote,j_remote)
             nval     = 1

#if defined( __parallel )
             IF ( rem_pe /= myid )  THEN
                CALL MPI_GET( data(j,i), nval, MPI_REAL, rem_pe, rem_offs, nval, MPI_REAL,        &
                              rmawin_2d, ierr )
             ELSE
                data(j,i) = rmabuf_2d(j_remote,i_remote)
             ENDIF
#else
             data(j,i) = array_2d(i_remote,j_remote)
#endif
          ENDDO
       ENDDO

#if defined( __parallel )
!
!--    Reopen RMA window to allow filling
       CALL MPI_WIN_FENCE( 0, rmawin_2d, ierr )
#endif

    END SUBROUTINE rrd_mpi_io_real_2d_cyclic_fill

 END SUBROUTINE rrd_mpi_io_real_2d



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 2d-INTEGER array with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_int_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

    INTEGER(iwp) ::  i  !<
    INTEGER(iwp) ::  j  !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#endif

    INTEGER(KIND=iwp), INTENT(INOUT), DIMENSION(:,:) ::  data  !<

    LOGICAL ::  found  !<


    found = .FALSE.

    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN

       IF ( ( nxr - nxl + 1 + 2 * nbgp ) == SIZE( data, 2 ) )  THEN
!
!--       Output array with halos.
!--       ATTENTION: INTEGER arrays with ghost boundaries are not implemented yet. This kind of
!--       array would be dimensioned in the calling subroutine like this:
!--       INTEGER, DIMENSION(nysg:nyng,nxlg:nxrg) ::  data
          message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be read from restart ' //&
                           'file is defined with illegal dimensions in the PALM code'
          CALL message( 'rrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 )

       ELSEIF ( ( nxr - nxl + 1 ) == SIZE( data, 2 ) )  THEN
!
!--       INTEGER input array without halos.
!--       This kind of array is dimensioned in the calling subroutine as
!--       INTEGER, DIMENSION(nys:nyn,nxl:nxr) ::  data
          IF ( cyclic_fill_mode )  THEN

             CALL rrd_mpi_io_int_2d_cyclic_fill

          ELSE

#if defined( __parallel )
             CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
             IF ( sm_io%iam_io_pe )  THEN
                CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_2di_nb, 'native',     &
                                        MPI_INFO_NULL, ierr )
                CALL MPI_FILE_READ_ALL( fh, array_2di, SIZE( array_2di ), MPI_INTEGER, status,    &
                                        ierr )
             ENDIF
             CALL sm_io%sm_node_barrier()
#else
             CALL posix_lseek( fh, array_position )
             CALL posix_read( fh, array_2di, SIZE( array_2di ) )
#endif
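!
!--          The I/O buffer array_2di is ordered (x,y) like the file, whereas PALM fields are
!--          ordered (y,x); the copy below therefore transposes the indices and maps them onto
!--          the 1-based bounds of the halo-free output array.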
---|
1314 | DO j = nys, nyn |
---|
1315 | DO i = nxl, nxr |
---|
1316 | data(j-nys+1,i-nxl+1) = array_2di(i,j) |
---|
1317 | ENDDO |
---|
1318 | ENDDO |
---|
1319 | |
---|
1320 | ENDIF |
---|
1321 | |
---|
1322 | ELSE |
---|
1323 | |
---|
1324 | message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be read from restart ' // & |
---|
1325 | 'file is defined with illegal dimensions in the PALM code' |
---|
1326 | CALL message( 'rrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 ) |
---|
1327 | |
---|
1328 | ENDIF |
---|
1329 | |
---|
1330 | ELSE |
---|
1331 | |
---|
1332 | message_string = '2d-INTEGER array "' // TRIM( name ) // '" not found in restart file' |
---|
1333 | CALL message( 'rrd_mpi_io_int_2d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1334 | |
---|
1335 | ENDIF |
---|
1336 | |
---|
1337 | |
---|
1338 | CONTAINS |
---|
1339 | |
---|
1340 | SUBROUTINE rrd_mpi_io_int_2d_cyclic_fill |
---|
1341 | |
---|
1342 | IMPLICIT NONE |
---|
1343 | |
---|
1344 | INTEGER(iwp) :: i !< |
---|
1345 | INTEGER(iwp) :: ie !< |
---|
1346 | INTEGER(iwp) :: is !< |
---|
1347 | INTEGER(iwp) :: i_remote !< |
---|
1348 | INTEGER(iwp) :: j !< |
---|
1349 | INTEGER(iwp) :: je !< |
---|
1350 | INTEGER(iwp) :: js !< |
---|
1351 | INTEGER(iwp) :: j_remote !< |
---|
1352 | INTEGER(iwp) :: nval !< |
---|
1353 | INTEGER(iwp) :: rem_pe !< |
---|
1354 | |
---|
1355 | #if defined( __parallel ) |
---|
1356 | INTEGER(iwp) :: ierr !< |
---|
1357 | INTEGER(KIND=MPI_ADDRESS_KIND) :: rem_offs !< |
---|
1358 | #else |
---|
1359 | INTEGER(idp) :: rem_offs |
---|
1360 | #endif |
---|
1361 | |
---|
1362 | |
---|
1363 | CALL prerun_grid%activate_grid_from_this_class() |
---|
1364 | |
---|
1365 | IF ( pe_active_for_read ) THEN |
---|
1366 | #if defined( __parallel ) |
---|
1367 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_2di_nb, 'native', & |
---|
1368 | MPI_INFO_NULL, ierr ) |
---|
1369 | CALL MPI_FILE_READ_ALL( fh, array_2di, SIZE( array_2di ), MPI_INTEGER, status, ierr ) |
---|
1370 | #endif |
---|
1371 | DO i = nxl, nxr |
---|
1372 | rmabuf_2di(nys:nyn,i) = array_2di(i,nys:nyn) |
---|
1373 | ENDDO |
---|
1374 | data(1:nny,1:nnx) = rmabuf_2di |
---|
1375 | ENDIF |
---|
1376 | |
---|
1377 | CALL mainrun_grid%activate_grid_from_this_class() |
---|
1378 | |
---|
1379 | #if defined( __parallel ) |
---|
1380 | ! |
---|
1381 | !-- Close RMA window to allow remote access |
---|
1382 | CALL MPI_WIN_FENCE( 0, rmawin_2di, ierr ) |
---|
1383 | #endif |
---|
1384 | |
---|
1385 | IF ( .NOT. pe_active_for_read ) THEN |
---|
1386 | |
---|
1387 | is = nxl |
---|
1388 | ie = nxr |
---|
1389 | js = nys |
---|
1390 | je = nyn |
---|
1391 | |
---|
1392 | ELSE |
---|
1393 | |
---|
1394 | is = nxl |
---|
1395 | ie = nxr |
---|
1396 | js = prerun_grid%nys+1 |
---|
1397 | je = nyn |
---|
1398 | DO i = is, ie |
---|
1399 | DO j = js, je |
---|
1400 | i_remote = MOD(i,nx_on_file+1) |
---|
1401 | j_remote = MOD(j,ny_on_file+1) |
---|
1402 | rem_pe = remote_pe(i_remote,j_remote) |
---|
1403 | rem_offs = rma_offset(i_remote,j_remote) |
---|
1404 | nval = 1 |
---|
1405 | |
---|
1406 | #if defined( __parallel ) |
---|
1407 | IF ( rem_pe /= myid ) THEN |
---|
1408 | CALL MPI_GET( data(j-nys+1,i-nxl+1), nval, MPI_INTEGER, rem_pe, rem_offs, nval, & |
---|
1409 | MPI_INTEGER, rmawin_2di, ierr ) |
---|
1410 | ELSE |
---|
1411 | data(j-nys+1,i-nxl+1) = rmabuf_2di(j_remote,i_remote) |
---|
1412 | ENDIF |
---|
1413 | #else |
---|
1414 | data(j-nys+1,i-nxl+1) = array_2di(i_remote,j_remote) |
---|
1415 | #endif |
---|
1416 | ENDDO |
---|
1417 | ENDDO |
---|
1418 | is = prerun_grid%nxr+1 |
---|
1419 | ie = nxr |
---|
1420 | js = nys |
---|
1421 | je = nyn |
---|
1422 | |
---|
1423 | ENDIF |
---|
1424 | |
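!
!-- Fill the index range set above by one-sided access: for every target grid point the source
!-- index on the prerun grid is computed with MOD, the owning PE and its offset inside the RMA
!-- window are looked up, and the value is fetched with MPI_GET. If the source lies on the local
!-- PE, it is copied directly from the local RMA buffer instead.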
---|
1425 | DO i = is, ie |
---|
1426 | DO j = js, je |
---|
1427 | i_remote = MOD(i,nx_on_file+1) |
---|
1428 | j_remote = MOD(j,ny_on_file+1) |
---|
1429 | rem_pe = remote_pe(i_remote,j_remote) |
---|
1430 | rem_offs = rma_offset(i_remote,j_remote) |
---|
1431 | nval = 1 |
---|
1432 | #if defined( __parallel ) |
---|
1433 | IF ( rem_pe /= myid ) THEN |
---|
1434 | CALL MPI_GET( data(j-nys+1,i-nxl+1), nval, MPI_INTEGER, rem_pe, rem_offs, nval, & |
---|
1435 | MPI_INTEGER, rmawin_2di, ierr) |
---|
1436 | ELSE |
---|
1437 | data(j-nys+1,i-nxl+1) = rmabuf_2di(j_remote,i_remote) |
---|
1438 | ENDIF |
---|
1439 | #else |
---|
1440 | data(j-nys+1,i-nxl+1) = array_2di(i_remote,j_remote) |
---|
1441 | #endif |
---|
1442 | ENDDO |
---|
1443 | ENDDO |
---|
1444 | |
---|
1445 | #if defined( __parallel ) |
---|
1446 | ! |
---|
1447 | !-- Reopen RMA window to allow filling |
---|
1448 | CALL MPI_WIN_FENCE( 0, rmawin_2di, ierr ) |
---|
1449 | #endif |
---|
1450 | |
---|
1451 | END SUBROUTINE rrd_mpi_io_int_2d_cyclic_fill |
---|
1452 | |
---|
1453 | END SUBROUTINE rrd_mpi_io_int_2d |
---|
1454 | |
---|
1455 | |
---|
1456 | |
---|
1457 | !--------------------------------------------------------------------------------------------------! |
---|
1458 | ! Description: |
---|
1459 | ! ------------ |
---|
1460 | !> Read 3d-INTEGER*4 array with MPI-IO |
---|
1461 | !--------------------------------------------------------------------------------------------------! |
---|
1462 | SUBROUTINE rrd_mpi_io_int4_3d( name, data ) |
---|
1463 | |
---|
1464 | IMPLICIT NONE |
---|
1465 | |
---|
1466 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1467 | |
---|
1468 | INTEGER(iwp) :: i !< |
---|
1469 | |
---|
1470 | #if defined( __parallel ) |
---|
1471 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
1472 | #endif |
---|
1473 | |
---|
1474 | LOGICAL :: found !< |
---|
1475 | |
---|
1476 | INTEGER(isp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
1477 | |
---|
1478 | |
---|
1479 | found = .FALSE. |
---|
1480 | data = -1 |
---|
1481 | |
---|
1482 | DO i = 1, tgh%nr_arrays |
---|
1483 | IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN |
---|
1484 | array_position = array_offset(i) |
---|
1485 | found = .TRUE. |
---|
1486 | EXIT |
---|
1487 | ENDIF |
---|
1488 | ENDDO |
---|
1489 | |
---|
1490 | IF ( found ) THEN |
---|
1491 | |
---|
1492 | #if defined( __parallel ) |
---|
1493 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited # of cores is inactive |
---|
1494 | IF ( sm_io%iam_io_pe ) THEN |
---|
1495 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_3di4, 'native', & |
---|
1496 | MPI_INFO_NULL, ierr ) |
---|
1497 | CALL MPI_FILE_READ_ALL( fh, array_3di4, SIZE( array_3di4 ), MPI_INTEGER, status, ierr ) |
---|
1498 | ENDIF |
---|
1499 | CALL sm_io%sm_node_barrier() |
---|
1500 | #else |
---|
1501 | CALL posix_lseek( fh, array_position ) |
---|
1502 | CALL posix_read(fh, array_3di4, SIZE( array_3di4 ) ) |
---|
1503 | #endif |
---|
1504 | IF ( include_total_domain_boundaries ) THEN |
---|
1505 | DO i = iog%nxl, iog%nxr |
---|
1506 | data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3di4(:,i,iog%nys:iog%nyn) |
---|
1507 | ENDDO |
---|
1508 | ELSE |
---|
1509 | DO i = nxl, nxr |
---|
1510 | data(:,nys:nyn,i) = array_3di4(:,i,nys:nyn) |
---|
1511 | ENDDO |
---|
1512 | ENDIF |
---|
1513 | |
---|
1514 | ELSE |
---|
1515 | |
---|
1516 | message_string = '3d-INTEGER*4 array "' // TRIM( name ) // '" not found in restart file' |
---|
1517 | CALL message( 'rrd_mpi_io_int4_3d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1518 | |
---|
1519 | ENDIF |
---|
1520 | |
---|
1521 | |
---|
1522 | END SUBROUTINE rrd_mpi_io_int4_3d |
---|
1523 | |
---|
1524 | |
---|
1525 | |
---|
1526 | !--------------------------------------------------------------------------------------------------! |
---|
1527 | ! Description: |
---|
1528 | ! ------------ |
---|
1529 | !> Read 3d-INTEGER*8 array with MPI-IO |
---|
1530 | !--------------------------------------------------------------------------------------------------! |
---|
1531 | SUBROUTINE rrd_mpi_io_int8_3d( name, data ) |
---|
1532 | |
---|
1533 | IMPLICIT NONE |
---|
1534 | |
---|
1535 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1536 | |
---|
1537 | INTEGER(iwp) :: i !< |
---|
1538 | |
---|
1539 | #if defined( __parallel ) |
---|
1540 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
1541 | #endif |
---|
1542 | |
---|
1543 | LOGICAL :: found !< |
---|
1544 | |
---|
1545 | INTEGER(idp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
1546 | |
---|
1547 | |
---|
1548 | found = .FALSE. |
---|
1549 | data = -1 |
---|
1550 | |
---|
1551 | DO i = 1, tgh%nr_arrays |
---|
1552 | IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN |
---|
1553 | array_position = array_offset(i) |
---|
1554 | found = .TRUE. |
---|
1555 | EXIT |
---|
1556 | ENDIF |
---|
1557 | ENDDO |
---|
1558 | |
---|
1559 | IF ( found ) THEN |
---|
1560 | |
---|
1561 | #if defined( __parallel ) |
---|
1562 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited # of cores is inactive |
---|
1563 | IF ( sm_io%iam_io_pe ) THEN |
---|
1564 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER8, ft_3di8, 'native', MPI_INFO_NULL, & |
---|
1565 | ierr ) |
---|
1566 | CALL MPI_FILE_READ_ALL( fh, array_3di8, SIZE( array_3di8 ), MPI_INTEGER8, status, ierr ) |
---|
1567 | ENDIF |
---|
1568 | CALL sm_io%sm_node_barrier() |
---|
1569 | #else |
---|
1570 | CALL posix_lseek( fh, array_position ) |
---|
1571 | CALL posix_read(fh, array_3di8, SIZE( array_3di8 ) ) |
---|
1572 | #endif |
---|
1573 | IF ( include_total_domain_boundaries ) THEN |
---|
1574 | DO i = iog%nxl, iog%nxr |
---|
1575 | data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3di8(:,i,iog%nys:iog%nyn) |
---|
1576 | ENDDO |
---|
1577 | ELSE |
---|
1578 | DO i = nxl, nxr |
---|
1579 | data(:,nys:nyn,i) = array_3di8(:,i,nys:nyn) |
---|
1580 | ENDDO |
---|
1581 | ENDIF |
---|
1582 | |
---|
1583 | ELSE |
---|
1584 | |
---|
1585 | message_string = '3d-INTEGER*8 array "' // TRIM( name ) // '" not found in restart file' |
---|
1586 | CALL message( 'rrd_mpi_io_int8_3d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1587 | |
---|
1588 | ENDIF |
---|
1589 | |
---|
1590 | |
---|
1591 | END SUBROUTINE rrd_mpi_io_int8_3d |
---|
1592 | |
---|
1593 | |
---|
1594 | |
---|
1595 | !--------------------------------------------------------------------------------------------------! |
---|
1596 | ! Description: |
---|
1597 | ! ------------ |
---|
1598 | !> Read 3d-REAL array with MPI-IO |
---|
1599 | !--------------------------------------------------------------------------------------------------! |
---|
1600 | SUBROUTINE rrd_mpi_io_real_3d( name, data ) |
---|
1601 | |
---|
1602 | IMPLICIT NONE |
---|
1603 | |
---|
1604 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1605 | |
---|
1606 | INTEGER(iwp) :: i !< |
---|
1607 | |
---|
1608 | #if defined( __parallel ) |
---|
1609 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
1610 | #endif |
---|
1611 | |
---|
1612 | LOGICAL :: found !< |
---|
1613 | |
---|
1614 | REAL(wp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
1615 | |
---|
1616 | |
---|
1617 | found = .FALSE. |
---|
1618 | data = -1.0 |
---|
1619 | |
---|
1620 | DO i = 1, tgh%nr_arrays |
---|
1621 | IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN |
---|
1622 | array_position = array_offset(i) |
---|
1623 | found = .TRUE. |
---|
1624 | EXIT |
---|
1625 | ENDIF |
---|
1626 | ENDDO |
---|
1627 | |
---|
1628 | IF ( found ) THEN |
---|
1629 | |
---|
1630 | IF ( cyclic_fill_mode ) THEN |
---|
1631 | |
---|
1632 | CALL rrd_mpi_io_real_3d_cyclic_fill |
---|
1633 | ! |
---|
1634 | !-- Cyclic fill mode requires to use the "cyclic" communicator, in order to initialize |
---|
1635 | !-- grid points at the outer boundaries (ghost layers) of the total domain. These points |
---|
1636 | !-- are not contained in the prerun data, because the prerun used cyclic boundary conditions. |
---|
1637 | CALL exchange_horiz( data, nbgp, alternative_communicator = 1 ) |
---|
1638 | |
---|
1639 | ELSE |
---|
1640 | #if defined( __parallel ) |
---|
1641 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited # of cores is inactive |
---|
1642 | IF ( sm_io%iam_io_pe ) THEN |
---|
1643 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3d, 'native', MPI_INFO_NULL, & |
---|
1644 | ierr ) |
---|
1645 | CALL MPI_FILE_READ_ALL( fh, array_3d, SIZE( array_3d ), MPI_REAL, status, ierr ) |
---|
1646 | ENDIF |
---|
1647 | CALL sm_io%sm_node_barrier() |
---|
1648 | #else |
---|
1649 | CALL posix_lseek( fh, array_position ) |
---|
1650 | CALL posix_read(fh, array_3d, SIZE( array_3d ) ) |
---|
1651 | #endif |
---|
1652 | IF ( include_total_domain_boundaries ) THEN |
---|
1653 | DO i = iog%nxl, iog%nxr |
---|
1654 | data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3d(:,i,iog%nys:iog%nyn) |
---|
1655 | ENDDO |
---|
1656 | ELSE |
---|
1657 | DO i = nxl, nxr |
---|
1658 | data(:,nys:nyn,i) = array_3d(:,i,nys:nyn) |
---|
1659 | ENDDO |
---|
1660 | ENDIF |
---|
1661 | |
---|
1662 | CALL exchange_horiz( data, nbgp ) |
---|
1663 | |
---|
1664 | ENDIF |
---|
1665 | |
---|
1666 | ELSE |
---|
1667 | |
---|
1668 | message_string = '3d-REAL array "' // TRIM( name ) // '" not found in restart file' |
---|
1669 | CALL message( 'rrd_mpi_io_real_3d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1670 | |
---|
1671 | ENDIF |
---|
1672 | |
---|
1673 | |
---|
1674 | CONTAINS |
---|
1675 | |
---|
1676 | SUBROUTINE rrd_mpi_io_real_3d_cyclic_fill |
---|
1677 | |
---|
1678 | IMPLICIT NONE |
---|
1679 | |
---|
1680 | INTEGER(iwp) :: i !< |
---|
1681 | INTEGER(iwp) :: ie !< |
---|
1682 | INTEGER(iwp) :: is !< |
---|
1683 | INTEGER(iwp) :: i_remote !< |
---|
1684 | INTEGER(iwp) :: j !< |
---|
1685 | INTEGER(iwp) :: je !< |
---|
1686 | INTEGER(iwp) :: js !< |
---|
1687 | INTEGER(iwp) :: j_remote !< |
---|
1688 | INTEGER(iwp) :: nval !< |
---|
1689 | INTEGER(iwp) :: rem_pe !< |
---|
1690 | |
---|
1691 | #if defined( __parallel ) |
---|
1692 | INTEGER(iwp) :: ierr !< |
---|
1693 | INTEGER(KIND=MPI_ADDRESS_KIND) :: rem_offs !< |
---|
1694 | #else |
---|
1695 | INTEGER(idp) :: rem_offs |
---|
1696 | #endif |
---|
1697 | |
---|
1698 | |
---|
1699 | CALL prerun_grid%activate_grid_from_this_class() |
---|
1700 | |
---|
1701 | IF ( pe_active_for_read ) THEN |
---|
1702 | #if defined( __parallel ) |
---|
1703 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3d, 'native', MPI_INFO_NULL, & |
---|
1704 | ierr ) |
---|
1705 | CALL MPI_FILE_READ_ALL( fh, array_3d, SIZE( array_3d ), MPI_REAL, status, ierr ) |
---|
1706 | #endif |
---|
1707 | DO i = nxl, nxr |
---|
1708 | rmabuf_3d(:,nys:nyn,i) = array_3d(:,i,nys:nyn) |
---|
1709 | ENDDO |
---|
1710 | data(:,nys:nyn,nxl:nxr) = rmabuf_3d |
---|
1711 | ENDIF |
---|
1712 | CALL mainrun_grid%activate_grid_from_this_class() |
---|
1713 | |
---|
1714 | #if defined( __parallel ) |
---|
1715 | ! |
---|
1716 | !-- Close RMA window to allow remote access |
---|
1717 | CALL MPI_WIN_FENCE( 0, rmawin_3d, ierr ) |
---|
1718 | #endif |
---|
1719 | |
---|
1720 | IF ( .NOT. pe_active_for_read ) THEN |
---|
1721 | |
---|
1722 | is = nxl |
---|
1723 | ie = nxr |
---|
1724 | js = nys |
---|
1725 | je = nyn |
---|
1726 | |
---|
1727 | ELSE |
---|
1728 | |
---|
1729 | is = nxl |
---|
1730 | ie = nxr |
---|
1731 | js = prerun_grid%nys+1 |
---|
1732 | je = nyn |
---|
1733 | |
---|
1734 | DO i = is, ie |
---|
1735 | DO j = js, je |
---|
1736 | i_remote = MOD(i,nx_on_file+1) |
---|
1737 | j_remote = MOD(j,ny_on_file+1) |
---|
1738 | rem_pe = remote_pe(i_remote,j_remote) |
---|
1739 | rem_offs = rma_offset(i_remote,j_remote)*(nzt-nzb+2) |
---|
1740 | nval = nzt-nzb+2 |
---|
1741 | |
---|
1742 | #if defined( __parallel ) |
---|
1743 | IF(rem_pe /= myid) THEN |
---|
1744 | CALL MPI_GET( data(nzb,j,i), nval, MPI_REAL, rem_pe, rem_offs, nval, MPI_REAL, & |
---|
1745 | rmawin_3d, ierr) |
---|
1746 | ELSE |
---|
1747 | data(:,j,i) = rmabuf_3d(:,j_remote,i_remote) |
---|
1748 | ENDIF |
---|
1749 | #else |
---|
1750 | data(:,j,i) = array_3d(:,i_remote,j_remote) |
---|
1751 | #endif |
---|
1752 | ENDDO |
---|
1753 | ENDDO |
---|
1754 | is = prerun_grid%nxr+1 |
---|
1755 | ie = nxr |
---|
1756 | js = nys |
---|
1757 | je = nyn |
---|
1758 | |
---|
1759 | ENDIF |
---|
1760 | |
---|
1761 | DO i = is, ie |
---|
1762 | DO j = js, je |
---|
1763 | i_remote = MOD(i,nx_on_file+1) |
---|
1764 | j_remote = MOD(j,ny_on_file+1) |
---|
1765 | rem_pe = remote_pe(i_remote,j_remote) |
---|
1766 | rem_offs = rma_offset(i_remote,j_remote) * ( nzt-nzb+2 ) |
---|
1767 | nval = nzt-nzb+2 |
---|
1768 | |
---|
1769 | #if defined( __parallel ) |
---|
1770 | IF ( rem_pe /= myid ) THEN |
---|
1771 | CALL MPI_GET( data(nzb,j,i), nval, MPI_REAL, rem_pe, rem_offs, nval, MPI_REAL, & |
---|
1772 | rmawin_3d, ierr) |
---|
1773 | ELSE |
---|
1774 | data(:,j,i) = rmabuf_3d(:,j_remote,i_remote) |
---|
1775 | ENDIF |
---|
1776 | #else |
---|
1777 | data(:,j,i) = array_3d(:,i_remote,j_remote) |
---|
1778 | #endif |
---|
1779 | ENDDO |
---|
1780 | ENDDO |
---|
1781 | |
---|
1782 | #if defined( __parallel ) |
---|
1783 | ! |
---|
1784 | !-- Reopen RMA window to allow filling |
---|
1785 | CALL MPI_WIN_FENCE( 0, rmawin_3d, ierr ) |
---|
1786 | #endif |
---|
1787 | |
---|
1788 | END SUBROUTINE rrd_mpi_io_real_3d_cyclic_fill |
---|
1789 | |
---|
1790 | END SUBROUTINE rrd_mpi_io_real_3d |
---|
1791 | |
---|
1792 | |
---|
1793 | |
---|
1794 | !--------------------------------------------------------------------------------------------------! |
---|
1795 | ! Description: |
---|
1796 | ! ------------ |
---|
1797 | !> Read 3d-REAL soil array with MPI-IO |
---|
1798 | !> nzb_soil, nzt_soil are located in the module land_surface_model_mod. Since a circular USE |
---|
1799 | !> dependency between that module and this one is not allowed, they must be passed as arguments. |
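!> A caller-side sketch (hypothetical variable name; assumes the generic interface rrd_mpi_io
!> declared earlier in this module resolves to this routine):
!>    CALL rrd_mpi_io( 't_soil', t_soil_on_file, nzb_soil, nzt_soil )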
---|
1800 | !--------------------------------------------------------------------------------------------------! |
---|
1801 | SUBROUTINE rrd_mpi_io_real_3d_soil( name, data, nzb_soil, nzt_soil ) |
---|
1802 | |
---|
1803 | IMPLICIT NONE |
---|
1804 | |
---|
1805 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1806 | |
---|
1807 | INTEGER(iwp) :: i !< |
---|
1808 | INTEGER, INTENT(IN) :: nzb_soil !< |
---|
1809 | INTEGER, INTENT(IN) :: nzt_soil !< |
---|
1810 | |
---|
1811 | #if defined( __parallel ) |
---|
1812 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
1813 | INTEGER(iwp) :: ierr !< |
---|
1814 | #endif |
---|
1815 | |
---|
1816 | LOGICAL :: found !< |
---|
1817 | |
---|
1818 | REAL(wp), INTENT(INOUT), DIMENSION(nzb_soil:nzt_soil,nysg:nyng,nxlg:nxrg) :: data !< |
---|
1819 | |
---|
1820 | |
---|
1821 | ! |
---|
1822 | !-- Prerun data is not allowed to contain soil information so far |
---|
1823 | IF ( cyclic_fill_mode ) THEN |
---|
1824 | message_string = 'prerun data is not allowed to contain soil information' |
---|
1825 | CALL message( 'rrd_mpi_io_real_3d_soil', 'PA0729', 3, 2, -1, 6, 0 ) |
---|
1826 | ENDIF |
---|
1827 | |
---|
1828 | found = .FALSE. |
---|
1829 | |
---|
1830 | DO i = 1, tgh%nr_arrays |
---|
1831 | IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN |
---|
1832 | array_position = array_offset(i) |
---|
1833 | found = .TRUE. |
---|
1834 | EXIT |
---|
1835 | ENDIF |
---|
1836 | ENDDO |
---|
1837 | |
---|
1838 | IF ( found ) THEN |
---|
1839 | #if defined( __parallel ) |
---|
1840 | CALL rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil ) |
---|
1841 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
1842 | IF ( sm_io%iam_io_pe ) THEN |
---|
1843 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3dsoil, 'native', & |
---|
1844 | MPI_INFO_NULL, ierr ) |
---|
1845 | CALL MPI_FILE_READ_ALL( fh, array_3d_soil, SIZE( array_3d_soil ), MPI_REAL, status, ierr ) |
---|
1846 | CALL MPI_TYPE_FREE( ft_3dsoil, ierr ) |
---|
1847 | ENDIF |
---|
1848 | CALL sm_io%sm_node_barrier() |
---|
1849 | #else |
---|
1850 | CALL posix_lseek( fh, array_position ) |
---|
1851 | CALL posix_read( fh, array_3d_soil, SIZE( array_3d_soil ) ) |
---|
1852 | #endif |
---|
1853 | IF ( include_total_domain_boundaries ) THEN |
---|
1854 | DO i = iog%nxl, iog%nxr |
---|
1855 | data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3d_soil(:,i,iog%nys:iog%nyn) |
---|
1856 | ENDDO |
---|
1857 | ELSE |
---|
1858 | DO i = nxl, nxr |
---|
1859 | data(:,nys:nyn,i) = array_3d_soil(:,i,nys:nyn) |
---|
1860 | ENDDO |
---|
1861 | ENDIF |
---|
1862 | |
---|
1863 | ELSE |
---|
1864 | |
---|
1865 | message_string = '3d-REAL soil array "' // TRIM( name ) // '" not found in restart file' |
---|
1866 | CALL message( 'rrd_mpi_io_real_3d_soil', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1867 | |
---|
1868 | ENDIF |
---|
1869 | |
---|
1870 | END SUBROUTINE rrd_mpi_io_real_3d_soil |
---|
1871 | |
---|
1872 | |
---|
1873 | |
---|
1874 | !--------------------------------------------------------------------------------------------------! |
---|
1875 | ! Description: |
---|
1876 | ! ------------ |
---|
1877 | !> Read CHARACTER with MPI-IO |
---|
1878 | !--------------------------------------------------------------------------------------------------! |
---|
1879 | SUBROUTINE rrd_mpi_io_char( name, text ) |
---|
1880 | |
---|
1881 | IMPLICIT NONE |
---|
1882 | |
---|
1883 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1884 | CHARACTER(LEN=*), INTENT(OUT) :: text !< |
---|
1885 | CHARACTER(LEN=128) :: line !< |
---|
1886 | |
---|
1887 | INTEGER(iwp) :: i !< |
---|
1888 | |
---|
1889 | LOGICAL :: found !< |
---|
1890 | |
---|
1891 | |
---|
1892 | found = .FALSE. |
---|
1893 | text = ' ' |
---|
1894 | |
---|
1895 | DO i = 1, tgh%nr_char |
---|
1896 | line = text_lines(i) |
---|
1897 | IF ( TRIM( line(1:32) ) == TRIM( name ) ) THEN |
---|
1898 | text = line(33:) |
---|
1899 | found = .TRUE. |
---|
1900 | EXIT |
---|
1901 | ENDIF |
---|
1902 | ENDDO |
---|
1903 | |
---|
1904 | IF ( .NOT. found ) THEN |
---|
1905 | message_string = 'CHARACTER variable "' // TRIM( name ) // '" not found in restart file' |
---|
1906 | CALL message( 'rrd_mpi_io_char', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
1907 | ENDIF |
---|
1908 | |
---|
1909 | END SUBROUTINE rrd_mpi_io_char |
---|
1910 | |
---|
1911 | |
---|
1912 | |
---|
1913 | !--------------------------------------------------------------------------------------------------! |
---|
1914 | ! Description: |
---|
1915 | ! ------------ |
---|
1916 | !> Read LOGICAL with MPI-IO |
---|
1917 | !--------------------------------------------------------------------------------------------------! |
---|
1918 | SUBROUTINE rrd_mpi_io_logical( name, value ) |
---|
1919 | |
---|
1920 | IMPLICIT NONE |
---|
1921 | |
---|
1922 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1923 | |
---|
1924 | INTEGER(iwp) :: logical_as_integer !< |
---|
1925 | |
---|
1926 | LOGICAL, INTENT(OUT) :: value !< |
---|
1927 | |
---|
1928 | |
---|
1929 | CALL rrd_mpi_io_int( name, logical_as_integer ) |
---|
1930 | value = ( logical_as_integer == 1 ) |
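!
!-- LOGICAL values are stored as INTEGER in the restart file (1 = .TRUE., 0 = .FALSE.);
!-- wrd_mpi_io_logical below performs the inverse mapping when writing.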
---|
1931 | |
---|
1932 | END SUBROUTINE rrd_mpi_io_logical |
---|
1933 | |
---|
1934 | |
---|
1935 | |
---|
1936 | !--------------------------------------------------------------------------------------------------! |
---|
1937 | ! Description: |
---|
1938 | ! ------------ |
---|
1939 | !> Write INTEGER with MPI-IO |
---|
1940 | !--------------------------------------------------------------------------------------------------! |
---|
1941 | SUBROUTINE wrd_mpi_io_int( name, value ) |
---|
1942 | |
---|
1943 | IMPLICIT NONE |
---|
1944 | |
---|
1945 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1946 | |
---|
1947 | INTEGER(KIND=iwp), INTENT(IN) :: value !< |
---|
1948 | |
---|
1949 | |
---|
1950 | IF ( header_int_index == max_nr_int ) THEN |
---|
1951 | STOP '+++ maximum number of INTEGER entries in restart file header exceeded' |
---|
1952 | ENDIF |
---|
1953 | |
---|
1954 | int_names(header_int_index) = name |
---|
1955 | int_values(header_int_index) = value |
---|
1956 | header_int_index = header_int_index + 1 |
---|
1957 | |
---|
1958 | END SUBROUTINE wrd_mpi_io_int |
---|
1959 | |
---|
1960 | |
---|
1961 | !--------------------------------------------------------------------------------------------------! |
---|
1962 | ! Description: |
---|
1963 | ! ------------ |
---|
1964 | !> Write REAL with MPI-IO |
---|
1965 | !--------------------------------------------------------------------------------------------------! |
---|
1966 | SUBROUTINE wrd_mpi_io_real( name, value ) |
---|
1967 | |
---|
1968 | IMPLICIT NONE |
---|
1969 | |
---|
1970 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1971 | |
---|
1972 | REAL(wp), INTENT(IN) :: value !< |
---|
1973 | |
---|
1974 | |
---|
1975 | IF ( header_real_index == max_nr_real ) THEN |
---|
1976 | STOP '+++ maximum number of REAL entries in restart file header exceeded' |
---|
1977 | ENDIF |
---|
1978 | |
---|
1979 | real_names(header_real_index) = name |
---|
1980 | real_values(header_real_index) = value |
---|
1981 | header_real_index = header_real_index + 1 |
---|
1982 | |
---|
1983 | END SUBROUTINE wrd_mpi_io_real |
---|
1984 | |
---|
1985 | |
---|
1986 | |
---|
1987 | !--------------------------------------------------------------------------------------------------! |
---|
1988 | ! Description: |
---|
1989 | ! ------------ |
---|
1990 | !> Write 2d-REAL array with MPI-IO |
---|
1991 | !--------------------------------------------------------------------------------------------------! |
---|
1992 | SUBROUTINE wrd_mpi_io_real_2d( name, data ) |
---|
1993 | |
---|
1994 | IMPLICIT NONE |
---|
1995 | |
---|
1996 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
1997 | |
---|
1998 | INTEGER(iwp) :: i !< |
---|
1999 | |
---|
2000 | #if defined( __parallel ) |
---|
2001 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2002 | #endif |
---|
2003 | |
---|
2004 | REAL(wp), INTENT(IN), DIMENSION(nysg:nyng,nxlg:nxrg) :: data !< |
---|
2005 | |
---|
2006 | |
---|
2007 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2008 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2009 | ENDIF |
---|
2010 | |
---|
2011 | array_names(header_array_index) = name |
---|
2012 | array_offset(header_array_index) = array_position |
---|
2013 | header_array_index = header_array_index + 1 |
---|
2014 | |
---|
2015 | IF ( include_total_domain_boundaries ) THEN |
---|
2016 | ! |
---|
2017 | !-- Prepare output with outer boundaries |
---|
2018 | DO i = iog%nxl, iog%nxr |
---|
2019 | array_2d(i,iog%nys:iog%nyn) = data(iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) |
---|
2020 | ENDDO |
---|
2021 | |
---|
2022 | ELSE |
---|
2023 | ! |
---|
2024 | !-- Prepare output without outer boundaries |
---|
2025 | DO i = nxl,nxr |
---|
2026 | array_2d(i,iog%nys:iog%nyn) = data(nys:nyn,i) |
---|
2027 | ENDDO |
---|
2028 | |
---|
2029 | ENDIF |
---|
2030 | |
---|
2031 | #if defined( __parallel ) |
---|
2032 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2033 | IF ( sm_io%iam_io_pe ) THEN |
---|
2034 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_2d, 'native', MPI_INFO_NULL, ierr ) |
---|
2035 | CALL MPI_FILE_WRITE_ALL( fh, array_2d, SIZE( array_2d), MPI_REAL, status, ierr ) |
---|
2036 | ENDIF |
---|
2037 | CALL sm_io%sm_node_barrier() |
---|
2038 | #else |
---|
2039 | CALL posix_lseek( fh, array_position ) |
---|
2040 | CALL posix_write( fh, array_2d, SIZE( array_2d ) ) |
---|
2041 | #endif |
---|
2042 | ! |
---|
2043 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT. |
---|
2044 | !-- Maybe a compiler problem. |
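!-- Illustration: with 8-byte reals (wp = 8) a 4-byte intermediate already overflows once the total
!-- domain exceeds 2**31 / 8 = 268435456 grid points, e.g. iog%nx = iog%ny = 16383 gives
!-- 16384 * 16384 * 8 = 2147483648 > HUGE( 1 ) = 2147483647.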
---|
2045 | array_position = array_position + ( INT( iog%ny, KIND=rd_offset_kind ) + 1 ) * & |
---|
2046 | ( INT( iog%nx, KIND=rd_offset_kind ) + 1 ) * wp |
---|
2047 | |
---|
2048 | END SUBROUTINE wrd_mpi_io_real_2d |
---|
2049 | |
---|
2050 | |
---|
2051 | |
---|
2052 | !--------------------------------------------------------------------------------------------------! |
---|
2053 | ! Description: |
---|
2054 | ! ------------ |
---|
2055 | !> Write 2d-INTEGER array with MPI-IO |
---|
2056 | !--------------------------------------------------------------------------------------------------! |
---|
2057 | SUBROUTINE wrd_mpi_io_int_2d( name, data ) |
---|
2058 | |
---|
2059 | IMPLICIT NONE |
---|
2060 | |
---|
2061 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2062 | |
---|
2063 | INTEGER(iwp) :: i !< |
---|
2064 | INTEGER(iwp) :: j !< |
---|
2065 | |
---|
2066 | #if defined( __parallel ) |
---|
2067 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2068 | #endif |
---|
2069 | INTEGER(KIND=iwp), INTENT(IN), DIMENSION(:,:) :: data !< |
---|
2070 | |
---|
2071 | |
---|
2072 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2073 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2074 | ENDIF |
---|
2075 | |
---|
2076 | array_names(header_array_index) = name |
---|
2077 | array_offset(header_array_index) = array_position |
---|
2078 | header_array_index = header_array_index + 1 |
---|
2079 | |
---|
2080 | IF ( ( nxr - nxl + 1 + 2 * nbgp ) == SIZE( data, 2 ) ) THEN |
---|
2081 | ! |
---|
2082 | !-- Integer arrays with ghost layers are not implemented yet. Arrays of this kind would be |
---|
2083 | !-- dimensioned in the caller subroutine as |
---|
2084 | !-- INTEGER, DIMENSION(nysg:nyng,nxlg:nxrg) :: data |
---|
2085 | message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be written to restart ' // & |
---|
2086 | 'file is defined with illegal dimensions in the PALM code' |
---|
2087 | CALL message( 'wrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 ) |
---|
2088 | |
---|
2089 | ELSEIF ( ( nxr-nxl+1 ) == SIZE( data, 2 ) ) THEN |
---|
2090 | ! |
---|
2091 | !-- INTEGER input array without ghost layers. |
---|
2092 | !-- This kind of array is dimensioned in the caller subroutine as |
---|
2093 | !-- INTEGER, DIMENSION(nys:nyn,nxl:nxr) :: data |
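!-- A caller-side sketch (hypothetical array name; assumes the generic interface wrd_mpi_io
!-- declared earlier in this module resolves to this routine):
!--    INTEGER(iwp), DIMENSION(nys:nyn,nxl:nxr) ::  surface_classes
!--    CALL wrd_mpi_io( 'surface_classes', surface_classes )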
---|
2094 | DO j = nys, nyn |
---|
2095 | DO i = nxl, nxr |
---|
2096 | array_2di(i,j) = data(j-nys+1,i-nxl+1) |
---|
2097 | ENDDO |
---|
2098 | ENDDO |
---|
2099 | #if defined( __parallel ) |
---|
2100 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2101 | IF ( sm_io%iam_io_pe ) THEN |
---|
2102 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_2di_nb, 'native', & |
---|
2103 | MPI_INFO_NULL, ierr ) |
---|
2104 | CALL MPI_FILE_WRITE_ALL( fh, array_2di, SIZE( array_2di ), MPI_INTEGER, status, ierr ) |
---|
2105 | ENDIF |
---|
2106 | CALL sm_io%sm_node_barrier() |
---|
2107 | #else |
---|
2108 | CALL posix_lseek( fh, array_position ) |
---|
2109 | CALL posix_write( fh, array_2di, SIZE( array_2di ) ) |
---|
2110 | #endif |
---|
2111 | ! |
---|
2112 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte |
---|
2113 | !-- INT. Maybe a compiler problem. |
---|
2114 | array_position = array_position + INT( (ny+1), KIND=rd_offset_kind ) * & |
---|
2115 | INT( (nx+1), KIND=rd_offset_kind ) * 4 |
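!-- The factor 4 corresponds to the 4-byte storage of the MPI_INTEGER values written above.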
---|
2116 | |
---|
2117 | ELSE |
---|
2118 | |
---|
2119 | message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be written to restart ' // & |
---|
2120 | 'file is defined with illegal dimensions in the PALM code' |
---|
2121 | CALL message( 'wrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 ) |
---|
2122 | |
---|
2123 | ENDIF |
---|
2124 | |
---|
2125 | END SUBROUTINE wrd_mpi_io_int_2d |
---|
2126 | |
---|
2127 | |
---|
2128 | |
---|
2129 | !--------------------------------------------------------------------------------------------------! |
---|
2130 | ! Description: |
---|
2131 | ! ------------ |
---|
2132 | !> Write 3d-INTEGER*4 array with MPI-IO |
---|
2133 | !--------------------------------------------------------------------------------------------------! |
---|
2134 | SUBROUTINE wrd_mpi_io_int4_3d( name, data ) |
---|
2135 | |
---|
2136 | IMPLICIT NONE |
---|
2137 | |
---|
2138 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2139 | |
---|
2140 | INTEGER(iwp) :: i !< |
---|
2141 | #if defined( __parallel ) |
---|
2142 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2143 | #endif |
---|
2144 | INTEGER(isp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
2145 | |
---|
2146 | |
---|
2147 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2148 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2149 | ENDIF |
---|
2150 | |
---|
2151 | array_names(header_array_index) = name |
---|
2152 | array_offset(header_array_index) = array_position |
---|
2153 | header_array_index = header_array_index + 1 |
---|
2154 | |
---|
2155 | IF ( include_total_domain_boundaries ) THEN |
---|
2156 | ! |
---|
2157 | !-- Prepare output of 3d-INTEGER*4 array with ghost layers. In the virtual PE grid, the first |
---|
2158 | !-- dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the |
---|
2159 | !-- index order of the array in the same way, i.e. the first dimension should be along x and the |
---|
2160 | !-- second along y. For this reason, the original PALM data need to be swapped. |
---|
2161 | DO i = iog%nxl, iog%nxr |
---|
2162 | array_3di4(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) |
---|
2163 | ENDDO |
---|
2164 | |
---|
2165 | ELSE |
---|
2166 | ! |
---|
2167 | !-- Prepare output of 3d-INTEGER*4 array without ghost layers |
---|
2168 | DO i = nxl, nxr |
---|
2169 | array_3di4(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i) |
---|
2170 | ENDDO |
---|
2171 | |
---|
2172 | ENDIF |
---|
2173 | #if defined( __parallel ) |
---|
2174 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2175 | IF ( sm_io%iam_io_pe ) THEN |
---|
2176 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_3di4, 'native', MPI_INFO_NULL, ierr ) |
---|
2177 | CALL MPI_FILE_WRITE_ALL( fh, array_3di4, SIZE( array_3di4 ), MPI_INTEGER, status, ierr ) |
---|
2178 | ENDIF |
---|
2179 | CALL sm_io%sm_node_barrier() |
---|
2180 | #else |
---|
2181 | CALL posix_lseek( fh, array_position ) |
---|
2182 | CALL posix_write( fh, array_3di4, SIZE( array_3di4 ) ) |
---|
2183 | #endif |
---|
2184 | ! |
---|
2185 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT. |
---|
2186 | !-- Maybe a compiler problem. |
---|
2187 | array_position = array_position + INT( (nz+2), KIND = rd_offset_kind ) * & |
---|
2188 | INT( (iog%ny+1), KIND = rd_offset_kind ) * & |
---|
2189 | INT( (iog%nx+1), KIND = rd_offset_kind ) * isp |
---|
2190 | |
---|
2191 | WRITE( 9, * )  'array_position int4_3d ', TRIM( name ), ' ', array_position |
---|
2192 | |
---|
2193 | END SUBROUTINE wrd_mpi_io_int4_3d |
---|
2194 | |
---|
2195 | |
---|
2196 | |
---|
2197 | !--------------------------------------------------------------------------------------------------! |
---|
2198 | ! Description: |
---|
2199 | ! ------------ |
---|
2200 | !> Write 3d-INTEGER*8 array with MPI-IO |
---|
2201 | !--------------------------------------------------------------------------------------------------! |
---|
2202 | SUBROUTINE wrd_mpi_io_int8_3d( name, data ) |
---|
2203 | |
---|
2204 | IMPLICIT NONE |
---|
2205 | |
---|
2206 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2207 | |
---|
2208 | INTEGER(iwp) :: i !< |
---|
2209 | #if defined( __parallel ) |
---|
2210 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2211 | #endif |
---|
2212 | INTEGER(idp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
2213 | |
---|
2214 | |
---|
2215 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2216 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2217 | ENDIF |
---|
2218 | |
---|
2219 | array_names(header_array_index) = name |
---|
2220 | array_offset(header_array_index) = array_position |
---|
2221 | header_array_index = header_array_index + 1 |
---|
2222 | |
---|
2223 | IF ( include_total_domain_boundaries ) THEN |
---|
2224 | ! |
---|
2225 | !-- Prepare output of 3d-INTEGER*8 array with ghost layers. In the virtual PE grid, the first |
---|
2226 | !-- dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the |
---|
2227 | !-- index order of the array in the same way, i.e. the first dimension should be along x and the |
---|
2228 | !-- second along y. For this reason, the original PALM data need to be swapped. |
---|
2229 | DO i = iog%nxl, iog%nxr |
---|
2230 | array_3di8(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) |
---|
2231 | ENDDO |
---|
2232 | |
---|
2233 | ELSE |
---|
2234 | ! |
---|
2235 | !-- Prepare output of 3d-INTEGER*8 array without ghost layers |
---|
2236 | DO i = nxl, nxr |
---|
2237 | array_3di8(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i) |
---|
2238 | ENDDO |
---|
2239 | |
---|
2240 | ENDIF |
---|
2241 | #if defined( __parallel ) |
---|
2242 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2243 | IF ( sm_io%iam_io_pe ) THEN |
---|
2244 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER8, ft_3di8, 'native', MPI_INFO_NULL, ierr ) |
---|
2245 | CALL MPI_FILE_WRITE_ALL( fh, array_3di8, SIZE( array_3di8 ), MPI_INTEGER8, status, ierr ) |
---|
2246 | ENDIF |
---|
2247 | CALL sm_io%sm_node_barrier() |
---|
2248 | #else |
---|
2249 | CALL posix_lseek( fh, array_position ) |
---|
2250 | CALL posix_write( fh, array_3di8, SIZE( array_3di8 ) ) |
---|
2251 | #endif |
---|
2252 | ! |
---|
2253 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT. |
---|
2254 | !-- Maybe a compiler problem. |
---|
2255 | array_position = array_position + INT( (nz+2), KIND = rd_offset_kind ) * & |
---|
2256 | INT( (iog%ny+1), KIND = rd_offset_kind ) * & |
---|
2257 | INT( (iog%nx+1), KIND = rd_offset_kind ) * dp |
---|
2258 | |
---|
2259 | WRITE( 9, * )  'array_position int8_3d ', TRIM( name ), ' ', array_position |
---|
2260 | |
---|
2261 | END SUBROUTINE wrd_mpi_io_int8_3d |
---|
2262 | |
---|
2263 | |
---|
2264 | |
---|
2265 | !--------------------------------------------------------------------------------------------------! |
---|
2266 | ! Description: |
---|
2267 | ! ------------ |
---|
2268 | !> Write 3d-REAL array with MPI-IO |
---|
2269 | !--------------------------------------------------------------------------------------------------! |
---|
2270 | SUBROUTINE wrd_mpi_io_real_3d( name, data ) |
---|
2271 | |
---|
2272 | IMPLICIT NONE |
---|
2273 | |
---|
2274 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2275 | |
---|
2276 | INTEGER(iwp) :: i !< |
---|
2277 | #if defined( __parallel ) |
---|
2278 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2279 | #endif |
---|
2280 | REAL(wp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data !< |
---|
2281 | |
---|
2282 | |
---|
2283 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2284 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2285 | ENDIF |
---|
2286 | |
---|
2287 | array_names(header_array_index) = name |
---|
2288 | array_offset(header_array_index) = array_position |
---|
2289 | header_array_index = header_array_index + 1 |
---|
2290 | |
---|
2291 | IF ( include_total_domain_boundaries ) THEN |
---|
2292 | ! |
---|
2293 | !-- Prepare output of 3d-REAL-array with ghost layers. In the virtual PE grid, the first |
---|
2294 | !-- dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the |
---|
2295 | !-- index order of the array in the same way, i.e. the first dimension should be along x and the |
---|
2296 | !-- second along y. For this reason, the original PALM data need to be swapped. |
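!-- In other words, the copy below effectively performs array_3d(k,i,j) = data(k,j,i) for every
!-- point of the local subdomain (shifted by nbgp so that the outer boundaries are included).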
---|
2297 | DO i = iog%nxl, iog%nxr |
---|
2298 | array_3d(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) |
---|
2299 | ENDDO |
---|
2300 | |
---|
2301 | ELSE |
---|
2302 | ! |
---|
2303 | !-- Prepare output of 3d-REAL-array without ghost layers |
---|
2304 | DO i = nxl, nxr |
---|
2305 | array_3d(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i) |
---|
2306 | ENDDO |
---|
2307 | |
---|
2308 | ENDIF |
---|
2309 | #if defined( __parallel ) |
---|
2310 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2311 | IF ( sm_io%iam_io_pe ) THEN |
---|
2312 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3d, 'native', MPI_INFO_NULL, ierr ) |
---|
2313 | CALL MPI_FILE_WRITE_ALL( fh, array_3d, SIZE( array_3d ), MPI_REAL, status, ierr ) |
---|
2314 | ENDIF |
---|
2315 | CALL sm_io%sm_node_barrier() |
---|
2316 | #else |
---|
2317 | CALL posix_lseek( fh, array_position ) |
---|
2318 | CALL posix_write( fh, array_3d, SIZE( array_3d ) ) |
---|
2319 | #endif |
---|
2320 | ! |
---|
2321 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT. |
---|
2322 | !-- Maybe a compiler problem. |
---|
2323 | array_position = array_position + INT( (nz+2), KIND = rd_offset_kind ) * & |
---|
2324 | INT( (iog%ny+1), KIND = rd_offset_kind ) * & |
---|
2325 | INT( (iog%nx+1), KIND = rd_offset_kind ) * wp |
---|
2326 | |
---|
2327 | WRITE( 9, * )  'array_position real3d ', TRIM( name ), ' ', array_position |
---|
2328 | |
---|
2329 | END SUBROUTINE wrd_mpi_io_real_3d |
---|
2330 | |
---|
2331 | |
---|
2332 | |
---|
2333 | !--------------------------------------------------------------------------------------------------! |
---|
2334 | ! Description: |
---|
2335 | ! ------------ |
---|
2336 | !> Write 3d-REAL soil array with MPI-IO. |
---|
2337 | !> nzb_soil, nzt_soil are located in the module land_surface_model_mod. Since a circular USE |
---|
2338 | !> dependency between that module and this one is not allowed, they must be passed as arguments. |
---|
2339 | !--------------------------------------------------------------------------------------------------! |
---|
2340 | SUBROUTINE wrd_mpi_io_real_3d_soil( name, data, nzb_soil, nzt_soil ) |
---|
2341 | |
---|
2342 | IMPLICIT NONE |
---|
2343 | |
---|
2344 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2345 | |
---|
2346 | INTEGER(iwp) :: i !< |
---|
2347 | INTEGER, INTENT(IN) :: nzb_soil !< |
---|
2348 | INTEGER, INTENT(IN) :: nzt_soil !< |
---|
2349 | |
---|
2350 | #if defined( __parallel ) |
---|
2351 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2352 | #endif |
---|
2353 | |
---|
2354 | REAL(wp), INTENT(IN), DIMENSION(nzb_soil:nzt_soil,nysg:nyng,nxlg:nxrg) :: data !< |
---|
2355 | |
---|
2356 | |
---|
2357 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2358 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2359 | ENDIF |
---|
2360 | |
---|
2361 | array_names(header_array_index) = name |
---|
2362 | array_offset(header_array_index) = array_position |
---|
2363 | header_array_index = header_array_index + 1 |
---|
2364 | |
---|
2365 | #if defined( __parallel ) |
---|
2366 | CALL rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil ) |
---|
2367 | #endif |
---|
2368 | |
---|
2369 | IF ( include_total_domain_boundaries ) THEN |
---|
2370 | ! |
---|
2371 | !-- Prepare output of 3d-REAL-array with ghost layers. In the virtual PE grid, the first |
---|
2372 | !-- dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the |
---|
2373 | !-- index order of the array in the same way, i.e. the first dimension should be along x and the |
---|
2374 | !-- second along y. For this reason, the original PALM data need to be swapped. |
---|
2375 | DO i = iog%nxl, iog%nxr |
---|
2376 | array_3d_soil(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) |
---|
2377 | ENDDO |
---|
2378 | |
---|
2379 | ELSE |
---|
2380 | ! |
---|
2381 | !-- Prepare output of 3d-REAL-array without ghost layers |
---|
2382 | DO i = nxl, nxr |
---|
2383 | array_3d_soil(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i) |
---|
2384 | ENDDO |
---|
2385 | |
---|
2386 | ENDIF |
---|
2387 | #if defined( __parallel ) |
---|
2388 | CALL sm_io%sm_node_barrier() ! Has no effect if I/O on limited number of cores is inactive |
---|
2389 | IF ( sm_io%iam_io_pe ) THEN |
---|
2390 | CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3dsoil, 'native', MPI_INFO_NULL, & |
---|
2391 | ierr ) |
---|
2392 | CALL MPI_FILE_WRITE_ALL( fh, array_3d_soil, SIZE( array_3d_soil ), MPI_REAL, status, ierr ) |
---|
2393 | ENDIF |
---|
2394 | CALL sm_io%sm_node_barrier() |
---|
2395 | #else |
---|
2396 | CALL posix_lseek( fh, array_position ) |
---|
2397 | CALL posix_write( fh, array_3d_soil, SIZE( array_3d_soil ) ) |
---|
2398 | #endif |
---|
2399 | ! |
---|
2400 | !-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT. |
---|
2401 | !-- Maybe a compiler problem. |
---|
2402 | array_position = array_position + INT( (nzt_soil-nzb_soil+1), KIND = rd_offset_kind ) * & |
---|
2403 | INT( (iog%ny+1), KIND = rd_offset_kind ) * & |
---|
2404 | INT( (iog%nx+1), KIND = rd_offset_kind ) * wp |
---|
2405 | |
---|
2406 | END SUBROUTINE wrd_mpi_io_real_3d_soil |
---|
2407 | |
---|
2408 | |
---|
2409 | |
---|
2410 | !--------------------------------------------------------------------------------------------------! |
---|
2411 | ! Description: |
---|
2412 | ! ------------ |
---|
2413 | !> Write CHARACTER with MPI-IO |
---|
2414 | !--------------------------------------------------------------------------------------------------! |
---|
2415 | SUBROUTINE wrd_mpi_io_char( name, text ) |
---|
2416 | |
---|
2417 | IMPLICIT NONE |
---|
2418 | |
---|
2419 | CHARACTER(LEN=128) :: lo_line !< |
---|
2420 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2421 | CHARACTER(LEN=*), INTENT(IN) :: text !< |
---|
2422 | |
---|
2423 | |
---|
2424 | IF ( header_char_index == max_nr_char ) THEN |
---|
2425 | STOP '+++ maximum number of CHARACTER entries in restart file header exceeded' |
---|
2426 | ENDIF |
---|
2427 | |
---|
2428 | lo_line = name |
---|
2429 | lo_line(33:) = text |
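!
!-- Header text lines use a fixed layout: the variable name occupies columns 1 to 32 and the text
!-- itself starts at column 33. rrd_mpi_io_char relies on the same layout when searching a name.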
---|
2430 | text_lines(header_char_index) = lo_line |
---|
2431 | header_char_index = header_char_index + 1 |
---|
2432 | |
---|
2433 | END SUBROUTINE wrd_mpi_io_char |
---|
2434 | |
---|
2435 | |
---|
2436 | |
---|
2437 | !--------------------------------------------------------------------------------------------------! |
---|
2438 | ! Description: |
---|
2439 | ! ------------ |
---|
2440 | !> Write LOGICAL with MPI-IO |
---|
2441 | !--------------------------------------------------------------------------------------------------! |
---|
2442 | SUBROUTINE wrd_mpi_io_logical( name, value ) |
---|
2443 | |
---|
2444 | IMPLICIT NONE |
---|
2445 | |
---|
2446 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2447 | |
---|
2448 | INTEGER(iwp) :: logical_as_integer !< |
---|
2449 | |
---|
2450 | LOGICAL, INTENT(IN) :: value !< |
---|
2451 | |
---|
2452 | |
---|
2453 | IF ( value ) THEN |
---|
2454 | logical_as_integer = 1 |
---|
2455 | ELSE |
---|
2456 | logical_as_integer = 0 |
---|
2457 | ENDIF |
---|
2458 | |
---|
2459 | CALL wrd_mpi_io_int( name, logical_as_integer ) |
---|
2460 | |
---|
2461 | END SUBROUTINE wrd_mpi_io_logical |
---|
2462 | |
---|
2463 | |
---|
2464 | |
---|
2465 | !--------------------------------------------------------------------------------------------------! |
---|
2466 | ! Description: |
---|
2467 | ! ------------ |
---|
2468 | !> Read 1d-REAL global array with MPI-IO. |
---|
2469 | !> Array contains identical data on all PEs. |
---|
2470 | !--------------------------------------------------------------------------------------------------! |
---|
2471 | SUBROUTINE rrd_mpi_io_global_array_real_1d( name, data ) |
---|
2472 | |
---|
2473 | IMPLICIT NONE |
---|
2474 | |
---|
2475 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2476 | |
---|
2477 | INTEGER(iwp) :: i !< |
---|
2478 | INTEGER(KIND=rd_offset_kind) :: offset !< |
---|
2479 | |
---|
2480 | #if defined( __parallel ) |
---|
2481 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2482 | #endif |
---|
2483 | |
---|
2484 | LOGICAL :: found !< |
---|
2485 | |
---|
2486 | REAL(KIND=wp), INTENT(INOUT), DIMENSION(:) :: data !< |
---|
2487 | |
---|
2488 | |
---|
2489 | offset = 0 |
---|
2490 | found = .FALSE. |
---|
2491 | |
---|
2492 | DO i = 1, tgh%nr_arrays |
---|
2493 | IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN |
---|
2494 | array_position = array_offset(i) |
---|
2495 | found = .TRUE. |
---|
2496 | EXIT |
---|
2497 | ENDIF |
---|
2498 | ENDDO |
---|
2499 | |
---|
2500 | |
---|
2501 | IF ( found ) THEN |
---|
2502 | |
---|
2503 | ! |
---|
2504 | !-- Set default view |
---|
2505 | #if defined( __parallel ) |
---|
2506 | IF ( cyclic_fill_mode ) THEN !kk This may be the general solution for all cases |
---|
2507 | IF ( pe_active_for_read ) THEN |
---|
2508 | CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) |
---|
2509 | CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) |
---|
2510 | CALL MPI_FILE_READ_ALL( fh, data, SIZE( data ), MPI_REAL, status, ierr ) |
---|
2511 | ENDIF |
---|
2512 | CALL MPI_BCAST( data, SIZE( data ), MPI_REAL, 0, comm2d, ierr ) |
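!
!--       In cyclic fill mode only the PEs that opened the prerun file read the data; the
!--       broadcast above distributes it to all PEs of the main run.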
---|
2513 | ELSE |
---|
2514 | IF ( sm_io%iam_io_pe ) THEN |
---|
2515 | CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) |
---|
2516 | CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) |
---|
2517 | CALL MPI_FILE_READ_ALL( fh, data, SIZE( data ), MPI_REAL, status, ierr ) |
---|
2518 | ENDIF |
---|
2519 | IF ( sm_io%is_sm_active() ) THEN |
---|
2520 | CALL MPI_BCAST( data, SIZE( data ), MPI_REAL, 0, sm_io%comm_shared, ierr ) |
---|
2521 | ENDIF |
---|
2522 | ENDIF |
---|
2523 | #else |
---|
2524 | CALL posix_lseek( fh, array_position ) |
---|
2525 | CALL posix_read( fh, data, SIZE( data ) ) |
---|
2526 | #endif |
---|
2527 | |
---|
2528 | ELSE |
---|
2529 | |
---|
2530 | message_string = '1d/2d/3d/4d-REAL global array "' // TRIM( name ) // '" not found in ' // & |
---|
2531 | 'restart file' |
---|
2532 | CALL message( 'rrd_mpi_io_global_array_real_1d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
2533 | |
---|
2534 | ENDIF |
---|
2535 | |
---|
2536 | END SUBROUTINE rrd_mpi_io_global_array_real_1d |
---|
2537 | |
---|
2538 | |
---|
2539 | |
---|
2540 | !--------------------------------------------------------------------------------------------------! |
---|
2541 | ! Description: |
---|
2542 | ! ------------ |
---|
2543 | !> Read 2d-REAL global array with MPI-IO. |
---|
2544 | !> Array contains identical data on all PEs. |
---|
2545 | !--------------------------------------------------------------------------------------------------! |
---|
2546 | SUBROUTINE rrd_mpi_io_global_array_real_2d( name, data ) |
---|
2547 | |
---|
2548 | IMPLICIT NONE |
---|
2549 | |
---|
2550 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2551 | |
---|
2552 | INTEGER, DIMENSION(1) :: bufshape !< |
---|
2553 | |
---|
2554 | REAL(KIND=wp), INTENT(INOUT), DIMENSION(:,:), TARGET :: data !< |
---|
2555 | REAL(KIND=wp), POINTER, DIMENSION(:) :: buf !< |
---|
2556 | |
---|
2557 | TYPE(C_PTR) :: c_data !< |
---|
2558 | |
---|
2559 | |
---|
2560 | c_data = C_LOC( data ) |
---|
2561 | bufshape(1) = SIZE( data ) |
---|
2562 | CALL C_F_POINTER( c_data, buf, bufshape ) |
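!
!-- buf is a rank-1 view of the same memory as data, created without copying, so the generic
!-- 1d routine can be reused. The 3d and 4d variants below use the same technique.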
---|
2563 | |
---|
2564 | CALL rrd_mpi_io_global_array_real_1d( name, buf ) |
---|
2565 | |
---|
2566 | END SUBROUTINE rrd_mpi_io_global_array_real_2d |
---|
2567 | |
---|
2568 | |
---|
2569 | |
---|
2570 | !--------------------------------------------------------------------------------------------------! |
---|
2571 | ! Description: |
---|
2572 | ! ------------ |
---|
2573 | !> Read 3d-REAL global array with MPI-IO. |
---|
2574 | !> Array contains identical data on all PEs. |
---|
2575 | !--------------------------------------------------------------------------------------------------! |
---|
2576 | SUBROUTINE rrd_mpi_io_global_array_real_3d( name, data ) |
---|
2577 | |
---|
2578 | IMPLICIT NONE |
---|
2579 | |
---|
2580 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2581 | |
---|
2582 | INTEGER, DIMENSION(1) :: bufshape !< |
---|
2583 | |
---|
2584 | REAL(KIND=wp), INTENT(INOUT), DIMENSION(:,:,:), TARGET :: data !< |
---|
2585 | REAL(KIND=wp), POINTER, DIMENSION(:) :: buf !< |
---|
2586 | |
---|
2587 | TYPE(C_PTR) :: c_data !< |
---|
2588 | |
---|
2589 | |
---|
2590 | c_data = C_LOC( data ) |
---|
2591 | bufshape(1) = SIZE( data ) |
---|
2592 | CALL C_F_POINTER( c_data, buf, bufshape ) |
---|
2593 | |
---|
2594 | CALL rrd_mpi_io_global_array_real_1d( name, buf ) |
---|
2595 | |
---|
2596 | END SUBROUTINE rrd_mpi_io_global_array_real_3d |
---|
2597 | |
---|
2598 | |
---|
2599 | |
---|
2600 | !--------------------------------------------------------------------------------------------------! |
---|
2601 | ! Description: |
---|
2602 | ! ------------ |
---|
2603 | !> Read 4d-REAL global array with MPI-IO. |
---|
2604 | !> Array contains identical data on all PEs. |
---|
2605 | !--------------------------------------------------------------------------------------------------! |
---|
2606 | SUBROUTINE rrd_mpi_io_global_array_real_4d( name, data ) |
---|
2607 | |
---|
2608 | IMPLICIT NONE |
---|
2609 | |
---|
2610 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2611 | |
---|
2612 | INTEGER, DIMENSION(1) :: bufshape !< |
---|
2613 | |
---|
2614 | REAL(KIND=wp), INTENT(INOUT), DIMENSION(:,:,:,:), TARGET :: data !< |
---|
2615 | REAL(KIND=wp), POINTER, DIMENSION(:) :: buf !< |
---|
2616 | |
---|
2617 | TYPE(C_PTR) :: c_data !< |
---|
2618 | |
---|
2619 | |
---|
2620 | c_data = C_LOC( data ) |
---|
2621 | bufshape(1) = SIZE( data) |
---|
2622 | CALL C_F_POINTER( c_data, buf, bufshape ) |
---|
2623 | |
---|
2624 | CALL rrd_mpi_io_global_array_real_1d( name, buf ) |
---|
2625 | |
---|
2626 | END SUBROUTINE rrd_mpi_io_global_array_real_4d |
---|
2627 | |
---|
2628 | |
---|
2629 | |
---|
2630 | !--------------------------------------------------------------------------------------------------! |
---|
2631 | ! Description: |
---|
2632 | ! ------------ |
---|
2633 | !> Read 1d-INTEGER global array with MPI-IO. |
---|
2634 | !> Array contains identical data on all PEs. |
---|
2635 | !--------------------------------------------------------------------------------------------------! |
---|
2636 | SUBROUTINE rrd_mpi_io_global_array_int_1d( name, data ) |
---|
2637 | |
---|
2638 | IMPLICIT NONE |
---|
2639 | |
---|
2640 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2641 | |
---|
2642 | INTEGER(iwp) :: i !< |
---|
2643 | INTEGER(KIND=rd_offset_kind) :: offset !< |
---|
2644 | |
---|
2645 | #if defined( __parallel ) |
---|
2646 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2647 | #endif |
---|
2648 | INTEGER(KIND=iwp), INTENT(INOUT), DIMENSION(:) :: data !< |
---|
2649 | |
---|
2650 | LOGICAL :: found !< |
---|
2651 | |
---|
2652 | |
---|
2653 | offset = 0 |
---|
2654 | found = .FALSE. |
---|
2655 | |
---|
2656 | DO i = 1, tgh%nr_arrays |
---|
2657 | IF ( TRIM( array_names(i) ) == TRIM( name ) ) THEN |
---|
2658 | array_position = array_offset(i) |
---|
2659 | found = .TRUE. |
---|
2660 | EXIT |
---|
2661 | ENDIF |
---|
2662 | ENDDO |
---|
2663 | |
---|
2664 | IF ( found ) THEN |
---|
2665 | ! |
---|
2666 | !-- Set default view |
---|
2667 | #if defined( __parallel ) |
---|
2668 | IF ( cyclic_fill_mode ) THEN !kk This may be the general solution for all cases |
---|
2669 | IF ( pe_active_for_read ) THEN |
---|
2670 | CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) |
---|
2671 | CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) |
---|
2672 | CALL MPI_FILE_READ_ALL( fh, data, SIZE( data), MPI_INTEGER, status, ierr ) |
---|
2673 | ENDIF |
---|
2674 | CALL MPI_BCAST( data, SIZE( data ), MPI_INTEGER, 0, comm2d, ierr ) |
---|
2675 | ELSE |
---|
2676 | IF ( sm_io%iam_io_pe ) THEN |
---|
2677 | CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) |
---|
2678 | CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) |
---|
2679 | CALL MPI_FILE_READ_ALL( fh, data, SIZE( data), MPI_INTEGER, status, ierr ) |
---|
2680 | ENDIF |
---|
2681 | IF ( sm_io%is_sm_active() ) THEN |
---|
2682 | CALL MPI_BCAST( data, SIZE( data ), MPI_INTEGER, 0, sm_io%comm_shared, ierr ) |
---|
2683 | ENDIF |
---|
2684 | ENDIF |
---|
2685 | #else |
---|
2686 | CALL posix_lseek( fh, array_position ) |
---|
2687 | CALL posix_read( fh, data, SIZE( data ) ) |
---|
2688 | #endif |
---|
2689 | ELSE |
---|
2690 | |
---|
2691 | message_string = '1d-INTEGER global array "' // TRIM( name ) // '" not found in ' // & |
---|
2692 | 'restart file' |
---|
2693 | CALL message( 'rrd_mpi_io_global_array_int_1d', 'PA0722', 3, 2, 0, 6, 0 ) |
---|
2694 | |
---|
2695 | ENDIF |
---|
2696 | |
---|
2697 | END SUBROUTINE rrd_mpi_io_global_array_int_1d |
---|
2698 | |
---|
2699 | |
---|
2700 | |
---|
2701 | !--------------------------------------------------------------------------------------------------! |
---|
2702 | ! Description: |
---|
2703 | ! ------------ |
---|
2704 | !> Write 1d-REAL global array with MPI-IO. |
---|
2705 | !> Array contains identical data on all PEs. |
---|
2706 | !--------------------------------------------------------------------------------------------------! |
---|
2707 | SUBROUTINE wrd_mpi_io_global_array_real_1d( name, data ) |
---|
2708 | |
---|
2709 | IMPLICIT NONE |
---|
2710 | |
---|
2711 | CHARACTER(LEN=*), INTENT(IN) :: name !< |
---|
2712 | |
---|
2713 | INTEGER(KIND=rd_offset_kind) :: offset !< |
---|
2714 | |
---|
2715 | #if defined( __parallel ) |
---|
2716 | INTEGER, DIMENSION(rd_status_size) :: status !< |
---|
2717 | #endif |
---|
2718 | |
---|
2719 | REAL(KIND=wp), INTENT(IN), DIMENSION(:) :: data !< |
---|
2720 | |
---|
2721 | |
---|
2722 | offset = 0 |
---|
2723 | |
---|
2724 | IF ( header_array_index == max_nr_arrays ) THEN |
---|
2725 | STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' |
---|
2726 | ENDIF |
---|
2727 | |
---|
2728 | array_names(header_array_index) = name |
---|
2729 | array_offset(header_array_index) = array_position |
---|
2730 | header_array_index = header_array_index + 1 |
---|
2731 | |
---|
2732 | ! |
---|
2733 | !-- Set default view |
---|
2734 | #if defined( __parallel ) |
---|
2735 | IF ( sm_io%iam_io_pe ) THEN |
---|
2736 | CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) |
---|
2737 | ENDIF |
---|
2738 | ! |
---|
2739 | !-- Only PE 0 writes replicated data |
---|
2740 | IF ( myid == 0 ) THEN |
---|
2741 | CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) |
---|
2742 | CALL MPI_FILE_WRITE( fh, data, SIZE( data ), MPI_REAL, status, ierr ) |
---|
2743 | ENDIF |
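!
!-- Since the data is identical on all PEs, a single write by PE 0 is sufficient. Nevertheless,
!-- all PEs advance array_position below, so the recorded offsets stay consistent everywhere.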
---|
2744 | #else |
---|
2745 | CALL posix_lseek( fh, array_position ) |
---|
2746 | CALL posix_write( fh, data, SIZE( data ) ) |
---|
2747 | #endif |
---|
2748 | array_position = array_position + SIZE( data ) * wp |
---|
2749 | |
---|
2750 | END SUBROUTINE wrd_mpi_io_global_array_real_1d |


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 2d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_real_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                      ::  name      !<

    INTEGER, DIMENSION(1)                             ::  bufshape  !<

    REAL(KIND=wp), POINTER, DIMENSION(:)              ::  buf       !<
    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:), TARGET ::  data      !<

    TYPE(C_PTR)                                       ::  c_data    !<


    c_data = C_LOC( data )
    bufshape(1) = SIZE( data )
    CALL C_F_POINTER( c_data, buf, bufshape )

    CALL wrd_mpi_io_global_array_real_1d( name, buf )

 END SUBROUTINE wrd_mpi_io_global_array_real_2d



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 3d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_real_3d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                        ::  name      !<

    INTEGER, DIMENSION(1)                               ::  bufshape  !<

    REAL(KIND=wp), POINTER, DIMENSION(:)                ::  buf       !<
    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:), TARGET ::  data      !<

    TYPE(C_PTR)                                         ::  c_data    !<


    c_data = C_LOC( data )
    bufshape(1) = SIZE( data )
    CALL C_F_POINTER( c_data, buf, bufshape )

    CALL wrd_mpi_io_global_array_real_1d( name, buf )

 END SUBROUTINE wrd_mpi_io_global_array_real_3d



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 4d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_real_4d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                          ::  name      !<

    INTEGER, DIMENSION(1)                                 ::  bufshape  !<

    REAL(KIND=wp), POINTER, DIMENSION(:)                  ::  buf       !<
    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:,:), TARGET ::  data      !<

    TYPE(C_PTR)                                           ::  c_data    !<


    c_data = C_LOC( data )
    bufshape(1) = SIZE( data )
    CALL C_F_POINTER( c_data, buf, bufshape )

    CALL wrd_mpi_io_global_array_real_1d( name, buf )

 END SUBROUTINE wrd_mpi_io_global_array_real_4d
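
!
!-- Note on the three wrappers above: they do not copy the data. Each creates a flat 1d view of
!-- its contiguous multi-dimensional argument via C_LOC/C_F_POINTER and passes this view to the
!-- 1d routine. A minimal standalone sketch of the technique (hypothetical names):
!
!--    REAL(wp), TARGET  ::  a(2,3)
!--    REAL(wp), POINTER ::  flat(:)
!--    CALL C_F_POINTER( C_LOC( a ), flat, (/ SIZE( a ) /) )   ! flat(1:6) now aliases a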


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 1d-INTEGER global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_int_1d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                ::  name    !<

    INTEGER(KIND=rd_offset_kind)                ::  offset  !<

    INTEGER(KIND=iwp), INTENT(IN), DIMENSION(:) ::  data    !<
#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size)          ::  status  !<
#endif

    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

    offset = 0
    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

!
!-- Set default view
#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
    ENDIF
!
!-- Only PE 0 writes replicated data
    IF ( myid == 0 )  THEN
       CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr )
       CALL MPI_FILE_WRITE( fh, data, SIZE( data ), MPI_INTEGER, status, ierr )
    ENDIF
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, data, SIZE( data ) )
#endif
    array_position = array_position + SIZE( data ) * 4  ! INTEGER values are stored with 4 bytes

 END SUBROUTINE wrd_mpi_io_global_array_int_1d



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read particle data array with MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_particles( name, prt_global_index )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                           ::  name              !<
    INTEGER(idp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) ::  prt_global_index  !<

    INTEGER(iwp)                 ::  array_size     !<
    INTEGER(iwp)                 ::  byte_column    !<
    INTEGER(iwp)                 ::  i              !<
    INTEGER(iwp)                 ::  ind            !<
    INTEGER(iwp)                 ::  j              !<
    INTEGER(iwp)                 ::  k              !<
    INTEGER(iwp)                 ::  n              !<
    INTEGER(iwp)                 ::  particle_size  !<

    INTEGER(KIND=rd_offset_kind) ::  disp           !<
    INTEGER(KIND=rd_offset_kind) ::  offset         !<
    INTEGER(KIND=rd_offset_kind) ::  prt_nr_bytes   !<

    LOGICAL                      ::  found          !<

    REAL(dp)                     ::  rr             !< there is no data type INTEGER*8 in MPI
    REAL(dp)                     ::  rs             !< use REAL*8 to compute max offset

    TYPE(particle_type), DIMENSION(:), ALLOCATABLE, TARGET ::  prt_data  !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#else
    TYPE(C_PTR)                        ::  buf
#endif

    found = .FALSE.

    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN

       offset = 0

       particle_size = STORAGE_SIZE( zero_particle ) / 8  ! 8 here means number of bits/byte, NOT wp

       array_size = 0
       DO  i = nxl, nxr
          DO  j = nys, nyn
             array_size = MAX( array_size, SUM( prt_count(:,j,i) ) )
          ENDDO
       ENDDO

       WRITE( 9, * )  'particle_size_read ', particle_size, array_size, array_position,            &
                      SUM( prt_global_index )

       ALLOCATE( prt_data(MAX( array_size, 1 )) )

!
!--    Read columns of particles
#if defined( __parallel )
       CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
       prt_nr_bytes = 0
       DO  i = nxl, nxr
          DO  j = nys, nyn
             disp         = array_position + prt_global_index(nzb,j,i) * particle_size
             byte_column  = SUM( prt_count(:,j,i) ) * particle_size
             prt_nr_bytes = MAX( disp+byte_column, prt_nr_bytes )

#if defined( __parallel )
             CALL sm_io%sm_node_barrier()  ! Has no effect if I/O on limited number of cores is inactive
             IF ( byte_column > 0 )  THEN
                CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
                CALL MPI_FILE_READ( fh, prt_data, byte_column, MPI_BYTE, status, ierr )
             ENDIF
             CALL sm_io%sm_node_barrier()
#else
             buf = C_LOC( prt_data )  ! use C_PTR to avoid another overlay in posix interface
             CALL posix_lseek( fh, disp )
             CALL posix_read( fh, buf, byte_column )
#endif
             ind = 1
             DO  k = nzb, nzt+1
                DO  n = 1, prt_count(k,j,i)
                   grid_particles(k,j,i)%particles(n) = prt_data(ind)
                   ind = ind + 1
                ENDDO
             ENDDO

          ENDDO
       ENDDO

#if defined( __parallel )
       rs = prt_nr_bytes
       CALL MPI_ALLREDUCE( rs, rr, 1, MPI_DOUBLE_PRECISION, MPI_MAX, comm2d, ierr )
       prt_nr_bytes = rr
#else
       rs = prt_nr_bytes  ! assign before use, otherwise rs is undefined in serial mode
       rr = rs
#endif
       array_position = prt_nr_bytes

       WRITE( 9, * )  'array_position after particle read ', array_position, prt_nr_bytes, rs

       DEALLOCATE( prt_data )

    ENDIF

 END SUBROUTINE rrd_mpi_io_particles
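
!
!-- Worked example for the offset arithmetic above (numbers are hypothetical): with
!-- STORAGE_SIZE( zero_particle ) = 512 bits, particle_size = 64 bytes. If the first particle of
!-- column (j,i) has prt_global_index(nzb,j,i) = 1000 and array_position = 4096, the column
!-- starts at byte disp = 4096 + 1000 * 64 = 68096, and with SUM( prt_count(:,j,i) ) = 10
!-- particles it occupies byte_column = 640 bytes.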


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 1d-REAL surface data array with MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_surface( name, data, first_index )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

    INTEGER(KIND=rd_offset_kind) ::  disp         !< displacement of actual indices
    INTEGER(KIND=rd_offset_kind) ::  disp_f       !< displacement in file
    INTEGER(KIND=rd_offset_kind) ::  disp_n       !< displacement of next column
    INTEGER(iwp), OPTIONAL       ::  first_index  !<

    INTEGER(iwp)                 ::  i               !<
    INTEGER(iwp)                 ::  i_f             !<
    INTEGER(iwp)                 ::  j               !<
    INTEGER(iwp)                 ::  j_f             !<
    INTEGER(iwp)                 ::  lo_first_index  !<
    INTEGER(iwp)                 ::  nr_bytes        !<
    INTEGER(iwp)                 ::  nr_bytes_f      !<
    INTEGER(iwp)                 ::  nr_words        !<
#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#else
    TYPE(C_PTR)                        ::  buf     !<
#endif

    LOGICAL                      ::  found  !<

    REAL(wp), INTENT(OUT), DIMENSION(:), TARGET ::  data  !<


    found          = .FALSE.
    lo_first_index = 1

    IF ( PRESENT( first_index ) )  THEN
       lo_first_index = first_index
    ENDIF

    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i) + ( lo_first_index - 1 ) *                              &
                           total_number_of_surface_values * wp
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    disp   = -1
    disp_f = -1
    disp_n = -1
    IF ( found )  THEN

       IF ( cyclic_fill_mode )  THEN

          CALL rrd_mpi_io_surface_cyclic_fill

       ELSE

          IF ( MAXVAL( m_global_start ) == -1 )  RETURN  ! Nothing to do on this PE
          DO  i = nxl, nxr
             DO  j = nys, nyn

                IF ( m_global_start(j,i) > 0 )  THEN
                   disp     = array_position + ( m_global_start(j,i) - 1 ) * wp
                   nr_words = m_end_index(j,i) - m_start_index(j,i) + 1
                   nr_bytes = nr_words * wp
                ENDIF
                IF ( disp >= 0  .AND.  disp_f == -1 )  THEN  ! First entry
                   disp_f     = disp
                   nr_bytes_f = 0
                   i_f = i
                   j_f = j
                ENDIF
                IF ( j == nyn  .AND.  i == nxr )  THEN  ! Last entry
                   disp_n = -1
                   IF ( nr_bytes > 0 )  THEN
                      nr_bytes_f = nr_bytes_f + nr_bytes
                   ENDIF
                ELSEIF ( j == nyn )  THEN  ! Next x
                   IF ( m_global_start(nys,i+1) > 0  .AND.  disp > 0 )  THEN
                      disp_n = array_position + ( m_global_start(nys,i+1) - 1 ) * wp
                   ELSE
                      CYCLE
                   ENDIF
                ELSE
                   IF ( m_global_start(j+1,i) > 0  .AND.  disp > 0 )  THEN
                      disp_n = array_position + ( m_global_start(j+1,i) - 1 ) * wp
                   ELSE
                      CYCLE
                   ENDIF
                ENDIF

                IF ( disp + nr_bytes == disp_n )  THEN  ! Contiguous block
                   nr_bytes_f = nr_bytes_f + nr_bytes
                ELSE  ! Read
#if defined( __parallel )
                   CALL MPI_FILE_SEEK( fhs, disp_f, MPI_SEEK_SET, ierr )
                   nr_words = nr_bytes_f / wp
                   CALL MPI_FILE_READ( fhs, data(m_start_index(j_f,i_f)), nr_words, MPI_REAL,      &
                                       status, ierr )
#else
!
!--                Use C_PTR here, because posix read does not work with indexed array
                   buf = C_LOC( data(m_start_index(j_f,i_f)) )
                   CALL posix_lseek( fh, disp_f )
                   CALL posix_read( fh, buf, nr_bytes_f )
#endif
                   disp_f     = disp
                   nr_bytes_f = nr_bytes
                   i_f = i
                   j_f = j
                ENDIF

             ENDDO
          ENDDO
       ENDIF

    ELSE

       message_string = 'surface array "' // TRIM( name ) // '" not found in restart file'
       CALL message( 'rrd_mpi_io_surface', 'PA0722', 3, 2, 0, 6, 0 )

    ENDIF

 CONTAINS

    SUBROUTINE rrd_mpi_io_surface_cyclic_fill

       IMPLICIT NONE

       INTEGER(iwp) ::  i         !<
       INTEGER(iwp) ::  ie        !<
#if defined( __parallel )
       INTEGER(iwp) ::  ierr      !<
#endif
       INTEGER(iwp) ::  is        !<
       INTEGER(iwp) ::  i_remote  !<
       INTEGER(iwp) ::  j         !<
       INTEGER(iwp) ::  je        !<
       INTEGER(iwp) ::  js        !<
       INTEGER(iwp) ::  j_remote  !<
       INTEGER(iwp) ::  nval      !<
       INTEGER(iwp) ::  rem_pe    !<

#if defined( __parallel )
       INTEGER(KIND=MPI_ADDRESS_KIND) ::  rem_offs  !<
#else
       INTEGER(idp)                   ::  rem_offs  !<
#endif

       LOGICAL ::  write_done  !<


!
!--    In the current version, only 1 value per grid cell is allowed. In this special case, the
!--    cyclic repetition can be done with the same method as for 2d-REAL arrays.
       CALL prerun_grid%activate_grid_from_this_class()

       IF ( pe_active_for_read )  THEN
          rmabuf_2d = -1.0
          DO  i = nxl, nxr
             DO  j = nys, nyn

                IF ( m_global_start(j,i) > 0 )  THEN
                   disp     = array_position + ( m_global_start(j,i) - 1 ) * wp
                   nr_words = m_end_index(j,i) - m_start_index(j,i) + 1
                   nr_bytes = nr_words * wp
                ENDIF
                IF ( disp >= 0  .AND.  disp_f == -1 )  THEN  ! First entry
                   disp_f     = disp
                   nr_bytes_f = 0
                   write_done = .TRUE.
                ENDIF
                IF ( write_done )  THEN
                   i_f = i
                   j_f = j
                   write_done = .FALSE.
                ENDIF

                IF ( j == nyn  .AND.  i == nxr )  THEN  ! Last entry
                   disp_n = -1
                   IF ( nr_bytes > 0 )  THEN
                      nr_bytes_f = nr_bytes_f + nr_bytes
                   ENDIF
                ELSEIF ( j == nyn )  THEN  ! Next x
                   IF ( m_global_start(nys,i+1) > 0  .AND.  disp > 0 )  THEN
                      disp_n = array_position + ( m_global_start(nys,i+1) - 1 ) * wp
                   ELSE
                      CYCLE
                   ENDIF
                ELSE
                   IF ( m_global_start(j+1,i) > 0  .AND.  disp > 0 )  THEN
                      disp_n = array_position + ( m_global_start(j+1,i) - 1 ) * wp
                   ELSE
                      CYCLE
                   ENDIF
                ENDIF

                IF ( disp + nr_bytes == disp_n )  THEN  ! Contiguous block
                   nr_bytes_f = nr_bytes_f + nr_bytes
                ELSE  ! Read
#if defined( __parallel )
                   CALL MPI_FILE_SEEK( fhs, disp_f, MPI_SEEK_SET, ierr )
                   nr_words = nr_bytes_f / wp
                   CALL MPI_FILE_READ( fhs, rmabuf_2d(j_f,i_f), nr_words, MPI_REAL, status, ierr )
#else
                   CALL posix_lseek( fh, disp_f )
                   CALL posix_read( fh, rmabuf_2d(j_f:,i_f:), nr_bytes_f )
#endif

                   disp_f     = disp
                   nr_bytes_f = nr_bytes
                   write_done = .TRUE.
                ENDIF

             ENDDO
          ENDDO

       ENDIF

       CALL mainrun_grid%activate_grid_from_this_class()

#if defined( __parallel )
!
!--    Close RMA window to allow remote access
       CALL MPI_WIN_FENCE( 0, rmawin_2d, ierr )
#endif

       IF ( .NOT. pe_active_for_read )  THEN

          is = nxl
          ie = nxr
          js = nys
          je = nyn

       ELSE

          is = nxl
          ie = nxr
          js = prerun_grid%nys+1
          je = nyn

          DO  i = is, ie
             DO  j = js, je
                i_remote = MOD( i, nx_on_file+1 )
                j_remote = MOD( j, ny_on_file+1 )
                rem_pe   = remote_pe(i_remote,j_remote)
                rem_offs = rma_offset(i_remote,j_remote)
                nval     = 1

#if defined( __parallel )
                IF ( rem_pe /= myid )  THEN
                   CALL MPI_GET( data(m_start_index(j,i)), nval, MPI_REAL, rem_pe, rem_offs,       &
                                 nval, MPI_REAL, rmawin_2d, ierr )
                ELSE
                   data(m_start_index(j,i)) = rmabuf_2d(j_remote,i_remote)
                ENDIF
#else
                data(m_start_index(j,i)) = array_2d(i_remote,j_remote)
#endif
             ENDDO
          ENDDO
          is = prerun_grid%nxr+1
          ie = nxr
          js = nys
          je = nyn

       ENDIF

       DO  i = is, ie
          DO  j = js, je
             i_remote = MOD( i, nx_on_file+1 )
             j_remote = MOD( j, ny_on_file+1 )
             rem_pe   = remote_pe(i_remote,j_remote)
             rem_offs = rma_offset(i_remote,j_remote)
             nval     = 1

#if defined( __parallel )
             IF ( rem_pe /= myid )  THEN
                CALL MPI_GET( data(m_start_index(j,i)), nval, MPI_REAL, rem_pe, rem_offs, nval,    &
                              MPI_REAL, rmawin_2d, ierr )
             ELSE
                data(m_start_index(j,i)) = rmabuf_2d(j_remote,i_remote)
             ENDIF
#else
             data(m_start_index(j,i)) = array_2d(i_remote,j_remote)
#endif
          ENDDO
       ENDDO

#if defined( __parallel )
!
!--    Reopen RMA window to allow filling
       CALL MPI_WIN_FENCE( 0, rmawin_2d, ierr )
#endif

    END SUBROUTINE rrd_mpi_io_surface_cyclic_fill

 END SUBROUTINE rrd_mpi_io_surface



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 2d-REAL surface data array with MPI-IO.
!> These consist of multiple 1d-REAL surface data arrays.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_surface_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)          ::  name  !<

    INTEGER(iwp)                          ::  i     !<

    REAL(wp), INTENT(OUT), DIMENSION(:,:) ::  data  !<
    REAL(wp), DIMENSION(SIZE( data, 2 ))  ::  tmp   !<


    DO  i = 1, SIZE( data, 1 )
       CALL rrd_mpi_io_surface( name, tmp, first_index = i )
       data(i,:) = tmp
    ENDDO

!
!-- Comment from Klaus Ketelsen (September 2018)
!-- The intention of the following loop was to let the compiler do the copying on return.
!-- In my understanding it is standard-conforming to pass the second dimension to a 1d-array
!-- inside a subroutine, and the compiler is responsible for generating the copying code.
!-- Actually this works fine for INTENT(IN) variables (wrd_mpi_io_surface_2d). For INTENT(OUT),
!-- as in this case, the code works with the PGI compiler, but both the Intel 16 and the Cray
!-- compiler produce wrong results with this loop. That is the reason why the auxiliary array
!-- tmp was introduced above.
!    DO  i = 1, SIZE( data, 1 )
!       CALL rrd_mpi_io_surface( name, data(i,:), first_index = i )
!    ENDDO

 END SUBROUTINE rrd_mpi_io_surface_2d
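
!
!-- Sketch of the compiler issue described in the comment above (reduced to the essentials):
!-- passing a non-contiguous section like data(i,:) to a 1d INTENT(OUT) dummy requires the
!-- compiler to generate copy-in/copy-out code,
!
!--    CALL rrd_mpi_io_surface( name, data(i,:), first_index = i )   ! copy-back required
!
!-- and some compilers handled this copy-back incorrectly. The explicit, contiguous buffer tmp
!-- avoids relying on it.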


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write particle data with MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_particles( name, prt_global_index )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)                           ::  name              !<
    INTEGER(idp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) ::  prt_global_index  !<

    INTEGER(iwp)                 ::  array_size     !<
    INTEGER(iwp)                 ::  byte_column    !<
    INTEGER(iwp)                 ::  i              !<
    INTEGER(iwp)                 ::  ind            !<
    INTEGER(iwp)                 ::  j              !<
    INTEGER(iwp)                 ::  k              !<
    INTEGER(iwp)                 ::  n              !<
    INTEGER(iwp)                 ::  particle_size  !<

    INTEGER(KIND=rd_offset_kind) ::  disp           !<
    INTEGER(KIND=rd_offset_kind) ::  offset         !<
    INTEGER(KIND=rd_offset_kind) ::  prt_nr_bytes   !<

    REAL(dp)                     ::  rs             !< use REAL*8 to compute max offset
    REAL(dp)                     ::  rr             !< there is no data type INTEGER*8 in MPI

    TYPE(particle_type), DIMENSION(:), ALLOCATABLE, TARGET ::  prt_data  !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#else
    TYPE(C_PTR)                        ::  buf
#endif


    offset = 0

    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

    array_names(header_array_index)  = TRIM( name )
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

    particle_size = STORAGE_SIZE( zero_particle ) / 8

    array_size = 0
    DO  i = nxl, nxr
       DO  j = nys, nyn
          array_size = MAX( array_size, SUM( prt_count(:,j,i) ) )
       ENDDO
    ENDDO

    ALLOCATE( prt_data(MAX( array_size, 1 )) )

!
!-- Write columns of particles.
!-- The particles of a column are stored sequentially in the first dimension of the particles
!-- array. Storing only the particles of a single grid cell would also be possible with this
!-- setup, but the resulting I/O chunks (at most on the order of 100 particles) would be too
!-- small for efficient I/O.
#if defined( __parallel )
    CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
    prt_nr_bytes = 0
    DO  i = nxl, nxr
       DO  j = nys, nyn
          disp         = array_position + prt_global_index(nzb,j,i) * particle_size
          byte_column  = SUM( prt_count(:,j,i) ) * particle_size
          prt_nr_bytes = MAX( disp+byte_column, prt_nr_bytes )

          ind = 1
          DO  k = nzb, nzt+1
             DO  n = 1, prt_count(k,j,i)
                prt_data(ind) = grid_particles(k,j,i)%particles(n)
                ind = ind + 1
             ENDDO
          ENDDO

#if defined( __parallel )
          CALL sm_io%sm_node_barrier()  ! Has no effect if I/O on limited number of cores is inactive
          IF ( byte_column > 0 )  THEN
             CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
             CALL MPI_FILE_WRITE( fh, prt_data, byte_column, MPI_BYTE, status, ierr )
          ENDIF
          CALL sm_io%sm_node_barrier()
#else
          buf = C_LOC( prt_data )  ! use C_PTR to avoid another overlay in posix interface
          CALL posix_lseek( fh, disp )
          CALL posix_write( fh, buf, byte_column )
#endif
       ENDDO
    ENDDO

#if defined( __parallel )
    rs = prt_nr_bytes
    CALL MPI_ALLREDUCE( rs, rr, 1, MPI_DOUBLE_PRECISION, MPI_MAX, comm2d, ierr )
    prt_nr_bytes = rr
#else
    rs = prt_nr_bytes  ! assign before use, otherwise rs is undefined in serial mode
    rr = rs
#endif
    array_position = prt_nr_bytes

    WRITE( 9, * )  'array_position after particle ', array_position, prt_nr_bytes, rs

    DEALLOCATE( prt_data )

 END SUBROUTINE wrd_mpi_io_particles



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 1d-REAL surface data array with MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_surface( name, data, first_index )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !<

#if defined( __parallel )
    INTEGER(KIND=rd_offset_kind) ::  disp  !<
#endif
    INTEGER(iwp), OPTIONAL       ::  first_index     !<
#if defined( __parallel )
    INTEGER(iwp)                 ::  i               !<
#endif
    INTEGER(iwp)                 ::  lo_first_index  !<
    INTEGER(KIND=rd_offset_kind) ::  offset          !<

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !<
#endif

    REAL(wp), INTENT(IN), DIMENSION(:), TARGET ::  data  !<


    offset         = 0
    lo_first_index = 1

    IF ( PRESENT( first_index ) )  THEN
       lo_first_index = first_index
    ENDIF
!
!-- In case of 2d-data, name is written only once
    IF ( lo_first_index == 1 )  THEN

       IF ( header_array_index == max_nr_arrays )  THEN
          STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
       ENDIF

       array_names(header_array_index)  = name
       array_offset(header_array_index) = array_position
       header_array_index = header_array_index + 1

    ENDIF

#if defined( __parallel )
    IF ( sm_io%is_sm_active() )  THEN
       DO  i = 1, nr_val
          array_1d(i+local_start) = data(i)
       ENDDO
    ELSE
!       array_1d => data  !kk Did not work in all cases    why???
       ALLOCATE( array_1d( SIZE( data ) ) )
       array_1d = data
    ENDIF

    CALL sm_io%sm_node_barrier()  ! Has no effect if I/O on limited number of cores is inactive
    IF ( sm_io%iam_io_pe )  THEN
       IF ( all_pes_write )  THEN
          CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_surf, 'native',                 &
                                  MPI_INFO_NULL, ierr )
          CALL MPI_FILE_WRITE_ALL( fh, array_1d, nr_iope, MPI_REAL, status, ierr )
       ELSE
          CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
          IF ( nr_val > 0 )  THEN
             disp = array_position + 8 * ( glo_start - 1 )
             CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
             CALL MPI_FILE_WRITE( fh, array_1d, nr_iope, MPI_REAL, status, ierr )
          ENDIF
       ENDIF
    ENDIF
    CALL sm_io%sm_node_barrier()
    IF ( .NOT. sm_io%is_sm_active() )  DEALLOCATE( array_1d )
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, data, nr_val )
#endif
    array_position = array_position + total_number_of_surface_values * wp

!    IF ( lo_first_index == 1 )  THEN
!       IF ( debug_level >= 2 .AND. nr_val > 0 )  WRITE(9,*) 'w_surf_1 ', TRIM( name ), ' ',       &
!                                                            nr_val, SUM( data(1:nr_val) )
!    ELSE
!       IF ( debug_level >= 2 .AND. nr_val > 0 )  WRITE(9,*) 'w_surf_n ', TRIM( name ), ' ',       &
!                                                            lo_first_index, nr_val, SUM( data(1:nr_val) )
!    ENDIF

 END SUBROUTINE wrd_mpi_io_surface



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 2d-REAL surface data array with MPI-IO.
!> These consist of multiple 1d-REAL surface data arrays.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_surface_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)         ::  name  !<

    INTEGER(iwp)                         ::  i     !<

    REAL(wp), INTENT(IN), DIMENSION(:,:) ::  data  !<


    DO  i = 1, SIZE( data, 1 )
       CALL wrd_mpi_io_surface( name, data(i,:), first_index = i )
    ENDDO

 END SUBROUTINE wrd_mpi_io_surface_2d



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Close restart file for MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_close

    IMPLICIT NONE

    INTEGER(iwp)                       ::  gh_size  !<
    INTEGER(KIND=rd_offset_kind)       ::  offset   !<
#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status   !<
#endif

#if ! defined( __parallel )
    TYPE(C_PTR)                        ::  buf_ptr  !<
#endif


    offset = 0

    IF ( wr_flag  .AND.  sm_io%iam_io_pe )  THEN

       tgh%nr_int    = header_int_index - 1
       tgh%nr_char   = header_char_index - 1
       tgh%nr_real   = header_real_index - 1
       tgh%nr_arrays = header_array_index - 1
       tgh%total_nx  = iog%nx + 1
       tgh%total_ny  = iog%ny + 1
       IF ( include_total_domain_boundaries )  THEN  ! Not sure, if LOGICAL interpretation is the same for all compilers,
          tgh%i_outer_bound = 1                      ! therefore store as INTEGER in general header
       ELSE
          tgh%i_outer_bound = 0
       ENDIF
!
!--    Check for big/little endian format. This check is currently not used, and could be removed
!--    if we can assume little endian as the default on all machines.
       CALL rd_mpi_io_check_endian( tgh%endian )

!
!--    Set default view
#if defined( __parallel )
       CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
!
!--    Write header file
       gh_size = STORAGE_SIZE( tgh ) / 8
       IF ( myid == 0 )  THEN  ! myid = 0 always performs I/O, even if I/O is limited to some cores
#if defined( __parallel )
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, tgh, gh_size, MPI_INT, status, ierr )
          header_position = header_position + gh_size
!
!--       INTEGER values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, int_names, SIZE( int_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( int_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, int_values, SIZE( int_values ), MPI_INT, status, ierr )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, text_lines, SIZE( text_lines ) * 128, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, real_names, SIZE( real_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( real_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, real_values, SIZE( real_values ), MPI_REAL, status, ierr )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-distributed array headers, all replicated array headers
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, array_names, SIZE( array_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( array_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, array_offset, SIZE( array_offset ) * MPI_OFFSET_KIND,           &
                               MPI_BYTE, status, ierr )  ! There is no INTEGER*8 datatype in MPI
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#else
          CALL posix_lseek( fh, header_position )
          buf_ptr = C_LOC( tgh )
          CALL posix_write( fh, buf_ptr, gh_size )
          header_position = header_position + gh_size
!
!--       INTEGER values
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, int_names )
          header_position = header_position + SIZE( int_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, int_values, SIZE( int_values ) )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, text_lines )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, real_names )
          header_position = header_position + SIZE( real_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, real_values, SIZE( real_values ) )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-distributed array headers, all replicated array headers
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, array_names )
          header_position = header_position + SIZE( array_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, array_offset, SIZE( array_offset ) )
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#endif
          IF ( debug_output )  CALL rd_mpi_io_print_header
       ENDIF

    ENDIF

!
!-- Free file types
    CALL rd_mpi_io_free_filetypes

!
!-- Close MPI-IO files
#if defined( __parallel )
!
!-- Restart file has been opened with comm2d
    IF ( fhs /= -1 )  THEN
       CALL MPI_FILE_CLOSE( fhs, ierr )
    ENDIF
!
!-- Free RMA windows
    IF ( cyclic_fill_mode )  THEN
       CALL MPI_WIN_FREE( rmawin_2di, ierr )
       CALL MPI_WIN_FREE( rmawin_2d, ierr )
       CALL MPI_WIN_FREE( rmawin_3d, ierr )
    ENDIF
#endif

    IF ( .NOT. pe_active_for_read )  RETURN
!
!-- TODO: better explain the following message
!-- In case of a non-cyclic read, pe_active_for_read is set to .TRUE.
    IF ( sm_io%iam_io_pe )  THEN

#if defined( __parallel )
       CALL MPI_FILE_CLOSE( fh, ierr )
#else
       CALL posix_close( fh )
#endif

    ENDIF

    fh = -1

    restart_file_size = array_position / ( 1024.0_dp * 1024.0_dp )

 END SUBROUTINE rd_mpi_io_close



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Check if the restart file is currently open, i.e. if a valid file handle exists.
!--------------------------------------------------------------------------------------------------!
 FUNCTION rd_mpi_io_check_open()  RESULT( isopen )

    LOGICAL ::  isopen

    isopen = ( fh /= -1 )

 END FUNCTION rd_mpi_io_check_open
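
!
!-- Illustrative usage (hypothetical caller code): guard a close against a file that was never
!-- opened.
!
!--    IF ( rd_mpi_io_check_open() )  CALL rd_mpi_io_close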


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine prepares a filetype and some variables for the I/O of surface data.
!> Whenever a new set of start_index and end_index is used, rd_mpi_io_surface_filetypes has to be
!> called. A main feature of this subroutine is computing the global start indices of the 1d- and
!> 2d-surface arrays.
!> Even if I/O is done by a limited number of cores only, the surface data are read by ALL cores!
!> Reading them by some cores and then distributing the data would result in complicated code
!> which is susceptible to errors and would overload the reading subroutine. Since reading of
!> surface data is not time critical (the data size is comparably small), it is read by all cores.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_surface_filetypes( start_index, end_index, data_to_write, global_start )

    IMPLICIT NONE

    INTEGER(iwp)                 ::  i       !< loop index
    INTEGER(iwp)                 ::  j       !< loop index
    INTEGER(KIND=rd_offset_kind) ::  offset  !<

    INTEGER(iwp), DIMENSION(1)   ::  dims1   !<
    INTEGER(iwp), DIMENSION(1)   ::  lize1   !<
    INTEGER(iwp), DIMENSION(1)   ::  start1  !<

    INTEGER(iwp), DIMENSION(0:numprocs-1) ::  all_nr_val  !< number of values for all PEs
    INTEGER(iwp), DIMENSION(0:numprocs-1) ::  lo_nr_val   !< local number of values in x and y direction

    INTEGER, INTENT(INOUT), DIMENSION(nys:nyn,nxl:nxr) ::  end_index     !<
    INTEGER, INTENT(OUT), DIMENSION(nys:nyn,nxl:nxr)   ::  global_start  !<
    INTEGER, INTENT(INOUT), DIMENSION(nys:nyn,nxl:nxr) ::  start_index   !<

    LOGICAL, INTENT(OUT) ::  data_to_write  !< returns, if surface data have to be written

!
!-- Actions during reading
    IF ( rd_flag )  THEN
!
!--    Set start index and end index for the mainrun grid.
!--    ATTENTION: This works only for horizontal surfaces with one value per grid cell!
       IF ( cyclic_fill_mode )  THEN
          DO  i = nxl, nxr
             DO  j = nys, nyn
                start_index(j,i) = ( i - nxl ) * nny + j - nys + 1
                end_index(j,i)   = start_index(j,i)
             ENDDO
          ENDDO
       ENDIF

       IF ( .NOT. ALLOCATED( m_start_index )  )  ALLOCATE( m_start_index(nys:nyn,nxl:nxr)  )
       IF ( .NOT. ALLOCATED( m_end_index )    )  ALLOCATE( m_end_index(nys:nyn,nxl:nxr)    )
       IF ( .NOT. ALLOCATED( m_global_start ) )  ALLOCATE( m_global_start(nys:nyn,nxl:nxr) )
!
!--    Save arrays for later reading
       m_start_index  = start_index
       m_end_index    = end_index
       m_global_start = global_start
       nr_val = MAXVAL( end_index )

    ENDIF

    IF ( .NOT. pe_active_for_read )  RETURN

    IF ( cyclic_fill_mode )  CALL prerun_grid%activate_grid_from_this_class()

    offset = 0
    lo_nr_val = 0
    lo_nr_val(myid) = MAXVAL( end_index )
#if defined( __parallel )
    CALL MPI_ALLREDUCE( lo_nr_val, all_nr_val, numprocs, MPI_INTEGER, MPI_SUM, comm2d, ierr )
    IF ( ft_surf /= -1  .AND.  sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_FREE( ft_surf, ierr )  ! If set, free last surface filetype
    ENDIF

    IF ( win_surf /= -1 )  THEN
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_WIN_FREE( win_surf, ierr )
       ENDIF
       win_surf = -1
    ENDIF

    IF ( sm_io%is_sm_active()  .AND.  rd_flag )  THEN
       IF ( fhs == -1 )  THEN
          CALL MPI_FILE_OPEN( comm2d, TRIM( io_file_name ), MPI_MODE_RDONLY, MPI_INFO_NULL, fhs,   &
                              ierr )
       ENDIF
    ELSE
       fhs = fh
    ENDIF
#else
    all_nr_val(myid) = lo_nr_val(myid)
#endif
    nr_val = lo_nr_val(myid)

    total_number_of_surface_values = 0
    DO  i = 0, numprocs-1
       IF ( i == myid )  THEN
          glo_start = total_number_of_surface_values + 1
       ENDIF
       total_number_of_surface_values = total_number_of_surface_values + all_nr_val(i)
    ENDDO

!
!-- Actions during reading
    IF ( rd_flag )  THEN

#if defined( __parallel )
       CALL MPI_FILE_SET_VIEW( fhs, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
    ENDIF

    IF ( cyclic_fill_mode )  CALL mainrun_grid%activate_grid_from_this_class()

!
!-- Actions during writing
    IF ( wr_flag )  THEN
!
!--    Create surface filetype
       ft_surf      = -1
       global_start = start_index + glo_start - 1

       WHERE ( end_index < start_index )
          global_start = -1
       ENDWHERE

#if defined( __parallel )
       IF ( sm_io%is_sm_active() )  THEN
          IF ( sm_io%iam_io_pe )  THEN
!
!--          Calculate number of values of all PEs of an I/O group
             nr_iope = 0
             DO  i = myid, myid+sm_io%sh_npes-1
                nr_iope = nr_iope + all_nr_val(i)
             ENDDO
          ELSE
             local_start = 0
             DO  i = myid-sm_io%sh_rank, myid-1
                local_start = local_start + all_nr_val(i)
             ENDDO
          ENDIF
!
!--       Get the size of shared memory window on all PEs
          CALL MPI_BCAST( nr_iope, 1, MPI_INTEGER, 0, sm_io%comm_shared, ierr )
          CALL sm_io%sm_allocate_shared( array_1d, 1, MAX( 1, nr_iope ), win_surf )
       ELSE
          nr_iope = nr_val
       ENDIF
#else
       nr_iope = nr_val
#endif

!
!--    Check, if surface data exist on this PE
       data_to_write = .TRUE.
       IF ( total_number_of_surface_values == 0 )  THEN
          data_to_write = .FALSE.
          RETURN
       ENDIF

       IF ( sm_io%iam_io_pe )  THEN

          all_pes_write = ( MINVAL( all_nr_val ) > 0 )

          IF ( all_pes_write )  THEN
             dims1(1)  = total_number_of_surface_values
             lize1(1)  = nr_iope
             start1(1) = glo_start-1

#if defined( __parallel )
             IF ( total_number_of_surface_values > 0 )  THEN
                CALL MPI_TYPE_CREATE_SUBARRAY( 1, dims1, lize1, start1, MPI_ORDER_FORTRAN,         &
                                               MPI_REAL, ft_surf, ierr )
                CALL MPI_TYPE_COMMIT( ft_surf, ierr )
             ENDIF
#endif
          ENDIF
       ENDIF

    ENDIF

 END SUBROUTINE rd_mpi_io_surface_filetypes
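
!
!-- Worked example for the global start computation above (numbers are hypothetical): with three
!-- PEs holding all_nr_val = (/ 4, 0, 2 /) surface values, total_number_of_surface_values = 6 and
!-- glo_start = 1, 5 and 5 on PEs 0, 1 and 2, respectively. Since PE 1 holds no values, its
!-- end_index < start_index, and its global_start entries are set to -1 in the writing branch.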


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine creates file types to access 2d-/3d-REAL arrays and 2d-INTEGER arrays
!> distributed in blocks among processes to a single file that contains the global arrays.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_create_filetypes

    IMPLICIT NONE

    INTEGER, DIMENSION(2) ::  dims2   !<
    INTEGER, DIMENSION(2) ::  lize2   !<
    INTEGER, DIMENSION(2) ::  start2  !<

    INTEGER, DIMENSION(3) ::  dims3   !<
    INTEGER, DIMENSION(3) ::  lize3   !<
    INTEGER, DIMENSION(3) ::  start3  !<

    TYPE(domain_decomposition_grid_features) ::  save_io_grid  !< temporary variable to store grid settings


    IF ( sm_io%is_sm_active() )  THEN
       save_io_grid = sm_io%io_grid
    ENDIF

    IF ( .NOT. pe_active_for_read )  RETURN

    IF ( cyclic_fill_mode )  CALL prerun_grid%activate_grid_from_this_class()

!
!-- Decision, if storage with or without ghost layers.
!-- Please note that the indexing of the global array always starts at 0, even in Fortran.
!-- Therefore the PE local indices have to be shifted by nbgp in the case with ghost layers.
    IF ( include_total_domain_boundaries )  THEN

       iog%nxl = nxl + nbgp
       iog%nxr = nxr + nbgp
       iog%nys = nys + nbgp
       iog%nyn = nyn + nbgp
       iog%nnx = nnx
       iog%nny = nny
       iog%nx  = nx + 2 * nbgp
       iog%ny  = ny + 2 * nbgp
       IF ( myidx == 0 )  THEN
          iog%nxl = iog%nxl - nbgp
          iog%nnx = iog%nnx + nbgp
       ENDIF
       IF ( myidx == npex-1  .OR.  npex == -1 )  THEN  ! npex == -1 if -D__parallel not set
          iog%nxr = iog%nxr + nbgp
          iog%nnx = iog%nnx + nbgp
       ENDIF
       IF ( myidy == 0 )  THEN
          iog%nys = iog%nys - nbgp
          iog%nny = iog%nny + nbgp
       ENDIF
       IF ( myidy == npey-1  .OR.  npey == -1 )  THEN  ! npey == -1 if -D__parallel not set
          iog%nyn = iog%nyn + nbgp
          iog%nny = iog%nny + nbgp
       ENDIF

       CALL sm_io%sm_adjust_outer_boundary()

    ELSE

       iog%nxl = nxl
       iog%nxr = nxr
       iog%nys = nys
       iog%nyn = nyn
       iog%nnx = nnx
       iog%nny = nny
       iog%nx  = nx
       iog%ny  = ny

    ENDIF

    IF ( sm_io%is_sm_active() )  THEN
#if defined( __parallel )
       CALL sm_io%sm_allocate_shared( array_2d, sm_io%io_grid%nxl, sm_io%io_grid%nxr,              &
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_2dr )
       CALL sm_io%sm_allocate_shared( array_2di, save_io_grid%nxl, save_io_grid%nxr,               &
                                      save_io_grid%nys, save_io_grid%nyn, win_2di )
       CALL sm_io%sm_allocate_shared( array_3d, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,  &
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3dr )
#endif
    ELSE
       ALLOCATE( array_2d(iog%nxl:iog%nxr,iog%nys:iog%nyn) )
       ALLOCATE( array_2di(nxl:nxr,nys:nyn) )
       ALLOCATE( array_3d(nzb:nzt+1,iog%nxl:iog%nxr,iog%nys:iog%nyn) )
       sm_io%io_grid = iog
    ENDIF

!
!-- Create filetype for 2d-REAL array with ghost layers around the total domain
    dims2(1) = iog%nx + 1
    dims2(2) = iog%ny + 1

    lize2(1) = sm_io%io_grid%nnx
    lize2(2) = sm_io%io_grid%nny

    start2(1) = sm_io%io_grid%nxl
    start2(2) = sm_io%io_grid%nys

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 2, dims2, lize2, start2, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_2d, ierr )
       CALL MPI_TYPE_COMMIT( ft_2d, ierr )
    ENDIF
#endif
!
!-- Create filetype for 2d-INTEGER array without ghost layers around the total domain
    dims2(1) = nx + 1
    dims2(2) = ny + 1

    IF ( sm_io%is_sm_active() )  THEN

       lize2(1) = save_io_grid%nnx
       lize2(2) = save_io_grid%nny

       start2(1) = save_io_grid%nxl
       start2(2) = save_io_grid%nys

    ELSE

       lize2(1) = nnx
       lize2(2) = nny

       start2(1) = nxl
       start2(2) = nys

    ENDIF

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 2, dims2, lize2, start2, MPI_ORDER_FORTRAN, MPI_INTEGER,     &
                                      ft_2di_nb, ierr )
       CALL MPI_TYPE_COMMIT( ft_2di_nb, ierr )
    ENDIF
#endif
!
!-- Create filetype for 3d-REAL array
    dims3(1) = nz + 2
    dims3(2) = iog%nx + 1
    dims3(3) = iog%ny + 1

    lize3(1) = dims3(1)
    lize3(2) = sm_io%io_grid%nnx
    lize3(3) = sm_io%io_grid%nny

    start3(1) = nzb
    start3(2) = sm_io%io_grid%nxl
    start3(3) = sm_io%io_grid%nys

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_3d, ierr )
       CALL MPI_TYPE_COMMIT( ft_3d, ierr )
    ENDIF
#endif

    IF ( cyclic_fill_mode )  CALL mainrun_grid%activate_grid_from_this_class()

 END SUBROUTINE rd_mpi_io_create_filetypes
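
!
!-- Worked example for the 2d-REAL subarray filetype above (numbers are hypothetical): for a
!-- 4 x 4 total domain without outer boundaries, split between two PEs along x, PE 1 holds
!-- nnx = 2, nny = 4, nxl = 2, nys = 0, so dims2 = (/ 4, 4 /), lize2 = (/ 2, 4 /) and
!-- start2 = (/ 2, 0 /) describe its block within the global array (subarray starts are
!-- zero-based in MPI).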
---|
4121 | |
---|
4122 | |
---|
4123 | |
---|
4124 | !--------------------------------------------------------------------------------------------------! |
---|
4125 | ! Description: |
---|
4126 | ! ------------ |
---|
4127 | !> This subroutine creates file types to access 3d-INTEGER*4 arrays and 3d-INTEGER*8 arrays |
---|
4128 | !> distributed in blocks among processes to a single file that contains the global arrays. |
---|
4129 | !> These types are only used for particle data. |
---|
4130 | !--------------------------------------------------------------------------------------------------! |
---|
4131 | SUBROUTINE rd_mpi_io_particle_filetypes |
---|
4132 | |
---|
4133 | IMPLICIT NONE |
---|
4134 | |
---|
4135 | INTEGER, DIMENSION(3) :: dims3 !< |
---|
4136 | INTEGER, DIMENSION(3) :: lize3 !< |
---|
4137 | INTEGER, DIMENSION(3) :: start3 !< |
---|
4138 | |
---|
4139 | TYPE(domain_decomposition_grid_features) :: save_io_grid !< temporary variable to store grid settings |
---|
4140 | |
---|
4141 | ! |
---|
4142 | !-- MPI_INTEGER8 is not standard MPI, but is supported on most MPI distibutions |
---|
4143 | !-- If not suppported, a workaround could be enabled with the following preprocessor directive |
---|
4144 | !#if defined( __NO_INTEGER8) |
---|
4145 | ! CALL MPI_TYPE_CONTIGUOUS( 2, MPI_INTEGER, MPI_INTEGER8, ierr ) |
---|
4146 | ! CALL MPI_TYPE_COMMIT( MPI_INTEGER8, ierr ) |
---|
4147 | !#endif |
---|
4148 | |
---|
4149 | IF ( sm_io%is_sm_active() ) THEN |
---|
4150 | save_io_grid = sm_io%io_grid |
---|
4151 | ENDIF |
---|
4152 | |
---|
4153 | IF( .NOT. pe_active_for_read ) RETURN |
---|
4154 | |
---|
4155 | ! |
---|
4156 | !-- Decision, if storage with or without ghost layers. |
---|
4157 | !-- Please note that the indexing of the global array always starts at 0, even in Fortran. |
---|
4158 | !-- Therefore the PE local indices have to be shifted by nbgp in the case with ghost layers. |
---|
4159 | IF ( include_total_domain_boundaries ) THEN |
---|
4160 | |
---|
4161 | iog%nxl = nxl + nbgp |
---|
4162 | iog%nxr = nxr + nbgp |
---|
4163 | iog%nys = nys + nbgp |
---|
4164 | iog%nyn = nyn + nbgp |
---|
4165 | iog%nnx = nnx |
---|
4166 | iog%nny = nny |
---|
4167 | iog%nx = nx + 2 * nbgp |
---|
4168 | iog%ny = ny + 2 * nbgp |
---|
4169 | IF ( myidx == 0 ) THEN |
---|
4170 | iog%nxl = iog%nxl - nbgp |
---|
4171 | iog%nnx = iog%nnx + nbgp |
---|
4172 | ENDIF |
---|
4173 | IF ( myidx == npex-1 .OR. npex == -1 ) THEN ! npex == 1 if -D__parallel not set |
---|
4174 | iog%nxr = iog%nxr + nbgp |
---|
4175 | iog%nnx = iog%nnx + nbgp |
---|
4176 | ENDIF |
---|
4177 | IF ( myidy == 0 ) THEN |
---|
4178 | iog%nys = iog%nys - nbgp |
---|
4179 | iog%nny = iog%nny + nbgp |
---|
4180 | ENDIF |
---|
4181 | IF ( myidy == npey-1 .OR. npey == -1 ) THEN ! npey == 1 if -D__parallel not set |
---|
4182 | iog%nyn = iog%nyn + nbgp |
---|
4183 | iog%nny = iog%nny + nbgp |
---|
4184 | ENDIF |
---|
4185 | |
---|
4186 | CALL sm_io%sm_adjust_outer_boundary() |
---|
4187 | |
---|
4188 | ELSE |
---|
4189 | |
---|
4190 | iog%nxl = nxl |
---|
4191 | iog%nxr = nxr |
---|
4192 | iog%nys = nys |
---|
4193 | iog%nyn = nyn |
---|
4194 | iog%nnx = nnx |
---|
4195 | iog%nny = nny |
---|
4196 | iog%nx = nx |
---|
4197 | iog%ny = ny |
---|
4198 | |
---|
4199 | ENDIF |
---|

    IF ( sm_io%is_sm_active() )  THEN
#if defined( __parallel )
       CALL sm_io%sm_allocate_shared( array_3di4, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,&
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3di4 )
       CALL sm_io%sm_allocate_shared( array_3di8, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,&
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3di8 )
#endif
    ELSE
       ALLOCATE( array_3di4(nzb:nzt+1,iog%nxl:iog%nxr,iog%nys:iog%nyn) )
       ALLOCATE( array_3di8(nzb:nzt+1,iog%nxl:iog%nxr,iog%nys:iog%nyn) )

       sm_io%io_grid = iog
    ENDIF

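!
!-- With shared-memory I/O active, the buffers above are allocated once per compute node in an
!-- MPI shared-memory window and only the node's I/O PE accesses the restart file; otherwise
!-- every PE allocates a private buffer and the I/O grid equals the local subdomain.
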
!
!-- Create filetype for 3d INTEGER array
    dims3(1)  = nz + 2
    dims3(2)  = iog%nx + 1
    dims3(3)  = iog%ny + 1

    lize3(1)  = dims3(1)
    lize3(2)  = sm_io%io_grid%nnx
    lize3(3)  = sm_io%io_grid%nny

    start3(1) = nzb
    start3(2) = sm_io%io_grid%nxl
    start3(3) = sm_io%io_grid%nys

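!
!-- Note: MPI_TYPE_CREATE_SUBARRAY expects zero-based start offsets (C convention), which is why
!-- the zero-based global indices of the I/O grid can be used directly. dims3 holds nx+1 and ny+1
!-- points in the horizontal, because the global indices run from 0 to nx and 0 to ny.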
#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_INTEGER,     &
                                      ft_3di4, ierr )
       CALL MPI_TYPE_COMMIT( ft_3di4, ierr )

       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_INTEGER8,    &
                                      ft_3di8, ierr )
       CALL MPI_TYPE_COMMIT( ft_3di8, ierr )
    ENDIF
#endif

 END SUBROUTINE rd_mpi_io_particle_filetypes
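!
!-- A minimal usage sketch for the particle file types created above (illustrative only; the
!-- actual read/write routines are implemented elsewhere in this module, and the names fh and
!-- array_position are assumed from the module's conventions):
!
!    CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_3di4, 'native', MPI_INFO_NULL,    &
!                            ierr )
!    CALL MPI_FILE_READ_ALL( fh, array_3di4, SIZE( array_3di4 ), MPI_INTEGER, MPI_STATUS_IGNORE,   &
!                            ierr )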



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine creates the file type needed to access 3d-soil arrays, distributed in blocks
!> among processes, in a single file that contains the global arrays. It is not required in
!> serial mode.
!--------------------------------------------------------------------------------------------------!
#if defined( __parallel )
 SUBROUTINE rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil )

    IMPLICIT NONE

    INTEGER, INTENT(IN) ::  nzb_soil  !<
    INTEGER, INTENT(IN) ::  nzt_soil  !<

    INTEGER, DIMENSION(3) ::  dims3   !<
    INTEGER, DIMENSION(3) ::  lize3   !<
    INTEGER, DIMENSION(3) ::  start3  !<

    IF ( sm_io%is_sm_active() )  THEN
       CALL sm_io%sm_allocate_shared( array_3d_soil, nzb_soil, nzt_soil, sm_io%io_grid%nxl,        &
                                      sm_io%io_grid%nxr, sm_io%io_grid%nys, sm_io%io_grid%nyn,     &
                                      win_3ds )
    ELSE
       ALLOCATE( array_3d_soil(nzb_soil:nzt_soil,iog%nxl:iog%nxr,iog%nys:iog%nyn) )
       sm_io%io_grid = iog
    ENDIF

!
!-- Create filetype for 3d-soil array
    dims3(1)  = nzt_soil - nzb_soil + 1
    dims3(2)  = iog%nx + 1
    dims3(3)  = iog%ny + 1

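!
!-- In contrast to the atmospheric arrays (nz+2 levels for nzb:nzt+1), the vertical soil dimension
!-- has no additional boundary levels, so dims3(1) is simply the number of soil layers.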
    lize3(1)  = dims3(1)
    lize3(2)  = sm_io%io_grid%nnx
    lize3(3)  = sm_io%io_grid%nny

    start3(1) = nzb_soil
    start3(2) = sm_io%io_grid%nxl
    start3(3) = sm_io%io_grid%nys

    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_3dsoil, ierr )
       CALL MPI_TYPE_COMMIT( ft_3dsoil, ierr )
    ENDIF

 END SUBROUTINE rd_mpi_io_create_filetypes_3dsoil
#endif



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Free all file types that have been created for MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_free_filetypes

    IMPLICIT NONE


#if defined( __parallel )
    IF ( filetypes_created )  THEN

       IF ( sm_io%iam_io_pe )  THEN
          CALL MPI_TYPE_FREE( ft_2d, ierr )
          CALL MPI_TYPE_FREE( ft_2di_nb, ierr )
          CALL MPI_TYPE_FREE( ft_3d, ierr )
       ENDIF

       IF ( sm_io%is_sm_active() )  THEN
          CALL sm_io%sm_free_shared( win_2dr )
          CALL sm_io%sm_free_shared( win_2di )
          CALL sm_io%sm_free_shared( win_3dr )
       ELSE
          DEALLOCATE( array_2d, array_2di, array_3d )
       ENDIF

    ENDIF
!
!-- Free last surface filetype
    IF ( sm_io%iam_io_pe  .AND.  ft_surf /= -1 )  THEN
       CALL MPI_TYPE_FREE( ft_surf, ierr )
    ENDIF

    IF ( sm_io%is_sm_active()  .AND.  win_surf /= -1 )  THEN
       CALL sm_io%sm_free_shared( win_surf )
    ENDIF

!
!-- Free last particle filetypes
    IF ( sm_io%iam_io_pe  .AND.  ft_3di4 /= -1 )  THEN
       CALL MPI_TYPE_FREE( ft_3di4, ierr )
       CALL MPI_TYPE_FREE( ft_3di8, ierr )
    ENDIF

    IF ( sm_io%is_sm_active()  .AND.  win_3di4 /= -1 )  THEN
       CALL sm_io%sm_free_shared( win_3di4 )
       CALL sm_io%sm_free_shared( win_3di8 )
    ENDIF

    ft_surf  = -1
    win_surf = -1
!
!-- Reset the particle filetype/window handles as well, so that a repeated call does not attempt
!-- to free handles that have already been freed (the guards above test against -1).
    ft_3di4  = -1
    ft_3di8  = -1
    win_3di4 = -1
    win_3di8 = -1
#else
    IF ( ASSOCIATED( array_2d )   )  DEALLOCATE( array_2d )
    IF ( ASSOCIATED( array_2di )  )  DEALLOCATE( array_2di )
    IF ( ASSOCIATED( array_3d )   )  DEALLOCATE( array_3d )
    IF ( ASSOCIATED( array_3di4 ) )  DEALLOCATE( array_3di4 )
    IF ( ASSOCIATED( array_3di8 ) )  DEALLOCATE( array_3di8 )
#endif

 END SUBROUTINE rd_mpi_io_free_filetypes



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Print the restart data file header (MPI-IO format) for debugging.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_print_header

    IMPLICIT NONE

    INTEGER(iwp) ::  i  !<

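!
!-- All output goes to unit 9, the per-PE debug file of PALM.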
    WRITE( 9, * )  'header position after reading the restart file header: ', header_position
    WRITE( 9, * )  ' '
    WRITE( 9, * )  'restart file header content:'
    WRITE( 9, * )  '----------------------------'
    WRITE( 9, * )  ' '

    WRITE( 9, * )  ' CHARACTER header values Total number: ', tgh%nr_char
    WRITE( 9, * )  ' '
    DO  i = 1, tgh%nr_char
       WRITE( 9, '(I3,A,1X,A)' )  i, ': ', text_lines(i)(1:80)
    ENDDO
    WRITE( 9, * )  ' '

    WRITE( 9, * )  ' INTEGER header variables and values Total number: ', tgh%nr_int
    WRITE( 9, * )  ' '
    DO  i = 1, tgh%nr_int
       WRITE( 9, * )  ' variable: ', int_names(i), ' value: ', int_values(i)
    ENDDO
    WRITE( 9, * )  ' '

    WRITE( 9, * )  ' REAL header variables and values Total number: ', tgh%nr_real
    WRITE( 9, * )  ' '
    DO  i = 1, tgh%nr_real
       WRITE( 9, * )  ' variable: ', real_names(i), ' value: ', real_values(i)
    ENDDO
    WRITE( 9, * )  ' '

    WRITE( 9, * )  ' Header entries with offset (2d/3d arrays) Total number: ', tgh%nr_arrays
    WRITE( 9, * )  ' '
    DO  i = 1, tgh%nr_arrays
       WRITE( 9, * )  ' variable: ', array_names(i), ' offset: ', array_offset(i)
    ENDDO
    WRITE( 9, * )  ' '

 END SUBROUTINE rd_mpi_io_print_header



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Check whether big or little endian data format is used.
!> An int*4 pointer is associated with an int*8 target variable, the int*8 variable is set to 1,
!> and then it is checked whether the first 4 bytes of the variable are equal to 1 (little endian)
!> or not (big endian).
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_check_endian( i_endian )

    IMPLICIT NONE

    INTEGER, INTENT(out) ::  i_endian  !<
    INTEGER(KIND=8), TARGET ::  int8  !<

    INTEGER, DIMENSION(1) ::  bufshape  !<
    INTEGER(KIND=4), POINTER, DIMENSION(:) ::  int4  !<

    TYPE(C_PTR) ::  ptr  !<


    ptr = C_LOC( int8 )
    bufshape(1) = 2
    CALL C_F_POINTER( ptr, int4, bufshape )

    int8 = 1

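!
!-- Example: on a little-endian machine the INTEGER*8 value 1 is stored as the byte sequence
!-- 01 00 00 00 | 00 00 00 00, so the first 4-byte word int4(1) already contains the value 1;
!-- on a big-endian machine the sequence is 00 00 00 00 | 00 00 00 01 and int4(1) is 0.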
    IF ( int4(1) == 1 )  THEN
       i_endian = 1    ! Little endian
    ELSE
       i_endian = 2    ! Big endian
    ENDIF

 END SUBROUTINE rd_mpi_io_check_endian

 END MODULE restart_data_mpi_io_mod