- Timestamp: May 14, 2020 6:35:22 PM
- Location: palm/trunk/SOURCE
- Files: 1 added, 5 edited
palm/trunk/SOURCE/Makefile
(r4525 → r4534)
- File-header revision history: new entry "shared_memory_io_mod included and respective dependencies added" added above the r4525 entry ("dependency for salsa_mod updated").
- shared_memory_io_mod.f90 added to the source-file list (between salsa_mod.f90 and singleton_mod.f90).
- shared_memory_io_mod.o appended to the object dependency list that previously ended with posix_interface_mod.o.
- New dependency rule added after the singleton_mod.o rule: shared_memory_io_mod.o depends on modules.o.
palm/trunk/SOURCE/check_parameters.f90
(r4514 → r4534)
- File-header revision history: new entry "adjustments for I/O on reduced number of cores using shared memory MPI" added above the r4514 entry ("Enable output of qsurf and ssurf").
- The checks of restart_data_format, restart_data_format_input and restart_data_format_output now also accept the new value 'mpi_shared_memory' in addition to 'fortran_binary' and 'mpi'; any other value still triggers the "illegal restart data ... format" error message.
- New consistency check: if only one of restart_data_format_input / restart_data_format_output is set to 'mpi_shared_memory', the error "restart data formats both must be set "mpi_shared_memory"" is raised.
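For illustration, a minimal, self-contained sketch of the kind of combined validity and consistency check introduced here. The program, the routine check_restart_formats and the idea of returning the error text through an argument are assumptions made for this example only; the actual code uses the message routine and the module variables shown in the diff.

    ! Sketch only: combined validity and consistency check for restart format strings.
    PROGRAM restart_format_check_demo
       IMPLICIT NONE
       CHARACTER(LEN=80) :: msg

       CALL check_restart_formats( 'mpi_shared_memory', 'mpi', msg )
       PRINT *, TRIM( msg )   ! -> restart data formats both must be set "mpi_shared_memory"

    CONTAINS

       SUBROUTINE check_restart_formats( fmt_in, fmt_out, error_message )
          CHARACTER(LEN=*), INTENT(IN)  :: fmt_in         !< e.g. restart_data_format_input
          CHARACTER(LEN=*), INTENT(IN)  :: fmt_out        !< e.g. restart_data_format_output
          CHARACTER(LEN=*), INTENT(OUT) :: error_message  !< empty if both settings are acceptable

          CHARACTER(LEN=17), DIMENSION(3), PARAMETER :: allowed =                                 &
             [ 'fortran_binary   ', 'mpi              ', 'mpi_shared_memory' ]

          error_message = ''
          IF ( .NOT. ANY( TRIM( fmt_in ) == allowed ) )  THEN
             error_message = 'illegal restart data input format "' // TRIM( fmt_in ) // '"'
          ELSEIF ( .NOT. ANY( TRIM( fmt_out ) == allowed ) )  THEN
             error_message = 'illegal restart data output format "' // TRIM( fmt_out ) // '"'
          ELSEIF ( ( TRIM( fmt_in )  == 'mpi_shared_memory' ) .NEQV.                              &
                   ( TRIM( fmt_out ) == 'mpi_shared_memory' ) )  THEN
             error_message = 'restart data formats both must be set "mpi_shared_memory"'
          ENDIF

       END SUBROUTINE check_restart_formats

    END PROGRAM restart_format_check_demo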
palm/trunk/SOURCE/land_surface_model_mod.f90
(r4517 → r4534)
- File-header revision history: new entry "bugfix for switching on restart data output with MPI-IO" added above the r4517 entry ("added restart with MPI-IO for reading local arrays").
- Bugfix in the output of local restart arrays (diff context around line 6663): the ELSEIF branch that writes the averaged arrays with wrd_mpi_io (c_liq_av etc.) erroneously tested restart_data_format_output against 'fortran_binary'; it now correctly tests against 'mpi'.
palm/trunk/SOURCE/read_restart_data_mod.f90
(r4518 → r4534)
- File-header revision history: new entry "adjustments for I/O on reduced number of cores using shared memory MPI" added above the r4518 entry ("Move input of diagnostic output quantities to doq_rrd_local").
- In both the global and the local restart-input branches, the test TRIM( restart_data_format_input ) == 'mpi' is replaced by restart_data_format_input(1:3) == 'mpi', so that the new value 'mpi_shared_memory' selects the MPI-IO code path as well.
- When global restart data are read, the MPI-IO restart file is now opened with the new optional argument: CALL rd_mpi_io_open( 'read', 'BININ' // TRIM( coupling_char ), open_for_global_io_only = .TRUE. ).
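Both adjustments rely on plain Fortran idioms: a substring comparison that matches 'mpi' as well as 'mpi_shared_memory', and an OPTIONAL dummy argument that is treated as .FALSE. when it is not passed. A small, self-contained sketch follows; the program and the routine open_restart_file are invented for this example and are not the PALM interface.

    ! Sketch only: prefix test on the format string and an OPTIONAL dummy argument.
    PROGRAM prefix_and_optional_demo
       IMPLICIT NONE
       CHARACTER(LEN=20) :: restart_data_format_input = 'mpi_shared_memory'

       IF ( restart_data_format_input(1:3) == 'mpi' )  THEN
          CALL open_restart_file( 'BININ', open_for_global_io_only = .TRUE. )
       ENDIF

    CONTAINS

       SUBROUTINE open_restart_file( file_name, open_for_global_io_only )
          CHARACTER(LEN=*), INTENT(IN)  :: file_name
          LOGICAL, INTENT(IN), OPTIONAL :: open_for_global_io_only
          LOGICAL :: global_only

          global_only = .FALSE.
          IF ( PRESENT( open_for_global_io_only ) )  global_only = open_for_global_io_only

          PRINT *, 'opening ', TRIM( file_name ), '  global-only I/O: ', global_only
       END SUBROUTINE open_restart_file

    END PROGRAM prefix_and_optional_demo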
palm/trunk/SOURCE/restart_data_mpi_io_mod.f90
(r4500 → r4534)
- File-header revision history: new entry "I/O on reduced number of cores added (using shared memory MPI)" added above the r4500 entry ("Fix too long lines").
- Module level: restart_data_format_input is now also imported from control_parameters; the new module shared_memory_io_mod is used (local_boundaries, sm_class) and an instance sm_io of TYPE(sm_class) is declared; the local definition of the derived type local_boundaries is removed, since the type is now provided by shared_memory_io_mod.
- New module variables: io_file_name (to pass the file name between the subroutines of this module), comm_io (communicator actually used for MPI-IO), fhs (additional file handle for a file opened with comm2d), the shared-memory window handles win_2di, win_2dr, win_3dr, win_3ds and win_surf, nr_iope and local_start (number of surface values per I/O PE and local offset), and the switch io_on_limited_cores_per_node. The transfer buffers array_1d, array_2d, array_2di, array_3d and array_3d_soil, previously local automatic arrays of the individual read/write routines, become module-level POINTER, CONTIGUOUS arrays so that they can be placed in MPI shared-memory windows. The remaining declarations are reordered accordingly.
- rd_mpi_io_open gets the new OPTIONAL argument open_for_global_io_only. It stores the file name in io_file_name, switches io_on_limited_cores_per_node on when the restart format is 'mpi_shared_memory' (and off again when the file is opened for reading global data only, because shared memory MPI is not used for that), initializes the shared-memory communicators via sm_io%sm_init_comm(), and sets comm_io to sm_io%comm_io if shared-memory I/O is active, otherwise to comm2d. MPI_FILE_OPEN is now executed with comm_io and only on I/O PEs (sm_io%iam_io_pe); the sequential (posix) branch uses io_file_name.
- The file header (general header, the INTEGER/character/REAL name tables and the array-name/offset tables) is read by the I/O PEs only and afterwards broadcast with MPI_BCAST over sm_io%comm_shared to the remaining cores of each node (the CHARACTER tables entry by entry); the file types are not created here when the file was opened for reading global data only.
- In the read routines for distributed arrays (rrd_mpi_io_real_2d, rrd_mpi_io_int_2d, rrd_mpi_io_real_3d and rrd_mpi_io_real_3d_soil) the local buffers are replaced by the shared module buffers (array_2d, array_2di, array_3d, array_3d_soil); MPI_FILE_SET_VIEW and MPI_FILE_READ_ALL are executed on I/O PEs only and framed by sm_io%sm_node_barrier() calls, which have no effect if I/O on a limited number of cores is inactive. Debug checksums are now computed over the subdomain without ghost layers.
- All calls of the internal helper routines are renamed from rs_mpi_io_* to rd_mpi_io_* (error handling, header printing, file-type creation); the communicator setup behind sm_init_comm is sketched below.
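The new shared_memory_io_mod itself is not part of the diffs shown here, but the behaviour described above (sm_init_comm, comm_shared, comm_io, iam_io_pe) matches the usual MPI-3 approach of splitting the world communicator into node-local shared-memory communicators and letting one rank per node act as I/O PE. A minimal, self-contained sketch of that pattern, with all names chosen for this example only:

    ! Sketch only: split into node-local communicators and pick one I/O PE per node.
    PROGRAM io_pe_selection_demo
       USE MPI
       IMPLICIT NONE

       INTEGER :: ierr, world_rank, sh_rank, sh_npes, color
       INTEGER :: comm_shared   !< node-local (shared memory) communicator
       INTEGER :: comm_io       !< communicator containing one I/O PE per node
       LOGICAL :: iam_io_pe

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, world_rank, ierr )

    !  All ranks that can share memory (i.e. ranks on the same node) end up in comm_shared
       CALL MPI_COMM_SPLIT_TYPE( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,           &
                                 comm_shared, ierr )
       CALL MPI_COMM_RANK( comm_shared, sh_rank, ierr )
       CALL MPI_COMM_SIZE( comm_shared, sh_npes, ierr )

    !  Rank 0 of every node becomes an I/O PE; all I/O PEs together form comm_io
       iam_io_pe = ( sh_rank == 0 )
       color = MPI_UNDEFINED
       IF ( iam_io_pe )  color = 1
       CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, color, world_rank, comm_io, ierr )

       PRINT *, 'world rank ', world_rank, ' node-local rank ', sh_rank, ' of ', sh_npes,          &
                ' I/O PE: ', iam_io_pe

       IF ( iam_io_pe )  CALL MPI_COMM_FREE( comm_io, ierr )
       CALL MPI_COMM_FREE( comm_shared, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM io_pe_selection_demo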
- The corresponding write routines (wrd_mpi_io_real_2d, wrd_mpi_io_int_2d, wrd_mpi_io_real_3d and wrd_mpi_io_real_3d_soil) follow the same scheme: the data are copied into the shared buffers (array_2d, array_2di, array_3d, array_3d_soil), and MPI_FILE_SET_VIEW / MPI_FILE_WRITE_ALL are executed on I/O PEs only, framed by sm_io%sm_node_barrier() calls. For the soil array, the 3d-soil file type (and the associated shared buffer) is now created before the data are copied.
- Global (replicated) 1d REAL and INTEGER arrays are read by the I/O PEs only and then broadcast with MPI_BCAST over sm_io%comm_shared; in the corresponding write routines the MPI_FILE_SET_VIEW calls are restricted to I/O PEs (PE 0 still writes the replicated data).
- Surface data: as explained in a new header comment of rd_mpi_io_surface_filetypes, surface data are read by ALL cores even if I/O is restricted to a few cores per node, because reading them is not time critical and distributing them afterwards would complicate the code. rrd_mpi_io_surface therefore uses the additional file handle fhs, which belongs to a file opened with comm2d; a commented-out posix read is also activated. In wrd_mpi_io_surface the values are gathered into the shared buffer array_1d (or into a temporary copy if shared-memory I/O is inactive), and each I/O PE writes its nr_iope values between sm_io%sm_node_barrier() calls; the temporary copy is deallocated afterwards. Debug messages are slightly adjusted (e.g. 'r_surf_1', 'w_surf_1').
- rd_mpi_io_close: the header is written only if the file was opened for writing and the PE is an I/O PE (myid == 0 always performs I/O, even if I/O is limited to some cores); besides fh (closed on I/O PEs only), the additional handle fhs, which was opened with comm2d, is closed if it is in use; the check-endian and print-header helpers are called under their new rd_mpi_io_* names.
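The shared buffers used by these routines live in MPI-3 shared-memory windows: every PE of a node fills its part of the buffer directly, a node barrier separates the fill phase from the output phase, and only the node's I/O PE passes the assembled buffer to MPI-IO. The following self-contained sketch shows that allocate/fill/synchronize pattern without any file I/O; the buffer size, all names and the use of plain MPI_BARRIER calls for node synchronization are choices made for this example only (it relies on the TYPE(C_PTR) interfaces of MPI_WIN_ALLOCATE_SHARED / MPI_WIN_SHARED_QUERY provided by the mpi module of current MPI libraries).

    ! Sketch only: node-shared buffer, filled by all PEs of the node, visible to the I/O PE.
    PROGRAM shared_buffer_demo
       USE MPI
       USE, INTRINSIC :: ISO_C_BINDING, ONLY: C_PTR, C_F_POINTER
       IMPLICIT NONE

       INTEGER, PARAMETER :: nvals_per_pe = 4
       INTEGER :: ierr, sh_rank, sh_npes, win, disp_unit, comm_shared
       INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize
       TYPE(C_PTR) :: base_ptr
       REAL(KIND=8), DIMENSION(:), POINTER :: array_1d

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_SPLIT_TYPE( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,           &
                                 comm_shared, ierr )
       CALL MPI_COMM_RANK( comm_shared, sh_rank, ierr )
       CALL MPI_COMM_SIZE( comm_shared, sh_npes, ierr )

    !  Only node-rank 0 (the I/O PE) provides the memory; all other PEs map the same segment
       winsize = 0
       IF ( sh_rank == 0 )  winsize = INT( sh_npes * nvals_per_pe * 8, MPI_ADDRESS_KIND )
       CALL MPI_WIN_ALLOCATE_SHARED( winsize, 8, MPI_INFO_NULL, comm_shared, base_ptr, win, ierr )
       IF ( sh_rank /= 0 )  THEN
          CALL MPI_WIN_SHARED_QUERY( win, 0, winsize, disp_unit, base_ptr, ierr )
       ENDIF
       CALL C_F_POINTER( base_ptr, array_1d, [ sh_npes * nvals_per_pe ] )

    !  Every PE fills its own slice of the node-shared buffer ...
       array_1d(sh_rank*nvals_per_pe+1:(sh_rank+1)*nvals_per_pe) = REAL( sh_rank, 8 )
    !  ... a node barrier separates the fill phase from the (I/O-PE only) output phase
       CALL MPI_BARRIER( comm_shared, ierr )
       IF ( sh_rank == 0 )  PRINT *, 'I/O PE sees ', SUM( array_1d ), ' from ', sh_npes, ' PEs'
       CALL MPI_BARRIER( comm_shared, ierr )

       CALL MPI_WIN_FREE( win, ierr )
       CALL MPI_COMM_FREE( comm_shared, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM shared_buffer_demo

In the module itself this node synchronization is wrapped in sm_io%sm_node_barrier(), which, as the in-code comments note, has no effect when I/O on a limited number of cores is inactive.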
- rd_mpi_io_surface_filetypes: the previously created surface file type (on I/O PEs) and the shared window win_surf are freed first; if shared-memory I/O is active and the file is open for reading, the extra handle fhs is opened with comm2d (otherwise fhs = fh) and the default view is set on fhs. For each I/O group, the number of surface values nr_iope handled by its I/O PE and the offset local_start of every other PE are computed, nr_iope is broadcast over sm_io%comm_shared, and the shared buffer array_1d is allocated via sm_io%sm_allocate_shared with window win_surf. The surface file type ft_surf is created and committed on I/O PEs only, with nr_iope values per I/O PE.
- rd_mpi_io_create_filetypes (renamed from rs_mpi_io_create_filetypes): the previous I/O grid is saved, sm_io%sm_adjust_outer_boundary() is called when the total-domain boundaries are included, and the transfer buffers array_2d, array_2di and array_3d are either allocated as shared-memory windows (win_2dr, win_2di, win_3dr) via sm_io%sm_allocate_shared or, without shared-memory I/O, as ordinary arrays with sm_io%io_grid set to lb. The MPI file types ft_2d, ft_2di_nb and ft_3d are now built from the sm_io%io_grid (or saved grid) extents and committed on I/O PEs only. rd_mpi_io_create_filetypes_3dsoil allocates array_3d_soil in the same way (window win_3ds) and commits ft_3dsoil on I/O PEs only.
- rd_mpi_io_free_filetypes (renamed from rs_mpi_io_free_filetypes): MPI file types are freed on I/O PEs only; the shared-memory windows (including win_surf) are freed via sm_io%sm_free_shared, or the buffers are deallocated if shared-memory I/O is inactive; ft_surf and win_surf are reset to -1.
- rs_mpi_io_print_header, rs_mpi_io_error and rs_mpi_io_check_endian are renamed to rd_mpi_io_print_header, rd_mpi_io_error and rd_mpi_io_check_endian; rd_mpi_io_error returns immediately on PEs that do not take part in I/O and uses comm_io for MPI_BARRIER and MPI_ABORT.
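The file types created here describe, for each PE, which rectangle of the global array in the restart file belongs to it, so that collective MPI-IO calls can read or write a whole field in one operation. A minimal, self-contained sketch of building and committing such a file type with MPI_TYPE_CREATE_SUBARRAY; the grid size, the trivial domain decomposition and all names are invented for this example:

    ! Sketch only: a subarray file type describing one PE's patch of a global (0:nx,0:ny) field.
    PROGRAM subarray_filetype_demo
       USE MPI
       IMPLICIT NONE

       INTEGER, PARAMETER :: nx = 63, ny = 63       !< global upper indices
       INTEGER :: ierr, myid, numprocs, npex, npey, myidx, myidy
       INTEGER :: nnx, nny, nxl, nys, ft_2d
       INTEGER, DIMENSION(2) :: pdims, dims2, lize2, start2

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

    !  Simple 2d domain decomposition (remainders are ignored in this sketch)
       pdims = 0
       CALL MPI_DIMS_CREATE( numprocs, 2, pdims, ierr )
       npex  = pdims(1)
       npey  = pdims(2)
       myidx = MOD( myid, npex )
       myidy = myid / npex
       nnx   = ( nx + 1 ) / npex
       nny   = ( ny + 1 ) / npey
       nxl   = myidx * nnx
       nys   = myidy * nny

    !  Global array size, local patch size and local start position (Fortran order, 0-based starts)
       dims2  = [ nx + 1, ny + 1 ]
       lize2  = [ nnx, nny ]
       start2 = [ nxl, nys ]

       CALL MPI_TYPE_CREATE_SUBARRAY( 2, dims2, lize2, start2, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_2d, ierr )
       CALL MPI_TYPE_COMMIT( ft_2d, ierr )
    !  ft_2d would now be passed to MPI_FILE_SET_VIEW before a collective read or write
       CALL MPI_TYPE_FREE( ft_2d, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM subarray_filetype_demo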