!> @file restart_data_mpi_io_mod.f90
!--------------------------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the terms of the GNU General
! Public License as published by the Free Software Foundation, either version 3 of the License, or
! (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
! implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
! Public License for more details.
!
! You should have received a copy of the GNU General Public License along with PALM. If not, see
! <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2020 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: restart_data_mpi_io_mod.f90 4539 2020-05-18 14:05:17Z sebschub $
! checks added, if index limits in header are exceeded
! bugfix in rrd_mpi_io_int_2d
!
! 4536 2020-05-17 17:24:13Z raasch
! messages and debug output converted to PALM routines
!
! 4534 2020-05-14 18:35:22Z raasch
! I/O on reduced number of cores added (using shared memory MPI)
!
! 4500 2020-04-17 10:12:45Z suehring
! Fix too long lines
!
! 4498 2020-04-15 14:26:31Z raasch
! bugfix for creation of filetypes, argument removed from rd_mpi_io_open
!
! 4497 2020-04-15 10:20:51Z raasch
! last bugfix deactivated because of compile problems
!
! 4496 2020-04-15 08:37:26Z raasch
! problem with posix read arguments for surface data fixed
!
! 4495 2020-04-13 20:11:20Z raasch
! Initial version (K. Ketelsen), adjusted to PALM formatting standards (s. Raasch)
!
!
!
! Description:
! ------------
!> Routines for restart data handling using MPI-IO.
!--------------------------------------------------------------------------------------------------!
MODULE restart_data_mpi_io_mod

#if defined( __parallel )
#if defined( __mpifh )
    INCLUDE "mpif.h"
#else
    USE MPI
#endif
#else
!
!-- Serial fallback: plain POSIX file I/O replaces MPI-IO.
    USE posix_interface,                                                                           &
        ONLY:  posix_close, posix_lseek, posix_open, posix_read, posix_write
#endif

    USE, INTRINSIC ::  ISO_C_BINDING

    USE control_parameters,                                                                        &
        ONLY:  debug_output, debug_string, include_total_domain_boundaries, message_string,        &
               restart_data_format_input, restart_data_format_output, restart_file_size

    USE exchange_horiz_mod,                                                                        &
        ONLY:  exchange_horiz, exchange_horiz_2d

    USE indices,                                                                                   &
        ONLY:  nbgp, nnx, nny, nx, nxl, nxlg, nxr, nxrg, ny, nyn, nyng, nys, nysg, nz, nzb, nzt

    USE kinds

    USE pegrid,                                                                                    &
        ONLY:  comm1dx, comm1dy, comm2d, myid, myidx, myidy, npex, npey, numprocs, pdims

    USE shared_memory_io_mod,                                                                      &
        ONLY:  local_boundaries, sm_class

    IMPLICIT NONE

    CHARACTER(LEN=128) ::  io_file_name  !> internal variable to communicate filename between
                                         !> different subroutines

#if defined( __parallel )
    INTEGER(iwp)            ::  ierr                              !< error status of MPI-calls
    INTEGER(iwp), PARAMETER ::  rd_offset_kind = MPI_OFFSET_KIND  !< Adress or Offset kind
    INTEGER(iwp), PARAMETER ::  rd_status_size = MPI_STATUS_SIZE  !<
#else
    INTEGER(iwp), PARAMETER ::  rd_offset_kind = C_SIZE_T         !<
    INTEGER(iwp), PARAMETER ::  rd_status_size = 1                !< Not required in sequential mode
#endif

    INTEGER(iwp) ::  debug_level = 1  !< TODO: replace with standard debug output steering

    INTEGER(iwp) ::  comm_io  !< Communicator for MPI-IO
    INTEGER(iwp) ::  fh       !< MPI-IO file handle
#if defined( __parallel )
    INTEGER(iwp) ::  fhs = -1 !< MPI-IO file handle to open file with comm2d always
#endif
    INTEGER(iwp) ::  ft_surf = -1  !< MPI filetype surface data
#if defined( __parallel )
    INTEGER(iwp) ::  ft_2di_nb  !< MPI filetype 2D array INTEGER no outer boundary
    INTEGER(iwp) ::  ft_2d      !< MPI filetype 2D array REAL with outer boundaries
    INTEGER(iwp) ::  ft_3d      !< MPI filetype 3D array REAL with outer boundaries
    INTEGER(iwp) ::  ft_3dsoil  !< MPI filetype for 3d-soil array
#endif
    INTEGER(iwp) ::  glo_start  !< global start index on this PE
#if defined( __parallel )
    INTEGER(iwp) ::  local_start  !<
#endif
    INTEGER(iwp) ::  nr_iope  !<
    INTEGER(iwp) ::  nr_val   !< local number of values in x and y direction
#if defined( __parallel )
!
!-- MPI window handles for shared-memory I/O buffers.
    INTEGER(iwp) ::  win_2di        !<
    INTEGER(iwp) ::  win_2dr        !<
    INTEGER(iwp) ::  win_3dr        !<
    INTEGER(iwp) ::  win_3ds        !<
    INTEGER(iwp) ::  win_surf = -1  !<
#endif
    INTEGER(iwp) ::  total_number_of_surface_values  !< total number of values for one variable

    INTEGER(KIND=rd_offset_kind) ::  array_position   !< file offset of the array currently processed
    INTEGER(KIND=rd_offset_kind) ::  header_position  !< current read/write position within the file header

    INTEGER(iwp), DIMENSION(:,:), POINTER, CONTIGUOUS ::  array_2di  !<

    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_end_index     !<
    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_start_index   !<
    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE ::  m_global_start  !<

    LOGICAL ::  all_pes_write                 !< all PEs have data to write
    LOGICAL ::  filetypes_created             !<
    LOGICAL ::  io_on_limited_cores_per_node  !< switch to shared memory MPI-IO
    LOGICAL ::  rd_flag                       !< file is opened for read
    LOGICAL ::  wr_flag                       !< file is opened for write

#if defined( __parallel )
    REAL(wp), DIMENSION(:), POINTER, CONTIGUOUS ::  array_1d  !<
#endif
    REAL(wp), DIMENSION(:,:), POINTER, CONTIGUOUS   ::  array_2d       !<
    REAL(wp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3d       !<
    REAL(wp), DIMENSION(:,:,:), POINTER, CONTIGUOUS ::  array_3d_soil  !<

!
!-- Handling of outer boundaries
    TYPE(local_boundaries) ::  lb  !<

!
!-- General Header (first 32 byte in restart file)
    TYPE general_header
       INTEGER(iwp) :: nr_int         !< number of INTEGER entries in header
       INTEGER(iwp) :: nr_char        !< number of Text strings entries in header
       INTEGER(iwp) :: nr_real        !< number of REAL entries in header
       INTEGER(iwp) :: nr_arrays      !< number of arrays in restart files
       INTEGER(iwp) :: total_nx       !< total number of points in x-direction
       INTEGER(iwp) :: total_ny       !< total number of points in y-direction
       INTEGER(iwp) :: i_outer_bound  !< if 1, outer boundaries are stored in restart file
       INTEGER(iwp) :: endian         !< little endian (1) or big endian (2) internal format
    END TYPE general_header

    TYPE(general_header), TARGET ::  tgh  !< header instance read from / written to the file

    TYPE(sm_class) ::  sm_io  !< manages I/O on a limited number of cores per node (shared memory MPI)

!
!-- Declaration of varibales for file header section
    INTEGER(KIND=rd_offset_kind)                ::  header_int_index   !< next free INTEGER slot
    INTEGER, PARAMETER                          ::  max_nr_int=256     !< capacity of INTEGER section
    CHARACTER(LEN=32), DIMENSION(max_nr_int)    ::  int_names          !<
    INTEGER(KIND=iwp), DIMENSION(max_nr_int)    ::  int_values         !<

    INTEGER(KIND=rd_offset_kind)                ::  header_char_index  !< next free text slot
    INTEGER, PARAMETER                          ::  max_nr_char=128    !< capacity of text section
    CHARACTER(LEN=128), DIMENSION(max_nr_char)  ::  text_lines         !<

    INTEGER(KIND=rd_offset_kind)                ::  header_real_index  !< next free REAL slot
    INTEGER, PARAMETER                          ::  max_nr_real=256    !< capacity of REAL section
    CHARACTER(LEN=32), DIMENSION(max_nr_real)   ::  real_names         !<
    REAL(KIND=wp), DIMENSION(max_nr_real)       ::  real_values        !<

    INTEGER(KIND=rd_offset_kind)                ::  header_array_index !< next free array slot
    INTEGER, PARAMETER                          ::  max_nr_arrays=600  !< capacity of array directory
    CHARACTER(LEN=32), DIMENSION(max_nr_arrays) ::  array_names        !<
    INTEGER(KIND=rd_offset_kind), DIMENSION(max_nr_arrays) ::  array_offset  !< file offset per array

    SAVE

    PRIVATE

    PUBLIC  restart_file_size, total_number_of_surface_values

!
!-- PALM interfaces
    INTERFACE rd_mpi_io_check_array
       MODULE PROCEDURE rd_mpi_io_check_array
    END INTERFACE rd_mpi_io_check_array

    INTERFACE rd_mpi_io_close
       MODULE PROCEDURE rd_mpi_io_close
    END INTERFACE rd_mpi_io_close

    INTERFACE rd_mpi_io_open
       MODULE PROCEDURE rd_mpi_io_open
    END INTERFACE rd_mpi_io_open

    INTERFACE rrd_mpi_io
       MODULE PROCEDURE rrd_mpi_io_char
       MODULE PROCEDURE rrd_mpi_io_int
       MODULE PROCEDURE rrd_mpi_io_int_2d
       MODULE PROCEDURE rrd_mpi_io_logical
       MODULE PROCEDURE rrd_mpi_io_real
       MODULE PROCEDURE rrd_mpi_io_real_2d
       MODULE PROCEDURE rrd_mpi_io_real_3d
       MODULE PROCEDURE rrd_mpi_io_real_3d_soil
    END INTERFACE rrd_mpi_io

    INTERFACE rrd_mpi_io_global_array
       MODULE PROCEDURE rrd_mpi_io_global_array_int_1d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_1d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_2d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_3d
       MODULE PROCEDURE rrd_mpi_io_global_array_real_4d
    END INTERFACE rrd_mpi_io_global_array

    INTERFACE rrd_mpi_io_surface
       MODULE PROCEDURE rrd_mpi_io_surface
       MODULE PROCEDURE rrd_mpi_io_surface_2d
    END INTERFACE rrd_mpi_io_surface

    INTERFACE rd_mpi_io_surface_filetypes
       MODULE PROCEDURE rd_mpi_io_surface_filetypes
    END INTERFACE rd_mpi_io_surface_filetypes

    INTERFACE wrd_mpi_io
       MODULE PROCEDURE wrd_mpi_io_char
       MODULE PROCEDURE wrd_mpi_io_int
       MODULE PROCEDURE wrd_mpi_io_int_2d
       MODULE PROCEDURE wrd_mpi_io_logical
       MODULE PROCEDURE wrd_mpi_io_real
       MODULE PROCEDURE wrd_mpi_io_real_2d
       MODULE PROCEDURE wrd_mpi_io_real_3d
       MODULE PROCEDURE wrd_mpi_io_real_3d_soil
    END INTERFACE wrd_mpi_io

    INTERFACE wrd_mpi_io_global_array
       MODULE PROCEDURE wrd_mpi_io_global_array_int_1d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_1d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_2d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_3d
       MODULE PROCEDURE wrd_mpi_io_global_array_real_4d
    END INTERFACE wrd_mpi_io_global_array

    INTERFACE wrd_mpi_io_surface
       MODULE PROCEDURE wrd_mpi_io_surface
       MODULE PROCEDURE wrd_mpi_io_surface_2d
    END INTERFACE wrd_mpi_io_surface

    PUBLIC &
rd_mpi_io_check_array, rd_mpi_io_close, rd_mpi_io_open, rrd_mpi_io,                                &
           rrd_mpi_io_global_array, rrd_mpi_io_surface, rd_mpi_io_surface_filetypes, wrd_mpi_io,   &
           wrd_mpi_io_global_array, wrd_mpi_io_surface

 CONTAINS

!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Open restart file for read or write with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_open( action, file_name, open_for_global_io_only )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  action     !< 'READ'/'read' or 'WRITE'/'write'
    CHARACTER(LEN=*), INTENT(IN) ::  file_name  !< name of the restart file

    INTEGER(iwp)                 ::  i          !< loop index
    INTEGER(iwp)                 ::  gh_size    !< size of the general header in byte

    INTEGER(KIND=rd_offset_kind) ::  offset     !< file offset for the default view

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of read operations
#endif

    LOGICAL, INTENT(IN), OPTIONAL ::  open_for_global_io_only  !< if set, shared-memory I/O is not used
    LOGICAL                       ::  set_filetype             !< create filetypes in the read branch

#if ! defined( __parallel )
    TYPE(C_PTR)                   ::  buf_ptr  !< C pointer to the header for posix_read
#endif

    offset = 0
    io_on_limited_cores_per_node = .FALSE.

    rd_flag = ( TRIM( action ) == 'READ'  .OR. TRIM( action ) == 'read'  )
    wr_flag = ( TRIM( action ) == 'WRITE' .OR. TRIM( action ) == 'write' )

    IF ( .NOT. ( rd_flag .OR. wr_flag ) )  THEN
       message_string = 'illegal action "' // TRIM( action ) // '" for opening restart files'
       CALL message( 'restart_data_mpi_io_mod', 'PA0720', 1, 2, 0, 6, 0 )
    ENDIF

!
!-- Store name of I/O file to communicate it internally within this module.
    io_file_name = file_name

!
!-- Setup for IO on a limited number of threads per node (using shared memory MPI)
    IF ( rd_flag )  THEN
       set_filetype = .TRUE.
       IF ( TRIM( restart_data_format_input ) == 'mpi_shared_memory' )  THEN
          io_on_limited_cores_per_node = .TRUE.
       ENDIF
    ENDIF

    IF ( TRIM( restart_data_format_output ) == 'mpi_shared_memory' .AND.  wr_flag )  THEN
       io_on_limited_cores_per_node = .TRUE.
    ENDIF

!
!-- Shared memory MPI is not used for reading of global data
    IF ( PRESENT( open_for_global_io_only )  .AND.  rd_flag )  THEN
       IF ( open_for_global_io_only )  THEN
          io_on_limited_cores_per_node = .FALSE.
          set_filetype                 = .FALSE.
       ENDIF
    ENDIF

    CALL sm_io%sm_init_comm( io_on_limited_cores_per_node )

!
!-- Set communicator to be used. If all cores are doing I/O, comm2d is used as usual.
    IF( sm_io%is_sm_active() )  THEN
       comm_io = sm_io%comm_io
    ELSE
       comm_io = comm2d
    ENDIF

!
!-- Create subarrays and file types
    filetypes_created = .FALSE.

!
!-- In case of read it is not known yet if data include total domain. Filetypes will be created
!-- further below.
    IF ( wr_flag )  THEN
       CALL rd_mpi_io_create_filetypes
       filetypes_created = .TRUE.
    ENDIF

!
!-- Open file for MPI-IO
#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       IF ( rd_flag )  THEN
          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //      &
                                       '" for read with MPI-IO'
             CALL debug_message( debug_string, 'start' )
          ENDIF
          CALL MPI_FILE_OPEN( comm_io, TRIM( io_file_name ), MPI_MODE_RDONLY, MPI_INFO_NULL, fh,   &
                              ierr )
          IF ( ierr /= 0 )  THEN
             message_string = 'error opening restart file "' // TRIM( io_file_name ) //            &
                              '" for reading with MPI-IO'
             CALL message( 'rrd_mpi_io_open', 'PA0727', 3, 2, 0, 6, 0 )
          ENDIF
          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //      &
                                       '" for read with MPI-IO'
             CALL debug_message( debug_string, 'end' )
          ENDIF
       ELSEIF ( wr_flag )  THEN
          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //      &
                                       '" for write with MPI-IO'
             CALL debug_message( debug_string, 'start' )
          ENDIF
          CALL MPI_FILE_OPEN( comm_io, TRIM( io_file_name ), MPI_MODE_CREATE+MPI_MODE_WRONLY,      &
                              MPI_INFO_NULL, fh, ierr )
          IF ( ierr /= 0 )  THEN
             message_string = 'error opening restart file "' // TRIM( io_file_name ) //            &
                              '" for writing with MPI-IO'
             CALL message( 'rrd_mpi_io_open', 'PA0728', 3, 2, 0, 6, 0 )
          ENDIF
          IF ( debug_output )  THEN
             WRITE( debug_string, * )  'open joint restart file "' // TRIM( io_file_name ) //      &
                                       '" for write with MPI-IO'
             CALL debug_message( debug_string, 'end' )
          ENDIF
       ENDIF
    ENDIF
#else
    IF ( rd_flag )  THEN
       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //               &
                                    '" for read in serial mode (posix)'
          CALL debug_message( debug_string, 'start' )
       ENDIF
       fh = posix_open( TRIM( io_file_name ), .TRUE. )
       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //               &
                                    '" for read in serial mode (posix)'
          CALL debug_message( debug_string, 'end' )
       ENDIF
    ELSEIF ( wr_flag )  THEN
       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //               &
                                    '" for write in serial mode (posix)'
          CALL debug_message( debug_string, 'start' )
       ENDIF
       fh = posix_open( TRIM( io_file_name ), .FALSE. )
       IF ( debug_output )  THEN
          WRITE( debug_string, * )  'open restart file "' // TRIM( io_file_name ) //               &
                                    '" for write in serial mode (posix)'
          CALL debug_message( debug_string, 'end' )
       ENDIF
    ENDIF

    IF ( fh < 0 )  THEN
       message_string = 'error opening restart file for posix I/O'
       CALL message( 'restart_data_mpi_io_mod', 'PA0721', 1, 2, 0, 6, 0 )
    ENDIF
#endif

    array_position  = 65536  !> Start offset for writing 2-D and 3.D arrays at 64 k
    header_position = 0      !> Start offset of header

    header_int_index   = 1
    header_char_index  = 1
    header_real_index  = 1
    header_array_index = 1

    int_names    = ' '
    int_values   = 0
    text_lines   = ' '
    real_names   = ' '
    real_values  = 0.0
    array_names  = ' '
    array_offset = 0

    int_names(1)  = 'nx'
    int_values(1) = nx
    int_names(2)  = 'ny'
    int_values(2) = ny
    int_names(3)  = 'nz'
    int_values(3) = nz
    header_int_index = header_int_index+3

    DO  i = 1, max_nr_arrays
       array_offset(i) = 0
       array_names(i)  = ' '
    ENDDO

    gh_size = STORAGE_SIZE( tgh ) / 8

    IF ( rd_flag )  THEN
       IF ( sm_io%iam_io_pe )  THEN
!
!--       File is open for read.
#if defined( __parallel )
!--       Set the default view
          CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
!
!--       Read the file header size
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, tgh, gh_size, MPI_BYTE, status, ierr )
#else
          CALL posix_lseek( fh, header_position )
          buf_ptr = C_LOC( tgh )
          CALL posix_read( fh, buf_ptr, gh_size )
#endif
       ENDIF

#if defined( __parallel )
!
!--    Make the header available on the non-I/O PEs of each shared-memory group.
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_BCAST( tgh, gh_size, MPI_BYTE, 0, sm_io%comm_shared, ierr )
       ENDIF
#endif
       header_position = header_position + gh_size

       include_total_domain_boundaries = ( tgh%i_outer_bound == 1 )

!
!--    File types depend on if boundaries of the total domain is included in data. This has been
!--    checked with the previous statement.
       IF ( set_filetype )  THEN
          CALL rd_mpi_io_create_filetypes
          filetypes_created = .TRUE.
       ENDIF

       IF ( sm_io%iam_io_pe )  THEN
#if defined( __parallel )
!
!--       Read INTEGER values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, int_names, SIZE( int_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( int_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
!
!--       NOTE(review): MPI_INT is the C datatype; the Fortran counterpart would be MPI_INTEGER.
!--       Most MPI libraries map both to 4 byte, but this should be confirmed.
          CALL MPI_FILE_READ (fh, int_values, SIZE( int_values ), MPI_INT, status, ierr )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, text_lines, SIZE( text_lines ) * 128, MPI_CHAR, status, ierr )
          header_position = header_position+size(text_lines) * 128
!
!--       REAL values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, real_names, SIZE( real_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( real_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, real_values, SIZE( real_values ), MPI_REAL, status, ierr )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-array headers
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, array_names, SIZE( array_names ) * 32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( array_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ( fh, array_offset, SIZE( array_offset ) * MPI_OFFSET_KIND, MPI_BYTE,  &
                              status,ierr )   ! there is no I*8 datatype in Fortran
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#else
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, int_names )
          header_position = header_position + SIZE( int_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, int_values, SIZE( int_values ) )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, text_lines )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, real_names )
          header_position = header_position + SIZE( real_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, real_values, SIZE( real_values ) )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-array headers
          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, array_names )
          header_position = header_position + SIZE( array_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_read( fh, array_offset, SIZE( array_offset ) )  ! there is no I*8 datatype in Fortran
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#endif
          IF ( debug_output )  CALL rd_mpi_io_print_header

       ENDIF

#if defined( __parallel )
!
!--    Broadcast header to all remaining cores that are not involved in I/O
       IF ( sm_io%is_sm_active() )  THEN
!
!--        Not sure, that it is possible to broadcast CHARACTER array in one MPI_Bcast call
           DO  i = 1, SIZE( int_names )
              CALL MPI_BCAST( int_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
           ENDDO
           CALL MPI_BCAST( int_values, SIZE( int_values ), MPI_INTEGER, 0, sm_io%comm_shared, ierr )

           DO  i = 1, SIZE( text_lines )
              CALL MPI_BCAST( text_lines(i), 128, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
           ENDDO

           DO  i = 1, SIZE( real_names )
              CALL MPI_BCAST( real_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
           ENDDO
           CALL MPI_BCAST( real_values, SIZE( real_values ), MPI_REAL, 0, sm_io%comm_shared, ierr )

           DO  i = 1, SIZE( array_names )
              CALL MPI_BCAST( array_names(i), 32, MPI_CHARACTER, 0, sm_io%comm_shared, ierr )
           ENDDO
           CALL MPI_BCAST( array_offset, SIZE( array_offset )*8, MPI_BYTE, 0, sm_io%comm_shared,   &
                           ierr )  ! there is no I*8 datatype in Fortran (array_offset is I*8!)

           CALL MPI_BCAST( header_position, rd_offset_kind, MPI_BYTE, 0, sm_io%comm_shared, ierr )

       ENDIF
#endif

    ENDIF

 END SUBROUTINE rd_mpi_io_open


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Check, if array exists in restart file
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_check_array( name, found )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< name of the array to look up

    INTEGER(iwp) ::  i      !< loop index over the array directory

    LOGICAL      ::  found  !< true if the array exists; array_position is set as side effect

    DO  i = 1, tgh%nr_arrays
       IF ( TRIM( array_names(i) ) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          RETURN
       ENDIF
    ENDDO

    found = .FALSE.

 END SUBROUTINE rd_mpi_io_check_array


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read INTEGER with MPI-IO
!--------------------------------------------------------------------------------------------------!
SUBROUTINE rrd_mpi_io_int( name, value ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i INTEGER(KIND=iwp), INTENT(OUT) :: value LOGICAL :: found found = .FALSE. value = 0 DO i = 1, tgh%nr_int IF ( TRIM(int_names(i)) == TRIM( name ) ) THEN value = int_values(i) found = .TRUE. EXIT ENDIF ENDDO IF ( .NOT. found ) THEN message_string = 'INTEGER variable "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_int', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_int !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read REAL with MPI-IO !--------------------------------------------------------------------------------------------------! SUBROUTINE rrd_mpi_io_real( name, value ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i LOGICAL :: found REAL(KIND=wp), INTENT(OUT) :: value found = .FALSE. value = 0.0 DO i = 1, tgh%nr_real IF ( TRIM(real_names(i)) == TRIM( name ) ) THEN value = real_values(i) found = .TRUE. EXIT ENDIF ENDDO IF ( .NOT. found ) THEN message_string = 'REAL variable "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_int', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_real !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 2d-real array with MPI-IO !--------------------------------------------------------------------------------------------------! SUBROUTINE rrd_mpi_io_real_2d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif INTEGER(iwp) :: i LOGICAL :: found REAL(wp), INTENT(INOUT), DIMENSION(nysg:nyng,nxlg:nxrg) :: data found = .FALSE. DO i = 1, tgh%nr_arrays IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN array_position = array_offset(i) found = .TRUE. 
EXIT ENDIF ENDDO IF ( found ) THEN #if defined( __parallel ) CALL sm_io%sm_node_barrier() ! has no effect if I/O on limited number of cores is inactive IF ( sm_io%iam_io_pe ) THEN CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_2d, 'native', MPI_INFO_NULL, & ierr ) CALL MPI_FILE_READ_ALL( fh, array_2d, SIZE( array_2d ), MPI_REAL, status, ierr ) ENDIF CALL sm_io%sm_node_barrier() #else CALL posix_lseek( fh, array_position ) CALL posix_read( fh, array_2d, SIZE( array_2d ) ) #endif IF ( include_total_domain_boundaries) THEN DO i = lb%nxl, lb%nxr data(lb%nys-nbgp:lb%nyn-nbgp,i-nbgp) = array_2d(i,lb%nys:lb%nyn) ENDDO IF ( debug_level >= 2) WRITE(9,*) 'r2f_ob ', TRIM(name),' ', SUM( data(nys:nyn,nxl:nxr) ) ELSE DO i = nxl, nxr data(nys:nyn,i) = array_2d(i,nys:nyn) ENDDO IF ( debug_level >= 2) WRITE(9,*) 'r2f ', TRIM( name ),' ', SUM( data(nys:nyn,nxl:nxr) ) ENDIF CALL exchange_horiz_2d( data ) ELSE message_string = '2d-REAL array "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_int', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_real_2d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 2d-INTEGER array with MPI-IO !--------------------------------------------------------------------------------------------------! SUBROUTINE rrd_mpi_io_int_2d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i INTEGER(iwp) :: j #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif INTEGER(KIND=iwp), INTENT(INOUT), DIMENSION(:,:) :: data LOGICAL :: found found = .FALSE. DO i = 1, tgh%nr_arrays IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN array_position = array_offset(i) found = .TRUE. EXIT ENDIF ENDDO IF ( found ) THEN IF ( ( nxr - nxl + 1 + 2*nbgp ) == SIZE( data, 2 ) ) THEN ! !-- Output array with Halos. !-- ATTENTION: INTEGER array with ghost boundaries are not implemented yet. 
This kind of array !-- would be dimensioned in the caller subroutine like this: !-- INTEGER, DIMENSION(nysg:nyng,nxlg:nxrg):: data message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be read from restart ' // & 'file is defined with illegal dimensions in the PALM code' CALL message( 'rrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 ) ELSEIF ( (nxr-nxl+1) == SIZE( data, 2 ) ) THEN ! !-- INTEGER input array without Halos. !-- This kind of array is dimensioned in the caller subroutine !-- INTEGER, DIMENSION(nys:nyn,nxl:nxr) :: data #if defined( __parallel ) CALL sm_io%sm_node_barrier() ! has no effect if I/O on limited number of cores is inactive IF ( sm_io%iam_io_pe ) THEN CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_2di_nb, 'native', & MPI_INFO_NULL, ierr ) CALL MPI_FILE_READ_ALL( fh, array_2di, SIZE( array_2di ), MPI_INTEGER, status, ierr ) ENDIF CALL sm_io%sm_node_barrier() #else CALL posix_lseek( fh, array_position ) CALL posix_read( fh, array_2di, SIZE( array_2di ) ) #endif DO j = nys, nyn DO i = nxl, nxr data(j-nys+1,i-nxl+1) = array_2di(i,j) ENDDO ENDDO ELSE message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be read from restart ' // & 'file is defined with illegal dimensions in the PALM code' CALL message( 'rrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 ) ENDIF ELSE message_string = '2d-INTEGER array "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_int_2d', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_int_2d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 2d-REAL array with MPI-IO !--------------------------------------------------------------------------------------------------! 
 SUBROUTINE rrd_mpi_io_real_3d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< name of the array in the restart-file directory

    INTEGER(iwp) ::  i  !< loop index over the x-direction

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the collective read
#endif

    LOGICAL ::  found  !< true if "name" was found in the array directory

    REAL(wp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) ::  data  !< array to be read

    found = .FALSE.

!
!-- Look up the file offset of the requested array in the header directory.
    DO  i = 1, tgh%nr_arrays
       IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN
#if defined( __parallel )
       CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
       IF( sm_io%iam_io_pe )  THEN
          CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3d, 'native', MPI_INFO_NULL,    &
                                  ierr )
          CALL MPI_FILE_READ_ALL( fh, array_3d, SIZE( array_3d ), MPI_REAL, status, ierr )
       ENDIF
       CALL sm_io%sm_node_barrier()
#else
       CALL posix_lseek( fh, array_position )
       CALL posix_read(fh, array_3d, SIZE( array_3d ) )
#endif

!
!--    Copy from the (z,x,y)-ordered I/O buffer into the (z,y,x)-ordered model array.
       IF ( include_total_domain_boundaries)  THEN
          DO  i = lb%nxl, lb%nxr
             data(:,lb%nys-nbgp:lb%nyn-nbgp,i-nbgp) = array_3d(:,i,lb%nys:lb%nyn)
          ENDDO
       ELSE
          DO  i = nxl, nxr
             data(:,nys:nyn,i) = array_3d(:,i,nys:nyn)
          ENDDO
       ENDIF

       CALL exchange_horiz( data, nbgp )

    ELSE
       message_string = '3d-REAL array "' // TRIM( name ) // '" not found in restart file'
       CALL message( 'rrd_mpi_io_real_3d', 'PA0722', 3, 2, 0, 6, 0 )
    ENDIF

 END SUBROUTINE rrd_mpi_io_real_3d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 3d-REAL soil array with MPI-IO
!> nzb_soil, nzt_soil are located in the module land_surface_model_mod. Since Fortran does not allow
!> cross referencing of module variables, it is required to pass these variables as arguments.
!--------------------------------------------------------------------------------------------------!
SUBROUTINE rrd_mpi_io_real_3d_soil( name, data, nzb_soil, nzt_soil ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i INTEGER, INTENT(IN) :: nzb_soil INTEGER, INTENT(IN) :: nzt_soil #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif LOGICAL :: found REAL(wp), INTENT(INOUT), DIMENSION(nzb_soil:nzt_soil,nysg:nyng,nxlg:nxrg) :: data found = .FALSE. DO i = 1, tgh%nr_arrays IF ( TRIM(array_names(i)) == TRIM( name ) ) THEN array_position = array_offset(i) found = .TRUE. EXIT ENDIF ENDDO IF ( found ) THEN #if defined( __parallel ) CALL rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil ) CALL sm_io%sm_node_barrier() ! has no effect if I/O on limited number of cores is inactive IF ( sm_io%iam_io_pe ) THEN CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3dsoil, 'native', MPI_INFO_NULL,& ierr ) CALL MPI_FILE_READ_ALL( fh, array_3d_soil, SIZE( array_3d_soil ), MPI_REAL, status, ierr ) CALL MPI_TYPE_FREE( ft_3dsoil, ierr ) ENDIF CALL sm_io%sm_node_barrier() #else CALL posix_lseek( fh, array_position ) CALL posix_read( fh, array_3d_soil, SIZE( array_3d_soil ) ) #endif IF ( include_total_domain_boundaries ) THEN DO i = lb%nxl, lb%nxr data(:,lb%nys-nbgp:lb%nyn-nbgp,i-nbgp) = array_3d(:,i,lb%nys:lb%nyn) ENDDO ELSE DO i = nxl, nxr data(:,nys:nyn,i) = array_3d(:,i,nys:nyn) ENDDO ENDIF ELSE message_string = '3d-REAL soil array "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_real_3d_soil', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_real_3d_soil !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read CHARACTER with MPI-IO !--------------------------------------------------------------------------------------------------! 
SUBROUTINE rrd_mpi_io_char( name, text ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name CHARACTER(LEN=*), INTENT(OUT) :: text CHARACTER(LEN=128) :: line INTEGER(iwp) :: i LOGICAL :: found found = .FALSE. text = ' ' DO i = 1, tgh%nr_char line = text_lines(i) IF ( TRIM( line(1:32) ) == TRIM( name ) ) THEN text = line(33:) found = .TRUE. EXIT ENDIF ENDDO IF ( .NOT. found ) THEN message_string = 'CHARACTER variable "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_char', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF END SUBROUTINE rrd_mpi_io_char !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read LOGICAL with MPI-IO !--------------------------------------------------------------------------------------------------! SUBROUTINE rrd_mpi_io_logical( name, value ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: logical_as_integer LOGICAL, INTENT(OUT) :: value CALL rrd_mpi_io_int( name, logical_as_integer ) value = ( logical_as_integer == 1 ) END SUBROUTINE rrd_mpi_io_logical !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Write INTEGER with MPI-IO !--------------------------------------------------------------------------------------------------! 
 SUBROUTINE wrd_mpi_io_int( name, value )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN)  ::  name   !< header key of the variable (first 32 characters used)
    INTEGER(KIND=iwp), INTENT(IN) ::  value  !< value to be stored in the file header

!
!-- Name/value pairs are buffered here and flushed to the file in rd_mpi_io_close
    IF ( header_int_index == max_nr_int )  THEN
       STOP '+++ maximum number of INTEGER entries in restart file header exceeded'
    ENDIF

    int_names(header_int_index)  = name
    int_values(header_int_index) = value
    header_int_index = header_int_index + 1

 END SUBROUTINE wrd_mpi_io_int


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write REAL with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_real( name, value )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name   !< header key of the variable
    REAL(wp), INTENT(IN)         ::  value  !< value to be stored in the file header


    IF ( header_real_index == max_nr_real )  THEN
       STOP '+++ maximum number of REAL entries in restart file header exceeded'
    ENDIF

    real_names(header_real_index)  = name
    real_values(header_real_index) = value
    header_real_index = header_real_index + 1

 END SUBROUTINE wrd_mpi_io_real


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 2d-REAL array with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_real_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp) ::  i  !< loop index along x

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operation
#endif

    REAL(wp), INTENT(IN), DIMENSION(nysg:nyng,nxlg:nxrg) ::  data  !< 2d-array incl. ghost layers


    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

!
!-- Record name and file offset of this array in the header buffers
    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

    IF ( include_total_domain_boundaries )  THEN
!
!--    Prepare output with outer boundaries. The copy transposes to the index order recommended
!--    for MPI-IO (x first, y second); lb holds the index bounds of the output subdomain.
       DO  i = lb%nxl, lb%nxr
          array_2d(i,lb%nys:lb%nyn) = data(lb%nys-nbgp:lb%nyn-nbgp,i-nbgp)
       ENDDO
    ELSE
!
!--    Prepare output without outer boundaries (ghost layers are stripped)
       DO  i = nxl, nxr
          array_2d(i,lb%nys:lb%nyn) = data(nys:nyn,i)
       ENDDO
    ENDIF

#if defined( __parallel )
    CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_2d, 'native', MPI_INFO_NULL, ierr )
       CALL MPI_FILE_WRITE_ALL( fh, array_2d, SIZE( array_2d ), MPI_REAL, status, ierr )
    ENDIF
    CALL sm_io%sm_node_barrier()
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, array_2d, SIZE( array_2d ) )
#endif
!
!-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT.
!-- Maybe a compiler problem.
    array_position = array_position + ( INT( lb%ny, KIND=rd_offset_kind ) + 1 ) *                  &
                                      ( INT( lb%nx, KIND=rd_offset_kind ) + 1 ) * wp

 END SUBROUTINE wrd_mpi_io_real_2d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 2d-INTEGER array with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_int_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp) ::  i  !< loop index along x
    INTEGER(iwp) ::  j  !< loop index along y

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operation
#endif

    INTEGER(KIND=iwp), INTENT(IN), DIMENSION(:,:) ::  data  !< 2d-array without ghost layers


    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

    IF ( ( nxr-nxl + 1 + 2 * nbgp ) == SIZE( data, 2 ) )  THEN
!
!--    Integer arrays with ghost layers are not implemented yet. These kind of arrays would be
!--    dimensioned in the caller subroutine as
!--    INTEGER, DIMENSION(nysg:nyng,nxlg:nxrg) ::  data
       message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be written to restart ' //   &
                        'file is defined with illegal dimensions in the PALM code'
       CALL message( 'wrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 )

    ELSEIF ( ( nxr-nxl+1 ) == SIZE( data, 2 ) )  THEN
!
!--    INTEGER input array without ghost layers.
!--    This kind of array is dimensioned in the caller subroutine as
!--    INTEGER, DIMENSION(nys:nyn,nxl:nxr) ::  data
!--    Transpose into MPI-IO index order (x first); data is assumed-shape, hence the index shift.
       DO  j = nys, nyn
          DO  i = nxl, nxr
             array_2di(i,j) = data(j-nys+1,i-nxl+1)
          ENDDO
       ENDDO

#if defined( __parallel )
       CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
       IF ( sm_io%iam_io_pe )  THEN
          CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_2di_nb, 'native',            &
                                  MPI_INFO_NULL, ierr )
          CALL MPI_FILE_WRITE_ALL( fh, array_2di, SIZE( array_2di ), MPI_INTEGER, status, ierr )
       ENDIF
       CALL sm_io%sm_node_barrier()
#else
       CALL posix_lseek( fh, array_position )
       CALL posix_write( fh, array_2di, SIZE( array_2di ) )
#endif
!
!--    Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte
!--    INT. Maybe a compiler problem.
!--    NOTE(review): the element size is hard-coded as 4 bytes; assumes iwp = 4 -- TODO confirm
       array_position = array_position + INT( (ny+1), KIND=rd_offset_kind ) *                      &
                                         INT( (nx+1), KIND=rd_offset_kind ) * 4

    ELSE

       message_string = '2d-INTEGER array "' // TRIM( name ) // '" to be written to restart ' //   &
                        'file is defined with illegal dimensions in the PALM code'
       CALL message( 'wrd_mpi_io_int_2d', 'PA0723', 3, 2, 0, 6, 0 )

    ENDIF

 END SUBROUTINE wrd_mpi_io_int_2d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 3d-REAL array with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_real_3d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp) ::  i  !< loop index along x

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operation
#endif

    REAL(wp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) ::  data  !< 3d-array incl. ghost layers


    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

!
!-- Record name and file offset of this array in the header buffers
    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

    IF ( include_total_domain_boundaries )  THEN
!
!--    Prepare output of 3d-REAL-array with ghost layers.
!--    In the virtual PE grid, the first dimension is PEs along x, and the second along y.
!--    For MPI-IO it is recommended to have the index order of the array in the same way, i.e.
!--    the first dimension should be along x and the second along y.
!--    For this reason, the original PALM data need to be swapped.
       DO  i = lb%nxl, lb%nxr
          array_3d(:,i,lb%nys:lb%nyn) = data(:,lb%nys-nbgp:lb%nyn-nbgp,i-nbgp)
       ENDDO
    ELSE
!
!--    Prepare output of 3d-REAL-array without ghost layers
       DO  i = nxl, nxr
          array_3d(:,i,lb%nys:lb%nyn) = data(:,nys:nyn,i)
       ENDDO
    ENDIF

#if defined( __parallel )
    CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3d, 'native', MPI_INFO_NULL, ierr )
       CALL MPI_FILE_WRITE_ALL( fh, array_3d, SIZE( array_3d ), MPI_REAL, status, ierr )
    ENDIF
    CALL sm_io%sm_node_barrier()
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, array_3d, SIZE( array_3d ) )
#endif
!
!-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT.
!-- Maybe a compiler problem.
    array_position = array_position + INT( (nz+2), KIND=rd_offset_kind ) *                         &
                                      INT( (lb%ny+1), KIND=rd_offset_kind ) *                      &
                                      INT( (lb%nx+1), KIND=rd_offset_kind ) * wp

 END SUBROUTINE wrd_mpi_io_real_3d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 3d-REAL soil array with MPI-IO.
!> nzb_soil, nzt_soil are located in the module land_surface_model_mod. Since Fortran does not allow
!> cross referencing of module variables, it is required to pass these variables as arguments.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_real_3d_soil( name, data, nzb_soil, nzt_soil )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp) ::  i  !< loop index along x

    INTEGER, INTENT(IN) ::  nzb_soil  !< lower bound of the soil levels
    INTEGER, INTENT(IN) ::  nzt_soil  !< upper bound of the soil levels

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operation
#endif

    REAL(wp), INTENT(IN), DIMENSION(nzb_soil:nzt_soil,nysg:nyng,nxlg:nxrg) ::  data  !< soil array


    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

#if defined( __parallel )
!
!-- The soil filetype depends on the (caller-supplied) vertical extent and therefore has to be
!-- (re-)created for every soil array
    CALL rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil )
#endif

    IF ( include_total_domain_boundaries )  THEN
!
!--    Prepare output of 3d-REAL-array with ghost layers.
!--    In the virtual PE grid, the first dimension is PEs along x, and the second along y.
!--    For MPI-IO it is recommended to have the index order of the array in the same way, i.e.
!--    the first dimension should be along x and the second along y.
!--    For this reason, the original PALM data need to be swapped.
       DO  i = lb%nxl, lb%nxr
          array_3d_soil(:,i,lb%nys:lb%nyn) = data(:,lb%nys-nbgp:lb%nyn-nbgp,i-nbgp)
       ENDDO
    ELSE
!
!--    Prepare output of 3d-REAL-array without ghost layers
       DO  i = nxl, nxr
          array_3d_soil(:,i,lb%nys:lb%nyn) = data(:,nys:nyn,i)
       ENDDO
    ENDIF

#if defined( __parallel )
    CALL sm_io%sm_node_barrier()  ! has no effect if I/O on limited number of cores is inactive
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_3dsoil, 'native', MPI_INFO_NULL,   &
                               ierr )
       CALL MPI_FILE_WRITE_ALL( fh, array_3d_soil, SIZE( array_3d_soil ), MPI_REAL, status, ierr )
    ENDIF
    CALL sm_io%sm_node_barrier()
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, array_3d_soil, SIZE( array_3d_soil ) )
#endif
!
!-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT.
!-- Maybe a compiler problem.
    array_position = array_position + INT( (nzt_soil-nzb_soil+1), KIND=rd_offset_kind ) *          &
                                      INT( (lb%ny+1), KIND=rd_offset_kind ) *                      &
                                      INT( (lb%nx+1), KIND=rd_offset_kind ) * wp

 END SUBROUTINE wrd_mpi_io_real_3d_soil


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write CHARACTER with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_char( name, text )

    IMPLICIT NONE

    CHARACTER(LEN=128)           ::  lo_line  !< header line (32 character key + value)
    CHARACTER(LEN=*), INTENT(IN) ::  name     !< header key of the variable
    CHARACTER(LEN=*), INTENT(IN) ::  text     !< value to be stored in the file header


    IF ( header_char_index == max_nr_char )  THEN
       STOP '+++ maximum number of CHARACTER entries in restart file header exceeded'
    ENDIF

!
!-- Key occupies the first 32 characters of the line, the value the remainder
    lo_line      = name
    lo_line(33:) = text
    text_lines(header_char_index) = lo_line
    header_char_index = header_char_index + 1

 END SUBROUTINE wrd_mpi_io_char


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write LOGICAL with MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_logical( name, value )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the variable

    INTEGER(iwp) ::  logical_as_integer  !< file representation (1 = .TRUE., 0 = .FALSE.)

    LOGICAL, INTENT(IN) ::  value  !< logical value to be written

!
!-- LOGICALs are stored as INTEGER in the restart file for a portable representation
    IF ( value )  THEN
       logical_as_integer = 1
    ELSE
       logical_as_integer = 0
    ENDIF

    CALL wrd_mpi_io_int( name, logical_as_integer )

 END SUBROUTINE wrd_mpi_io_logical


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 1d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_global_array_real_1d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp)                 ::  i       !< loop index over array headers
    INTEGER(KIND=rd_offset_kind) ::  offset  !< displacement for the default file view

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the read operation
#endif

    LOGICAL ::  found  !< true, if the array has been found in the header

    REAL(KIND=wp), INTENT(INOUT), DIMENSION(:) ::  data  !< array to be read


    offset = 0
    found  = .FALSE.

!
!-- Look up the file position of the array in the header
    DO  i = 1, tgh%nr_arrays
       IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN
!
!--    Set default view
#if defined( __parallel )
       IF ( sm_io%iam_io_pe )  THEN
          CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
          CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ_ALL( fh, data, SIZE( data ), MPI_REAL, status, ierr )
       ENDIF
!
!--    If I/O is limited to a subset of cores, distribute the data within the shared-memory group
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_BCAST( data, SIZE(data), MPI_REAL, 0, sm_io%comm_shared, ierr )
       ENDIF
#else
       CALL posix_lseek( fh, array_position )
       CALL posix_read( fh, data, SIZE( data ) )
#endif

    ELSE

       message_string = '1d/2d/3d/4d-REAL global array "' // TRIM( name ) // '" not found in ' //  &
                        'restart file'
       CALL message( 'rrd_mpi_io_global_array_real_1d', 'PA0722', 3, 2, 0, 6, 0 )

    ENDIF

 END SUBROUTINE rrd_mpi_io_global_array_real_1d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 2d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_global_array_real_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER, DIMENSION(1) ::  flat_shape  !< shape of the flattened 1d-view

    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:), TARGET ::  data       !< array to be read
    REAL(KIND=wp), POINTER, DIMENSION(:)              ::  flat_view  !< 1d-view of data

    TYPE(C_PTR) ::  data_addr  !< C address of data

!
!-- Create a flat 1d-view of the 2d-argument via its C address and delegate the read to the
!-- 1d-routine. NOTE(review): data is declared INTENT(IN) but is filled through the pointer view.
    data_addr  = C_LOC( data )
    flat_shape = [ SIZE( data ) ]
    CALL C_F_POINTER( data_addr, flat_view, flat_shape )

    CALL rrd_mpi_io_global_array_real_1d( name, flat_view )

 END SUBROUTINE rrd_mpi_io_global_array_real_2d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 3d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_global_array_real_3d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER, DIMENSION(1) ::  flat_shape  !< shape of the flattened 1d-view

    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:), TARGET ::  data       !< array to be read
    REAL(KIND=wp), POINTER, DIMENSION(:)                ::  flat_view  !< 1d-view of data

    TYPE(C_PTR) ::  data_addr  !< C address of data

!
!-- Create a flat 1d-view of the 3d-argument via its C address and delegate the read to the
!-- 1d-routine. NOTE(review): data is declared INTENT(IN) but is filled through the pointer view.
    data_addr  = C_LOC( data )
    flat_shape = [ SIZE( data ) ]
    CALL C_F_POINTER( data_addr, flat_view, flat_shape )

    CALL rrd_mpi_io_global_array_real_1d( name, flat_view )

 END SUBROUTINE rrd_mpi_io_global_array_real_3d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 4d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_global_array_real_4d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER, DIMENSION(1) ::  bufshape  !< shape of the flattened 1d-view

    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:,:), TARGET ::  data  !< array to be read
    REAL(KIND=wp), POINTER, DIMENSION(:)                  ::  buf   !< 1d-view of data

    TYPE(C_PTR) ::  c_data  !< C address of data

!
!-- Map the 4d-argument onto a 1d-view and delegate the read to the 1d-routine.
!-- NOTE(review): data is declared INTENT(IN) but is filled through the pointer view.
    c_data = C_LOC( data )
    bufshape(1) = SIZE( data)
    CALL C_F_POINTER( c_data, buf, bufshape )

    CALL rrd_mpi_io_global_array_real_1d( name, buf )

 END SUBROUTINE rrd_mpi_io_global_array_real_4d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Read 1d-INTEGER global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rrd_mpi_io_global_array_int_1d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(iwp)                 ::  i       !< loop index over array headers
    INTEGER(KIND=rd_offset_kind) ::  offset  !< displacement for the default file view

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the read operation
#endif

    INTEGER(KIND=iwp), INTENT(INOUT), DIMENSION(:) ::  data  !< array to be read

    LOGICAL ::  found  !< true, if the array has been found in the header


    offset = 0
    found  = .FALSE.

!
!-- Look up the file position of the array in the header
    DO  i = 1, tgh%nr_arrays
       IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
          array_position = array_offset(i)
          found = .TRUE.
          EXIT
       ENDIF
    ENDDO

    IF ( found )  THEN
!
!--    Set default view
#if defined( __parallel )
       IF ( sm_io%iam_io_pe )  THEN
          CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
          CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_READ_ALL( fh, data, SIZE( data), MPI_INTEGER, status, ierr )
       ENDIF
!
!--    If I/O is limited to a subset of cores, distribute the data within the shared-memory group
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_BCAST( data, SIZE(data), MPI_INTEGER, 0, sm_io%comm_shared, ierr )
       ENDIF
#else
       CALL posix_lseek( fh, array_position )
       CALL posix_read( fh, data, SIZE( data ) )
#endif

    ELSE

       message_string = '1d-INTEGER global array "' // TRIM( name ) // '" not found in ' //        &
                        'restart file'
       CALL message( 'rrd_mpi_io_global_array_int_1d', 'PA0722', 3, 2, 0, 6, 0 )

    ENDIF

 END SUBROUTINE rrd_mpi_io_global_array_int_1d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 1d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_real_1d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER(KIND=rd_offset_kind) ::  offset  !< displacement for the default file view

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operation
#endif

    REAL(KIND=wp), INTENT(IN), DIMENSION(:) ::  data  !< array to be written


    offset = 0

    IF ( header_array_index == max_nr_arrays )  THEN
       STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
    ENDIF

!
!-- Record name and file offset of this array in the header buffers
    array_names(header_array_index)  = name
    array_offset(header_array_index) = array_position
    header_array_index = header_array_index + 1

!
!-- Set default view
#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
    ENDIF
!
!-- Only PE 0 writes replicated data
    IF ( myid == 0 )  THEN
       CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr )
       CALL MPI_FILE_WRITE( fh, data, SIZE( data), MPI_REAL, status, ierr )
    ENDIF
#else
    CALL posix_lseek( fh, array_position )
    CALL posix_write( fh, data, SIZE( data ) )
#endif

    array_position = array_position + SIZE( data ) * wp

 END SUBROUTINE wrd_mpi_io_global_array_real_1d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 2d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE wrd_mpi_io_global_array_real_2d( name, data )

    IMPLICIT NONE

    CHARACTER(LEN=*), INTENT(IN) ::  name  !< header key of the array

    INTEGER, DIMENSION(1) ::  bufshape  !< shape of the flattened 1d-view

    REAL(KIND=wp), POINTER, DIMENSION(:)              ::  buf   !< 1d-view of data
    REAL(KIND=wp), INTENT(IN), DIMENSION(:,:), TARGET ::  data  !< array to be written

    TYPE(C_PTR) ::  c_data  !< C address of data

!
!-- Map the 2d-argument onto a 1d-view and delegate the write to the 1d-routine
    c_data = C_LOC( data )
    bufshape(1) = SIZE( data)
    CALL C_F_POINTER( c_data, buf, bufshape )

    CALL wrd_mpi_io_global_array_real_1d( name, buf )

 END SUBROUTINE wrd_mpi_io_global_array_real_2d


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Write 3d-REAL global array with MPI-IO.
!> Array contains identical data on all PEs.
!--------------------------------------------------------------------------------------------------!
SUBROUTINE wrd_mpi_io_global_array_real_3d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER, DIMENSION(1) :: bufshape REAL(KIND=wp), POINTER, DIMENSION(:) :: buf REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:), TARGET :: data TYPE(C_PTR) :: c_data c_data = C_LOC( data ) bufshape(1) = SIZE( data ) CALL C_F_POINTER( c_data, buf, bufshape ) CALL wrd_mpi_io_global_array_real_1d( name, buf ) END SUBROUTINE wrd_mpi_io_global_array_real_3d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Write 4d-REAL global array with MPI-IO. !> Array contains identical data on all PEs. !--------------------------------------------------------------------------------------------------! SUBROUTINE wrd_mpi_io_global_array_real_4d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER, DIMENSION(1) :: bufshape REAL(KIND=wp), POINTER, DIMENSION(:) :: buf REAL(KIND=wp), INTENT(IN), DIMENSION(:,:,:,:), TARGET :: data TYPE(C_PTR) :: c_data c_data = C_LOC( data ) bufshape(1) = SIZE( data) CALL C_F_POINTER( c_data, buf, bufshape ) CALL wrd_mpi_io_global_array_real_1d( name, buf ) END SUBROUTINE wrd_mpi_io_global_array_real_4d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Write 1d-INTEGER global array with MPI-IO. !> Array contains identical data on all PEs. !--------------------------------------------------------------------------------------------------! 
SUBROUTINE wrd_mpi_io_global_array_int_1d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(KIND=rd_offset_kind) :: offset INTEGER(KIND=iwp), INTENT(IN), DIMENSION(:) :: data #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif IF ( header_array_index == max_nr_arrays ) THEN STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' ENDIF offset = 0 array_names(header_array_index) = name array_offset(header_array_index) = array_position header_array_index = header_array_index + 1 ! !-- Set default view #if defined( __parallel ) IF ( sm_io%iam_io_pe ) THEN CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) ENDIF ! !-- Only PE 0 writes replicated data IF ( myid == 0 ) THEN ! CALL MPI_FILE_SEEK( fh, array_position, MPI_SEEK_SET, ierr ) CALL MPI_FILE_WRITE( fh, data, SIZE( data), MPI_INTEGER, status, ierr ) ENDIF #else CALL posix_lseek( fh, array_position ) CALL posix_write( fh, data, SIZE( data ) ) #endif array_position = array_position + SIZE( data ) * 4 END SUBROUTINE wrd_mpi_io_global_array_int_1d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 1d-REAL surface data array with MPI-IO. !--------------------------------------------------------------------------------------------------! 
SUBROUTINE rrd_mpi_io_surface( name, data, first_index ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(KIND=rd_offset_kind) :: disp !< displacement of actual indices INTEGER(KIND=rd_offset_kind) :: disp_f !< displacement in file INTEGER(KIND=rd_offset_kind) :: disp_n !< displacement of next column INTEGER(iwp), OPTIONAL :: first_index INTEGER(iwp) :: i INTEGER(iwp) :: i_f INTEGER(iwp) :: j INTEGER(iwp) :: j_f INTEGER(iwp) :: lo_first_index INTEGER(iwp) :: nr_bytes INTEGER(iwp) :: nr_bytes_f INTEGER(iwp) :: nr_words #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif LOGICAL :: found REAL(wp), INTENT(OUT), DIMENSION(:) :: data found = .FALSE. lo_first_index = 1 IF ( MAXVAL( m_global_start ) == -1 ) RETURN ! nothing to do on this PE IF ( PRESENT( first_index ) ) THEN lo_first_index = first_index ENDIF DO i = 1, tgh%nr_arrays IF ( TRIM( array_names(i) ) == TRIM( name ) ) THEN array_position = array_offset(i) + ( lo_first_index - 1 ) * & total_number_of_surface_values * wp found = .TRUE. EXIT ENDIF ENDDO disp = -1 disp_f = -1 disp_n = -1 IF ( found ) THEN DO i = nxl, nxr DO j = nys, nyn IF ( m_global_start(j,i) > 0 ) THEN disp = array_position+(m_global_start(j,i)-1) * wp nr_words = m_end_index(j,i)-m_start_index(j,i)+1 nr_bytes = nr_words * wp ENDIF IF ( disp >= 0 .AND. disp_f == -1 ) THEN ! first Entry disp_f = disp nr_bytes_f = 0 i_f = i j_f = j ENDIF IF ( j == nyn .AND. i == nxr ) THEN ! last Entry disp_n = -1 IF ( nr_bytes > 0 ) THEN nr_bytes_f = nr_bytes_f+nr_bytes ENDIF ELSEIF ( j == nyn ) THEN ! next x IF ( m_global_start(nys,i+1) > 0 .AND. disp > 0 ) THEN disp_n = array_position + ( m_global_start(nys,i+1) - 1 ) * wp ELSE CYCLE ENDIF ELSE IF ( m_global_start(j+1,i) > 0 .AND. disp > 0 ) THEN disp_n = array_position + ( m_global_start(j+1,i) - 1 ) * wp ELSE CYCLE ENDIF ENDIF IF ( disp + nr_bytes == disp_n ) THEN ! contiguous block nr_bytes_f = nr_bytes_f + nr_bytes ELSE ! 
read #if defined( __parallel ) CALL MPI_FILE_SEEK( fhs, disp_f, MPI_SEEK_SET, ierr ) nr_words = nr_bytes_f / wp CALL MPI_FILE_READ( fhs, data(m_start_index(j_f,i_f)), nr_words, MPI_REAL, status, ierr ) #else CALL posix_lseek( fh, disp_f ) CALL posix_read( fh, data(m_start_index(j_f,i_f):), nr_bytes_f ) #endif disp_f = disp nr_bytes_f = nr_bytes i_f = i j_f = j ENDIF ENDDO ENDDO ELSE message_string = 'surface array "' // TRIM( name ) // '" not found in restart file' CALL message( 'rrd_mpi_io_global_array_int_1d', 'PA0722', 3, 2, 0, 6, 0 ) ENDIF ! IF ( lo_first_index == 1 ) THEN ! IF ( debug_level >= 2 .AND. nr_val > 0 ) WRITE(9,*) 'r_surf_1 ', TRIM( name ), ' ', nr_val, SUM( data(1:nr_val) ) ! ELSE ! IF ( debug_level >= 2 .AND. nr_val > 0 ) WRITE(9,*) 'r_surf_next ', TRIM( name ), ' ', & ! lo_first_index,nr_val, SUM( data(1:nr_val) ) ! ENDIF END SUBROUTINE rrd_mpi_io_surface !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 2d-REAL surface data array with MPI-IO. !> These consist of multiple 1d-REAL surface data arrays. !--------------------------------------------------------------------------------------------------! SUBROUTINE rrd_mpi_io_surface_2d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i REAL(wp), INTENT(OUT), DIMENSION(:,:) :: data REAL(wp), DIMENSION(SIZE( data,2)) :: tmp DO i = 1, SIZE( data,1) CALL rrd_mpi_io_surface( name, tmp, first_index = i ) data(i,:) = tmp ENDDO ! !-- Comment from Klaus Ketelsen (September 2018) !-- The intention of the following loop was let the compiler do the copying on return. !-- In my understanding is it standard conform to pass the second dimension to a 1d- !-- array inside a subroutine and the compiler is responsible to generate code for !-- copying. Acually this works fine for INTENT(IN) variables (wrd_mpi_io_surface_2d). !-- For INTENT(OUT) like in this case the code works on pgi compiler. 
But both, the Intel 16 !-- and the Cray compiler show wrong answers using this loop. !-- That is the reason why the above auxiliary array tmp was introduced. ! DO i = 1, SIZE( data,1) ! CALL rrd_mpi_io_surface( name, data(i,:), first_index = i ) ! ENDDO END SUBROUTINE rrd_mpi_io_surface_2d !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Write 1d-REAL surface data array with MPI-IO. !--------------------------------------------------------------------------------------------------! SUBROUTINE wrd_mpi_io_surface( name, data, first_index ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name #if defined( __parallel ) INTEGER(KIND=rd_offset_kind) :: disp #endif INTEGER(iwp), OPTIONAL :: first_index #if defined( __parallel ) INTEGER(iwp) :: i #endif INTEGER(iwp) :: lo_first_index INTEGER(KIND=rd_offset_kind) :: offset #if defined( __parallel ) INTEGER, DIMENSION(rd_status_size) :: status #endif REAL(wp), INTENT(IN), DIMENSION(:), TARGET :: data offset = 0 lo_first_index = 1 IF ( PRESENT(first_index) ) THEN lo_first_index = first_index ENDIF ! !-- In case of 2d-data, name is written only once IF ( lo_first_index == 1 ) THEN IF ( header_array_index == max_nr_arrays ) THEN STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded' ENDIF array_names(header_array_index) = name array_offset(header_array_index) = array_position header_array_index = header_array_index + 1 ENDIF #if defined( __parallel ) IF ( sm_io%is_sm_active() ) THEN DO i = 1, nr_val array_1d(i+local_start) = data(i) ENDDO ELSE ! array_1d => data !kk Did not work in all cases why??? ALLOCATE( array_1d( SIZE( data ) ) ) array_1d = data ENDIF CALL sm_io%sm_node_barrier() ! 
has no effect if I/O on limited number of cores is inactive IF ( sm_io%iam_io_pe ) THEN IF ( all_pes_write ) THEN CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_REAL, ft_surf, 'native', MPI_INFO_NULL, & ierr ) CALL MPI_FILE_WRITE_ALL( fh, array_1d, nr_iope, MPI_REAL, status, ierr ) ELSE CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr ) IF ( nr_val > 0 ) THEN disp = array_position + 8 * ( glo_start - 1 ) CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr ) CALL MPI_FILE_WRITE( fh, array_1d, nr_iope, MPI_REAL, status, ierr ) ENDIF ENDIF ENDIF CALL sm_io%sm_node_barrier() IF( .NOT. sm_io%is_sm_active() ) DEALLOCATE( array_1d ) #else CALL posix_lseek( fh, array_position ) CALL posix_write( fh, data, nr_val ) #endif array_position = array_position + total_number_of_surface_values * wp ! IF ( lo_first_index == 1 ) THEN ! IF ( debug_level >= 2 .AND. nr_val > 0 ) WRITE(9,*) 'w_surf_1 ', TRIM( name ), ' ', nr_val, SUM( data(1:nr_val) ) ! ELSE ! IF ( debug_level >= 2 .AND. nr_val > 0 ) WRITE(9,*) 'w_surf_n ', TRIM( name ), ' ', & ! lo_first_index, nr_val, SUM( data(1:nr_val) ) ! ENDIF END SUBROUTINE wrd_mpi_io_surface !--------------------------------------------------------------------------------------------------! ! Description: ! ------------ !> Read 2d-REAL surface data array with MPI-IO. !> These consist of multiple 1d-REAL surface data arrays. !--------------------------------------------------------------------------------------------------! SUBROUTINE wrd_mpi_io_surface_2d( name, data ) IMPLICIT NONE CHARACTER(LEN=*), INTENT(IN) :: name INTEGER(iwp) :: i REAL(wp), INTENT(IN), DIMENSION(:,:) :: data DO i = 1, SIZE( data,1) CALL wrd_mpi_io_surface( name, data(i,:), first_index = i ) ENDDO END SUBROUTINE wrd_mpi_io_surface_2d !--------------------------------------------------------------------------------------------------! ! Description: ! 
! ------------
!> Close restart file for MPI-IO
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_close

    IMPLICIT NONE

    INTEGER(iwp)                 ::  gh_size  !< size of the general header in bytes
    INTEGER(KIND=rd_offset_kind) ::  offset   !< displacement for the default file view

#if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) ::  status  !< MPI status of the write operations
#endif

#if ! defined( __parallel )
    TYPE(C_PTR) ::  buf_ptr  !< C address of the general header (for posix_write)
#endif


    offset = 0

    IF ( wr_flag  .AND.  sm_io%iam_io_pe )  THEN

!
!--    Complete the general header with the final entry counts and domain size
       tgh%nr_int    = header_int_index - 1
       tgh%nr_char   = header_char_index - 1
       tgh%nr_real   = header_real_index - 1
       tgh%nr_arrays = header_array_index - 1
       tgh%total_nx  = lb%nx + 1
       tgh%total_ny  = lb%ny + 1
       IF ( include_total_domain_boundaries )  THEN   ! not sure, if LOGICAL interpretation is the same for all compilers,
          tgh%i_outer_bound = 1                       ! therefore store as INTEGER in general header
       ELSE
          tgh%i_outer_bound = 0
       ENDIF
!
!--    Check for big/little endian format. This check is currently not used, and could be removed
!--    if we can assume little endian as the default on all machines.
       CALL rd_mpi_io_check_endian( tgh%endian )

!
!--    Set default view
#if defined( __parallel )
       CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
!
!--    Write header file
       gh_size = storage_size(tgh) / 8
       IF ( myid == 0 )  THEN   ! myid = 0 always performs I/O, even if I/O is limited to some cores
#if defined( __parallel )
!
!--       The header layout written here (general header, INTEGER, CHARACTER, REAL, array entries)
!--       must match the read order in rd_mpi_io_open; header_position tracks the byte offset
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, tgh, gh_size, MPI_INT, status, ierr )
          header_position = header_position + gh_size
!
!--       INTEGER values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, int_names, SIZE( int_names )*32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( int_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, int_values, SIZE( int_values ), MPI_INT, status, ierr )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, text_lines, SIZE( text_lines )*128, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( text_lines ) * 128
!
!---      REAL values
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, real_names, SIZE( real_names )*32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( real_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, real_values, SIZE( real_values ), MPI_REAL, status, ierr )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d- distributed array headers, all replicated array headers
          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, array_names, SIZE( array_names )*32, MPI_CHAR, status, ierr )
          header_position = header_position + SIZE( array_names ) * 32

          CALL MPI_FILE_SEEK( fh, header_position, MPI_SEEK_SET, ierr )
          CALL MPI_FILE_WRITE( fh, array_offset, SIZE( array_offset )*MPI_OFFSET_KIND, MPI_BYTE,   &
                               status, ierr )   ! There is no I*8 datatype in Fortran
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#else
          CALL posix_lseek( fh, header_position )
          buf_ptr = C_LOC( tgh )
          CALL posix_write( fh, buf_ptr, gh_size )
          header_position = header_position + gh_size
!
!--       INTEGER values
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, int_names )
          header_position = header_position + SIZE( int_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, int_values, SIZE( int_values ) )
          header_position = header_position + SIZE( int_values ) * iwp
!
!--       Character entries
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, text_lines )
          header_position = header_position + SIZE( text_lines ) * 128
!
!--       REAL values
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, real_names )
          header_position = header_position + SIZE( real_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, real_values, SIZE( real_values ) )
          header_position = header_position + SIZE( real_values ) * wp
!
!--       2d- and 3d-distributed array headers, all replicated array headers
          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, array_names )
          header_position = header_position + SIZE( array_names ) * 32

          CALL posix_lseek( fh, header_position )
          CALL posix_write( fh, array_offset, SIZE( array_offset ) )
          header_position = header_position + SIZE( array_offset ) * rd_offset_kind
#endif
          IF ( debug_output )  CALL rd_mpi_io_print_header

       ENDIF

    ENDIF

!
!-- Free file types
    CALL rd_mpi_io_free_filetypes

!
!-- Close MPI-IO files
#if defined( __parallel )
!
!-- Restart file has been opened with comm2d
    IF ( fhs /= -1 )  THEN
       CALL MPI_FILE_CLOSE( fhs, ierr )
    ENDIF
#endif

    IF ( sm_io%iam_io_pe )  THEN

#if defined( __parallel )
       CALL MPI_FILE_CLOSE( fh, ierr )
#else
       CALL posix_close( fh )
#endif

    ENDIF

!
!-- File size in MByte, reported to the caller via module variable
    restart_file_size = array_position / ( 1024.0_dp * 1024.0_dp )

 END SUBROUTINE rd_mpi_io_close


!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine prepares a filetype and some variables for the I/O of surface data.
!> Whenever a new set of start_index and end_index is used, rd_mpi_io_surface_filetypes has to be
!> called. A main feature of this subroutine is computing the global start indices of the 1d- and
!> 2d- surface arrays.
!> Even if I/O is done by a limited number of cores only, the surface data are read by ALL cores!
!> Reading them by some cores and then distributing the data would result in complicated code
!> which is susceptible for errors and overloads the reading subroutine. Since reading of surface
!> Since reading of surface data is not time critical (data size is comparably small), it will be
!> read by all cores.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_surface_filetypes( start_index, end_index, data_to_write, global_start )

    IMPLICIT NONE

    INTEGER(iwp) ::  i  !< loop index over all PEs

    INTEGER(KIND=rd_offset_kind) ::  offset  !< file offset used for MPI_FILE_SET_VIEW (always 0 here)

    INTEGER(iwp), DIMENSION(1) ::  dims1   !< global size of the 1d surface array (all PEs)
    INTEGER(iwp), DIMENSION(1) ::  lize1   !< local size of the 1d surface array on this I/O PE
    INTEGER(iwp), DIMENSION(1) ::  start1  !< global start index of this PE's part (0-based)

    INTEGER(iwp), DIMENSION(0:numprocs-1) ::  lo_nr_val   !< local number of surface values (only entry myid is set)
    INTEGER(iwp), DIMENSION(0:numprocs-1) ::  all_nr_val  !< number of surface values for all PEs

    INTEGER, INTENT(IN), DIMENSION(nys:nyn,nxl:nxr)  ::  end_index     !< local end indices of the surface arrays
    INTEGER, INTENT(OUT), DIMENSION(nys:nyn,nxl:nxr) ::  global_start  !< computed global start indices
    INTEGER, INTENT(IN), DIMENSION(nys:nyn,nxl:nxr)  ::  start_index   !< local start indices of the surface arrays

    LOGICAL, INTENT(OUT) ::  data_to_write  !< returns, if surface data have to be written


    offset = 0
    lo_nr_val= 0
!
!-- The local number of surface values is given by the maximum local end index
    lo_nr_val(myid) = MAXVAL( end_index )

#if defined( __parallel )
!
!-- Gather the local value counts of all PEs (each PE contributed only its own entry, the
!-- remaining entries are 0, so MPI_SUM yields the full table on every PE)
    CALL MPI_ALLREDUCE( lo_nr_val, all_nr_val, numprocs, MPI_INTEGER, MPI_SUM, comm2d, ierr )

    IF ( ft_surf /= -1  .AND.  sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_FREE( ft_surf, ierr )  ! if set, free last surface filetype
    ENDIF

    IF ( win_surf /= -1 )  THEN
       IF ( sm_io%is_sm_active() )  THEN
          CALL MPI_WIN_FREE( win_surf, ierr )
       ENDIF
       win_surf = -1
    ENDIF

!
!-- During read with active shared memory I/O, ALL cores read the surface data, therefore a
!-- separate file handle (fhs) opened with comm2d is required; otherwise the normal handle is used
    IF ( sm_io%is_sm_active() .AND. rd_flag )  THEN
       IF ( fhs == -1 )  THEN
          CALL MPI_FILE_OPEN( comm2d, TRIM( io_file_name ), MPI_MODE_RDONLY, MPI_INFO_NULL, fhs,   &
                              ierr )
       ENDIF
    ELSE
       fhs = fh
    ENDIF
#else
    all_nr_val(myid) = lo_nr_val(myid)
#endif
    nr_val = lo_nr_val(myid)

!
!-- Compute the total number of surface values and, as a side effect of the scan, the global
!-- start position (1-based) of this PE's block within the 1d surface array
    total_number_of_surface_values = 0
    DO  i = 0, numprocs-1
       IF ( i == myid )  THEN
          glo_start = total_number_of_surface_values + 1
       ENDIF
       total_number_of_surface_values = total_number_of_surface_values + all_nr_val(i)
    ENDDO

!
!-- Actions during read
    IF ( rd_flag )  THEN
       IF ( .NOT. ALLOCATED( m_start_index ) )   ALLOCATE( m_start_index(nys:nyn,nxl:nxr) )
       IF ( .NOT. ALLOCATED( m_end_index ) )     ALLOCATE( m_end_index(nys:nyn,nxl:nxr) )
       IF ( .NOT. ALLOCATED( m_global_start ) )  ALLOCATE( m_global_start(nys:nyn,nxl:nxr) )
!
!--    Save arrays for later reading
       m_start_index  = start_index
       m_end_index    = end_index
       m_global_start = global_start
       nr_val = MAXVAL( end_index )

#if defined( __parallel )
       CALL MPI_FILE_SET_VIEW( fhs, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
#endif
    ENDIF

!
!-- Actions during write
    IF ( wr_flag )  THEN
!
!--    Create surface filetype
       ft_surf      = -1
       global_start = start_index + glo_start - 1

!
!--    Grid cells without any surface element are marked with -1
       WHERE ( end_index < start_index )
          global_start = -1
       ENDWHERE

#if defined( __parallel )
       IF ( sm_io%is_sm_active() )  THEN
          IF ( sm_io%iam_io_pe )  THEN
!
!--          Calculate number of values of all PEs of an I/O group
             nr_iope = 0
             DO  i = myid, myid+sm_io%sh_npes-1
                nr_iope = nr_iope + all_nr_val(i)
             ENDDO
          ELSE
!
!--          Offset of this (non-I/O) PE within the shared buffer of its I/O group.
!--          NOTE(review): local_start is not used further in this routine — presumably a module
!--          variable consumed by the surface read/write routines; confirm against the module head.
             local_start = 0
             DO  i = myid-sm_io%sh_rank, myid-1
                local_start = local_start + all_nr_val(i)
             ENDDO
          ENDIF
!
!--       Get the size of shared memory window on all PEs
          CALL MPI_BCAST( nr_iope, 1, MPI_INTEGER, 0, sm_io%comm_shared, ierr )
          CALL sm_io%sm_allocate_shared( array_1d, 1, MAX( 1, nr_iope ), win_surf )
       ELSE
          nr_iope = nr_val
       ENDIF
#else
       nr_iope = nr_val
#endif

!
!--    Check, if surface data exist on this PE
       data_to_write = .TRUE.
       IF ( total_number_of_surface_values == 0 )  THEN
          data_to_write = .FALSE.
          RETURN
       ENDIF

       IF ( sm_io%iam_io_pe )  THEN

!
!--       A collective subarray filetype can only be used if every PE writes at least one value
          all_pes_write = ( MINVAL( all_nr_val ) > 0 )

          IF ( all_pes_write )  THEN
             dims1(1)  = total_number_of_surface_values
             lize1(1)  = nr_iope
             start1(1) = glo_start-1

#if defined( __parallel )
             IF ( total_number_of_surface_values > 0 )  THEN
                CALL MPI_TYPE_CREATE_SUBARRAY( 1, dims1, lize1, start1, MPI_ORDER_FORTRAN,         &
                                               MPI_REAL, ft_surf, ierr )
                CALL MPI_TYPE_COMMIT( ft_surf, ierr )
             ENDIF
#endif
          ENDIF
       ENDIF

    ENDIF

 END SUBROUTINE rd_mpi_io_surface_filetypes



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine creates file types to access 2d-/3d-REAL arrays and 2d-INTEGER arrays
!> distributed in blocks among processes to a single file that contains the global arrays.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_create_filetypes

    IMPLICIT NONE

    INTEGER, DIMENSION(2) ::  dims2   !< global 2d array dimensions
    INTEGER, DIMENSION(2) ::  lize2   !< local (PE / I/O group) 2d subarray dimensions
    INTEGER, DIMENSION(2) ::  start2  !< start of the local 2d subarray within the global array

    INTEGER, DIMENSION(3) ::  dims3   !< global 3d array dimensions
    INTEGER, DIMENSION(3) ::  lize3   !< local (PE / I/O group) 3d subarray dimensions
    INTEGER, DIMENSION(3) ::  start3  !< start of the local 3d subarray within the global array

    TYPE(local_boundaries) ::  save_io_grid  !< temporary variable to store grid settings


    IF ( sm_io%is_sm_active() )  THEN
       save_io_grid = sm_io%io_grid
    ENDIF

!
!-- Decision, if storage with or without ghost layers.
!-- Please note that the indexing of the global array always starts at 0, even in Fortran.
!-- Therefore the PE local indices have to be shifted by nbgp in the case with ghost layers.
    IF ( include_total_domain_boundaries )  THEN

       lb%nxl = nxl + nbgp
       lb%nxr = nxr + nbgp
       lb%nys = nys + nbgp
       lb%nyn = nyn + nbgp
       lb%nnx = nnx
       lb%nny = nny
       lb%nx  = nx + 2 * nbgp
       lb%ny  = ny + 2 * nbgp
!
!--    PEs at the outer domain boundaries additionally store the ghost layers of the total domain
       IF ( myidx == 0 )  THEN
          lb%nxl = lb%nxl - nbgp
          lb%nnx = lb%nnx + nbgp
       ENDIF
       IF ( myidx == npex-1  .OR.  npex == -1 )  THEN  ! npex == 1 if -D__parallel not set
          lb%nxr = lb%nxr + nbgp
          lb%nnx = lb%nnx + nbgp
       ENDIF
       IF ( myidy == 0 )  THEN
          lb%nys = lb%nys - nbgp
          lb%nny = lb%nny + nbgp
       ENDIF
       IF ( myidy == npey-1  .OR.  npey == -1 )  THEN  ! npey == 1 if -D__parallel not set
          lb%nyn = lb%nyn + nbgp
          lb%nny = lb%nny + nbgp
       ENDIF

       CALL sm_io%sm_adjust_outer_boundary()

    ELSE

       lb%nxl = nxl
       lb%nxr = nxr
       lb%nys = nys
       lb%nyn = nyn
       lb%nnx = nnx
       lb%nny = nny
       lb%nx  = nx
       lb%ny  = ny

    ENDIF

!
!-- Allocate the I/O buffer arrays: as MPI shared-memory windows when shared memory I/O is active,
!-- as ordinary PE-local arrays otherwise
    IF ( sm_io%is_sm_active() )  THEN
#if defined( __parallel )
       CALL sm_io%sm_allocate_shared( array_2d, sm_io%io_grid%nxl, sm_io%io_grid%nxr,              &
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_2dr )
       CALL sm_io%sm_allocate_shared( array_2di, save_io_grid%nxl, save_io_grid%nxr,               &
                                      save_io_grid%nys, save_io_grid%nyn, win_2di )
       CALL sm_io%sm_allocate_shared( array_3d, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,  &
                                      sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3dr )
#endif
    ELSE
       ALLOCATE( array_2d(lb%nxl:lb%nxr,lb%nys:lb%nyn) )
       ALLOCATE( array_2di(nxl:nxr,nys:nyn) )
       ALLOCATE( array_3d(nzb:nzt+1,lb%nxl:lb%nxr,lb%nys:lb%nyn) )
       sm_io%io_grid = lb
    ENDIF

!
!-- Create filetype for 2d-REAL array with ghost layers around the total domain
    dims2(1)  = lb%nx + 1
    dims2(2)  = lb%ny + 1
    lize2(1)  = sm_io%io_grid%nnx
    lize2(2)  = sm_io%io_grid%nny
    start2(1) = sm_io%io_grid%nxl
    start2(2) = sm_io%io_grid%nys

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 2, dims2, lize2, start2, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_2d, ierr )
       CALL MPI_TYPE_COMMIT( ft_2d, ierr )
    ENDIF
#endif

!
!-- Create filetype for 2d-INTEGER array without ghost layers around the total domain
    dims2(1) = nx + 1
    dims2(2) = ny + 1

    IF ( sm_io%is_sm_active() )  THEN

       lize2(1)  = save_io_grid%nnx
       lize2(2)  = save_io_grid%nny
       start2(1) = save_io_grid%nxl
       start2(2) = save_io_grid%nys

    ELSE

       lize2(1)  = nnx
       lize2(2)  = nny
       start2(1) = nxl
       start2(2) = nys

    ENDIF

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 2, dims2, lize2, start2, MPI_ORDER_FORTRAN, MPI_INTEGER,     &
                                      ft_2di_nb, ierr )
       CALL MPI_TYPE_COMMIT( ft_2di_nb, ierr )
    ENDIF
#endif

!
!-- Create filetype for 3d-REAL array
    dims3(1)  = nz + 2
    dims3(2)  = lb%nx + 1
    dims3(3)  = lb%ny + 1
    lize3(1)  = dims3(1)
    lize3(2)  = sm_io%io_grid%nnx
    lize3(3)  = sm_io%io_grid%nny
    start3(1) = nzb
    start3(2) = sm_io%io_grid%nxl
    start3(3) = sm_io%io_grid%nys

#if defined( __parallel )
    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_REAL, ft_3d, &
                                      ierr )
       CALL MPI_TYPE_COMMIT( ft_3d, ierr )
    ENDIF
#endif

 END SUBROUTINE rd_mpi_io_create_filetypes



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> This subroutine creates file types to access 3d-soil arrays distributed in blocks among
!> processes to a single file that contains the global arrays.
!> It is not required for the serial mode.
!--------------------------------------------------------------------------------------------------!
#if defined( __parallel )
 SUBROUTINE rd_mpi_io_create_filetypes_3dsoil( nzb_soil, nzt_soil )

    IMPLICIT NONE

    INTEGER, INTENT(IN) ::  nzb_soil  !< lower bound of the soil z-dimension
    INTEGER, INTENT(IN) ::  nzt_soil  !< upper bound of the soil z-dimension

    INTEGER, DIMENSION(3) ::  dims3   !< global 3d-soil array dimensions
    INTEGER, DIMENSION(3) ::  lize3   !< local 3d-soil subarray dimensions
    INTEGER, DIMENSION(3) ::  start3  !< start of the local subarray within the global array


!
!-- Allocate the soil I/O buffer: as MPI shared-memory window when shared memory I/O is active,
!-- as ordinary PE-local array otherwise
    IF ( sm_io%is_sm_active() )  THEN
       CALL sm_io%sm_allocate_shared( array_3d_soil, nzb_soil, nzt_soil, sm_io%io_grid%nxl,        &
                                      sm_io%io_grid%nxr, sm_io%io_grid%nys, sm_io%io_grid%nyn,     &
                                      win_3ds )
    ELSE
       ALLOCATE( array_3d_soil(nzb_soil:nzt_soil,lb%nxl:lb%nxr,lb%nys:lb%nyn) )
       sm_io%io_grid = lb
    ENDIF

!
!-- Create filetype for 3d-soil array
    dims3(1)  = nzt_soil - nzb_soil + 1
    dims3(2)  = lb%nx + 1
    dims3(3)  = lb%ny + 1
    lize3(1)  = dims3(1)
    lize3(2)  = sm_io%io_grid%nnx
    lize3(3)  = sm_io%io_grid%nny
    start3(1) = nzb_soil
    start3(2) = sm_io%io_grid%nxl
    start3(3) = sm_io%io_grid%nys

    IF ( sm_io%iam_io_pe )  THEN
       CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_REAL,        &
                                      ft_3dsoil, ierr )
       CALL MPI_TYPE_COMMIT( ft_3dsoil, ierr )
    ENDIF

 END SUBROUTINE rd_mpi_io_create_filetypes_3dsoil
#endif



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Free all file types that have been created for MPI-IO.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_free_filetypes

    IMPLICIT NONE


#if defined( __parallel )
!
!-- Free the 2d/3d filetypes and their I/O buffers (shared-memory windows or local arrays)
    IF ( filetypes_created )  THEN

       IF ( sm_io%iam_io_pe )  THEN
          CALL MPI_TYPE_FREE( ft_2d, ierr )
          CALL MPI_TYPE_FREE( ft_2di_nb, ierr )
          CALL MPI_TYPE_FREE( ft_3d, ierr )
       ENDIF

       IF ( sm_io%is_sm_active() )  THEN
          CALL sm_io%sm_free_shared( win_2dr )
          CALL sm_io%sm_free_shared( win_2di )
          CALL sm_io%sm_free_shared( win_3dr )
       ELSE
          DEALLOCATE( array_2d, array_2di, array_3d )
       ENDIF

    ENDIF
!
!-- Free last surface filetype
    IF ( sm_io%iam_io_pe .AND. ft_surf /= -1 )  THEN
       CALL MPI_TYPE_FREE( ft_surf, ierr )
    ENDIF

    IF ( sm_io%is_sm_active() .AND.  win_surf /= -1 )  THEN
       CALL sm_io%sm_free_shared( win_surf )
    ENDIF

!
!-- Mark surface filetype/window as unset so the next call of rd_mpi_io_surface_filetypes
!-- does not try to free them again
    ft_surf  = -1
    win_surf = -1
#else
    DEALLOCATE( array_2d, array_2di, array_3d )
#endif

 END SUBROUTINE rd_mpi_io_free_filetypes



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Print the restart data file header (MPI-IO format) for debugging.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_print_header

    IMPLICIT NONE

    INTEGER(iwp) ::  i  !< loop index over header entries


!
!-- All output goes to unit 9 (the PE-local debug file)
    WRITE (9,*)  'header position after reading the restart file header: ', header_position
    WRITE (9,*)  ' '
    WRITE (9,*)  'restart file header content:'
    WRITE (9,*)  '----------------------------'
    WRITE (9,*)  ' '

    WRITE (9,*)  ' CHARACTER header values Total number: ', tgh%nr_char
    WRITE (9,*)  ' '
    DO  i = 1, tgh%nr_char
       WRITE( 9, '(I3,A,1X,A)' )  i, ': ', text_lines(i)(1:80)
    ENDDO
    WRITE (9,*)  ' '

    WRITE (9,*)  ' INTEGER header variables and values Total number: ', tgh%nr_int
    WRITE (9,*)  ' '
    DO  i = 1, tgh%nr_int
       WRITE(9,*)  ' variable: ', int_names(i), ' value: ', int_values(i)
    ENDDO
    WRITE (9,*)  ' '

    WRITE (9,*)  ' REAL header variables and values Total number: ', tgh%nr_real
    WRITE (9,*)  ' '
    DO  i = 1, tgh%nr_real
       WRITE(9,*)  ' variable: ', real_names(i), ' value: ', real_values(i)
    ENDDO
    WRITE (9,*)  ' '

    WRITE (9,*)  ' Header entries with offset (2d/3d arrays) Total number: ', tgh%nr_arrays
    WRITE (9,*)  ' '
    DO  i = 1, tgh%nr_arrays
       WRITE(9,*)  ' variable: ', array_names(i), ' offset: ', array_offset(i)
    ENDDO
    WRITE (9,*)  ' '

 END SUBROUTINE rd_mpi_io_print_header



!--------------------------------------------------------------------------------------------------!
! Description:
! ------------
!> Check if big/little endian data format is used.
!> An int*4 pointer is set to a int*8 variable, the int*8 is set to 1, and then it is checked, if
!> the first 4 bytes of the pointer are equal 1 (little endian) or not.
!--------------------------------------------------------------------------------------------------!
 SUBROUTINE rd_mpi_io_check_endian( i_endian )

    IMPLICIT NONE

    INTEGER, INTENT(out) ::  i_endian  !< 1 = little endian, 2 = big endian

    INTEGER(KIND=8), TARGET                ::  probe_word   !< 8-byte integer whose byte order is probed
    INTEGER, DIMENSION(1)                  ::  alias_shape  !< shape of the 4-byte alias array
    INTEGER(KIND=4), POINTER, DIMENSION(:) ::  word_halves  !< two 4-byte integers aliasing probe_word
    TYPE(C_PTR)                            ::  word_addr    !< C address of probe_word


!
!-- Overlay the 8-byte integer with two 4-byte integers via a C pointer, then store a 1 in it.
!-- On a little endian machine the least significant bytes come first, so the value 1 shows up
!-- in the first 4-byte half; on a big endian machine it shows up in the second half.
    alias_shape(1) = 2
    word_addr = C_LOC( probe_word )
    CALL C_F_POINTER( word_addr, word_halves, alias_shape )

    probe_word = 1

    IF ( word_halves(1) == 1 )  THEN
       i_endian = 1    ! little endian
    ELSE
       i_endian = 2    ! big endian
    ENDIF

 END SUBROUTINE rd_mpi_io_check_endian

 END MODULE restart_data_mpi_io_mod