Timestamp:
Nov 1, 2017 2:11:20 PM
Author:
raasch
Message:

small changes concerning r2599; cycle numbers are now three digits wide

File:
1 edited

  • palm/trunk/SOURCE/parin.f90

--- palm/trunk/SOURCE/parin.f90 (r2599)
+++ palm/trunk/SOURCE/parin.f90 (r2600)
@@ -25,4 +25,7 @@
 ! -----------------
 ! $Id$
+! some comments added and variables renamed concerning r2599
+!
+! 2599 2017-11-01 13:18:45Z hellstea
 ! The i/o grouping is updated to work correctly also in nested runs.
 !
@@ -391,8 +394,8 @@
     IMPLICIT NONE

-    INTEGER(iwp) ::  i      !<
-    INTEGER(iwp) ::  ioerr  !< error flag for open/read/write
-    INTEGER(iwp) ::  myworldid       !<
-    INTEGER(iwp) ::  numworldprocs   !<
+    INTEGER(iwp) ::  global_id      !< process id with respect to MPI_COMM_WORLD
+    INTEGER(iwp) ::  global_procs   !< # of procs with respect to MPI_COMM_WORLD
+    INTEGER(iwp) ::  i              !<
+    INTEGER(iwp) ::  ioerr          !< error flag for open/read/write

     NAMELIST /inipar/  aerosol_bulk, alpha_surface, approximation, bc_e_b,     &
@@ -524,10 +527,15 @@
 !-- severe problems depending on the configuration of the underlying file
 !-- system.
+!-- Calculation of the number of blocks and the I/O group must be based on all
+!-- PEs involved in this run. Since myid and numprocs are related to the
+!-- comm2d communicator, which gives only a subset of all PEs in case of
+!-- nested runs, that information must be inquired again from the global
+!-- communicator.
 !-- First, set the default:
-    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myworldid, ierr )
-    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numworldprocs, ierr )
+    CALL MPI_COMM_RANK( MPI_COMM_WORLD, global_id, ierr )
+    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, global_procs, ierr )
     IF ( maximum_parallel_io_streams == -1  .OR.                               &
-         maximum_parallel_io_streams > numworldprocs )  THEN
-       maximum_parallel_io_streams = numworldprocs
+         maximum_parallel_io_streams > global_procs )  THEN
+       maximum_parallel_io_streams = global_procs
     ENDIF
 !
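For illustration only, here is a minimal standalone sketch of the clamping logic introduced in this hunk. It is not PALM code: the program name, the PRINT statement and the initial stream value are invented for the example; only the variable names and the IF block follow the new r2600 lines. The point is that rank and size are taken from MPI_COMM_WORLD, because in nested runs comm2d covers only a subset of all PEs.

PROGRAM io_stream_clamp_demo
!
!-- Sketch (not PALM code): inquire rank/size from the global communicator and
!-- clamp the requested number of parallel I/O streams to the global PE count
   USE MPI

   IMPLICIT NONE

   INTEGER ::  global_id                         !< rank in MPI_COMM_WORLD
   INTEGER ::  global_procs                      !< size of MPI_COMM_WORLD
   INTEGER ::  ierr                              !< MPI error code
   INTEGER ::  maximum_parallel_io_streams = -1  !< -1 means "use all PEs"

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, global_id, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, global_procs, ierr )

!
!-- Same default as in parin.f90: never use more I/O streams than PEs exist
   IF ( maximum_parallel_io_streams == -1  .OR.                                &
        maximum_parallel_io_streams > global_procs )  THEN
      maximum_parallel_io_streams = global_procs
   ENDIF

   IF ( global_id == 0 )  THEN
      PRINT *, 'PEs: ', global_procs, '  I/O streams: ', maximum_parallel_io_streams
   ENDIF

   CALL MPI_FINALIZE( ierr )

END PROGRAM io_stream_clamp_demo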
     
@@ -535,10 +543,6 @@
 !-- respective PE belongs. I/O of the groups is done in serial, but in parallel
 !-- for all PEs belonging to the same group.
-!-- These settings are repeated in init_pegrid for the communicator comm2d,
-!-- which is not available here
-    !io_blocks = numprocs / maximum_parallel_io_streams
-    io_blocks = numworldprocs / maximum_parallel_io_streams
-    !io_group  = MOD( myid+1, io_blocks )
-    io_group  = MOD( myworldid+1, io_blocks )
+    io_blocks = global_procs / maximum_parallel_io_streams
+    io_group  = MOD( global_id+1, io_blocks )

     CALL location_message( 'reading NAMELIST parameters from PARIN', .FALSE. )
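The io_blocks/io_group pair computed in this hunk is what later serializes the file access: PEs that share the same io_group access the file concurrently, while the groups take turns. The following is a hedged sketch of that usage pattern, not copied from the PALM source; the program name, the loop body and the assumed stream count of 2 are invented for the demo, and only the grouping arithmetic follows the r2600 code. With 8 PEs and 2 streams, io_blocks = 4, so the PEs take turns in 4 rounds with two PEs active per round.

PROGRAM io_group_demo
!
!-- Sketch (not PALM code) of round-robin I/O grouping; assumes at least
!-- 2 PEs so that io_blocks is nonzero for the assumed stream count
   USE MPI

   IMPLICIT NONE

   INTEGER ::  global_id, global_procs, ierr
   INTEGER ::  i
   INTEGER ::  io_blocks, io_group
   INTEGER ::  maximum_parallel_io_streams = 2   !< assumed value for the demo

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, global_id, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, global_procs, ierr )

!
!-- Grouping as in parin.f90 (r2600), based on the global communicator
   io_blocks = global_procs / maximum_parallel_io_streams
   io_group  = MOD( global_id+1, io_blocks )

!
!-- Groups do their "I/O" one after the other; PEs of the same group act
!-- concurrently within a round
   DO  i = 0, io_blocks-1
      IF ( i == io_group )  THEN
         PRINT *, 'PE ', global_id, ' does its I/O in round ', i
      ENDIF
      CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
   ENDDO

   CALL MPI_FINALIZE( ierr )

END PROGRAM io_group_demo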