Changeset 2600 for palm/trunk/SOURCE


Timestamp: Nov 1, 2017 2:11:20 PM
Author: raasch
Message: small changes concerning r2599; cycle numbers are now three digits wide

Location: palm/trunk/SOURCE
Files: 3 edited

Legend: in the diff listings below, the left column gives the line number in the old revision and the right column the line number in the new revision; lines with only a new number were added, lines with only an old number were removed.
  • palm/trunk/SOURCE/Makefile

    r2599 r2600

      25     25   # -----------------
      26     26   # $Id$
             27   # comment line concerning bound checks removed
             28   #
             29   # 2599 2017-11-01 13:18:45Z hellstea
      27     30   # virtual_flight_mod.o, synthetic_turbulence_generator_mod.o and
      28     31   # wind_turbine_model_mod.o were added to read_var_list.o
        …
     446    449   # The following line is needed for palm_simple_install, don't remove it!
     447    450   #to_be_replaced_by_include
     448
     449          #BOUNDS="-Rbc"  # Array bounds checking. Compromises performance seriously.
     450    451
     451    452   .SUFFIXES:
  • palm/trunk/SOURCE/init_pegrid.f90

    r2516 r2600

      25     25   ! -----------------
      26     26   ! $Id$
             27   ! calculation of block-I/O quantitites removed (is now done in parin)
             28   !
             29   ! 2516 2017-10-04 11:03:04Z suehring
      27     30   ! Remove tabs
      28     31   !
        …
     209    212                  coupling_topology, gathered_size, grid_level,                   &
     210    213                  grid_level_count, inflow_l, inflow_n, inflow_r, inflow_s,       &
     211                         io_blocks, io_group, maximum_grid_level,                        &
     212                         maximum_parallel_io_streams, message_string,                    &
            214                  maximum_grid_level, message_string,                             &
     213    215                  mg_switch_to_pe0_level, momentum_advec, nest_bound_l,           &
     214    216                  nest_bound_n, nest_bound_r, nest_bound_s, nest_domain, neutral, &
        …
    1372   1374       ENDIF
    1373   1375
    1374           !
    1375           !-- Calculate the number of groups into which parallel I/O is split.
    1376           !-- The default for files which are opened by all PEs (or where each
    1377           !-- PE opens his own independent file) is, that all PEs are doing input/output
    1378           !-- in parallel at the same time. This might cause performance or even more
    1379           !-- severe problems depending on the configuration of the underlying file
    1380           !-- system.
    1381           !-- First, set the default:
    1382               IF ( maximum_parallel_io_streams == -1  .OR. &
    1383                    maximum_parallel_io_streams > numprocs )  THEN
    1384                  maximum_parallel_io_streams = numprocs
    1385               ENDIF
    1386
    1387           !
    1388           !-- Now calculate the number of io_blocks and the io_group to which the
    1389           !-- respective PE belongs. I/O of the groups is done in serial, but in parallel
    1390           !-- for all PEs belonging to the same group. A preliminary setting with myid
    1391           !-- based on MPI_COMM_WORLD has been done in parin.
    1392               io_blocks = numprocs / maximum_parallel_io_streams
    1393               io_group  = MOD( myid+1, io_blocks )
    1394
    1395
    1396   1376    END SUBROUTINE init_pegrid
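
    The block removed above computed io_blocks and io_group from the comm2d-based numprocs and myid; since r2600 this calculation is done only in parin.f90 (see the next file). As background, here is a minimal sketch of how such a grouping is typically consumed, with I/O of the groups done in serial but in parallel within each group. The subroutine name and loop are illustrative only and not taken from PALM:

    !-- Minimal sketch (not PALM code): the PEs take turns group by group, so
    !-- that only one group accesses the file system at a time, while the PEs
    !-- of the active group read/write in parallel.
    SUBROUTINE grouped_io_sketch( io_blocks, io_group, comm )
       USE MPI
       IMPLICIT NONE
       INTEGER, INTENT(IN) ::  io_blocks  !< number of serial I/O rounds
       INTEGER, INTENT(IN) ::  io_group   !< round in which this PE performs its I/O
       INTEGER, INTENT(IN) ::  comm       !< communicator containing all participating PEs
       INTEGER ::  i, ierr

       DO  i = 0, io_blocks-1
          IF ( i == io_group )  THEN
    !--       open / read / write the PE-local file here
          ENDIF
    !--    all PEs wait until the current group has finished its I/O
          CALL MPI_BARRIER( comm, ierr )
       ENDDO

    END SUBROUTINE grouped_io_sketch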
  • palm/trunk/SOURCE/parin.f90

    r2599 r2600

      25     25   ! -----------------
      26     26   ! $Id$
             27   ! some comments added and variables renamed concerning r2599
             28   !
             29   ! 2599 2017-11-01 13:18:45Z hellstea
      27     30   ! The i/o grouping is updated to work correctly also in nested runs.
      28     31   !
        …
     391    394       IMPLICIT NONE
     392    395
     393               INTEGER(iwp) ::  i      !<
     394               INTEGER(iwp) ::  ioerr  !< error flag for open/read/write
     395               INTEGER(iwp) ::  myworldid       !<
     396               INTEGER(iwp) ::  numworldprocs   !<
            396        INTEGER(iwp) ::  global_id      !< process id with respect to MPI_COMM_WORLD
            397        INTEGER(iwp) ::  global_procs   !< # of procs with respect to MPI_COMM_WORLD
            398        INTEGER(iwp) ::  i              !<
            399        INTEGER(iwp) ::  ioerr          !< error flag for open/read/write
     397    400
     398    401       NAMELIST /inipar/  aerosol_bulk, alpha_surface, approximation, bc_e_b,     &
        …
     524    527   !-- severe problems depending on the configuration of the underlying file
     525    528   !-- system.
            529   !-- Calculation of the number of blocks and the I/O group must be based on all
            530   !-- PEs involved in this run. Since myid and numprocs are related to the
            531   !-- comm2d communicator, which gives only a subset of all PEs in case of
            532   !-- nested runs, that information must be inquired again from the global
            533   !-- communicator.
     526    534   !-- First, set the default:
     527               CALL MPI_COMM_RANK( MPI_COMM_WORLD, myworldid, ierr )
     528               CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numworldprocs, ierr )
            535        CALL MPI_COMM_RANK( MPI_COMM_WORLD, global_id, ierr )
            536        CALL MPI_COMM_SIZE( MPI_COMM_WORLD, global_procs, ierr )
     529    537       IF ( maximum_parallel_io_streams == -1  .OR.                               &
     530                    maximum_parallel_io_streams > numworldprocs )  THEN
     531                  maximum_parallel_io_streams = numworldprocs
            538             maximum_parallel_io_streams > global_procs )  THEN
            539           maximum_parallel_io_streams = global_procs
     532    540       ENDIF
     533    541   !
        …
     535    543   !-- respective PE belongs. I/O of the groups is done in serial, but in parallel
     536    544   !-- for all PEs belonging to the same group.
     537           !-- These settings are repeated in init_pegrid for the communicator comm2d,
     538           !-- which is not available here
     539               !io_blocks = numprocs / maximum_parallel_io_streams
     540               io_blocks = numworldprocs / maximum_parallel_io_streams
     541               !io_group  = MOD( myid+1, io_blocks )
     542               io_group  = MOD( myworldid+1, io_blocks )
            545        io_blocks = global_procs / maximum_parallel_io_streams
            546        io_group  = MOD( global_id+1, io_blocks )
     543    547
     544    548       CALL location_message( 'reading NAMELIST parameters from PARIN', .FALSE. )
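
    The comments added here give the reason for the change: in nested runs, myid and numprocs refer to comm2d, which covers only the PEs of one nest domain, so the number of I/O blocks has to be derived from MPI_COMM_WORLD instead. The following minimal, self-contained sketch illustrates that difference; the MPI_COMM_SPLIT call only mimics the effect of nesting and is not how PALM builds comm2d:

    !-- Minimal sketch (not PALM code): rank and size of a sub-communicator
    !-- differ from those of MPI_COMM_WORLD, which is why io_blocks/io_group
    !-- are now derived from the global values (cf. the diff above).
    PROGRAM global_vs_local_sketch
       USE MPI
       IMPLICIT NONE
       INTEGER ::  colour, ierr, local_comm
       INTEGER ::  global_id, global_procs   !< with respect to MPI_COMM_WORLD
       INTEGER ::  myid, numprocs            !< with respect to the sub-communicator

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, global_id, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, global_procs, ierr )

    !-- Split the global communicator into two halves, standing in for two
    !-- nest domains
       colour = 0
       IF ( global_id >= global_procs / 2 )  colour = 1
       CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, colour, global_id, local_comm, ierr )

       CALL MPI_COMM_RANK( local_comm, myid, ierr )
       CALL MPI_COMM_SIZE( local_comm, numprocs, ierr )

    !-- numprocs counts only the PEs of one "nest", global_procs counts all PEs
       PRINT *, 'global:', global_id, '/', global_procs, '   local:', myid, '/', numprocs

       CALL MPI_FINALIZE( ierr )
    END PROGRAM global_vs_local_sketch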