Changeset 759


Ignore:
Timestamp:
Sep 15, 2011 1:58:31 PM (10 years ago)
Author:
raasch
Message:

New:
---

The number of parallel I/O operations can be limited with new mrun-option -w.
(advec_particles, data_output_2d, data_output_3d, header, init_grid, init_pegrid, init_3d_model, modules, palm, parin, write_3d_binary)

Changed:


mrun option -T is obligatory

Errors:


Bugfix: No zero assignments to volume_flow_initial and volume_flow_area in
case of normal restart runs. (init_3d_model)

Bugfix: initialization of u_0, v_0. This is just to avoid access of uninitialized
memory in exchange_horiz_2d, which causes respective error messages
when the Intel thread checker (inspector) is used. (production_e)

Bugfix for ts limitation (prandtl_fluxes)

Location:
palm/trunk
Files:
14 edited

Legend:

Unmodified
Added
Removed
  • palm/trunk/SCRIPTS/mrun

    r757 r759  
    215215     # 14/12/10 - Siggi  - adjustments for new Tsubame system at Tokyo
    216216     #                     institute of technology (lctit)
    217      # 23/12/10 - Micha  - different number of processors in ocean and atmosphere
    218      #                     is now allowed
     217     # 23/12/10 - Micha  - different number of processors in ocean and
     218     #                     atmosphere is now allowed
    219219     # 02/02/10 - Siggi  - further adjustments on Tsubame and concerning openMP
    220220     #                     usage
     
    225225     # 06/04/11 - BjornM - bugfix for runs with mpt on lcsgi
    226226     # 17/08/11 - Siggi  - extensions for impi library
    227      # 18/08/11 - Siggi  - bugfix for local append of output files with suffix (.nc)
     227     # 18/08/11 - Siggi  - bugfix for local append of output files with suffix
     228     #                     (.nc)
    228229     # 18/08/11 - Marcus - support for Linux OS with German locale
    229      #                   - properly report hosts on general Linux clusters 
     230     #                   - properly report hosts on general Linux clusters
    230231     # 29/08/11 - BjornW - adapted for lcflow (ForWind cluster in Oldenburg)
    231      # 29/08/11 - Carolin- initiating restart-run: adjustment of the path at IMUK
     232     # 29/08/11 - Carolin- initiating restart-run: adjustment of the path at
     233     #                     IMUK
     234     # 15/09/11 - Siggi  - new option -w tp set the maximum number of parallel
     235     #                     io streams, option -T is obligatory from now on
    232236
    233237 
     
    287291 mainprog=""
    288292 makefile=""
     293 max_par_io_str=""
    289294 mc=$0
    290295 while [[ $(echo $mc | grep -c "/") != 0 ]]
     
    474479    # SHELLSCRIPT-OPTIONEN EINLESEN UND KOMMANDO NEU ZUSAMMENSETZEN, FALLS ES
    475480    # FUER FOLGEJOBS BENOETIGT WIRD
    476  while  getopts  :a:AbBc:Cd:D:Fg:G:h:H:i:IkK:m:M:n:o:O:p:P:q:r:R:s:St:T:u:U:vxX:yY: option
     481 while  getopts  :a:AbBc:Cd:D:Fg:G:h:H:i:IkK:m:M:n:o:O:p:P:q:r:R:s:St:T:u:U:vw:xX:yY: option
    477482 do
    478483   case  $option  in
     
    507512       (S)   read_from_config=false; mc="$mc -S";;
    508513       (t)   cpumax=$OPTARG; mc="$mc -t$OPTARG";;
    509        (T)   tasks_per_node=$OPTARG; mc="$mc -T$OPTARG";;
     514       (T)   mrun_tasks_per_node=$OPTARG; mc="$mc -T$OPTARG";;
    510515       (u)   remote_username=$OPTARG; mc="$mc -u$OPTARG";;
    511516       (U)   return_username=$OPTARG; mc="$mc -U$OPTARG";;
    512517       (v)   silent=true; mc="$mc -v";;
     518       (w)   max_par_io_str=$OPTARG; mc="$mc -w$OPTARG";;
    513519       (x)   do_trace=true;set -x; mc="$mc -x";;
    514520       (X)   numprocs=$OPTARG; mc="$mc -X$OPTARG";;
     
    567573    printf "\n        -u    username on remote machine               \"\" "
    568574    printf "\n        -v    no prompt for confirmation               ---"
     575    printf "\n        -w    maximum parallel io streams              as given by -X"
    569576    printf "\n        -x    tracing of mrun for debug purposes       ---"
    570577    printf "\n        -X    # of processors (on parallel machines)   1"
     
    10361043
    10371044       # OPTIONSWERTE UEBERSTEUERN KONFIGURATIONSDATEI
    1038     [[ $mrun_memory     != 0  ]]  &&  memory=$mrun_memory
     1045    [[ $mrun_memory     != 0   ]]  &&  memory=$mrun_memory
    10391046    [[ "$mrun_group_number" != "none" ]]  &&  group_number=$mrun_group_number
    1040     [[ $mrun_cpumax     != 0  ]]  &&  cpumax=$mrun_cpumax
    1041     [[ "$mrun_numprocs" != "" ]]  &&  numprocs=$mrun_numprocs
     1047    [[ $mrun_cpumax     != 0   ]]  &&  cpumax=$mrun_cpumax
     1048    [[ "$mrun_numprocs" != ""  ]]  &&  numprocs=$mrun_numprocs
     1049    [[ "$max_par_io_str" != "" ]]  &&  maximum_parallel_io_streams=$max_par_io_str
     1050    [[ "$mrun_tasks_per_node" != "" ]]  &&  tasks_per_node=$mrun_tasks_per_node
    10421051
    10431052 fi
     
    11571166       # DEFAULT-WERT SETZEN) UND OB SIE EIN GANZZAHLIGER TEILER DER
    11581167       # GESAMTPROZESSORANZAHL IST
    1159     if [[ $host = nech  ||  $host = necriam  ||  $host = ibmh  ||  $host = ibmkisti  ||  $host = ibms ]]
    1160     then
    1161        [[ "$tasks_per_node" = "" ]]  &&  tasks_per_node=6
    1162        (( ival = $tasks_per_node ))
    1163        (( pes = numprocs ))
     1168    if [[ "$tasks_per_node" = ""  ]]
     1169    then
     1170       printf "\n"
     1171       printf "\n  +++ option \"-T\" (tasks per node) is missing"
     1172       printf "\n      set -T option or define tasks_per_node in the config file"
     1173       locat=tasks_per_node; (( iec = 0 )); exit
     1174    fi
     1175    (( ival = $tasks_per_node ))
     1176    (( pes = numprocs ))
    11641177#       if [[ $(echo $package_list | grep -c dvrp_graphics+1PE) = 1 ]]
    11651178#       then
    11661179#          (( pes = pes - 1 ))
    11671180#       fi
    1168        (( ii = pes / ival ))
    1169        if (( pes - ii * ival > 0 ))
    1170        then
    1171           printf "\n"
    1172           printf "\n  +++ tasks per node (option \"-T\") must be an integral"
    1173           printf "\n      divisor of the total number of processors (option \"-X\")"
    1174           printf "\n      values of this mrun-call: \"-T $tasks_per_node\" \"-X $numprocs\""
    1175           locat=tasks_per_node; (( iec = 0 )); exit
    1176        fi
    1177     fi
     1181    (( ii = pes / ival ))
     1182    if (( pes - ii * ival > 0 ))
     1183    then
     1184       printf "\n"
     1185       printf "\n  +++ tasks per node (option \"-T\") must be an integral"
     1186       printf "\n      divisor of the total number of processors (option \"-X\")"
     1187       printf "\n      values of this mrun-call: \"-T $tasks_per_node\" \"-X $numprocs\""
     1188       locat=tasks_per_node; (( iec = 0 )); exit
     1189    fi
     1190
    11781191
    11791192       # IBMY HAT NUR EINEN KNOTEN
     
    11901203    fi
    11911204
    1192        # FALLS OPENMP PARALLELISIERUNG VERWENDET WERDEN SOLL, ANZAHL VON THREADS
    1193        # SETZEN UND ZAHL DER TASKS PRO KNOTEN AUF 1 SETZEN
    1194 #    if [[ $use_openmp = true ]]
    1195 #    then
    1196 #       threads_per_task=$tasks_per_node
    1197 #       tasks_per_node=1
    1198 #    fi
    11991205
    12001206       # SETTINGS FOR SUBJOB-COMMAND
     
    12631269 fi
    12641270
     1271
     1272    # Set default value for the maximum number of parallel io streams
     1273 if [[ "$maximum_parallel_io_streams" = "" ]]
     1274 then
     1275    maximum_parallel_io_streams=$numprocs
     1276 fi
    12651277
    12661278
     
    21942206 then
    21952207    spalte1="tasks per node:"; spalte2="$tasks_per_node (number of nodes: $nodes)"
     2208    printf "| $spalte1$spalte2 | \n"
     2209 fi
     2210 if [[ $maximum_parallel_io_streams != $numprocs ]]
     2211 then
     2212    spalte1="max par io streams:"; spalte2="$maximum_parallel_io_streams"
    21962213    printf "| $spalte1$spalte2 | \n"
    21972214 fi
     
    29963013 &envpar  run_identifier = '$fname', host = '$localhost',
    29973014          write_binary = '$write_binary', tasks_per_node = $tasks_per_node,
     3015          maximum_parallel_io_streams = $maximum_parallel_io_streams,
    29983016          maximum_cpu_time_allowed = ${cpumax}.,
    29993017          revision = '$global_revision',
     
    44224440       mrun_com=${mrun_com}" -O $threads_per_task"
    44234441    fi
    4424     [[ "$tasks_per_node" != "" ]] &&  mrun_com=${mrun_com}" -T $tasks_per_node"
     4442    [[ "$tasks_per_node" != "" ]]  &&  mrun_com=${mrun_com}" -T $tasks_per_node"
    44254443    [[ $store_on_archive_system = true ]]  &&  mrun_com=${mrun_com}" -A"
    44264444    [[ $package_list != "" ]]     &&  mrun_com=${mrun_com}" -p \"$package_list\""
     
    44304448    [[ "$ocean_file_appendix" = true ]]  &&  mrun_com=${mrun_com}" -y"
    44314449    [[ $run_coupled_model = true ]]  &&  mrun_com=${mrun_com}" -Y \"$coupled_dist\""
     4450    [[ "$max_par_io_str" != "" ]]  &&  mrun_com=${mrun_com}" -w $max_par_io_str"
    44324451    if [[ $do_remote = true ]]
    44334452    then
  • palm/trunk/SOURCE/advec_particles.f90

    r668 r759  
    33!------------------------------------------------------------------------------!
    44! Current revisions:
    5 ! -----------------
     5! ------------------
     6! Splitting of parallel I/O (routine write_particles)
    67!
    78! Former revisions:
     
    38113812    CHARACTER (LEN=10) ::  particle_binary_version
    38123813
     3814    INTEGER ::  i
     3815
    38133816!
    38143817!-- First open the output unit.
     
    38283831    ENDIF
    38293832
    3830 !
    3831 !-- Write the version number of the binary format.
    3832 !-- Attention: After changes to the following output commands the version
    3833 !-- ---------  number of the variable particle_binary_version must be changed!
    3834 !--            Also, the version number and the list of arrays to be read in
    3835 !--            init_particles must be adjusted accordingly.
    3836     particle_binary_version = '3.0'
    3837     WRITE ( 90 )  particle_binary_version
    3838 
    3839 !
    3840 !-- Write some particle parameters, the size of the particle arrays as well as
    3841 !-- other dvrp-plot variables.
    3842     WRITE ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t,                    &
    3843                   maximum_number_of_particles, maximum_number_of_tailpoints,   &
    3844                   maximum_number_of_tails, number_of_initial_particles,        &
    3845                   number_of_particles, number_of_particle_groups,              &
    3846                   number_of_tails, particle_groups, time_prel,                 &
    3847                   time_write_particle_data, uniform_particles
    3848 
    3849     IF ( number_of_initial_particles /= 0 )  WRITE ( 90 )  initial_particles
    3850 
    3851     WRITE ( 90 )  prt_count, prt_start_index
    3852     WRITE ( 90 )  particles
    3853 
    3854     IF ( use_particle_tails )  THEN
    3855        WRITE ( 90 )  particle_tail_coordinates
    3856     ENDIF
    3857 
    3858     CLOSE ( 90 )
     3833    DO  i = 0, io_blocks-1
     3834
     3835       IF ( i == io_group )  THEN
     3836
     3837!
     3838!--       Write the version number of the binary format.
     3839!--       Attention: After changes to the following output commands the version
     3840!--       ---------  number of the variable particle_binary_version must be
     3841!--                  changed! Also, the version number and the list of arrays
     3842!--                  to be read in init_particles must be adjusted accordingly.
     3843          particle_binary_version = '3.0'
     3844          WRITE ( 90 )  particle_binary_version
     3845
     3846!
     3847!--       Write some particle parameters, the size of the particle arrays as
     3848!--       well as other dvrp-plot variables.
     3849          WRITE ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t,              &
     3850                        maximum_number_of_particles,                           &
     3851                        maximum_number_of_tailpoints, maximum_number_of_tails, &
     3852                        number_of_initial_particles, number_of_particles,      &
     3853                        number_of_particle_groups, number_of_tails,            &
     3854                        particle_groups, time_prel, time_write_particle_data,  &
     3855                        uniform_particles
     3856
     3857          IF ( number_of_initial_particles /= 0 ) WRITE ( 90 ) initial_particles
     3858
     3859          WRITE ( 90 )  prt_count, prt_start_index
     3860          WRITE ( 90 )  particles
     3861
     3862          IF ( use_particle_tails )  THEN
     3863             WRITE ( 90 )  particle_tail_coordinates
     3864          ENDIF
     3865
     3866          CLOSE ( 90 )
     3867
     3868       ENDIF
     3869
     3870#if defined( __parallel )
     3871       CALL MPI_BARRIER( comm2d, ierr )
     3872#endif
     3873
     3874    ENDDO
    38593875
    38603876 END SUBROUTINE write_particles
  • palm/trunk/SOURCE/data_output_2d.f90

    r730 r759  
    44! Current revisions:
    55! -----------------
     6! Splitting of parallel I/O
    67!
    78! Former revisions:
     
    737738                         ENDIF
    738739#endif
    739                          WRITE ( 21 )  nxlg, nxrg, nysg, nyng
    740                          WRITE ( 21 )  local_2d
     740                         DO  i = 0, io_blocks-1
     741                            IF ( i == io_group )  THEN
     742                               WRITE ( 21 )  nxlg, nxrg, nysg, nyng
     743                               WRITE ( 21 )  local_2d
     744                            ENDIF
     745#if defined( __parallel )
     746                            CALL MPI_BARRIER( comm2d, ierr )
     747#endif
     748                         ENDDO
    741749
    742750                      ELSE
     
    10331041                         ENDIF
    10341042#endif
    1035                          IF ( ( section(is,s) >= nys  .AND.                  &
    1036                                 section(is,s) <= nyn )  .OR.                 &
    1037                               ( section(is,s) == -1  .AND.  nys-1 == -1 ) )  &
    1038                          THEN
    1039                             WRITE (22)  nxlg, nxrg, nzb, nzt+1
    1040                             WRITE (22)  local_2d
    1041                          ELSE
    1042                             WRITE (22)  -1, -1, -1, -1
    1043                          ENDIF
     1043                         DO  i = 0, io_blocks-1
     1044                            IF ( i == io_group )  THEN
     1045                               IF ( ( section(is,s) >= nys  .AND.   &
     1046                                      section(is,s) <= nyn )  .OR.  &
     1047                                    ( section(is,s) == -1  .AND.    &
     1048                                      nys-1 == -1 ) )               &
     1049                               THEN
     1050                                  WRITE (22)  nxlg, nxrg, nzb, nzt+1
     1051                                  WRITE (22)  local_2d
     1052                               ELSE
     1053                                  WRITE (22)  -1, -1, -1, -1
     1054                               ENDIF
     1055                            ENDIF
     1056#if defined( __parallel )
     1057                            CALL MPI_BARRIER( comm2d, ierr )
     1058#endif
     1059                         ENDDO
    10441060
    10451061                      ELSE
     
    13341350                         ENDIF
    13351351#endif
    1336                          IF ( ( section(is,s) >= nxl  .AND.                  &
    1337                                 section(is,s) <= nxr )  .OR.                 &
    1338                               ( section(is,s) == -1  .AND.  nxl-1 == -1 ) )  &
    1339                          THEN
    1340                             WRITE (23)  nysg, nyng, nzb, nzt+1
    1341                             WRITE (23)  local_2d
    1342                          ELSE
    1343                             WRITE (23)  -1, -1, -1, -1
    1344                          ENDIF
     1352                         DO  i = 0, io_blocks-1
     1353                            IF ( i == io_group )  THEN
     1354                               IF ( ( section(is,s) >= nxl  .AND.   &
     1355                                      section(is,s) <= nxr )  .OR.  &
     1356                                    ( section(is,s) == -1  .AND.    &
     1357                                      nxl-1 == -1 ) )               &
     1358                               THEN
     1359                                  WRITE (23)  nysg, nyng, nzb, nzt+1
     1360                                  WRITE (23)  local_2d
     1361                               ELSE
     1362                                  WRITE (23)  -1, -1, -1, -1
     1363                               ENDIF
     1364                            ENDIF
     1365#if defined( __parallel )
     1366                            CALL MPI_BARRIER( comm2d, ierr )
     1367#endif
     1368                         ENDDO
    13451369
    13461370                      ELSE
     
    15041528
    15051529    IF ( data_output_2d_on_each_pe )  THEN
    1506        CALL close_file( file_id )
     1530       DO  i = 0, io_blocks-1
     1531          IF ( i == io_group )  THEN
     1532             CALL close_file( file_id )
     1533          ENDIF
     1534#if defined( __parallel )
     1535          CALL MPI_BARRIER( comm2d, ierr )
     1536#endif
     1537       ENDDO
    15071538    ELSE
    15081539       IF ( myid == 0 )  CALL close_file( file_id )
  • palm/trunk/SOURCE/data_output_3d.f90

    r728 r759  
    44! Current revisions:
    55! -----------------
     6! Splitting of parallel I/O
    67!
    78! Former revisions:
     
    395396!--       Determine the Skip-value for the next array. Record end and start
    396397!--       require 4 byte each.
    397           skip_do_avs = skip_do_avs + ( ((nx+2*nbgp)*(ny+2*nbgp)*(nz_do3d+1)) * 4 + 8 )
     398          skip_do_avs = skip_do_avs + ( ( ( nx+2*nbgp ) * ( ny+2*nbgp ) * &
     399                                          ( nz_do3d+1 ) ) * 4 + 8 )
    398400       ENDIF
    399401
     
    420422                                 do3d_time_count(av), av
    421423                ENDIF
    422                 WRITE ( 30 )  nxlg, nxrg, nysg, nyng, nzb, nz_do3d
    423                 WRITE ( 30 )  local_pf
     424                DO  i = 0, io_blocks-1
     425                   IF ( i == io_group )  THEN
     426                      WRITE ( 30 )  nxlg, nxrg, nysg, nyng, nzb, nz_do3d
     427                      WRITE ( 30 )  local_pf
     428                   ENDIF
     429#if defined( __parallel )
     430                   CALL MPI_BARRIER( comm2d, ierr )
     431#endif
     432                ENDDO
    424433
    425434             ELSE
  • palm/trunk/SOURCE/header.f90

    r708 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! output of maximum number of parallel io streams
    77!
    88! Former revisions:
     
    220220    ENDIF
    221221    IF ( use_seperate_pe_for_dvrp_output )  WRITE ( io, 105 )
     222    IF ( numprocs /= maximum_parallel_io_streams )  THEN
     223       WRITE ( io, 108 )  maximum_parallel_io_streams
     224    ENDIF
    222225#endif
    223226    WRITE ( io, 99 )
     
    15961599            37X,'because the job is running on an SMP-cluster')
    15971600107 FORMAT (37X,'A 1d-decomposition along ',A,' is used')
     1601108 FORMAT (37X,'Max. # of parallel I/O streams is ',I5)
    15981602#endif
    15991603110 FORMAT (/' Numerical Schemes:'/ &
  • palm/trunk/SOURCE/init_3d_model.f90

    r732 r759  
    77! Current revisions:
    88! ------------------
    9 ! Splitting of parallel I/O
     9! Splitting of parallel I/O in blocks of PEs
    1010! Bugfix: No zero assignments to volume_flow_initial and volume_flow_area in
    1111! case of normal restart runs.
     
    938938       IF ( TRIM( initializing_actions ) == 'cyclic_fill' )  THEN
    939939
    940           CALL read_parts_of_var_list
    941           CALL close_file( 13 )
     940          DO  i = 0, io_blocks-1
     941             IF ( i == io_group )  THEN
     942                CALL read_parts_of_var_list
     943                CALL close_file( 13 )
     944             ENDIF
     945#if defined( __parallel )
     946             CALL MPI_BARRIER( comm2d, ierr )
     947#endif
     948          ENDDO
    942949
    943950!
     
    10201027!
    10211028!--    Read binary data from restart file
    1022 !       DO  i = 0, io_blocks-1
    1023 !          IF ( i == io_group )  THEN
     1029       DO  i = 0, io_blocks-1
     1030          IF ( i == io_group )  THEN
    10241031             CALL read_3d_binary
    1025 !          ENDIF
    1026 !#if defined( __parallel )
    1027 !          CALL MPI_BARRIER( comm2d, ierr )
    1028 !#endif
    1029 !       ENDDO
     1032          ENDIF
     1033#if defined( __parallel )
     1034          CALL MPI_BARRIER( comm2d, ierr )
     1035#endif
     1036       ENDDO
    10301037
    10311038!
  • palm/trunk/SOURCE/init_grid.f90

    r723 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! Splitting of parallel I/O in blocks of PEs
    77!
    88! Former revisions:
     
    8080
    8181    INTEGER ::  bh, blx, bly, bxl, bxr, byn, bys, ch, cwx, cwy, cxl, cxr, cyn, &
    82                 cys, gls, i, inc, i_center, j, j_center, k, l, nxl_l, nxr_l, &
    83                 nyn_l, nys_l, nzb_si, nzt_l, vi
     82                cys, gls, i, ii, inc, i_center, j, j_center, k, l, nxl_l,      &
     83                nxr_l, nyn_l, nys_l, nzb_si, nzt_l, vi
    8484
    8585    INTEGER, DIMENSION(:), ALLOCATABLE   ::  vertical_influence
     
    507507
    508508       CASE ( 'read_from_file' )
    509 !
    510 !--       Arbitrary irregular topography data in PALM format (exactly matching
    511 !--       the grid size and total domain size)
    512           OPEN( 90, FILE='TOPOGRAPHY_DATA', STATUS='OLD', FORM='FORMATTED',  &
    513                ERR=10 )
    514           DO  j = ny, 0, -1
    515              READ( 90, *, ERR=11, END=11 )  ( topo_height(j,i), i = 0, nx )
    516           ENDDO
     509
     510          DO  ii = 0, io_blocks-1
     511             IF ( ii == io_group )  THEN
     512
     513!
     514!--             Arbitrary irregular topography data in PALM format (exactly
     515!--             matching the grid size and total domain size)
     516                OPEN( 90, FILE='TOPOGRAPHY_DATA', STATUS='OLD', &
     517                      FORM='FORMATTED', ERR=10 )
     518                DO  j = ny, 0, -1
     519                   READ( 90, *, ERR=11, END=11 )  ( topo_height(j,i), i = 0,nx )
     520                ENDDO
     521
     522                GOTO 12
     523         
     524 10             message_string = 'file TOPOGRAPHY_DATA does not exist'
     525                CALL message( 'init_grid', 'PA0208', 1, 2, 0, 6, 0 )
     526
     527 11             message_string = 'errors in file TOPOGRAPHY_DATA'
     528                CALL message( 'init_grid', 'PA0209', 1, 2, 0, 6, 0 )
     529
     530 12             CLOSE( 90 )
     531
     532             ENDIF
     533#if defined( __parallel )
     534             CALL MPI_BARRIER( comm2d, ierr )
     535#endif
     536          ENDDO
     537
    517538!
    518539!--       Calculate the index height of the topography
     
    523544          ENDDO
    524545!
    525 !--       Add cyclic boundaries (additional layers are for calculating flag
    526 !--       arrays needed for the multigrid sover)
     546!--       Add cyclic boundaries (additional layers are for calculating
     547!--       flag arrays needed for the multigrid sover)
    527548          nzb_local(-gls:-1,0:nx)     = nzb_local(ny-gls+1:ny,0:nx)
    528549          nzb_local(ny+1:ny+gls,0:nx) = nzb_local(0:gls-1,0:nx)
    529550          nzb_local(:,-gls:-1)        = nzb_local(:,nx-gls+1:nx)
    530551          nzb_local(:,nx+1:nx+gls)    = nzb_local(:,0:gls-1)
    531 
    532 
    533      
    534           GOTO 12
    535          
    536  10       message_string = 'file TOPOGRAPHY_DATA does not exist'
    537           CALL message( 'init_grid', 'PA0208', 1, 2, 0, 6, 0 )
    538 
    539  11       message_string = 'errors in file TOPOGRAPHY_DATA'
    540           CALL message( 'init_grid', 'PA0209', 1, 2, 0, 6, 0 )
    541 
    542  12       CLOSE( 90 )
    543552
    544553       CASE DEFAULT
  • palm/trunk/SOURCE/init_pegrid.f90

    r756 r759  
    44! Current revisions:
    55! -----------------
     6! calculation of number of io_blocks and the io_group to which the respective
     7! PE belongs
    68!
    79! ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!!
     
    12371239    ENDIF
    12381240
     1241!
     1242!-- Calculate the number of groups into which parallel I/O is split.
     1243!-- The default for files which are opened by all PEs (or where each
     1244!-- PE opens his own independent file) is, that all PEs are doing input/output
     1245!-- in parallel at the same time. This might cause performance or even more
     1246!-- severe problems depending on the configuration of the underlying file
     1247!-- system.
     1248!-- First, set the default:
     1249    IF ( maximum_parallel_io_streams == -1  .OR. &
     1250         maximum_parallel_io_streams > numprocs )  THEN
     1251       maximum_parallel_io_streams = numprocs
     1252    ENDIF
     1253
     1254!
     1255!-- Now calculate the number of io_blocks and the io_group to which the
     1256!-- respective PE belongs. I/O of the groups is done in serial, but in parallel
     1257!-- for all PEs belonging to the same group. A preliminary setting with myid
     1258!-- based on MPI_COMM_WORLD has been done in parin.
     1259    io_blocks = numprocs / maximum_parallel_io_streams
     1260    io_group  = MOD( myid+1, io_blocks )
     1261   
     1262
    12391263 END SUBROUTINE init_pegrid
  • palm/trunk/SOURCE/modules.f90

    r744 r759  
    55! Current revisions:
    66! -----------------
     7! +io_blocks, io_group, maximum_parallel_io_streams,
     8! synchronous_exchange moved to control_parameters
    79!
    810! Former revisions:
     
    462464                inflow_disturbance_begin = -1, inflow_disturbance_end = -1, &
    463465                intermediate_timestep_count, intermediate_timestep_count_max, &
    464                 iran = -1234567, last_dt_change = 0, masks = 0, &
    465                 maximum_grid_level, max_pr_user = 0, mgcycles = 0, &
    466                 mg_cycles = -1, mg_switch_to_pe0_level = 0, mid, &
     466                io_group = 0, io_blocks = 1, iran = -1234567, &
     467                last_dt_change = 0, masks = 0, maximum_grid_level, &
     468                maximum_parallel_io_streams = -1, max_pr_user = 0, &
     469                mgcycles = 0, mg_cycles = -1, mg_switch_to_pe0_level = 0, mid, &
    467470                netcdf_data_format = 2, ngsrb = 2, nsor = 20, &
    468471                nsor_ini = 100, n_sor, normalizing_region = 0, &
     
    529532                random_heatflux = .FALSE., run_control_header = .FALSE., &
    530533                run_coupled = .TRUE., sloping_surface = .FALSE., &
    531                 stop_dt = .FALSE., terminate_run = .FALSE., &
    532                 turbulent_inflow = .FALSE., &
     534                stop_dt = .FALSE., synchronous_exchange = .FALSE., &
     535                terminate_run = .FALSE., turbulent_inflow = .FALSE., &
    533536                use_prior_plot1d_parameters = .FALSE., use_reference = .FALSE.,&
    534537                use_surface_fluxes = .FALSE., use_top_fluxes = .FALSE., &
     
    12501253    LOGICAL ::  collective_wait = .FALSE., left_border_pe  = .FALSE.,  &
    12511254                north_border_pe = .FALSE., reorder = .TRUE.,           &
    1252                 right_border_pe = .FALSE., south_border_pe = .FALSE.,  &
    1253                 synchronous_exchange = .FALSE.
     1255                right_border_pe = .FALSE., south_border_pe = .FALSE.
    12541256
    12551257    LOGICAL, DIMENSION(2) ::  cyclic = (/ .TRUE. , .TRUE. /), &
  • palm/trunk/SOURCE/palm.f90

    r715 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! Splitting of parallel I/O, cpu measurement for write_3d_binary and opening
     7! of unit 14 moved to here
    78!
    89! Former revisions:
     
    175176!-- If required, write binary data for restart runs
    176177    IF ( write_binary(1:4) == 'true' )  THEN
    177 !
    178 !--    Write flow field data
    179        CALL write_3d_binary
     178
     179       CALL cpu_log( log_point(22), 'write_3d_binary', 'start' )
     180
     181       CALL check_open( 14 )
     182
     183       DO  i = 0, io_blocks-1
     184          IF ( i == io_group )  THEN
     185!
     186!--          Write flow field data
     187             CALL write_3d_binary
     188          ENDIF
     189#if defined( __parallel )
     190          CALL MPI_BARRIER( comm2d, ierr )
     191#endif
     192       ENDDO
     193
     194       CALL cpu_log( log_point(22), 'write_3d_binary', 'stop' )
     195
    180196!
    181197!--    If required, write particle data
     
    192208!-- unit in routine user_last_actions.
    193209    CALL cpu_log( log_point(4), 'last actions', 'start' )
    194     CALL user_last_actions
    195     IF ( write_binary(1:4) == 'true' )  CALL close_file( 14 )
     210    DO  i = 0, io_blocks-1
     211       IF ( i == io_group )  THEN
     212          CALL user_last_actions
     213          IF ( write_binary(1:4) == 'true' )  CALL close_file( 14 )
     214       ENDIF
     215#if defined( __parallel )
     216       CALL MPI_BARRIER( comm2d, ierr )
     217#endif
     218    ENDDO
    196219    CALL close_file( 0 )
    197220    CALL close_dvrp
  • palm/trunk/SOURCE/parin.f90

    r684 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! +maximum_parallel_io_streams in envpar,
     7! splitting of parallel I/O in blocks of PEs
    78!
    89! Former revisions:
     
    117118    IMPLICIT NONE
    118119
    119     INTEGER ::  idum
     120    INTEGER ::  i, idum
    120121
    121122
     
    183184             dt_run_control,end_time, force_print_header, mask_scale_x, &
    184185             mask_scale_y, mask_scale_z, mask_x, mask_y, mask_z, mask_x_loop, &
    185              mask_y_loop, mask_z_loop, netcdf_data_format, &
    186              normalizing_region, npex, npey, nz_do3d, &
    187              precipitation_amount_interval, profile_columns, profile_rows, &
    188              restart_time, section_xy, section_xz, section_yz, &
    189              skip_time_data_output, skip_time_data_output_av, skip_time_dopr, &
    190              skip_time_do2d_xy, skip_time_do2d_xz, skip_time_do2d_yz, &
    191              skip_time_do3d, skip_time_domask, synchronous_exchange, &
    192              termination_time_needed, use_prior_plot1d_parameters, z_max_do1d, &
    193              z_max_do1d_normalized, z_max_do2d
    194 
    195 
    196     NAMELIST /envpar/  host, local_dvrserver_running, maximum_cpu_time_allowed,  &
    197                        revision, return_addres, return_username, run_identifier, &
    198                        tasks_per_node, write_binary                     
    199 
    200 !
    201 !-- Open the NAMELIST-file which is sent with this job
    202     CALL check_open( 11 )
    203 
    204 !
    205 !-- Read the control parameters for initialization.
    206 !-- The namelist "inipar" must be provided in the NAMELIST-file. If this is
    207 !-- not the case and the file contains - instead of "inipar" - any other
    208 !-- namelist, a read error is created on t3e and control is transferred
    209 !-- to the statement with label 10. Therefore, on t3e machines one can not
    210 !-- distinguish between errors produced by a wrong "inipar" namelist or
    211 !-- because this namelist is totally missing.
    212     READ ( 11, inipar, ERR=10, END=11 )
    213     GOTO 12
    214  10 message_string = 'errors in \$inipar &or no \$inipar-namelist ' // &
    215                      'found (CRAY-machines only)'
    216     CALL message( 'parin', 'PA0271', 1, 2, 0, 6, 0 )
    217 
    218  11 message_string = 'no \$inipar-namelist found'
    219     CALL message( 'parin', 'PA0272', 1, 2, 0, 6, 0 )
    220 
    221 !
    222 !-- If required, read control parameters from restart file (produced by
    223 !-- a prior run). All PEs are reading from file created by PE0 (see check_open)
    224  12 IF ( TRIM( initializing_actions ) == 'read_restart_data' )  THEN
    225 
    226        CALL read_var_list
    227 !
    228 !--    The restart file will be reopened when reading the subdomain data
    229        CALL close_file( 13 )
    230 
    231 !
    232 !--    Increment the run count
    233        runnr = runnr + 1
    234 
    235     ENDIF
    236 
    237 !
    238 !-- Definition of names of areas used for computing statistics. They must
    239 !-- be defined at this place, because they are allowed to be redefined by
    240 !-- the user in user_parin.
    241     region = 'total domain'
    242 
    243 !
    244 !-- Read runtime parameters given by the user for this run (namelist "d3par").
    245 !-- The namelist "d3par" can be omitted. In that case, default values are
    246 !-- used for the parameters.
    247     READ ( 11, d3par, END=20 )
    248 
    249 !
    250 !-- Read control parameters for optionally used model software packages
    251  20 CALL package_parin
    252 
    253 !
    254 !-- Read user-defined variables
    255     CALL user_parin
    256 
    257 !
    258 !-- Check in case of initial run, if the grid point numbers are well defined
    259 !-- and allocate some arrays which are already needed in init_pegrid or
    260 !-- check_parameters. During restart jobs, these arrays will be allocated
    261 !-- in read_var_list. All other arrays are allocated in init_3d_model.
    262     IF ( TRIM( initializing_actions ) /= 'read_restart_data' )  THEN
    263 
    264        IF ( nx <= 0 )  THEN
    265           WRITE( message_string, * ) 'no value or wrong value given for nx: ',&
    266                                      'nx=', nx
    267           CALL message( 'parin', 'PA0273', 1, 2, 0, 6, 0 )
    268        ENDIF
    269        IF ( ny <= 0 )  THEN
    270           WRITE( message_string, * ) 'no value or wrong value given for ny: ',&
    271                                      'ny=', ny
    272           CALL message( 'parin', 'PA0274', 1, 2, 0, 6, 0 )
    273        ENDIF
    274        IF ( nz <= 0 )  THEN
    275           WRITE( message_string, * ) 'no value or wrong value given for nz: ',&
    276                                      'nz=', nz
    277           CALL message( 'parin', 'PA0275', 1, 2, 0, 6, 0 )
    278        ENDIF
    279 !
    280 !--    ATTENTION: in case of changes to the following statement please also
    281 !--    check the allocate statement in routine read_var_list
    282        ALLOCATE( lad(0:nz+1),pt_init(0:nz+1), q_init(0:nz+1), sa_init(0:nz+1), &
    283                  ug(0:nz+1), u_init(0:nz+1), v_init(0:nz+1), vg(0:nz+1),       &
    284                  hom(0:nz+1,2,pr_palm+max_pr_user,0:statistic_regions),        &
    285                  hom_sum(0:nz+1,pr_palm+max_pr_user,0:statistic_regions) )
    286 
    287        hom = 0.0
    288 
    289     ENDIF
    290 
    291 !
    292 !-- NAMELIST-file is not needed anymore
    293     CALL close_file( 11 )
    294 
    295 !
    296 !-- Read values of environment variables (this NAMELIST file is generated by
    297 !-- mrun)
     186             mask_y_loop, mask_z_loop, netcdf_data_format, normalizing_region, &
     187             npex, npey, nz_do3d, precipitation_amount_interval, &
     188             profile_columns, profile_rows, restart_time, section_xy, &
     189             section_xz, section_yz, skip_time_data_output, &
     190             skip_time_data_output_av, skip_time_dopr, skip_time_do2d_xy, &
     191             skip_time_do2d_xz, skip_time_do2d_yz, skip_time_do3d, &
     192             skip_time_domask, synchronous_exchange, termination_time_needed, &
     193             use_prior_plot1d_parameters, z_max_do1d, z_max_do1d_normalized, &
     194             z_max_do2d
     195
     196
     197    NAMELIST /envpar/  host, local_dvrserver_running, maximum_cpu_time_allowed,&
     198                       maximum_parallel_io_streams, revision, return_addres, &
     199                       return_username, run_identifier, tasks_per_node, &
     200                       write_binary
     201
     202!
     203!-- First read values of environment variables (this NAMELIST file is
     204!-- generated by mrun)
    298205    OPEN ( 90, FILE='ENVPAR', STATUS='OLD', FORM='FORMATTED', ERR=30 )
    299206    READ ( 90, envpar, ERR=31, END=32 )
    300207    CLOSE ( 90 )
     208
     209!
     210!-- Calculate the number of groups into which parallel I/O is split.
     211!-- The default for files which are opened by all PEs (or where each
     212!-- PE opens its own independent file) is that all PEs are doing input/output
     213!-- in parallel at the same time. This might cause performance or even more
     214!-- severe problems depending on the configuration of the underlying file
     215!-- system.
     216!-- First, set the default:
     217    IF ( maximum_parallel_io_streams == -1  .OR. &
     218         maximum_parallel_io_streams > numprocs )  THEN
     219       maximum_parallel_io_streams = numprocs
     220    ENDIF
     221!
     222!-- Now calculate the number of io_blocks and the io_group to which the
     223!-- respective PE belongs. I/O of the groups is done in serial, but in parallel
     224!-- for all PEs belonging to the same group.
     225!-- These settings are repeated in init_pegrid for the communicator comm2d,
     226!-- which is not available here
     227    io_blocks = numprocs / maximum_parallel_io_streams
     228    io_group  = MOD( myid+1, io_blocks )
     229
     230!
     231!-- Data is read in parallel by groups of PEs
     232    DO  i = 0, io_blocks-1
     233       IF ( i == io_group )  THEN
     234
     235!
     236!--       Open the NAMELIST-file which is sent with this job
     237          CALL check_open( 11 )
     238
     239!
     240!--       Read the control parameters for initialization.
     241!--       The namelist "inipar" must be provided in the NAMELIST-file. If this
     242!--       is not the case and the file contains - instead of "inipar" - any
     243!--       other namelist, a read error is created on t3e and control is
     244!--       transferred to the statement with label 10. Therefore, on t3e
     245!--       machines one can not distinguish between errors produced by a wrong
     246!--       "inipar" namelist or because this namelist is totally missing.
     247          READ ( 11, inipar, ERR=10, END=11 )
     248          GOTO 12
     249 10       message_string = 'errors in \$inipar &or no \$inipar-namelist ' // &
     250                           'found (CRAY-machines only)'
     251          CALL message( 'parin', 'PA0271', 1, 2, 0, 6, 0 )
     252
     253 11       message_string = 'no \$inipar-namelist found'
     254          CALL message( 'parin', 'PA0272', 1, 2, 0, 6, 0 )
     255
     256!
     257!--       If required, read control parameters from restart file (produced by
     258!--       a prior run). All PEs are reading from file created by PE0 (see
     259!--       check_open)
     260 12       IF ( TRIM( initializing_actions ) == 'read_restart_data' )  THEN
     261
     262             CALL read_var_list
     263!
     264!--          The restart file will be reopened when reading the subdomain data
     265             CALL close_file( 13 )
     266
     267!
     268!--          Increment the run count
     269             runnr = runnr + 1
     270
     271          ENDIF
     272
     273!
     274!--       Definition of names of areas used for computing statistics. They must
     275!--       be defined at this place, because they are allowed to be redefined by
     276!--       the user in user_parin.
     277          region = 'total domain'
     278
     279!
     280!--       Read runtime parameters given by the user for this run (namelist
     281!--       "d3par"). The namelist "d3par" can be omitted. In that case, default
     282!--       values are used for the parameters.
     283          READ ( 11, d3par, END=20 )
     284
     285!
     286!--       Read control parameters for optionally used model software packages
     287 20       CALL package_parin
     288
     289!
     290!--       Read user-defined variables
     291          CALL user_parin
     292
     293!
     294!--       Check in case of initial run, if the grid point numbers are well
     295!--       defined and allocate some arrays which are already needed in
     296!--       init_pegrid or check_parameters. During restart jobs, these arrays
     297!--       will be allocated in read_var_list. All other arrays are allocated
     298!--       in init_3d_model.
     299          IF ( TRIM( initializing_actions ) /= 'read_restart_data' )  THEN
     300
     301             IF ( nx <= 0 )  THEN
     302                WRITE( message_string, * ) 'no value or wrong value given', &
     303                                           ' for nx: nx=', nx
     304                CALL message( 'parin', 'PA0273', 1, 2, 0, 6, 0 )
     305             ENDIF
     306             IF ( ny <= 0 )  THEN
     307                WRITE( message_string, * ) 'no value or wrong value given', &
     308                                           ' for ny: ny=', ny
     309                CALL message( 'parin', 'PA0274', 1, 2, 0, 6, 0 )
     310             ENDIF
     311             IF ( nz <= 0 )  THEN
     312                WRITE( message_string, * ) 'no value or wrong value given', &
     313                                           ' for nz: nz=', nz
     314                CALL message( 'parin', 'PA0275', 1, 2, 0, 6, 0 )
     315             ENDIF
     316!
     317!--          ATTENTION: in case of changes to the following statement please
     318!--                  also check the allocate statement in routine read_var_list
     319             ALLOCATE( lad(0:nz+1),pt_init(0:nz+1), q_init(0:nz+1),           &
     320                       sa_init(0:nz+1), ug(0:nz+1), u_init(0:nz+1),           &
     321                       v_init(0:nz+1), vg(0:nz+1),                            &
     322                       hom(0:nz+1,2,pr_palm+max_pr_user,0:statistic_regions), &
     323                       hom_sum(0:nz+1,pr_palm+max_pr_user,0:statistic_regions) )
     324
     325             hom = 0.0
     326
     327          ENDIF
     328
     329!
     330!--       NAMELIST-file is not needed anymore
     331          CALL close_file( 11 )
     332
     333       ENDIF
     334#if defined( __parallel )
     335       CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
     336#endif
     337    ENDDO
     338
    301339    RETURN
    302340
  • palm/trunk/SOURCE/prandtl_fluxes.f90

    r710 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! Bugfix for ts limitation
    77!
    88! Former revisions:
     
    7474!--          ts must be limited, because otherwise overflow may occur in case of
    7575!--          us=0 when computing rif further below
    76              IF ( ts(j,i) < -1.05E5 )  ts = -1.0E5
    77              IF ( ts(j,i) >   1.0E5 )  ts =  1.0E5
     76             IF ( ts(j,i) < -1.05E5 )  ts(j,i) = -1.0E5
     77             IF ( ts(j,i) >   1.0E5 )  ts(j,i) =  1.0E5
    7878          ENDDO
    7979       ENDDO
  • palm/trunk/SOURCE/production_e.f90

    r668 r759  
    44! Current revisions:
    55! -----------------
     6! initialization of u_0, v_0
    67!
    78! Former revisions:
     
    11111112
    11121113          IF ( first_call )  THEN
    1113              ALLOCATE( u_0(nysg:nyng,nxlg:nxrg), &
    1114                        v_0(nysg:nyng,nxlg:nxrg) )
     1114             ALLOCATE( u_0(nysg:nyng,nxlg:nxrg), v_0(nysg:nyng,nxlg:nxrg) )
     1115             u_0 = 0.0   ! just to avoid access of uninitialized memory
     1116             v_0 = 0.0   ! within exchange_horiz_2d
    11151117             first_call = .FALSE.
    11161118          ENDIF
  • palm/trunk/SOURCE/write_3d_binary.f90

    r588 r759  
    44! Current revisions:
    55! -----------------
    6 !
     6! cpu measurement and file opening moved to main program
    77!
    88! Former revisions:
     
    6767
    6868
    69     CALL cpu_log( log_point(22), 'write_3d_binary', 'start' )
    70 
    71     CALL check_open( 14 )
    72    
    7369!
    7470!-- Write control parameters and other variables for restart.
     
    292288    WRITE ( 14 )  '*** end ***         '
    293289
    294 
    295     CALL cpu_log( log_point(22), 'write_3d_binary', 'stop' )
    296 
    297 
    298290 END SUBROUTINE write_3d_binary
Note: See TracChangeset for help on using the changeset viewer.