Changeset 622


Timestamp:
Dec 10, 2010 8:08:13 AM
Author:
raasch
Message:

New:
---

Optional barriers included in order to speed up the collective operations
MPI_ALLTOALL and MPI_ALLREDUCE. This feature is controlled with the new initial
parameter collective_wait. The default is .FALSE., but .TRUE. on SGI-type
systems. (advec_particles, advec_s_bc, buoyancy, check_for_restart,
cpu_statistics, data_output_2d, data_output_ptseries, flow_statistics,
global_min_max, inflow_turbulence, init_3d_model, init_particles, init_pegrid,
init_slope, parin, pres, poismg, set_particle_attributes, timestep,
read_var_list, user_statistics, write_compressed, write_var_list)
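
The pattern is the same in all of the routines listed above. As a minimal,
self-contained illustration (not PALM code: the program name, the variables
local_sum/global_sum, and the use of MPI_COMM_WORLD instead of PALM's
communicator comm2d are assumptions of this sketch), the optional barrier is
placed directly in front of the collective call:

    PROGRAM collective_wait_sketch
       USE MPI
       IMPLICIT NONE
       LOGICAL ::  collective_wait = .FALSE.  ! new inipar parameter, .TRUE. on SGI-type hosts
       INTEGER ::  ierr
       REAL    ::  local_sum, global_sum

       CALL MPI_INIT( ierr )
       local_sum = 1.0
    !
    !-- Optional synchronization of all PEs directly in front of the collective
    !-- operation; on some systems this speeds up the subsequent MPI_ALLREDUCE
       IF ( collective_wait )  CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
       CALL MPI_ALLREDUCE( local_sum, global_sum, 1, MPI_REAL, MPI_SUM, &
                           MPI_COMM_WORLD, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM collective_wait_sketch

In PALM itself the barrier is guarded by the same collective_wait flag and uses
the model communicator comm2d; init_pegrid switches the default to .TRUE. when
the host name contains 'sgi' (see the diffs below).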

Adjustments for Kyushu Univ. (lcrte, ibmku). Concerning hybrid
(MPI/OpenMP) runs, the number of OpenMP threads per MPI task can now
be given as an argument to the mrun option -O. (mbuild, mrun, subjob)
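
As a brief usage sketch (the processor counts are invented and all other mrun
options are elided; only the changed meaning of -O is taken from this
changeset), a hybrid run with 4 MPI tasks per node and 2 OpenMP threads per
task would now be requested as

    mrun ... -T 4 -O 2    # ksh call: -T MPI tasks per node, -O OpenMP threads per task

and subjob derives the number of processes per node for the batch directives
as tasks_per_node * threads_per_task, i.e. 8 in this example.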

Changed:


Initialization of the module command changed for SGI-ICE/lcsgi (mbuild, subjob)

Errors:


Location:
palm/trunk
Files:
29 edited

  • palm/trunk/SCRIPTS/mbuild

    r562 r622  
    112112     #                    for lcxt4
    113113     # 07/09/10 - Siggi - bugfix for wrong netcdf/3.6.3 module on lcsgi
     114     # 08/12/10 - Siggi - initialization of the module command changed for
     115     #                    SGI-ICE/lcsgi
     116     #                    adjustments for Kyushu Univ. (lcrte, ibmku)
    114117
    115118
     
    464467    case  $remote_host  in
    465468        (lcmuk)          remote_addres=130.75.105.2;;
     469        (lcrte)          remote_addres=133.5.185.60;;
    466470        (lcsgib)         remote_addres=130.73.232.102;;
    467471        (lcsgih)         remote_addres=130.75.4.102;;
     
    472476        (decalpha)       remote_addres=165.132.26.56;;
    473477        (ibmh)           remote_addres=136.172.40.15;;
     478        (ibmku)          remote_addres=133.5.4.129;;
    474479        (ibms)           remote_addres=150.183.5.101;;
    475480        (ibmy)           remote_addres=165.132.26.58;;
     
    10631068          then
    10641069
    1065              print ". /usr/share/modules/init/bash; $module_calls  cd ${remote_md}; echo $make_call_string > LAST_MAKE_CALL; chmod u+x LAST_MAKE_CALL; $make_call_string; [[ \$? != 0 ]] && echo MAKE_ERROR" | ssh  ${remote_username}@${remote_addres} 2>&1 | tee ${remote_host}_last_make_protokoll
     1070#             print ". /usr/share/modules/init/bash; $module_calls  cd ${remote_md}; echo $make_call_string > LAST_MAKE_CALL; chmod u+x LAST_MAKE_CALL; $make_call_string; [[ \$? != 0 ]] && echo MAKE_ERROR" | ssh  ${remote_username}@${remote_addres} 2>&1 | tee ${remote_host}_last_make_protokoll
     1071             print "eval \`/sw/swdist/bin/modulesinit\`; $module_calls  cd ${remote_md}; echo $make_call_string > LAST_MAKE_CALL; chmod u+x LAST_MAKE_CALL; $make_call_string; [[ \$? != 0 ]] && echo MAKE_ERROR" | ssh  ${remote_username}@${remote_addres} 2>&1 | tee ${remote_host}_last_make_protokoll
    10661072
    10671073          elif [[ $remote_host = lctit ]]
  • palm/trunk/SCRIPTS/mrun

    r592 r622  
    210210     # 17/08/10 - BjornM - adjustments for interactive runs on lcxt4
    211211     # 07/09/10 - Siggi  - bugfix for wrong netcdf/3.6.3 module on lcsgi
     212     # 08/12/10 - Siggi  - new handling of openmp/hybrid runs, option -O
     213     #                     has now argument threads_per_task
     214     #                     adjustments for Kyushu Univ. (lcrte, ibmku)
    212215
    213216
     
    290293 read_from_config=""
    291294 restart_run=false
    292  return_addres=$(nslookup `hostname` 2>&1 | grep "Address:" | tail -1 | awk '{print $2}')
    293  if [[ $return_addres = 130.75.105.158 ]]
    294  then
    295     return_addres=172.20.25.41
    296     echo "+++ WARNING: return_addres changed to $return_addres !!!!!"
     295 if [[ `hostname` = rte10 ]]
     296 then
     297   return_addres=133.5.185.60
     298   echo "+++ WARNING: return_addres changed to $return_addres !!!!!"
     299 else
     300    return_addres=$(nslookup `hostname` 2>&1 | grep "Address:" | tail -1 | awk '{print $2}')
    297301 fi
    298302 return_password=""
     
    450454    # SHELLSCRIPT-OPTIONEN EINLESEN UND KOMMANDO NEU ZUSAMMENSETZEN, FALLS ES
    451455    # FUER FOLGEJOBS BENOETIGT WIRD
    452  while  getopts  :a:AbBc:Cd:D:Fg:G:h:H:i:IkK:m:M:n:o:Op:P:q:r:R:s:St:T:u:U:vxX:yY: option
     456 while  getopts  :a:AbBc:Cd:D:Fg:G:h:H:i:IkK:m:M:n:o:O:p:P:q:r:R:s:St:T:u:U:vxX:yY: option
    453457 do
    454458   case  $option  in
     
    474478       (n)   node_usage=$OPTARG; mc="$mc -n$OPTARG";;
    475479       (o)   output_list=$OPTARG; mc="$mc -o'$OPTARG'";;
    476        (O)   use_openmp=true; mc="$mc -O";;
     480       (O)   use_openmp=true; threads_per_task=$OPTARG; mc="$mc -O$OPTARG";;
    477481       (p)   package_list=$OPTARG; mc="$mc -p'$OPTARG'";;
    478482       (P)   return_password=$OPTARG; mc="$mc -P$OPTARG";;
     
    658662    do_remote=true
    659663    case  $host  in
    660         (ibm|ibmb|ibmh|ibms|ibmy|nech|necriam|lckyoto|lcsgib|lcsgih|lctit|unics|lcxt4|lcxt5m|lck)  true;;
     664        (ibm|ibmh|ibmku|ibms|ibmy|nech|necriam|lckyoto|lcsgib|lcsgih|lctit|unics|lcxt4|lcxt5m|lck)  true;;
    661665        (*)  printf "\n"
    662666             printf "\n  +++ sorry: execution of batch jobs on remote host \"$host\""
     
    835839                   do_remote=true
    836840                   case  $host  in
    837                        (ibm|ibms|ibmy|lckyoto|lcsgib|lcsgih|lctit|nech|necriam|unics|lcxt4|lcxt5m|lck)  true;;
     841                       (ibm|ibmh|ibmku|ibms|ibmy|lckyoto|lcsgib|lcsgih|lctit|nech|necriam|unics|lcxt4|lcxt5m|lck)  true;;
    838842                       (*)  printf "\n  +++ sorry: execution of batch jobs on remote host \"$host\""
    839843                            printf "\n      is not available"
     
    11171121    do_remote=true
    11181122    case  $host  in
    1119         (ibm|ibmb|ibmh|ibms|ibmy|lckyoto|lcsgib|lcsgih|lctit|nech|necriam|unics|lcxt4|lcxt5m|lck)  true;;
     1123        (ibm|ibmh|ibmku|ibms|ibmy|lckyoto|lcsgib|lcsgih|lctit|nech|necriam|unics|lcxt4|lcxt5m|lck)  true;;
    11201124        (*)  printf "\n"
    11211125             printf "\n  +++ sorry: execution of batch jobs on remote host \"$host\""
     
    11461150       # DEFAULT-WERT SETZEN) UND OB SIE EIN GANZZAHLIGER TEILER DER
    11471151       # GESAMTPROZESSORANZAHL IST
    1148     if [[ $host = nech  ||  $host = necriam  ||  $host = ibmh  ||  $host = ibmb  ||  $host = ibms ]]
     1152    if [[ $host = nech  ||  $host = necriam  ||  $host = ibmh  ||  $host = ibms ]]
    11491153    then
    11501154       [[ "$tasks_per_node" = "" ]]  &&  tasks_per_node=6
     
    11811185       # FALLS OPENMP PARALLELISIERUNG VERWENDET WERDEN SOLL, ANZAHL VON THREADS
    11821186       # SETZEN UND ZAHL DER TASKS PRO KNOTEN AUF 1 SETZEN
    1183     if [[ $use_openmp = true ]]
    1184     then
    1185        threads_per_task=$tasks_per_node
    1186        tasks_per_node=1
    1187     fi
     1187#    if [[ $use_openmp = true ]]
     1188#    then
     1189#       threads_per_task=$tasks_per_node
     1190#       tasks_per_node=1
     1191#    fi
    11881192
    11891193       # SETTINGS FOR SUBJOB-COMMAND
    1190     if [[ $(echo $host | cut -c1-5) = lcsgi ]]
    1191     then
    1192        (( tp1 = tasks_per_node * threads_per_task ))
    1193        TOPT="-T $tp1"
    1194     else
    1195        TOPT="-T $tasks_per_node"
    1196     fi
     1194    TOPT="-T $tasks_per_node"
    11971195    OOPT="-O $threads_per_task"
    11981196
     
    12671265 then
    12681266    case  $host  in
    1269         (ibmb)       if [[ $node_usage = shared ]]
    1270                      then
    1271                         queue=cshare
    1272                      else
    1273                         queue=csolo
    1274                      fi;;
    12751267        (ibmh)       queue=no_class;;
    12761268        (ibmy)       queue=parallel;;
     
    20202012 if [[ "$tmp_data_catalog" = "" ]]
    20212013 then
    2022     if [[ $localhost = ibmb ]]
    2023     then
    2024        tmp_data_catalog=$WORK/mrun_restart_data
    2025     elif [[ $localhost = nech ]]
     2014    if [[ $localhost = nech ]]
    20262015    then
    20272016       tmp_data_catalog=$WRKSHR/mrun_restart_data
     
    21782167    printf "| $spalte1$spalte2 | \n"
    21792168 fi
    2180  if [[ $threads_per_task != 1 ]]
     2169 if [[ $use_openmp = true ]]
    21812170 then
    21822171    spalte1="threads per task:"; spalte2="$threads_per_task"
     
    30052994       then
    30062995          dxladebug  a.out
    3007        elif [[ $localhost = ibmb  ||  $localhost = ibmh ]]
     2996       elif [[ $localhost = ibmh ]]
    30082997       then
    30092998
     
    30873076          exit
    30883077       fi
     3078
     3079       # end debug mode
    30893080    else
     3081
     3082          # normal execution
    30903083       if [[ -n $numprocs ]]
    30913084       then
     
    31353128                fi
    31363129             else
    3137                 if [[ $localhost = ibmb  ||  $localhost = ibmh  ||  $localhost = ibms ]]
     3130                if [[ $localhost = ibmh  ||  $localhost = ibms ]]
    31383131                then
    31393132                   poe  a.out  -procs $numprocs  -nodes 1  -rmpool 0  $ROPTS
    3140                 elif [[ $localhost = ibmy ]]
     3133                elif [[ $localhost = ibmku  ||  $localhost = ibmy ]]
    31413134                then
    31423135                   if [[ -f $hostfile ]]
     
    31683161                      echo "coupled_run $iia $iio"  >  runfile_atmos
    31693162                   fi
    3170                    ./a.out  -procs $tasks_per_node  $ROPTS  <  runfile_atmos
     3163                   if [[ $localhost = ibmy ]]
     3164                   then
     3165                      ./a.out  -procs $tasks_per_node  $ROPTS  <  runfile_atmos
     3166                   else
     3167                      poe  ./a.out  -procs $numprocs $ROPTS  <  runfile_atmos
     3168                   fi
    31713169
    31723170                else
     
    32923290                            export MPI_DSM_CPULIST="0,1,4,5,2,3,6,7:allhosts"
    32933291                         fi
     3292                      else
     3293                         unset MPI_DSM_CPULIST
    32943294                      fi
    32953295                          # MPI_IB_RAILS: use both IB rails on ICE2
     
    33043304
    33053305                          # next is test for openmp usage
    3306                       # mpiexec -n $ii -pernode  ./a.out  $ROPTS  < runfile_atmos
     3306                  #     echo "mpiexec -npernode $tasks_per_node  ./a.out  $ROPTS  < runfile_atmos"
     3307                  #     mpiexec -npernode $tasks_per_node  ./a.out  $ROPTS  < runfile_atmos
    33073308                   elif [[ $( echo $mpilib | cut -c1-3 ) = mva ]]
    33083309                   then
     
    33173318               #          export MV2_CPU_MAPPING=0,1,4,5,2,3,6,7
    33183319               #       fi
     3320                      [[ $use_openmp = true ]]  &&  unset MV2_CPU_MAPPING
    33193321                      echo "*** MV2_CPU_MAPPING=$MV2_CPU_MAPPING"
    3320                       if [[ $threads_per_task != 1 ]]
     3322                      if [[ $use_openmp = true ]]
    33213323                      then
    3322                          mpiexec -npernode 1  ./a.out  $ROPTS  <  runfile_atmos
     3324                         mpiexec -npernode $tasks_per_node  ./a.out  $ROPTS  <  runfile_atmos
    33233325                      else
    33243326                         mpiexec -np $ii  ./a.out  $ROPTS  < runfile_atmos
     
    36363638                   cst="/"
    36373639                fi
    3638                 if [[ $localhost = ibmb  ||  $localhost = nech ]]
     3640                if [[ $localhost = nech ]]
    36393641                then
    36403642
     
    37173719             if [[ $localhost != $fromhost ]]
    37183720             then
    3719                 if [[ $localhost = ibmh  ||  $localhost = ibmb  ||  $localhost = nech ]]
     3721                if [[ $localhost = ibmh  ||  $localhost = nech ]]
    37203722                then
    37213723
     
    42314233          then
    42324234
    4233              if [[ $localhost = lcsgih  ||  $localhost = lcsgib  ||  $localhost = nech  ||  $localhost = ibmb  ||  $localhost = ibmh  ||  $localhost = ibms  ||  $localhost = lctit ]]
     4235             if [[ $localhost = lcsgih  ||  $localhost = lcsgib  ||  $localhost = nech  ||  $localhost = ibmh  ||  $localhost = ibmku  ||  $localhost = ibms  ||  $localhost = lctit ]]
    42344236             then
    42354237                echo "*** ssh will be used to initiate restart-runs!"
     
    43484350    if [[ $use_openmp = true ]]
    43494351    then
    4350        mrun_com=${mrun_com}" -O"
    4351        [[ "$tasks_per_node" != "" ]] &&  mrun_com=${mrun_com}" -T $threads_per_task"
    4352     else
    4353        [[ "$tasks_per_node" != "" ]] &&  mrun_com=${mrun_com}" -T $tasks_per_node"
    4354     fi
     4352       mrun_com=${mrun_com}" -O $threads_per_task"
     4353    fi
     4354    [[ "$tasks_per_node" != "" ]] &&  mrun_com=${mrun_com}" -T $tasks_per_node"
    43554355    [[ $store_on_archive_system = true ]]  &&  mrun_com=${mrun_com}" -A"
    43564356    [[ $package_list != "" ]]     &&  mrun_com=${mrun_com}" -p \"$package_list\""
  • palm/trunk/SCRIPTS/subjob

    r555 r622  
    121121     # 25/08/10 - Siggi - new variable project_account in pbs-statements for
    122122     #                    lcxt4
     123     # 08/12/10 - Siggi - initialization of the module command changed for
     124     #                    SGI-ICE/lcsgi
     125     #                    adjustments for Kyushu Univ. (lcrte, ibmku)
    123126
    124127
     
    144147
    145148 typeset  -i   cputime=0  memory=0  Memory=0  minuten  resttime  sekunden  stunden
    146  typeset  -i   inumprocs  nodes=0  tasks_per_node=0  threads_per_task=1
     149 typeset  -i   inumprocs  nodes=0  processes_per_node=0 tasks_per_node=0  threads_per_task=1
    147150 typeset  -L20 spalte1
    148151 typeset  -R40 spalte2
     
    194197     (b01*|bicegate1)        local_addres=130.73.232.102; local_host=lcsgib;;
    195198     (bicegate2)             local_addres=130.73.232.103; local_host=lcsgib;;
    196      (breg*-en0|berni*-en0)  local_addres=130.73.230.10;  local_host=ibmb;;
    197199     (breva)                 local_addres=130.75.105.98;  local_host=lcmuk;;
    198200     (bicegate2)             local_addres=130.73.232.103; local_host=lcsgib;;
     
    228230     (paesano)               local_addres=130.75.105.46;  local_host=lcmuk;;
    229231     (quanero)               local_addres=130.75.105.107; local_host=lcmuk;;
     232     (rte*)                  local_addres=133.5.185.60;   local_host=lcrte;;
    230233     (scirocco)              local_addres=172.20.25.41;   local_host=lcmuk;;
    231234     (sun1|sun2)             local_addres=130.75.6.1;     local_host=unics;;
     
    235238     (tgg*)                  local_addres=172.17.75.161;  local_host=lctit;;
    236239     (vorias)                local_addres=172.20.25.43;   local_host=lcmuk;;
     240     (*.cc.kyushu-u.ac.jp)   local_addres=133.5.4.129;    local_host=ibmku;;
    237241     (*)                     printf "\n  +++ \"$local_host\" unknown";
    238242                             printf "\n      please inform S. Raasch!";
     
    289293    printf "\n        -D    only the job-file will be created   ---"
    290294    printf "\n        -h    execution host, available hosts:    $remote_host"
    291     printf "\n              ibm, ibmb, ibmh, ibms, ibmy, lcmuk,"
     295    printf "\n              ibm, ibmh, ibmku, ibms, ibmy, lc...,"
    292296    printf "\n              lctit, nech, necriam, unics"
    293297    printf "\n        -m    memory demand per process in MByte  ---"
     
    354358    case  $remote_host  in
    355359        (ibm)     queue=p690_standard; remote_addres=134.76.99.81; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
    356         (ibmb)    queue=cpar; remote_addres=130.73.230.10; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
    357360        (ibmh)    queue=no_class; remote_addres=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
     361        (ibmku)   queue=s4; remote_addres=133.5.4.129; submcom=/usr/local/bin/llsubmit;;
    358362        (ibms)    queue=p_normal; remote_addres=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
    359363        (ibmy)    queue=parallel; remote_addres=165.132.26.58; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
     
    386390                     (*)                                     error=true;;
    387391                 esac;;
    388         (ibmb)   case  $ndq  in
    389                      (cdata|cdev|cexp|c1|cshare|csolo|cspec) error=false;;
     392        (ibmh)   case  $ndq  in
     393                     (no_class) error=false;;
    390394                     (*)                                     error=true;;
    391395                 esac;;
    392         (ibmh)   case  $ndq  in
    393                      (no_class)  error=false;;
     396        (ibmku)  case  $ndq  in
     397                     (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s)    error=false;;
    394398                     (*)                                     error=true;;
    395399                 esac;;
     
    450454
    451455
    452     # KNOTENNUTZUNG IN ENTWICKLERQUEUE MUSS SHARED SEIN
    453  if [[ $node_usage != shared  &&  $queue = cdev ]]
    454  then
    455     node_usage=shared
    456  fi
    457 
    458 
    459 
    460456    # PRUEFEN DER CPU-ZEIT, ZEIT NACH STUNDEN, MINUTEN UND SEKUNDEN
    461457    # AUFTEILEN
     
    530526 if (( tasks_per_node != 0 ))
    531527 then
    532     if [[ $(echo $remote_host | cut -c1-5) = lcsgi ]]
    533     then
    534        (( nodes = numprocs / tasks_per_node ))
    535     else
    536        (( nodes = numprocs / ( tasks_per_node * threads_per_task ) ))
    537     fi
    538  fi
    539 
     528    (( nodes = numprocs / ( tasks_per_node * threads_per_task ) ))
     529 fi
     530
     531    # Calculate number of processes per node
     532 (( processes_per_node = tasks_per_node * threads_per_task ))
    540533
    541534
     
    597590
    598591
    599     # QSUB- ODER LL-KOMMANDOS BZW. SKRIPTE  GENERIEREN
     592    # Generate the batch job scripts (qsub/msub/LoadLeveler)
    600593 if [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs != 0 ]]
    601594 then
    602595
    603     if [[ $remote_host = ibmy ]]
    604     then
    605        consumable_memory=""
    606     else
    607        consumable_memory="ConsumableMemory($memory mb)"
    608     fi
     596       # General LoadLeveler settings
     597    execute_in_shell="#!/bin/ksh"
     598    use_shell="# @ shell = /bin/ksh"
     599    consumable_memory="ConsumableMemory($memory mb)"
     600    class="# @ class = $queue"
     601    environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes"
     602    network_to_use="# @ network.mpi = sn_all,shared,us"
     603    data_limit="# @ data_limit = 1.76gb"
     604    image_size="# @ image_size = 50"
     605
    609606
    610607    if [[ $remote_host = ibmh ]]
     
    614611       class=""
    615612       environment=""
    616     else
    617        class="# @ class = $queue"
    618        environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes"
    619        if [[ $queue = cdev ]]
    620        then
    621           data_limit="# @ data_limit = 1.76gb"
    622           network_to_use="# @ network.mpi = sn_all,shared,ip"
    623        else
    624           if [[ $remote_host = ibms ]]
    625           then
    626              network_to_use="# @ network.mpi = csss,shared,us"
    627           elif [[ $remote_host = ibmy ]]
    628           then
    629              network_to_use=""
    630           else
    631              network_to_use="# @ network.mpi = sn_all,shared,us"
    632              data_limit="# @ data_limit = 1.76gb"
    633           fi
    634        fi
     613    elif [[ $remote_host = ibmku ]]
     614    then
     615       execute_in_shell="#!/usr/bin/ksh"
     616       use_shell="# @ shell = /usr/bin/ksh"
     617       consumable_memory=""
     618       environment=""
     619       network_to_use="# @ network.mpi = sn_all,shared,us"
     620       data_limit=""
     621       image_size=""
     622    elif [[ $remote_host = ibms ]]
     623    then
     624       network_to_use="# @ network.mpi = csss,shared,us"
     625    elif [[ $remote_host = ibmy ]]
     626    then
     627       consumable_memory=""
     628       network_to_use=""
    635629    fi
    636630
    637631    cat > $job_to_send << %%END%%
    638 #!/bin/ksh
    639 # @ shell = /bin/ksh
     632$execute_in_shell
     633$use_shell
    640634
    641635# @ job_type = parallel
     
    645639# @ output = $remote_dayfile
    646640# @ error = $remote_dayfile
    647 # @ image_size = 50
     641$image_size
    648642$class
    649643$environment
     
    659653       cat >> $job_to_send << %%END%%
    660654# @ node = $nodes
    661 # @ tasks_per_node = $tasks_per_node
     655# @ tasks_per_node = $processes_per_node
    662656# @ node_usage = $node_usage
    663657# @ queue
     
    721715#PBS -A $project_account
    722716#PBS -l walltime=$timestring
    723 #PBS -l nodes=${nodes}:ppn=$tasks_per_node
     717#PBS -l nodes=${nodes}:ppn=$processes_per_node
    724718#PBS -l pmem=${memory}mb
    725719#PBS -m abe
     
    813807#PBS -N $job_name
    814808#PBS -l walltime=$timestring
    815 #PBS -l nodes=$nodes:ppn=${tasks_per_node}
     809#PBS -l nodes=$nodes:ppn=${processes_per_node}
    816810#PBS -l naccesspolicy=$node_usage
    817811#PBS -o $remote_dayfile
     
    821815$email_directive
    822816
    823 . /usr/share/modules/init/bash
     817eval \`/sw/swdist/bin/modulesinit\`
     818#. /usr/share/modules/init/bash
    824819$module_calls
    825820
     
    839834$email_directive
    840835
    841 . /usr/share/modules/init/bash
     836eval \`/sw/swdist/bin/modulesinit\`
     837#. /usr/share/modules/init/bash
    842838$module_calls
    843839
     
    859855#PBS -l walltime=$timestring
    860856#PBS -l mppwidth=${numprocs}
    861 #PBS -l mppnppn=${tasks_per_node}
     857#PBS -l mppnppn=${processes_per_node}
    862858#PBS -m abe
    863859#PBS -o $remote_dayfile
     
    927923#PBS -l walltime=$timestring
    928924#PBS -l mppwidth=${numprocs}
    929 #PBS -l mppnppn=${tasks_per_node}
     925#PBS -l mppnppn=${processes_per_node}
    930926#PBS -m abe
    931927#PBS -o $remote_dayfile
     
    962958       cat > $job_to_send << %%END%%
    963959#!/bin/ksh
    964 #PBS -l cpunum_prc=$tasks_per_node,cputim_job=$cputime
     960#PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime
    965961#PBS -l ${qsubmem}=${Memory}gb
    966962#PBS -b $nodes
     
    976972       cat > $job_to_send << %%END%%
    977973#!/bin/ksh
    978 #PBS -l cpunum_prc=$tasks_per_node,cputim_job=$cputime
     974#PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime
    979975#PBS -l ${qsubmem}=${Memory}gb
    980976#PBS -o $remote_dayfile
     
    10721068    if [[ $(echo $remote_host | cut -c1-3) = ibm  ||  $(echo $remote_host | cut -c1-5) = lcsgi  ||  $(echo $remote_host | cut -c1-3) = nec  ||  $remote_host = lctit ]]
    10731069    then
    1074        if [[ $remote_host = ibmb  ||  $remote_host = ibmh ]]
     1070       if [[ $remote_host = ibmh ]]
    10751071       then
    10761072          return_queue=c1
     1073       elif [[ $remote_host = ibmku ]]
     1074       then
     1075          return_queue=sdbg2
    10771076       elif [[ $remote_host = ibms ]]
    10781077       then
     
    10971096       then
    10981097
    1099           echo "echo \"#!/bin/ksh\" >> scpjob.$kennung"               >>  $job_to_send
     1098          if [[ $remote_host = ibmku ]]
     1099          then
     1100             echo "echo \"#!/usr/bin/ksh\" >> scpjob.$kennung"            >>  $job_to_send
     1101             echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$kennung"  >>  $job_to_send
     1102          else
     1103             echo "echo \"#!/bin/ksh\" >> scpjob.$kennung"                >>  $job_to_send
     1104          fi
    11001105          echo "echo \"# @ job_type = serial\" >> scpjob.$kennung"    >>  $job_to_send
    11011106          echo "echo \"# @ job_name = transfer\" >> scpjob.$kennung"  >>  $job_to_send
     
    11161121          echo "echo \"set -x\" >> scpjob.$kennung"                   >>  $job_to_send
    11171122          echo "echo \"batch_scp  -d  -w 10  -u $local_user  $local_addres  ${job_catalog}/$remote_dayfile  \\\"$job_catalog\\\"  $local_dayfile\" >> scpjob.$kennung"  >>  $job_to_send
     1123          if [[ $remote_host = ibmku ]]
     1124          then
     1125             echo "echo \"rm  scpjob.$kennung\" >> scpjob.$kennung"   >>  $job_to_send
     1126          fi
    11181127          echo "echo \"exit\" >> scpjob.$kennung"                     >>  $job_to_send
    11191128
     
    12191228          echo "qsub  scpjob.$kennung"          >>  $job_to_send
    12201229       fi
    1221        echo "rm  scpjob.$kennung"               >>  $job_to_send
     1230       if [[ $remote_host != ibmku ]]
     1231       then
     1232          echo "rm  scpjob.$kennung"            >>  $job_to_send
     1233       fi
    12221234       if [[ $remote_host = nech ]]
    12231235       then
     
    12431255    echo "exit"      >>  $job_to_send
    12441256 fi
    1245  if [[ $remote_host = lctit ]]
     1257 if [[ $remote_host = lctit  ||  $remote_host = ibmku ]]
    12461258 then
    12471259    echo " "                               >>  $job_to_send
     
    13161328             printf "\n >>> submit with HLRN qos-feature hiprio...\n"
    13171329             ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom -l qos=hiprio $job_on_remhost; rm $job_on_remhost"
     1330          elif [[ $remote_host = ibmku ]]
     1331          then
     1332             ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
    13181333          else
    13191334             ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost"
    13201335          fi
    13211336       else
    1322              # TIT ERLAUBT NUR DIE AUSFÜHRUNG GANZ BESTIMMTER KOMMANDOS
     1337             # TIT ERLAUBT NUR DIE AUSFUEHRUNG GANZ BESTIMMTER KOMMANDOS
    13231338             # MIT SSH, DESHALB AUFRUF PER PIPE
    13241339             # UEBERGANGSWEISE CHECK, OB N1GE ENVIRONMENT WIRKLICH VERFUEGBAR
     
    13651380          qsub  $job_on_remhost
    13661381       fi
    1367           # JOBFILE DARF AUF LCTIT NICHT GELOESCHT WERDEN!! GESCHIEHT ERST AM JOBENDE
    1368        [[ $local_host != lctit ]]  &&  rm  $job_on_remhost
     1382
     1383          # Jobfile must not be deleted on lctit/ibmku!! This will be done
     1384          # only at the end of the job.
     1385       if [[ $local_host != lctit  &&  $local_host != ibmku ]]
     1386       then
     1387          rm  $job_on_remhost
     1388       fi
    13691389       cd  -  > /dev/null
    13701390    fi
  • palm/trunk/SOURCE/advec_particles.f90

    r559 r622  
    44! Current revisions:
    55! -----------------
     6! optional barriers included in order to speed up collective operations
    67! TEST: PRINT statements on unit 9 (commented out)
    78!
     
    792793!
    793794!--       Compute total sum from local sums
     795          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    794796          CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), nzt+2-nzb, &
    795797                              MPI_REAL, MPI_SUM, comm2d, ierr )
     798          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    796799          CALL MPI_ALLREDUCE( sums_l(nzb,2,0), sums(nzb,2), nzt+2-nzb, &
    797800                              MPI_REAL, MPI_SUM, comm2d, ierr )
     
    830833!
    831834!--       Compute total sum from local sums
     835          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    832836          CALL MPI_ALLREDUCE( sums_l(nzb,8,0), sums(nzb,8), nzt+2-nzb, &
    833837                              MPI_REAL, MPI_SUM, comm2d, ierr )
     838          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    834839          CALL MPI_ALLREDUCE( sums_l(nzb,30,0), sums(nzb,30), nzt+2-nzb, &
    835840                              MPI_REAL, MPI_SUM, comm2d, ierr )
     841          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    836842          CALL MPI_ALLREDUCE( sums_l(nzb,31,0), sums(nzb,31), nzt+2-nzb, &
    837843                              MPI_REAL, MPI_SUM, comm2d, ierr )
     844          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    838845          CALL MPI_ALLREDUCE( sums_l(nzb,32,0), sums(nzb,32), nzt+2-nzb, &
    839846                              MPI_REAL, MPI_SUM, comm2d, ierr )
     
    19481955!--    and set the switch corespondingly
    19491956#if defined( __parallel )
     1957       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    19501958       CALL MPI_ALLREDUCE( dt_3d_reached_l, dt_3d_reached, 1, MPI_LOGICAL, &
    19511959                           MPI_LAND, comm2d, ierr )
  • palm/trunk/SOURCE/advec_s_bc.f90

    r392 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    166166    ENDDO
    167167#if defined( __parallel )
     168    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    168169    CALL MPI_ALLREDUCE( fmax_l, fmax, 2, MPI_REAL, MPI_MAX, comm2d, ierr )
    169170#else
     
    463464    ENDDO
    464465#if defined( __parallel )
     466    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    465467    CALL MPI_ALLREDUCE( fmax_l, fmax, 2, MPI_REAL, MPI_MAX, comm2d, ierr )
    466468#else
     
    863865    ENDDO
    864866#if defined( __parallel )
     867    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    865868    CALL MPI_ALLREDUCE( fmax_l, fmax, 2, MPI_REAL, MPI_MAX, comm2d, ierr )
    866869#else
  • palm/trunk/SOURCE/buoyancy.f90

    r516 r622  
    44! Currrent revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    284284#if defined( __parallel )
    285285
     286          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    286287          CALL MPI_ALLREDUCE( sums_l(nzb,pr,0), sums(nzb,pr), nzt+2-nzb, &
    287288                              MPI_REAL, MPI_SUM, comm2d, ierr )
  • palm/trunk/SOURCE/check_for_restart.f90

    r392 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    6363!-- Make a logical OR for all processes. Stop the model run if at least
    6464!-- one processor has reached the time limit.
     65    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    6566    CALL MPI_ALLREDUCE( terminate_run_l, terminate_run, 1, MPI_LOGICAL, &
    6667                        MPI_LOR, comm2d, ierr )
  • palm/trunk/SOURCE/cpu_statistics.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! output of handling of collective operations
    77!
    88! Former revisions:
     
    248248
    249249!
     250!--    Output handling of collective operations
     251       IF ( collective_wait )  THEN
     252          WRITE ( 18, 103 )
     253       ELSE
     254          WRITE ( 18, 104 )
     255       ENDIF
     256
     257!
    250258!--    Empty lines in order to create a gap to the results of the model
    251259!--    continuation runs
    252        WRITE ( 18, 103 )
     260       WRITE ( 18, 105 )
    253261
    254262!
     
    275283
    276284102 FORMAT (A20,2X,F9.3,2X,F7.2,1X,I7,3(1X,F9.3))
    277 103 FORMAT (//)
     285103 FORMAT (/'Barriers are set in front of collective operations')
     286104 FORMAT (/'No barriers are set in front of collective operations')
     287105 FORMAT (//)
    278288
    279289 END SUBROUTINE cpu_statistics
  • palm/trunk/SOURCE/data_output_2d.f90

    r559 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    899899!
    900900!--                   Now do the averaging over all PEs along y
     901                      IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    901902                      CALL MPI_ALLREDUCE( local_2d_l(nxl-1,nzb),              &
    902903                                          local_2d(nxl-1,nzb), ngp, MPI_REAL, &
     
    942943!--                      Distribute data over all PEs along y
    943944                         ngp = ( nxr-nxl+3 ) * ( nzt-nzb+2 )
     945                         IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    944946                         CALL MPI_ALLREDUCE( local_2d_l(nxl-1,nzb),            &
    945947                                             local_2d(nxl-1,nzb), ngp,         &
     
    11981200!
    11991201!--                   Now do the averaging over all PEs along x
     1202                      IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    12001203                      CALL MPI_ALLREDUCE( local_2d_l(nys-1,nzb),              &
    12011204                                          local_2d(nys-1,nzb), ngp, MPI_REAL, &
     
    12411244!--                      Distribute data over all PEs along x
    12421245                         ngp = ( nyn-nys+3 ) * ( nzt-nzb+2 )
     1246                         IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    12431247                         CALL MPI_ALLREDUCE( local_2d_l(nys-1,nzb),            &
    12441248                                             local_2d(nys-1,nzb), ngp,         &
  • palm/trunk/SOURCE/data_output_ptseries.f90

    r392 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    138138    inum = number_of_particle_groups + 1
    139139
     140    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    140141    CALL MPI_ALLREDUCE( pts_value_l(0,1), pts_value(0,1), 14*inum, MPI_REAL, &
    141142                        MPI_SUM, comm2d, ierr )
     143    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    142144    CALL MPI_ALLREDUCE( pts_value_l(0,15), pts_value(0,15), inum, MPI_REAL, &
    143145                        MPI_MAX, comm2d, ierr )
     146    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    144147    CALL MPI_ALLREDUCE( pts_value_l(0,16), pts_value(0,16), inum, MPI_REAL, &
    145148                        MPI_MIN, comm2d, ierr )
     
    239242    inum = number_of_particle_groups + 1
    240243
     244    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    241245    CALL MPI_ALLREDUCE( pts_value_l(0,17), pts_value(0,17), inum*10, MPI_REAL, &
    242246                        MPI_SUM, comm2d, ierr )
  • palm/trunk/SOURCE/flow_statistics.f90

    r550 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    237237!
    238238!--    Compute total sum from local sums
     239       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    239240       CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), nzt+2-nzb, MPI_REAL, &
    240241                           MPI_SUM, comm2d, ierr )
     242       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    241243       CALL MPI_ALLREDUCE( sums_l(nzb,2,0), sums(nzb,2), nzt+2-nzb, MPI_REAL, &
    242244                           MPI_SUM, comm2d, ierr )
     245       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    243246       CALL MPI_ALLREDUCE( sums_l(nzb,4,0), sums(nzb,4), nzt+2-nzb, MPI_REAL, &
    244247                           MPI_SUM, comm2d, ierr )
    245248       IF ( ocean )  THEN
     249          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    246250          CALL MPI_ALLREDUCE( sums_l(nzb,23,0), sums(nzb,23), nzt+2-nzb, &
    247251                              MPI_REAL, MPI_SUM, comm2d, ierr )
    248252       ENDIF
    249253       IF ( humidity ) THEN
     254          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    250255          CALL MPI_ALLREDUCE( sums_l(nzb,44,0), sums(nzb,44), nzt+2-nzb, &
    251256                              MPI_REAL, MPI_SUM, comm2d, ierr )
     257          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    252258          CALL MPI_ALLREDUCE( sums_l(nzb,41,0), sums(nzb,41), nzt+2-nzb, &
    253259                              MPI_REAL, MPI_SUM, comm2d, ierr )
    254260          IF ( cloud_physics ) THEN
     261             IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    255262             CALL MPI_ALLREDUCE( sums_l(nzb,42,0), sums(nzb,42), nzt+2-nzb, &
    256263                                 MPI_REAL, MPI_SUM, comm2d, ierr )
     264             IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    257265             CALL MPI_ALLREDUCE( sums_l(nzb,43,0), sums(nzb,43), nzt+2-nzb, &
    258266                                 MPI_REAL, MPI_SUM, comm2d, ierr )
     
    261269
    262270       IF ( passive_scalar )  THEN
     271          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    263272          CALL MPI_ALLREDUCE( sums_l(nzb,41,0), sums(nzb,41), nzt+2-nzb, &
    264273                              MPI_REAL, MPI_SUM, comm2d, ierr )
     
    796805!
    797806!--    Compute total sum from local sums
     807       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    798808       CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), ngp_sums, MPI_REAL, &
    799809                           MPI_SUM, comm2d, ierr )
  • palm/trunk/SOURCE/global_min_max.f90

    r484 r622  
    55! Current revisions:
    66! -----------------
    7 !
     7! optional barriers included in order to speed up collective operations
    88!
    99! Former revisions:
     
    6161#if defined( __parallel )
    6262       fmin_l(2)  = myid
     63       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    6364       CALL MPI_ALLREDUCE( fmin_l, fmin, 1, MPI_2REAL, MPI_MINLOC, comm2d, ierr )
    6465
     
    100101#if defined( __parallel )
    101102       fmax_l(2)  = myid
     103       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    102104       CALL MPI_ALLREDUCE( fmax_l, fmax, 1, MPI_2REAL, MPI_MAXLOC, comm2d, ierr )
    103105
     
    158160#if defined( __parallel )
    159161       fmax_l(2)  = myid
     162       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    160163       CALL MPI_ALLREDUCE( fmax_l, fmax, 1, MPI_2REAL, MPI_MAXLOC, comm2d, &
    161164                           ierr )
  • palm/trunk/SOURCE/inflow_turbulence.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    7777!
    7878!-- Now, averaging over all PEs
     79    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    7980    CALL MPI_ALLREDUCE( avpr_l(nzb,1), avpr(nzb,1), ngp_pr, MPI_REAL, MPI_SUM, &
    8081                        comm2d, ierr )
     
    195196
    196197#if defined( __parallel )   
     198!       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    197199!       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
    198200!                           MPI_SUM, comm1dy, ierr )   
  • palm/trunk/SOURCE/init_3d_model.f90

    r561 r622  
    77! Current revisions:
    88! -----------------
    9 !
     9! optional barriers included in order to speed up collective operations
    1010!
    1111! Former revisions:
     
    860860
    861861#if defined( __parallel )
     862          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    862863          CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1),&
    863864                              2, MPI_REAL, MPI_SUM, comm2d, ierr )
     865          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    864866          CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1),      &
    865867                              2, MPI_REAL, MPI_SUM, comm2d, ierr )
     
    11721174
    11731175#if defined( __parallel )
     1176          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    11741177          CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1),&
    11751178                              2, MPI_REAL, MPI_SUM, comm2d, ierr )
     1179          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    11761180          CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1),      &
    11771181                              2, MPI_REAL, MPI_SUM, comm2d, ierr )
     
    15601564    sr = statistic_regions + 1
    15611565#if defined( __parallel )
     1566    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    15621567    CALL MPI_ALLREDUCE( ngp_2dh_l(0), ngp_2dh(0), sr, MPI_INTEGER, MPI_SUM,   &
    15631568                        comm2d, ierr )
     1569    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    15641570    CALL MPI_ALLREDUCE( ngp_2dh_outer_l(0,0), ngp_2dh_outer(0,0), (nz+2)*sr,  &
    15651571                        MPI_INTEGER, MPI_SUM, comm2d, ierr )
     1572    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    15661573    CALL MPI_ALLREDUCE( ngp_2dh_s_inner_l(0,0), ngp_2dh_s_inner(0,0),         &
    15671574                        (nz+2)*sr, MPI_INTEGER, MPI_SUM, comm2d, ierr )
     1575    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    15681576    CALL MPI_ALLREDUCE( ngp_3d_inner_l(0), ngp_3d_inner_tmp(0), sr, MPI_REAL, &
    15691577                        MPI_SUM, comm2d, ierr )
  • palm/trunk/SOURCE/init_particles.f90

    r392 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    342342!--    Calculate the number of particles and tails of the total domain
    343343#if defined( __parallel )
     344       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    344345       CALL MPI_ALLREDUCE( number_of_particles, total_number_of_particles, 1, &
    345346                           MPI_INTEGER, MPI_SUM, comm2d, ierr )
     347       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    346348       CALL MPI_ALLREDUCE( number_of_tails, total_number_of_tails, 1, &
    347349                           MPI_INTEGER, MPI_SUM, comm2d, ierr )
     
    436438
    437439#if defined( __parallel )
     440          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    438441          CALL MPI_ALLREDUCE( uniform_particles_l, uniform_particles, 1, &
    439442                              MPI_LOGICAL, MPI_LAND, comm2d, ierr )
  • palm/trunk/SOURCE/init_pegrid.f90

    r482 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77! ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!!
    88! TEST OUTPUT (TO BE REMOVED) logging mpi2 ierr values
     
    154154       CALL message( 'init_pegrid', 'PA0223', 1, 2, 0, 6, 0 )
    155155    ENDIF
     156
     157!
     158!-- For communication speedup, set barriers in front of collective
     159!-- communications by default on SGI-type systems
     160    IF ( host(3:5) == 'sgi' )  collective_wait = .TRUE.
    156161
    157162!
     
    929934       id_inflow_l = 0
    930935    ENDIF
     936    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    931937    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, &
    932938                        comm1dx, ierr )
     
    935941!-- Broadcast the id of the recycling plane
    936942!-- WARNING: needs to be adjusted in case of inflows other than from left side!
    937     IF ( ( recycling_width / dx ) >= nxl  .AND.  ( recycling_width / dx ) <= nxr ) &
    938     THEN
     943    IF ( ( recycling_width / dx ) >= nxl  .AND. &
     944         ( recycling_width / dx ) <= nxr )  THEN
    939945       id_recycling_l = myidx
    940946    ELSE
    941947       id_recycling_l = 0
    942948    ENDIF
     949    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    943950    CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, &
    944951                        comm1dx, ierr )
  • palm/trunk/SOURCE/init_slope.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    100100             ENDDO
    101101          ENDDO
    102       ENDDO
     102       ENDDO
    103103
    104104#if defined( __parallel )
    105       CALL MPI_ALLREDUCE( pt_init_local, pt_init, nzt+2-nzb, MPI_REAL, &
    106                            MPI_SUM, comm2d, ierr )
     105       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
     106       CALL MPI_ALLREDUCE( pt_init_local, pt_init, nzt+2-nzb, MPI_REAL, &
     107                            MPI_SUM, comm2d, ierr )
    107108#else
    108       pt_init = pt_init_local
     109       pt_init = pt_init_local
    109110#endif
    110111
    111       pt_init = pt_init / ngp_2dh(0)
    112       DEALLOCATE( pt_init_local )
     112       pt_init = pt_init / ngp_2dh(0)
     113       DEALLOCATE( pt_init_local )
    113114
    114    ENDIF
     115    ENDIF
    115116
    116117 END SUBROUTINE init_slope
  • palm/trunk/SOURCE/modules.f90

    r601 r622  
    55! Current revisions:
    66! -----------------
    7 !
     7! +collective_wait in pegrid
    88!
    99! Former revisions:
     
    11631163    INTEGER, DIMENSION(:), ALLOCATABLE ::  ngp_yz, type_xz
    11641164
    1165     LOGICAL ::  reorder = .TRUE.
     1165    LOGICAL ::  collective_wait = .FALSE., reorder = .TRUE.
    11661166    LOGICAL, DIMENSION(2) ::  cyclic = (/ .TRUE. , .TRUE. /), &
    11671167                              remain_dims
  • palm/trunk/SOURCE/parin.f90

    r601 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! +collective_wait in inipar
    77!
    88! Former revisions:
     
    119119             canyon_width_x, canyon_width_y, canyon_wall_left, &
    120120             canyon_wall_south, cfl_factor, cloud_droplets, cloud_physics, &
    121              conserve_volume_flow, conserve_volume_flow_mode, &
     121             collective_wait, conserve_volume_flow, conserve_volume_flow_mode, &
    122122             coupling_start_time, cthf, cut_spline_overshoot, &
    123123             cycle_mg, damp_level_1d, dissipation_1d, dp_external, dp_level_b, &
  • palm/trunk/SOURCE/poisfft.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    718718!--    Transpose array
    719719       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     720       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    720721       CALL MPI_ALLTOALL( work(nxl,1,0),      sendrecvcount_xy, MPI_REAL, &
    721722                          f_out(1,1,nys_x,1), sendrecvcount_xy, MPI_REAL, &
     
    756757!--    Transpose array
    757758       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     759       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    758760       CALL MPI_ALLTOALL( f_in(1,1,nys_x,1), sendrecvcount_xy, MPI_REAL, &
    759761                          work(nxl,1,0),     sendrecvcount_xy, MPI_REAL, &
     
    10731075!--    Transpose array
    10741076       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     1077       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    10751078       CALL MPI_ALLTOALL( work(nys,1,0),      sendrecvcount_xy, MPI_REAL, &
    10761079                          f_out(1,1,nxl_y,1), sendrecvcount_xy, MPI_REAL, &
     
    11071110!--    Transpose array
    11081111       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     1112       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    11091113       CALL MPI_ALLTOALL( f_in(1,1,nxl_y,1), sendrecvcount_xy, MPI_REAL, &
    11101114                          work(nys,1,0),     sendrecvcount_xy, MPI_REAL, &
  • palm/trunk/SOURCE/poismg.f90

    r392 r622  
    88! Current revisions:
    99! -----------------
    10 !
     10! optional barriers included in order to speed up collective operations
    1111!
    1212! Former revisions:
     
    106106          maxerror = SUM( r(nzb+1:nzt,nys:nyn,nxl:nxr)**2 )
    107107#if defined( __parallel )
     108          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    108109          CALL MPI_ALLREDUCE( maxerror, residual_norm, 1, MPI_REAL, MPI_SUM, &
    109110                              comm2d, ierr)
  • palm/trunk/SOURCE/pres.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    105105
    106106#if defined( __parallel )   
     107       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    107108       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
    108109                           MPI_SUM, comm1dy, ierr )   
     
    143144
    144145#if defined( __parallel )   
     146       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    145147       CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, &
    146148                           MPI_SUM, comm1dx, ierr )   
     
    172174          ENDDO
    173175#if defined( __parallel )   
     176          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    174177          CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, comm2d, &
    175178                              ierr )
     
    537540
    538541#if defined( __parallel )   
     542       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    539543       CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, &
    540544                           MPI_SUM, comm2d, ierr ) 
  • palm/trunk/SOURCE/read_var_list.f90

    r601 r622  
    33!------------------------------------------------------------------------------!
    44! Current revisions:
    5 ! -----------------_
    6 !
     5! ------------------
     6! +collective_wait
    77!
    88! Former revisions:
     
    275275          CASE ( 'cloud_physics' )
    276276             READ ( 13 )  cloud_physics
     277          CASE ( 'collective_wait' )
     278             READ ( 13 )  collective_wait
    277279          CASE ( 'conserve_volume_flow' )
    278280             READ ( 13 )  conserve_volume_flow
  • palm/trunk/SOURCE/set_particle_attributes.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    140140#if defined( __parallel )
    141141
     142       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    142143       CALL MPI_ALLREDUCE( sums_l(nzb,4,0), sums(nzb,4), nzt+2-nzb, &
    143144                           MPI_REAL, MPI_SUM, comm2d, ierr )
  • palm/trunk/SOURCE/timestep.f90

    r392 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    118118          uv_gtrans_l = uv_gtrans_l / REAL( (nxr-nxl+1)*(nyn-nys+1)*(nzt-nzb) )
    119119#if defined( __parallel )
     120          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    120121          CALL MPI_ALLREDUCE( uv_gtrans_l, uv_gtrans, 2, MPI_REAL, MPI_SUM, &
    121122                              comm2d, ierr )
     
    164165!$OMP END PARALLEL
    165166#if defined( __parallel )
     167       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    166168       CALL MPI_ALLREDUCE( dt_diff_l, dt_diff, 1, MPI_REAL, MPI_MIN, comm2d, &
    167169                           ierr )
     
    252254!--       Determine the global minumum
    253255#if defined( __parallel )
     256          IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    254257          CALL MPI_ALLREDUCE( dt_plant_canopy_l, dt_plant_canopy, 1, MPI_REAL,  &
    255258                              MPI_MIN, comm2d, ierr )
  • palm/trunk/SOURCE/transpose.f90

    r484 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    6969!-- Transpose array
    7070    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     71    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    7172    CALL MPI_ALLTOALL( f_inv(nys_x,nzb_x,0), sendrecvcount_xy, MPI_REAL, &
    7273                       work(1),              sendrecvcount_xy, MPI_REAL, &
     
    143144!--    Transpose array
    144145       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     146       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    145147       CALL MPI_ALLTOALL( work(1),          sendrecvcount_zx, MPI_REAL, &
    146148                          f_inv(nys,nxl,1), sendrecvcount_zx, MPI_REAL, &
     
    229231!-- Transpose array
    230232    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     233    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    231234    CALL MPI_ALLTOALL( work(1),              sendrecvcount_xy, MPI_REAL, &
    232235                       f_inv(nys_x,nzb_x,0), sendrecvcount_xy, MPI_REAL, &
     
    291294!-- Transpose array
    292295    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     296    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    293297    CALL MPI_ALLTOALL( f_inv(nxl,1,nys), sendrecvcount_xy, MPI_REAL, &
    294298                       work(1),          sendrecvcount_xy, MPI_REAL, &
     
    373377!-- Transpose array
    374378    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     379    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    375380    CALL MPI_ALLTOALL( f_inv(nxl_y,nzb_y,0), sendrecvcount_yz, MPI_REAL, &
    376381                       work(1),              sendrecvcount_yz, MPI_REAL, &
     
    454459!-- Transpose array
    455460    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     461    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    456462    CALL MPI_ALLTOALL( f_inv(nys,nxl,1), sendrecvcount_zx, MPI_REAL, &
    457463                       work(1),          sendrecvcount_zx, MPI_REAL, &
     
    528534!--    Transpose array
    529535       CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     536       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    530537       CALL MPI_ALLTOALL( work(1),              sendrecvcount_yz, MPI_REAL, &
    531538                          f_inv(nxl_y,nzb_y,0), sendrecvcount_yz, MPI_REAL, &
     
    627634!-- Transpose array
    628635    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
     636    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    629637    CALL MPI_ALLTOALL( f_inv(nys,nxl,1), sendrecvcount_zyd, MPI_REAL, &
    630638                       work(1),          sendrecvcount_zyd, MPI_REAL, &
  • palm/trunk/SOURCE/user_statistics.f90

    r556 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! optional barriers included in order to speed up collective operations
    77!
    88! Former revisions:
     
    9292!--           assign ts_value(dots_num_palm+1:,sr) = ts_value_l directly.
    9393!#if defined( __parallel )
     94!       IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    9495!       CALL MPI_ALLREDUCE( ts_value_l(dots_num_palm+1),                       &
    9596!                           ts_value(dots_num_palm+1,sr),                      &
  • palm/trunk/SOURCE/write_compressed.f90

    r484 r622  
    55! Current revisions:
    66! -----------------
    7 !
     7! optional barriers included in order to speed up collective operations
    88!
    99! Former revisions:
     
    9090
    9191#if defined( __parallel )
     92    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    9293    CALL MPI_ALLREDUCE( ifieldmax_l, ifieldmax, 1, MPI_INTEGER, MPI_MAX, &
    9394                        comm2d, ierr )
     95    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
    9496    CALL MPI_ALLREDUCE( ifieldmin_l, ifieldmin, 1, MPI_INTEGER, MPI_MIN, &
    9597                        comm2d, ierr )
  • palm/trunk/SOURCE/write_var_list.f90

    r601 r622  
    44! Current revisions:
    55! -----------------
    6 !
     6! +collective_wait
    77!
    88! Former revisions:
     
    200200    WRITE ( 14 )  'cloud_physics                 '
    201201    WRITE ( 14 )  cloud_physics
     202    WRITE ( 14 )  'collective_wait               '
     203    WRITE ( 14 )  collective_wait
    202204    WRITE ( 14 )  'conserve_volume_flow          '
    203205    WRITE ( 14 )  conserve_volume_flow