- Timestamp: Dec 10, 2010 8:08:13 AM
- Location: palm/trunk
- Files: 29 edited
palm/trunk/SCRIPTS/mbuild
(r562 → r622)

- Changelog addition: 08/12/10 - Siggi - initialization of the module command changed for SGI-ICE/lcsgi; adjustments for Kyushu Univ. (lcrte, ibmku)
- New remote addresses: (lcrte) remote_addres=133.5.185.60 and (ibmku) remote_addres=133.5.4.129.
- The remote make call for SGI-ICE/lcsgi now initializes the module command with eval `/sw/swdist/bin/modulesinit` instead of sourcing /usr/share/modules/init/bash; the old call is kept as a comment.
palm/trunk/SCRIPTS/mrun
(r592 → r622)

- Changelog addition: 08/12/10 - Siggi - new handling of openmp/hybrid runs, option -O has now argument threads_per_task; adjustments for Kyushu Univ. (lcrte, ibmku)
- return_addres: on host rte10 it is set directly to 133.5.185.60 (with a warning), otherwise it is still determined via nslookup; the old special case for 130.75.105.158 is dropped.
- The getopts string changes O to O:, i.e. option -O now takes an argument; its handler sets use_openmp=true and threads_per_task=$OPTARG.
- The block that forced tasks_per_node=1 and threads_per_task=tasks_per_node for OpenMP runs is commented out; TOPT is now always "-T $tasks_per_node" (the lcsgi special case tp1 = tasks_per_node * threads_per_task is removed), while OOPT remains "-O $threads_per_task".
- Conditions "$threads_per_task != 1" are replaced by "$use_openmp = true" (header output, mvapich branch). For mvapich, MV2_CPU_MAPPING is unset and "mpiexec -npernode $tasks_per_node" is used for OpenMP runs; on lcsgi, MPI_DSM_CPULIST is unset when OpenMP is used and the commented-out OpenMP test lines now use "mpiexec -npernode $tasks_per_node".
- Host ibmb is removed from all host lists and branches (batch/restart host checks, tasks_per_node default, queue defaults, tmp_data_catalog, debugger, poe execution, output transfer); ibmku is added where appropriate and is executed like ibmy via a hostfile. For coupled runs, "./a.out -procs $tasks_per_node" is only used on ibmy, otherwise "poe ./a.out -procs $numprocs".
- Restart calls pass "-O $threads_per_task" when OpenMP is used and always "-T $tasks_per_node"; comments "# end debug mode" and "# normal execution" are added.
palm/trunk/SCRIPTS/subjob
(r555 → r622)

- Changelog addition: 08/12/10 - Siggi - initialization of the module command changed for SGI-ICE/lcsgi; adjustments for Kyushu Univ. (lcrte, ibmku)
- New integer processes_per_node = tasks_per_node * threads_per_task; nodes is now always numprocs / ( tasks_per_node * threads_per_task ) (the lcsgi special case is removed). processes_per_node replaces tasks_per_node in the LoadLeveler tasks_per_node directive and in the PBS ppn, mppnppn and NEC cpunum_prc settings.
- Host tables: breg*/berni* (ibmb) removed; rte* (lcrte, 133.5.185.60) and *.cc.kyushu-u.ac.jp (ibmku, 133.5.4.129) added; help text updated. ibmku submits via /usr/local/bin/llsubmit to queue s4 (valid queues sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s); the ibmb queue list and the forced node_usage=shared for queue cdev are removed.
- LoadLeveler job generation is restructured: general defaults for shell, consumable memory, class, environment, network, data_limit and image_size are set first and then overridden per host (ibmh, ibmku, ibms, ibmy); ibmku jobs use /usr/bin/ksh.
- lcsgi job scripts initialize the module command with eval `/sw/swdist/bin/modulesinit` instead of sourcing /usr/share/modules/init/bash (the old line is kept as a comment).
- return_queue is now c1 on ibmh and sdbg2 on ibmku. The scpjob for ibmku uses /usr/bin/ksh (with a "# @ shell" line) and removes itself at the end instead of being removed by the main job; job files on lctit and ibmku are kept until the job ends, and on ibmku the job is submitted via ssh without deleting the job file.
- A mis-encoded umlaut in a German comment is fixed (AUSFUEHRUNG).
palm/trunk/SOURCE/advec_particles.f90
(r559 → r622) Optional barriers included in order to speed up collective operations: IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) is inserted in front of the MPI_ALLREDUCE calls for the particle statistics sums and for dt_3d_reached.
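The same guarded barrier precedes every collective operation touched in the SOURCE files below. As a minimal, self-contained sketch of the idiom (a hypothetical stand-alone program, not PALM code: it uses MPI_COMM_WORLD instead of PALM's communicator comm2d and hard-wires the switch, which in PALM is a namelist parameter):

    PROGRAM collective_wait_sketch

       IMPLICIT NONE
       INCLUDE 'mpif.h'

       LOGICAL ::  collective_wait = .TRUE.   ! in PALM: inipar switch, default .FALSE.
       INTEGER ::  ierr
       REAL    ::  sum_l, sum_total

       CALL MPI_INIT( ierr )

       sum_l = 1.0      ! some local partial result

       !-- Optional barrier: synchronize all PEs first, so that the reduction
       !-- itself does not have to absorb the load imbalance between processes
       IF ( collective_wait )  CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
       CALL MPI_ALLREDUCE( sum_l, sum_total, 1, MPI_REAL, MPI_SUM, &
                           MPI_COMM_WORLD, ierr )

       CALL MPI_FINALIZE( ierr )

    END PROGRAM collective_wait_sketch

Whether the extra synchronization pays off depends on the MPI implementation, which is why the barrier is switchable and is only enabled by default on the SGI systems (see init_pegrid.f90 below).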
palm/trunk/SOURCE/advec_s_bc.f90
(r392 → r622) Same optional barrier in front of the three MPI_ALLREDUCE calls that determine fmax.
palm/trunk/SOURCE/buoyancy.f90
(r516 → r622) Same optional barrier in front of the MPI_ALLREDUCE of the sums_l profile.
palm/trunk/SOURCE/check_for_restart.f90
(r392 → r622) Same optional barrier in front of the MPI_ALLREDUCE (logical OR) of terminate_run_l.
palm/trunk/SOURCE/cpu_statistics.f90
(r484 → r622) Output of the handling of collective operations: the cpu statistics output (unit 18) now reports either "Barriers are set in front of collective operations" or "No barriers are set in front of collective operations", depending on collective_wait (new FORMAT statements 103 and 104; the empty-line format becomes 105).
palm/trunk/SOURCE/data_output_2d.f90
(r559 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls that average and distribute the cross-section data along y and x.
palm/trunk/SOURCE/data_output_ptseries.f90
(r392 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls (SUM, MAX, MIN) for the particle time series values.
palm/trunk/SOURCE/flow_statistics.f90
(r550 → r622) Same optional barrier in front of all MPI_ALLREDUCE calls that build the horizontally averaged profiles (including the ocean, humidity, cloud physics and passive scalar branches).
palm/trunk/SOURCE/global_min_max.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls with MPI_MINLOC and MPI_MAXLOC.
palm/trunk/SOURCE/inflow_turbulence.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE of the inflow profiles avpr; also added to the commented-out volume-flow reduction.
palm/trunk/SOURCE/init_3d_model.f90
(r561 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for volume_flow_initial, volume_flow_area and the grid point counts (ngp_2dh, ngp_2dh_outer, ngp_2dh_s_inner, ngp_3d_inner).
palm/trunk/SOURCE/init_particles.f90
(r392 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for the total numbers of particles and tails and for uniform_particles.
palm/trunk/SOURCE/init_pegrid.f90
(r482 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for id_inflow and id_recycling. In addition, collective_wait is switched on by default on SGI-type systems: IF ( host(3:5) == 'sgi' ) collective_wait = .TRUE.; the continuation of the recycling_width IF condition is re-wrapped.
palm/trunk/SOURCE/init_slope.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE of pt_init_local; the indentation of the surrounding block is corrected.
palm/trunk/SOURCE/modules.f90
(r601 → r622) +collective_wait in pegrid: the switch is declared as LOGICAL :: collective_wait = .FALSE., reorder = .TRUE.
palm/trunk/SOURCE/parin.f90
(r601 → r622) +collective_wait in inipar: the new switch is added to the inipar NAMELIST so that it can be set in the parameter file.
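Because the switch is part of &inipar, it can simply be set in the run parameter file. A minimal sketch of such a namelist read, assuming a hypothetical file name PARIN_EXAMPLE, an arbitrary unit number and simplified error handling (the real parameter file handling lives in parin.f90):

    PROGRAM namelist_sketch

       IMPLICIT NONE

       LOGICAL ::  collective_wait = .FALSE.   ! default as declared in modules.f90
       INTEGER ::  io_status

       NAMELIST /inipar/  collective_wait

       !-- Read the &inipar group from the (hypothetical) parameter file
       OPEN  ( 11, FILE='PARIN_EXAMPLE', FORM='FORMATTED', STATUS='OLD' )
       READ  ( 11, NML=inipar, IOSTAT=io_status )
       CLOSE ( 11 )

       IF ( io_status /= 0 )  THEN
          PRINT*, '+++ namelist &inipar missing or erroneous, keeping the default'
       ENDIF
       PRINT*, 'collective_wait = ', collective_wait

    END PROGRAM namelist_sketch

A parameter file containing, e.g., "&inipar collective_wait = .TRUE., /" would then switch the barriers on for that run.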
palm/trunk/SOURCE/poisfft.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLTOALL calls used for the transpositions.
palm/trunk/SOURCE/poismg.f90
(r392 → r622) Same optional barrier in front of the MPI_ALLREDUCE of the residual norm.
palm/trunk/SOURCE/pres.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for the volume flow and for w_l.
palm/trunk/SOURCE/read_var_list.f90
(r601 → r622) +collective_wait: a new CASE ( 'collective_wait' ) reads the switch from the restart file; a stray character in the header separator line is removed.
palm/trunk/SOURCE/set_particle_attributes.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE of sums_l(nzb,4,0).
palm/trunk/SOURCE/timestep.f90
(r392 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for uv_gtrans, dt_diff and dt_plant_canopy.
palm/trunk/SOURCE/transpose.f90
(r484 → r622) Same optional barrier in front of all MPI_ALLTOALL calls of the transpose routines.
palm/trunk/SOURCE/user_statistics.f90
(r556 → r622) The optional barrier is added to the commented-out example MPI_ALLREDUCE for user-defined time series.
palm/trunk/SOURCE/write_compressed.f90
(r484 → r622) Same optional barrier in front of the MPI_ALLREDUCE calls for ifieldmax and ifieldmin.
palm/trunk/SOURCE/write_var_list.f90
(r601 → r622) +collective_wait: the switch is written to the restart file ( WRITE ( 14 ) 'collective_wait ' followed by WRITE ( 14 ) collective_wait ).