Changeset 2365 for palm/trunk/SCRIPTS
- Timestamp: Aug 21, 2017 2:59:59 PM
- Location: palm/trunk/SCRIPTS
- Files: 3 edited
- Legend: unchanged context lines are prefixed with a space, added lines with +, removed lines with -
palm/trunk/SCRIPTS/mbuild
--- r2316
+++ r2365

@@ -27 +27 @@
 # -----------------
 # $Id$
+# Added lckea & lckeam. KIT/IMK-IFU Garmisch cluster. LRZ (SadiqHuq)
+#
+# 2316 2017-07-20 07:53:42Z maronga
 # Removed qmake block as mrungui now runs as a python script
 #

@@ -531 +534 @@
 # DETERMINE IP-ADDRES OF THE REMOTE-HOST
 case $remote_host in
+    (lcbwuni)  remote_address="129.13.82.89";;
     (lcbullhh) remote_address=136.172.50.13;;
     (lccrayb)  remote_address=130.73.233.1;;
@@ -541 +545 @@
     (lckiaps)  remote_address=118.128.66.223;;
     (lckyut)   remote_address=133.5.4.37;;
+    (lclrz)    remote_address=129.187.20.240;;
+    (lckea*)   remote_address=172.27.80.109;;
     (lctit)    remote_address=10.1.6.170;;
     (lcxe6)    remote_address=129.177.20.113;;
@@ -547 +553 @@
     (ibmkisti) remote_address=150.183.146.24;;
     (ibmku)    remote_address=133.5.4.129;;
+    (ibmmuc)   remote_address=129.187.11.197;;
     (ibms)     remote_address=150.183.5.101;;
     (nech)     remote_address=136.172.44.192;;

@@ -783 +790 @@
 elif [[ $(echo $remote_host | cut -c1-3) = ibm && $(echo $string | cut -c1-3) = ibm ]]
 then
-   cpp_options="${cpp_options},-D__ibm"
+   if [[ $remote_host = ibmmuc ]]
+   then
+      cpp_options="${cpp_options}"
+   else
+      cpp_options="${cpp_options},-D__ibm"
+   fi
 elif [[ $(echo $remote_host | cut -c1-3) = nec && $(echo $string | cut -c1-3) = nec ]]
 then
@@ -790 +802 @@
 if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
 then
-   cpp_options="${cpp_options},-D__$string"
+   if [[ $remote_host = ibmmuc ]]
+   then
+      cpp_options="${cpp_options} -D__parallel"
+   else
+      cpp_options="${cpp_options},-D__$string"
+   fi
 else
    cpp_options="$cpp_options -D__$string "
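The two cpp_options hunks above encode a compiler convention: on the IBM hosts mbuild chains preprocessor symbols into a comma-separated list (the form xlf expects after its -WF flag), whereas the new ibmmuc host gets ordinary space-separated -D options and never receives -D__ibm. Below is a minimal sketch of that branch logic with hard-coded inputs; the starting value of cpp_options is hypothetical (in mbuild it arrives pre-seeded from the configuration file):

#!/bin/bash
# Sketch only: reproduces the r2365 branch logic with fixed inputs.
remote_host=ibmmuc   # try also: ibmh (comma list) or lcmuk (plain -D)
string=parallel      # a condition as given with mbuild option -K
cpp_options=""       # hypothetical; normally pre-seeded from the config file

if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
then
   if [[ $remote_host = ibmmuc ]]
   then
      cpp_options="${cpp_options} -D__parallel"   # plain, space-separated flag
   else
      cpp_options="${cpp_options},-D__$string"    # extend the xlf -WF comma list
   fi
else
   cpp_options="$cpp_options -D__$string "
fi
echo "cpp_options: $cpp_options"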
palm/trunk/SCRIPTS/mrun
--- r2303
+++ r2365

@@ -27 +27 @@
 # -----------------
 # $Id$
+# Added lckea & lckeam. KIT/IMK-IFU Garmisch cluster. LRZ (SadiqHuq)
+# Vertical grid nesting: set vnested_mode. -N Procs for fine and coarse grid.
+#
+# 2303 2017-07-04 12:26:18Z raasch
 # bugfix: setting default value for write_binary
 #

@@ -334 +338 @@
 module_calls=""
 mrun_script_name=$mc
+vnested_dist=""
+vnested_mode="vnested_twi"
 netcdf_inc=""
 netcdf_lib=""
@@ -341 +347 @@
 numprocs_atmos=0
 numprocs_ocean=0
+numprocs_crse=0
+numprocs_fine=0
 OOPT=""
 openmp=false
@@ -373 +381 @@
 run_coupled_model=false
 run_mode=""
+run_vnested_model=false
 dashes=" ----------------------------------------------------------------------------"
 silent=false

@@ -475 +484 @@
 # READ SHELLSCRIPT-OPTIONS AND REBUILD THE MRUN-COMMAND STRING (MC),
 # WHICH WILL BE USED TO START RESTART-JOBS
-while getopts :a:bBc:Cd:Fg:G:h:H:i:kK:m:M:n:o:O:p:P:q:r:R:s:St:T:u:U:vw:xX:yY:zZ option
+while getopts :a:bBc:Cd:Fg:G:h:H:i:kK:m:M:n:N:o:O:p:P:q:r:R:s:St:T:u:U:vw:xX:yY:zZ option
 do
   case $option in
@@ -495 +504 @@
     (M) makefile=$OPTARG; mc="$mc -M$OPTARG";;
     (n) node_usage=$OPTARG; mc="$mc -n$OPTARG";;
+    (N) run_vnested_model=true; vnested_dist=$OPTARG; mc="$mc -N'$OPTARG'";;
     (o) output_list=$OPTARG; mc="$mc -o'$OPTARG'";;
     (O) use_openmp=true; threads_per_task=$OPTARG; mc="$mc -O$OPTARG";;
@@ -550 +560 @@
 printf "\n    -M    Makefile name                        Makefile"
 printf "\n    -n    node usage (shared/not_shared)       depending on -h"
+printf "\n    -N    Vertical grid nesting. Number of"
+printf "\n          PE for Coarse and Fine grid"
 printf "\n    -o    OUTPUT control list                  \"\" "
 printf "\n    -O    threads per openMP task              ---"

@@ -713 +725 @@
 fi

+# NESTING (-N) selected and vnesting_mode specified
+if [[ $run_vnested_model = true ]]
+then
+
+   if [[ -n $vnested_dist ]]
+   then
+
+      numprocs_crse=`echo $vnested_dist | cut -d" " -s -f1`
+      numprocs_fine=`echo $vnested_dist | cut -d" " -s -f2`
+
+      if (( $numprocs_crse + $numprocs_fine != $numprocs ))
+      then
+
+         printf "\n  +++ number of processors does not fit to specification by \"-N\"."
+         printf "\n      PEs (total)  : $numprocs"
+         printf "\n      PEs (Coarse) : $numprocs_crse"
+         printf "\n      PEs (Fine)   : $numprocs_fine"
+         locat=vnesting; exit
+
+      fi
+
+   else
+
+      printf "\n  +++ specify PEs for coarse and fine grid: -N \"nCGPE nFGPE\""
+      locat=vnesting; exit
+
+   fi
+   vnested_dist=`echo "$numprocs_crse $numprocs_fine"`
+fi
+
 # SAVE VALUES OF MRUN-OPTIONS SICHERN IN ORDER TO OVERWRITE
 # THOSE VALUES GIVEN IN THE CONFIGURATION-FILE

@@ -1195 +1237 @@
 (ibmh)      queue=cluster;;
 (ibmkisti)  queue=class.32plus;;
+(ibmmuc*)   queue=test;;
+(lcbwuni)   queue=develop;;
 (lcbullhh)  queue=compute;;
 (lccrayb)   queue=mpp1q;;
@@ -1203 +1247 @@
 (lckyuh)    queue=fx-single;;
 (lckyut)    queue=cx-single;;
+(lclrz)     queue=mpp2;;
 (lctit)     queue=S;;
 (unics)     queue=unics;;

@@ -1764 +1809 @@

 # SET PREPROCESSOR-DIRECTIVES TO SELECT OPERATING SYSTEM SPECIFIC CODE
-if [[ $(echo $localhost | cut -c1-3) = ibm ]]
+if [[ $(echo $localhost | cut -c1-3) = ibm && $localhost != ibmmuc* ]]
 then
    cpp_options="${cpp_options},-D__ibm=__ibm"

@@ -1779 +1824 @@
 # SET DIRECTIVES GIVEN BY OPTION -K (E.G. parallel)
-if [[ $(echo $localhost | cut -c1-3) = ibm ]]
+if [[ $(echo $localhost | cut -c1-3) = ibm && $localhost != ibmmuc ]]
 then
    [[ -n $cond1 ]] && cpp_options="${cpp_options},-D__$cond1=__$cond1"

@@ -1893 +1938 @@
    fi
 fi
-TEMPDIR=$tmp_user_catalog/${usern}.$kennung

+if [[ $localhost = ibmmuc* ]]
+then
+   TEMPDIR=$tmp_user_catalog/${USER}.$kennung
+else
+   TEMPDIR=$tmp_user_catalog/${usern}.$kennung
+fi

 # DETERMINE THE NAME OF THE DIRECTORY WHICH IS USED TO TEMPORARILY STORE DATA FOR RESTART RUNS

@@ -2792 +2842 @@
 then
    /opt/optibm/HPM_2_4_1/bin/hpmcount a.out
+elif [[ $localhost = ibmmuc* ]]
+then
+   ulimit -c unlimited    # only for debugging
+   echo $MP_NODES >  ~/job_queue/hostfile.$kennung
+   echo $MP_PROCS >> ~/job_queue/hostfile.$kennung
+   cat $LOADL_HOSTFILE >> ~/job_queue/hostfile.$kennung
+   export MP_NODES=$nodes
+   export MP_PROCS=$numprocs
+#   export MPI_SINGLE_THREAD=no          # LRZ NetCDF
+#   export MP_TASKS_PER_NODE=$tasks_per_node
+   echo "Resource Info: "
+   echo "numprocs: " $numprocs " MP_PROCS " $MP_PROCS
+   echo "nodes: " $nodes " MP_NODES " $MP_NODES
+   echo "tasks_per_node: " $tasks_per_node
+   echo "threads_per_task: " $threads_per_task
+   export OMP_NUM_THREADS=1
+   source /lrz/sys/share/modules/init/bash
+   module li
+   echo "runfile_atmos"
+
+   if [[ $run_vnested_model = true ]]
+   then
+
+      printf "\n      Nested run ($numprocs_crse Coarse, $numprocs_fine Fine)"
+      printf "\n      using $vnested_mode nesting"
+      printf "\n\n"
+
+      echo "$vnested_mode $numprocs_crse $numprocs_fine" > runfile_atmos
+
+      poe ./a.out < runfile_atmos
+#      mpiexec -n $numprocs ./a.out < runfile_atmos
+   else
+      echo "precursor_atmos" > runfile_atmos
+      poe ./a.out -proc $numprocs -nodes $nodes < runfile_atmos
+#      mpiexec -n $numprocs ./a.out < runfile_atmos
+
+   fi
 else
    if [[ $run_coupled_model = false ]]

@@ -2899 +2986 @@
       printf "\n      threads per task: $threads_per_task  stacksize: unlimited"
    fi
-   if [[ $run_coupled_model = false ]]
+   if [[ $run_coupled_model = false && $run_vnested_model = false ]]
    then
       if [[ "$ocean_file_appendix" = true ]]

@@ -2948 +3035 @@
       then
          mpirun_rsh -hostfile $PBS_NODEFILE -np `cat $PBS_NODEFILE | wc -l` a.out < runfile_atmos
+      elif [[ $host = lclrz || $host = lcbwuni ]]
+      then
+         mpiexec -n $ii a.out < runfile_atmos $ROPTeS
+      elif [[ $host = lckea* ]]
+      then
+         srun -n $ii a.out < runfile_atmos $ROPTeS
       elif [[ $host = lckiaps ]]
       then

@@ -2959 +3051 @@
       fi

-   else
+   elif [[ $run_coupled_model = true ]]
+   then
       # COUPLED RUN
       (( iia = $numprocs_atmos / $threads_per_task ))

@@ -2992 +3084 @@
       wait

+
+   elif [[ $run_vnested_model = true ]]
+   then
+      printf "\n      Vertical Nested run ($numprocs_crse Coarse, $numprocs_fine Fine)"
+      printf "\n      using $vnested_mode vnesting"
+      printf "\n\n"
+
+      echo "$vnested_mode $numprocs_crse $numprocs_fine" > runfile_atmos
+
+      if [[ $host = lcbwuni || $host = lclrz* ]]
+      then
+         mpiexec -n $ii a.out < runfile_atmos $ROPTeS
+      elif [[ $host = lckea* ]]
+      then
+         srun -n $ii a.out < runfile_atmos $ROPTeS
+      else
+         mpirun -np $numprocs ./a.out $ROPTS < runfile_atmos
+      fi
+      wait
    fi

@@ -3657 +3768 @@
 [[ "$ocean_file_appendix" = true ]] && mrun_com=${mrun_com}" -y"
 [[ $run_coupled_model = true ]] && mrun_com=${mrun_com}" -Y \"$coupled_dist\""
+[[ $run_vnested_model = true ]] && mrun_com=${mrun_com}" -N \"$vnested_dist\""
 [[ "$check_namelist_files" = false ]] && mrun_com=${mrun_com}" -z"
 [[ "$combine_plot_fields" = false ]] && mrun_com=${mrun_com}" -Z"
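Taken together, the mrun hunks add one user-visible feature: -N requests a vertically nested run and carries the PE split as a single quoted argument, which must add up to the total PE count requested for the job. Below is a hypothetical invocation plus the core of the split/validation step (run name, host, and counts are made up; the other flags follow mrun's existing options). Note that cut -s leaves the second field empty when the pair is not quoted, so the check then aborts the run:

# 24 PEs in total: 16 for the coarse grid, 8 for the fine grid
mrun -d example_vnest -h lclrz -K parallel -X 24 -N "16 8" -r "d3#"

# what mrun now does with the -N argument, reduced to its core:
numprocs=24
vnested_dist="16 8"
numprocs_crse=`echo $vnested_dist | cut -d" " -s -f1`   # -> 16
numprocs_fine=`echo $vnested_dist | cut -d" " -s -f2`   # -> 8
if (( numprocs_crse + numprocs_fine != numprocs ))
then
   echo "+++ number of processors does not fit to specification by \"-N\""
   exit 1
fi
echo "vnested_twi $numprocs_crse $numprocs_fine" > runfile_atmos   # read by a.out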
palm/trunk/SCRIPTS/subjob
--- r2295
+++ r2365

@@ -28 +28 @@
 # -----------------
 # $Id$
+# Added lckea & lckeam. KIT/IMK-IFU Garmisch cluster. LRZ (SadiqHuq)
+#
+# 2295 2017-06-27 14:25:52Z raasch
 # adjustments for using lcgeohu (cirrus @ HUB)
 #

@@ -252 +255 @@
 (inferno)    local_address=130.75.105.5;   local_host=lcmuk;;
 (irifi)      local_address=130.75.105.104; local_host=lcmuk;;
+#  (i*)      local_address=129.187.11.197; local_host=ibmmuc;;
 (jaboticaba) local_address=150.163.25.181; local_host=lcbr;;
+(kea*)       local_address=172.27.80.109;  local_host=lckeal;;
 (sno)        local_address=130.75.105.113; local_host=lcmuk;;
 (kuma)       local_address=130.75.105.115; local_host=lcmuk;;
 (levanto)    local_address=130.75.105.45;  local_host=lcmuk;;
 (login*)     local_address=118.128.66.201; local_host=lckiaps;;
+#  (login*)  local_address=129.187.11.197; local_host=ibmmuc;;
+(lm*)        local_address=129.187.11.197; local_host=ibmmuc;;
+(lx*)        local_address=129.187.20.240; local_host=lclrz;;
+(mpp2*)      local_address=129.187.20.105; local_host=lclrz;;
 (maestro)    local_address=130.75.105.2;   local_host=lcmuk;;
 (meller)     local_address=134.106.74.155; local_host=lcfor;;
@@ -278 +287 @@
 (tc*)        local_address="ocean";        local_host=lcocean;;
 (t2a*)       local_address=10.1.6.165;     local_host=lctit;;
+(uc1n*)      local_address=129.13.82.89;   local_host=lcbwuni;;
 (urban*)     local_address=147.46.30.151   local_host=lcsb;;
 (vinessa)    local_address=130.75.105.112; local_host=lcmuk;;

@@ -409 +419 @@
 (ibmku)    queue=s4;        remote_address=133.5.4.129;    submcom=/usr/local/bin/llsubmit;;
 (ibms)     queue=p_normal;  remote_address=150.183.5.101;  submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
+(ibmmuc)                    remote_address=129.187.11.197; submcom=/usr/bin/llsubmit;;
+(lcbwuni)  queue=develop;   remote_address=129.13.82.89;   submcom=/opt/moab/bin/msub;;
 (lcbullhh) queue=compute;   remote_address=136.172.50.13;  submcom=/usr/bin/sbatch;;
 (lccrayb)  queue=mpp1testq; remote_address=130.73.233.1;   submcom="/opt/moab/default/bin/msub -E";;
@@ -417 +429 @@
 (lckyoto)  remote_address=133.3.51.11;    submcom=/thin/local/bin/qsub;;
 (lck)      remote_address=165.132.26.61;  submcom=/usr/torque/bin/qsub;;
+(lckeal)   queue=ivy;       remote_address=172.27.80.109;  submcom=/usr/bin/sbatch;;
 (lckiaps)  remote_address=118.128.66.201; submcom=/opt/pbs/default/bin/qsub;;
 (lckordi)  remote_address=210.219.61.8;   submcom=/usr/torque/bin/qsub;;
 (lckyuh)   remote_address=133.5.4.33;     submcom=/usr/bin/pjsub;;
 (lckyut)   remote_address=133.5.4.37;     submcom=/usr/bin/pjsub;;
+(lclrz)    remote_address=129.187.20.240; submcom=/usr/bin/sbatch;;
 (lcocean)  remote_address="ocean";        submcom=qsub;;
 (lcsb)     remote_address=147.46.30.151;  submcom=/usr/torque/bin/qsub;;

@@ -459 +473 @@
            (*) error=true;;
        esac;;
+   (ibmmuc*) case $ndq in
+           (test|micro|general|large|fat|fattest|special|tmp1|tmp2) error=false;;
+           (*) error=true;;
+       esac;;
    (lcbullhh) case $ndq in
            (compute|compute2|shared) error=false;;
            (*) error=true;;
        esac;;
+   (lcbwuni) case $ndq in
+           (develop|singlenode|multinode|verylong|fat) error=false;;
+           (*) error=true;;
+       esac;;
    (lccrayb) case $ndq in
            (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q) error=false;;
@@ -493 +515 @@
    (lckyut) case $ndq in
            (cx-dbg|cx-single|cx-small|cx-middle|cx-large) error=false;;
            (*) error=true;;
        esac;;
+   (lclrz) case $ndq in
+           (mpp1|mpp2|iuv2|myri) error=false;;
+           (*) error=true;;
+       esac;;

@@ -744 +770 @@
 then

-   if [[ $remote_host != ibmkisti ]]
+   if [[ $remote_host == ibmmuc* ]]
+   then
+      cat > $job_to_send << %%END%%
+
+#!/bin/bash
+# @ job_type = parallel
+# @ job_name = $job_name
+# @ output = $remote_dayfile
+# @ error = $remote_dayfile
+# @ wall_clock_limit = $timestring
+$class
+$mcm_affinity_options
+$task_affinity
+$notify_user
+# @ network.MPI = sn_all,not_shared,us
+# @ notification = always
+# @ energy_policy_tag = table_kit_ifu
+# @ minimize_time_to_solution = yes
+# @ node = $nodes
+# @ total_tasks = $numprocs
+# @ node_topology = island
+# @ island_count = 1,2
+# @ environment = LD_LIBRARY_PATH=/lrz/sys/libraries/netcdf/4.2.1.1_impi4/lib:/lrz/sys/libraries/hdf5/1.8.15/ibmmpi/lib:/lrz/sys/libraries/fftw/3.3.3/avx/lib/
+# @ queue
+
+%%END%%
+
+   elif [[ $remote_host != ibmkisti ]]
    then

@@ -1248 +1301 @@
       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -et 1 -q $queue "
    fi
+
+elif [[ $remote_host = lclrz ]]
+then
+   cat > $job_to_send << %%END%%
+#!/bin/bash
+#SBATCH -J $job_name
+#SBATCH -t $timestring
+#SBATCH -N $nodes
+#SBATCH --ntasks-per-node=$processes_per_node
+#SBATCH --get-user-env
+#SBATCH -o $remote_dayfile
+#SBATCH -e $remote_dayfile
+#SBATCH --mail-user=${email_notification}
+#SBATCH --clusters=$queue
+
+$init_cmds
+
+$module_calls
+
+%%END%%
+
+elif [[ $remote_host = lckea* ]]
+then
+   keal_tasks_per_core=1
+
+   if [[ $queue = haswell || $queue = ivy* ]]
+   then
+      if (( tasks_per_node > 20 ))
+      then
+         keal_tasks_per_core=2
+      fi
+   fi
+
+   cat > $job_to_send << %%END%%
+#!/bin/bash
+#SBATCH -J $job_name
+#SBATCH -t $timestring
+#SBATCH -N $nodes
+#SBATCH --ntasks-per-node=$processes_per_node
+#SBATCH --ntasks-per-core=$keal_tasks_per_core
+#SBATCH --mem-per-cpu=${memory}mb
+#SBATCH --get-user-env
+#SBATCH -o $remote_dayfile
+#SBATCH -e $remote_dayfile
+#SBATCH --mail-user=${email_notification}
+#SBATCH --mail-type=ALL
+#SBATCH --partition=$queue
+
+export MV2_ENABLE_AFFINITY=0
+
+$init_cmds
+
+$module_calls
+
+%%END%%
+
+elif [[ $remote_host = lcbwuni ]]
+then
+   if [[ $email_notification = none ]]
+   then
+      email_directive=""
+   else
+      email_directive="#PBS -M $email_notification"
+   fi
+   cat > $job_to_send << %%END%%
+#!/bin/ksh
+#PBS -N $job_name
+#PBS -l walltime=$timestring
+#PBS -l nodes=${nodes}:ppn=$processes_per_node
+#PBS -l pmem=${memory}mb
+#PBS -m abe
+#PBS -o $remote_dayfile
+#PBS -j oe
+#PBS -q $queue
+$email_directive
+%%END%%

 else

@@ -1615 +1744 @@
          eval $submcom $job_on_remhost
       fi
+   elif [[ $local_host = lclrz || $local_host = lckea* ]]
+   then
+      eval $submcom $job_on_remhost
+   elif [[ $local_host = lcbwuni ]]
+   then
+      msub -q $queue $job_on_remhost
    else
       qsub $job_on_remhost
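All of the new batch headers in subjob are generated the same way: the job file is written through a here-document whose %%END%% delimiter is unquoted, so $job_name, $queue and friends are expanded while the file is created, and the batch system later sees only literal #SBATCH (or # @ / #PBS) directives. A minimal self-contained sketch of that technique, modeled on the lckea-style SLURM header above (all values are illustrative; in subjob they come from options and the host tables):

#!/bin/bash
# illustrative values, normally filled in by subjob
job_name=example_run
timestring=01:30:00
nodes=2
processes_per_node=28
remote_dayfile=example_run.dayfile
queue=ivy
job_to_send=jobfile_to_send

# unquoted delimiter => variables are expanded now, at generation time
cat > $job_to_send << %%END%%
#!/bin/bash
#SBATCH -J $job_name
#SBATCH -t $timestring
#SBATCH -N $nodes
#SBATCH --ntasks-per-node=$processes_per_node
#SBATCH -o $remote_dayfile
#SBATCH -e $remote_dayfile
#SBATCH --partition=$queue
%%END%%

sbatch $job_to_send   # subjob does the equivalent via: eval $submcom $job_on_remhost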