- Timestamp: Sep 24, 2014 2:06:57 PM
- Location: palm/trunk
- Files: 13 edited
palm/trunk/SCRIPTS/mbuild
Changed from r1391 to r1468.

Current revisions:
  - Typo removed (addres->address)
  - Adjustments for lcxe6

The misspelled shell variables local_addres and remote_addres were renamed to local_address and remote_address throughout the script: in the commented-out nslookup call that determines the local IP address, in the printed configuration table ("local IP-address:", "address:"), in the case statement that maps each remote host (lccrayb, lccrayh, lcflow, lcmuk, lcrte, lcsb, lck, lckiaps, lckyut, lctit, lcxe6, lcxt5m, ibmh, ibmkisti, ibmku, ibms, nech, neck, ground.yonsei.ac.kr) to its IP address, and in every ssh/scp command that creates the source and utility directories on the remote host, copies and unpacks the tarballs, calls make remotely (including the lctit pipe variants and the lcxe6/ibmh/lctit make branches) and repacks the current version. Representative hunks:

-     (lccrayb)               remote_addres =130.73.233.1;;
+     (lccrayb)               remote_address=130.73.233.1;;

-     scp  ${local_source_path}/${mainprog}_sources.tar  ${remote_username}@${remote_addres }:${remote_md}/${mainprog}_sources.tar
+     scp  ${local_source_path}/${mainprog}_sources.tar  ${remote_username}@${remote_address}:${remote_md}/${mainprog}_sources.tar
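The rename touches several dozen lines across mbuild, mrun and subjob, so a quick consistency check is useful. A minimal sketch (not part of the changeset), run from the palm/trunk directory:

    # list any remaining occurrence of the old spelling 'addres' that is not
    # simply the first part of the corrected 'address'
    grep -nE 'addres([^s]|$)' SCRIPTS/mbuild SCRIPTS/mrun SCRIPTS/subjob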
palm/trunk/SCRIPTS/mrun
Changed from r1443 to r1468.

Current revisions:
  - Typo removed (addres->address)
  - Bugfix: returning files to IMUK via ssh did not work for lccrayh and lccrayb
  - Added support for restart runs (remote) for lcxe6

The variable return_addres was renamed to return_address throughout: in the fixed-address fallbacks for rte10, climate0 and urban00 (including their "+++ WARNING: fixed return_address = ... is used" messages), in the nslookup default, in option -R, in the variables written to and exported from mpi_exec_shell, in all batch_scp calls that return output files, in the restart-run initiation and in the mrun command built for the remote batch job.

When output files are returned, the ssh commands for lccrayb and lccrayh now source the profile before changing into the temporary directory (". \\$HOME/.profile; cd $TEMPDIR; batch_scp ..." instead of "cd $TEMPDIR; batch_scp ..."), which fixes the file return from these hosts, and an equivalent branch for lcxe6 via the hexagon login node was added, e.g.:

+              elif [[ $localhost = lcxe6 ]]
+              then
+                 ssh $usern@hexagon ". \\$HOME/.profile; cd $TEMPDIR; batch_scp $PORTOPT $cps -b -m -u $return_username  $return_address ${localout[$i]} \"${pathout[$i]}\"  ${localhost}_${fname}${endout[$i]}  ${extout[$i]}"

For initiating restart runs, lcxe6 was added to the list of hosts that use ssh (lccrayb, lccrayh, nech, ibmh, ibmkisti, ibmku, ibms, lcflow, lckyu*, lcxe6). If the return address lies in the IMUK network (130.75.105.*), the restart command now sources the profile and loads the required modules instead of sourcing the Intel compilervars.sh directly, e.g. on ibmh:

-                 ssh $SSH_PORTOPT $usern@136.172.40.15 "ssh $SSH_PORTOPT $return_addres -l $return_username \"PATH=\\\$PATH:$LOCAL_MRUN_PATH;. /muksoft_i/packages/intel/composer_xe_2013_sp1.2.144/bin/compilervars.sh intel64;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc\" "
+                 ssh $SSH_PORTOPT $usern@136.172.40.15 "ssh $SSH_PORTOPT $return_address -l $return_username \". \\\$HOME/.profile; module load intel-compiler hdf5 netcdf; PATH=\\\$PATH:$LOCAL_MRUN_PATH;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc\" "

and a corresponding lcxe6 branch (ssh via $usern@hexagon) was added. In the generated batch job, the scp of SOURCES_FOR_RUN_$fname from the local host and the batch_scp lines for input files likewise use $return_address.
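The lccrayb/lccrayh return bugfix comes down to ssh running the given command in a non-interactive, non-login shell, so PATH additions made in ~/.profile (where batch_scp and the other PALM scripts live) are not available unless the profile is sourced explicitly, which is what mrun now does. A minimal sketch of the difference, with user@host as a placeholder:

    # non-login shell: ~/.profile is not read, so batch_scp may not be found
    ssh user@host 'command -v batch_scp'

    # what the fixed mrun commands do: source the profile first
    ssh user@host '. $HOME/.profile; command -v batch_scp'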
palm/trunk/SCRIPTS/subjob
Changed from r1453 to r1468.

Current revisions:
  - Typo removed (addres->address)
  - Adjustments for lcxe6

The variables local_addres and remote_addres were renamed to local_address and remote_address throughout: in the case statement that derives local_address and local_host from the hostname (ambiel-lx, atmos, austru, autan, bora, b04*, blizzard1/2, blogin*/bxc*, hlogin*/hxc*, breva, buran, caurus, climate*, clogin*, cs*, elephanta, flow01/02, node*, gaia*, gallego, gregale, hababai, hayaka*, hexagon.bccs.uib.no, hexagon*, hx*, inferno, irifi, jaboticaba, sno, levanto, login*, maestro, meller, meteo-login*, nobel*, orkan, ostria, paesano, pcj*, pingui, quanero, rte*, shiokaze-lx, sisu-login*, solano, sugoka*, t2a*, urban*, vinessa, vorias, *.cc.kyushu-u.ac.jp), in the table that sets queue, remote_address and submcom for each remote host, in the batch_scp commands of the generated job-protocol transfer jobs, and in the scp/ssh commands that copy the job to the remote host and submit it. Representative hunk:

-     (hexagon.bccs.uib.no)   local_addres =129.177.20.113;  local_host=lcxe6;;
+     (hexagon.bccs.uib.no)   local_address=129.177.20.113;  local_host=lcxe6;;

Adjustments for lcxe6: the host was added to the condition that generates a separate job-protocol transfer job (scpjob.$identifier), its return_queue was set to debug, and a new branch writes the transfer job as a small single-core PBS job (#PBS -N job_protocol_transfer, walltime 00:30:00, account $project_account, mppwidth=1, mppnppn=1, output to \$HOME/job_queue/last_job_transfer_protocol, -j oe) that calls batch_scp with $local_address to return the job protocol.
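After variable expansion, the transfer job written by the new lcxe6 branch is a small single-core PBS job. A sketch of what the generated scpjob file roughly looks like; the account, user name, address and file names below are purely illustrative placeholders, not values taken from the changeset:

    #!/bin/ksh
    #PBS -N job_protocol_transfer
    #PBS -l walltime=00:30:00
    #PBS -A nn9999k                  # illustrative project account
    #PBS -l mppwidth=1
    #PBS -l mppnppn=1
    #PBS -o $HOME/job_queue/last_job_transfer_protocol
    #PBS -j oe

    set -x
    # return the remote job protocol (day file) to the submitting host
    batch_scp -d -w 10 -u someuser 130.75.105.2 $HOME/job_queue/lcxe6_example "$HOME/job_queue" example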
palm/trunk/SOURCE/check_for_restart.f90
Changed from r1354 to r1468.

Current revisions:
  - Added support for unscheduled job termination using the flag files DO_STOP_NOW and DO_RESTART_NOW

Two new local logicals, do_stop_now and do_restart_now (both initialized to .FALSE.), were declared next to terminate_run_l. After the CPU-time check, PE0 now looks for the two flag files and, if either exists, requests termination of the run:

+   !
+   !-- Check if a flag file exists that forces a termination of the model
+       terminate_run_l = .FALSE.
+       IF ( myid == 0 )  THEN
+          INQUIRE(FILE="DO_STOP_NOW", EXIST=do_stop_now)
+          INQUIRE(FILE="DO_RESTART_NOW", EXIST=do_restart_now)
+
+          IF ( do_stop_now .OR. do_restart_now )  THEN
+
+             terminate_run_l = .TRUE.
+
+             WRITE( message_string, * ) 'run will be terminated because user ',    &
+                                        'forced a job finialization using a flag', &
+                                        'file:',                                   &
+                                        '&DO_STOP_NOW: ', do_stop_now,             &
+                                        '&DO_RESTART_NOW: ', do_restart_now
+             CALL message( 'check_for_restart', 'PA0398', 0, 0, 0, 6, 0 )
+
+          ENDIF
+       ENDIF

The per-PE result is combined with MPI_ALLREDUCE (MPI_LOR) into terminate_run (or copied directly in serial builds). In coupled runs the remote model is informed of this termination reason with terminate_coupled = 6 via MPI_SENDRECV and MPI_BCAST, provided no other termination reason has been set on either side. Finally, the CONTINUE_RUN flag file that initiates a continuation run is written only if the user did not force a stop without restart:

-       IF ( terminate_run  .AND.  myid == 0 )  THEN
+       IF ( terminate_run  .AND.  myid == 0  .AND.  .NOT. do_stop_now)  THEN
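From the user's side the new mechanism only needs an empty flag file. A minimal sketch (not part of the changeset), assuming the commands are issued in the directory in which the model is running, since the INQUIRE statements above use relative file names:

    # request a clean termination; CONTINUE_RUN is still written, so a
    # continuation run can be initiated afterwards
    touch DO_RESTART_NOW

    # request a clean termination without writing CONTINUE_RUN (no continuation run)
    touch DO_STOP_NOW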
palm/trunk/SOURCE/check_open.f90
Changed from r1360 to r1468.

Current revisions:
  - Adapted for use on up to 6-digit processor cores
  - Added file unit 117 (PROGRESS)

File unit 117 was added to the list of units that may be re-opened (13, 14, 21, 22, 23, 80:85, 117) and to the list of units that are opened on PE0 only (15, 16, 17, 18, 19, 50:59, 81:84, 104:105, 107, 109, 117). Because processor ids now have six digits, the first opening of unit 13 reads BININ/_000000 instead of BININ/_0000 (only this file contains the global variables), and unit 20 opens DATA_LOG/_000000 instead of DATA_LOG/_0000 when myid_char is empty. A new case opens the progress file:

+   !
+   !-- Progress file that is used by the PALM watchdog
+       CASE ( 117 )
+
+          OPEN ( 117, FILE='PROGRESS'//coupling_char, STATUS='REPLACE', FORM='FORMATTED' )
palm/trunk/SOURCE/header.f90
Changed from r1430 to r1468.

Current revisions:
  - Adapted for use on up to 6-digit processor cores

The FORMAT statements 100 to 109 and 114 of the run-control header were widened so that up to six-digit PE counts and four-digit processor-grid dimensions fit the layout: the number of PEs is now written with I6 instead of I5 (format 104), the processor-grid dimensions with I4 instead of I3, and the filler widths were adjusted accordingly (6X to 4X, 37X to 35X, 42('-') to 44('-') in the title line). Representative hunk:

-   104 FORMAT (' Number of PEs:',8X,I5,9X,'Tasks:',I4,' threads per task:',I4/ &
-               37X,'Processor grid (x,y): (',I3,',',I3,')',1X,A)
+   104 FORMAT (' Number of PEs:',10X,I6,4X,'Tasks:',I4,' threads per task:',I4/ &
+               35X,'Processor grid (x,y): (',I4,',',I4,')',1X,A)
palm/trunk/SOURCE/init_pegrid.f90
Changed from r1436 to r1468.

Current revisions:
  - Adapted for use on up to 6-digit processor cores

The only change is the width of the processor-id string appended to per-PE file names:

        CALL MPI_COMM_RANK( comm2d, myid, ierr )
-       WRITE (myid_char,'(''_'',I4.4)')  myid
+       WRITE (myid_char,'(''_'',I6.6)')  myid

        CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
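The practical effect of the I4.4 to I6.6 change is a wider per-PE suffix in file names such as the DEBUG and per-processor restart files; the shell equivalent of the new edit descriptor is a zero-padded six-digit field:

    # PE rank 42: old suffix _0042, new suffix _000042
    printf '_%04d\n' 42
    printf '_%06d\n' 42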
palm/trunk/SOURCE/modules.f90
Changed from r1451 to r1468.

Current revisions:
  - Adapted for use on up to 6-digit processor cores.
  - Increased identifier string length for user-defined quantities to 20.

-       CHARACTER (LEN=11), DIMENSION(100) :: data_output = ' ', &
-                                             data_output_user = ' ', doav = ' '
-       CHARACTER (LEN=10), DIMENSION(max_masks,100) :: &
-            data_output_masks = ' ', data_output_masks_user = ' '
-
-       CHARACTER (LEN=10), DIMENSION(300) :: data_output_pr = ' '
-       CHARACTER (LEN=10), DIMENSION(200) :: data_output_pr_user = ' '
+       CHARACTER (LEN=20), DIMENSION(100) :: data_output = ' ', &
+                                             data_output_user = ' ', doav = ' '
+       CHARACTER (LEN=20), DIMENSION(max_masks,100) :: &
+            data_output_masks = ' ', data_output_masks_user = ' '
+
+       CHARACTER (LEN=20), DIMENSION(300) :: data_output_pr = ' '
+       CHARACTER (LEN=20), DIMENSION(200) :: data_output_pr_user = ' '

-       CHARACTER(LEN=5) :: myid_char = ''
+       CHARACTER(LEN=7) :: myid_char = ''
palm/trunk/SOURCE/palm.f90
r1403 r1468
20 20    ! Current revisions:
21 21    ! -----------------
22       !
22       ! Adapted for use on up to 6-digit processor cores
23 23    !
24 24    ! Former revisions:
…  …
169 169  !
170 170  !-- Test output (to be removed later)
171         WRITE (*,'(A,I 4,A,I3,A,I3,A,I3)') '*** Connect MPI-Task ', myid,' to CPU ',&
171         WRITE (*,'(A,I6,A,I3,A,I3,A,I3)') '*** Connect MPI-Task ', myid,' to CPU ',&
172 172                                acc_rank, ' Devices: ', num_acc_per_node,&
173 173                                ' connected to:', &
…  …
191 191  !
192 192  !-- Open a file for debug output
193         WRITE (myid_char,'(''_'',I 4.4)') myid
193         WRITE (myid_char,'(''_'',I6.6)') myid
194 194     OPEN( 9, FILE='DEBUG'//TRIM( coupling_char )//myid_char, FORM='FORMATTED' )
195 195
palm/trunk/SOURCE/progress_bar.f90
r1402 r1468
20 20    ! Current revisions:
21 21    ! -----------------
22       !
22       ! Added support for progress file PROGRESS which is used in case of batch jobs
23 23    !
24 24    ! Former revisions:
…  …
28 28    ! Description:
29 29    ! ------------
30       ! CPU-time measurements for any program part whatever. Results of the
30       ! This routine prints either a progress bar on the standard output in case of
31       ! interactive runs, or it prints the progress in a separate file called
32       ! PROGRESS.
31 33    !------------------------------------------------------------------------------!
32 34
33 35       USE control_parameters, &
34              ONLY : end_time, simulated_time, simulated_time_at_begin, time_restart
36              ONLY : end_time, run_identifier, simulated_time, &
37                     simulated_time_at_begin, time_restart
35 38
36 39       USE, INTRINSIC :: ISO_FORTRAN_ENV, &
37 40           ONLY : OUTPUT_UNIT
38 41
…  …
60 63
61 64    SUBROUTINE init_progress_bar
65       !------------------------------------------------------------------------------!
66       ! Description:
67       ! ------------
68       ! Initialize the progress bar/file
69       !------------------------------------------------------------------------------!
62 70
63 71        IMPLICIT NONE
…  …
74 82        ENDIF
75 83
76           bar = '____________________________________________________________'
77           crosses = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
84           IF ( batch_job ) THEN
85
86              CALL check_open ( 117 )
87              WRITE ( 117, FMT='(A20,/)' ) run_identifier
88
89           ELSE
90              bar = '____________________________________________________________'
91              crosses = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
78 92    !
79           !-- Line feed on stdout to seperate the progress bar from previous messages
80              WRITE ( OUTPUT_UNIT, '(1X)' )
93           !-- Line feed on stdout to seperate the progress bar from previous messages
94              WRITE ( OUTPUT_UNIT, '(1X)' )
81 95    #if defined( __intel_compiler )
82 96    !
83           !-- The Intel compiler does not allow to immediately flush the output buffer
84           !-- in case that option ADVANCE='NO' is used in the write statement.
85           !-- A workaround is to set a special carriage control feature and use "+" as
86           !-- first output character, but this non-standard and only available with the
87           !-- Intel compiler
88              OPEN ( OUTPUT_UNIT, CARRIAGECONTROL='FORTRAN' )
97           !-- The Intel compiler does not allow to immediately flush the output buffer
98           !-- in case that option ADVANCE='NO' is used in the write statement.
99           !-- A workaround is to set a special carriage control feature and use "+" as
100          !-- first output character, but this non-standard and only available with the
101          !-- Intel compiler
102             OPEN ( OUTPUT_UNIT, CARRIAGECONTROL='FORTRAN' )
89 103   #endif
104
105          ENDIF
106
90 107       initialized = .TRUE.
91 108
…  …
97 114   ! Description:
98 115   ! ------------
99       !
116      ! Print progress data to standard output (interactive) or to file (batch jobs)
100 117  !------------------------------------------------------------------------------!
101 118
102 121      REAL(wp) :: remaining_time_in_percent !: remaining time to be simulated
105 122                                            !: in the job
123          REAL(wp) :: remaining_time_in_percent_total !: total remaining time of
124                                                      !: the job chain
125
126          IF ( .NOT. initialized ) CALL init_progress_bar
127
128
129          remaining_time_in_percent = &
130             ( simulated_time - simulated_time_at_begin ) / time_to_be_simulated
131
132          remaining_time_in_percent_total = ( simulated_time / end_time )
106 133
107
108          !-- Porgress bar does not make sense in batch mode (and also ADVANCE=no does
109          !-- not properly work in batch mode on Cray XC30)
110          IF ( batch_job ) RETURN
135          !-- In batch mode, use a file (PROGRESS), otherwise use progress bar
136          IF ( batch_job ) THEN
111 137
112          IF ( .NOT. initialized ) CALL init_progress_bar
138             BACKSPACE ( 117 )
139             WRITE ( 117, FMT='(F5.2,1X,F5.2)' ) remaining_time_in_percent, &
140                                                 remaining_time_in_percent_total
141             CALL local_flush( 117 )
142
143          ELSE
144
113 145  !
114          !-- Calculate length of progress bar
115          remaining_time_in_percent = &
116             ( simulated_time - simulated_time_at_begin ) / time_to_be_simulated
146          !-- Calculate length of progress bar
147             ilength = remaining_time_in_percent * 60.0_wp
148             ilength = MIN( ilength, 60 )
117 149
118          ilength = remaining_time_in_percent * 60.0_wp
119          ilength = MIN( ilength, 60 )
120
121          bar(1:ilength) = crosses(1:ilength)
150             bar(1:ilength) = crosses(1:ilength)
122 151
123 152  #if defined( __intel_compiler )
124          WRITE ( OUTPUT_UNIT, '(A,6X,''['',A,''] '',F5.1,'' left'')' ) &
125             '+', bar, &
126             MAX( 0.0_wp, ( 1.0_wp - remaining_time_in_percent ) * 100.0_wp )
153             WRITE ( OUTPUT_UNIT, '(A,6X,''['',A,''] '',F5.1,'' left'')' ) &
154                '+', bar, &
155                MAX( 0.0_wp, ( 1.0_wp - remaining_time_in_percent ) * &
156                     100.0_wp )
127 157  #else
128          WRITE ( OUTPUT_UNIT, '(A,6X,''['',A,''] '',F5.1,'' left'')', &
129             ADVANCE='NO' ) CHAR( 13 ), bar, &
130             MAX( 0.0_wp, ( 1.0_wp - remaining_time_in_percent ) * 100.0_wp )
158             WRITE ( OUTPUT_UNIT, '(A,6X,''['',A,''] '',F5.1,'' left'')', &
159                ADVANCE='NO' ) CHAR( 13 ), bar, &
160                MAX( 0.0_wp, ( 1.0_wp - remaining_time_in_percent ) * &
161                     100.0_wp )
131 162  #endif
132          CALL local_flush( OUTPUT_UNIT )
163             CALL local_flush( OUTPUT_UNIT )
164
165          ENDIF
133 166
134 167  END SUBROUTINE output_progress_bar
135 168
136 169  SUBROUTINE finish_progress_bar
170      !------------------------------------------------------------------------------!
171      ! Description:
172      ! ------------
173      ! Finalization of the progress bar/file
174      !------------------------------------------------------------------------------!
137 175
138 176      IMPLICIT NONE
139 177
140          IF ( batch_job ) RETURN
178          IF ( batch_job ) THEN
141 179
180             CALL close_file ( 117 )
181
182          ELSE
183
142 184  #if defined( __intel_compiler )
143 185  !
144          !-- Reset to the default carriage control
145             OPEN ( OUTPUT_UNIT, CARRIAGECONTROL='LIST' )
186          !-- Reset to the default carriage control
187             OPEN ( OUTPUT_UNIT, CARRIAGECONTROL='LIST' )
146 188  #endif
147 189  !
148          !-- Line feed when simulation has finished
149             WRITE ( OUTPUT_UNIT, '(1X)' )
190          !-- Line feed when simulation has finished
191             WRITE ( OUTPUT_UNIT, '(1X)' )
192
193          ENDIF
150 194
151 195  END SUBROUTINE finish_progress_bar
152 196
197
153 198  END MODULE progress_bar
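In batch mode the routine now keeps a single progress record in the file PROGRESS up to date by stepping back with BACKSPACE before every rewrite, so the file always contains the run identifier plus the latest pair of progress values. The sketch below mimics that pattern as a self-contained program; the STATUS='REPLACE' OPEN, the standard FLUSH statement and the loop values stand in for PALM's check_open/close_file/local_flush machinery and real progress data, and the run identifier is invented:

PROGRAM progress_file_demo
!-- Mimics the batch-mode progress file: write a header once, then keep
!-- overwriting the last record with the current progress values.
   IMPLICIT NONE
   INTEGER           :: step
   REAL              :: done_run, done_total
   CHARACTER(LEN=20) :: run_identifier = 'example_run'   ! invented identifier

   OPEN ( 117, FILE='PROGRESS', FORM='FORMATTED', STATUS='REPLACE' )
   WRITE ( 117, FMT='(A20,/)' )  run_identifier
   WRITE ( 117, FMT='(F5.2,1X,F5.2)' )  0.0, 0.0         ! initial progress record

   DO  step = 1, 4
      done_run   = 0.25 * step        ! fraction of this run completed (invented)
      done_total = 0.10 * step        ! fraction of the whole job chain (invented)
      BACKSPACE ( 117 )               ! step back to the progress record
      WRITE ( 117, FMT='(F5.2,1X,F5.2)' )  done_run, done_total
      FLUSH ( 117 )                   ! Fortran 2003 stand-in for local_flush
   END DO

   CLOSE ( 117 )
END PROGRAM progress_file_demo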
palm/trunk/SOURCE/read_3d_binary.f90
r1401 r1468
20 20    ! Current revisions:
21 21    ! -----------------
22       !
22       ! Adapted for use on up to 6-digit processor cores
23 23    !
24 24    ! Former revisions:
…  …
108 108     IMPLICIT NONE
109 109
110         CHARACTER (LEN= 5) :: myid_char_save
110         CHARACTER (LEN=7) :: myid_char_save
111 111     CHARACTER (LEN=10) :: binary_version
112 112     CHARACTER (LEN=10) :: version_on_file
…  …
296 296  !
297 297  !-- Set the filename (underscore followed by four digit processor id)
298         WRITE (myid_char,'(''_'',I 4.4)') j
298         WRITE (myid_char,'(''_'',I6.6)') j
299 299     WRITE (9,*) 'myid=',myid,' opening file "',myid_char,'"'
300 300     CALL local_flush( 9 )
301 301
302 302  !
303      !-- Open the restart file. If this file has been created by PE0 (_0000 ),
303      !-- Open the restart file. If this file has been created by PE0 (_000000),
304 304  !-- the global variables at the beginning of the file have to be skipped
305 305  !-- first.
palm/trunk/UTIL/combine_plot_fields.f90
r1395 r1468
20 20    ! Current revisions:
21 21    ! -----------------
22       !
22       ! Adapted for use on up to 6-digit processor cores (not tested)
23 23    !
24 24    ! Former revisions:
…  …
87 87    !-- Local variables
88 88       CHARACTER (LEN=2) :: modus, model_string
89          CHARACTER (LEN= 4) :: id_string
89          CHARACTER (LEN=6) :: id_string
90 90       CHARACTER (LEN=10) :: dimname, var_name
91 91       CHARACTER (LEN=40) :: filename
…  …
187 187  !-- create any output for this cross-section.
188 188     danz = 0
189         WRITE (id_string,'(I 4.4)') danz
189         WRITE (id_string,'(I6.6)') danz
190 190     INQUIRE ( &
191 191        FILE='PLOT2D_'//modus//TRIM( model_string )//'_'//id_string, &
…  …
200 200           FORM='UNFORMATTED' )
201 201        danz = danz + 1
202            WRITE (id_string,'(I 4.4)') danz
202            WRITE (id_string,'(I6.6)') danz
203 203        INQUIRE ( &
204 204           FILE='PLOT2D_'//modus//TRIM( model_string )//'_'//id_string, &
…  …
604 604     IF ( .NOT. netcdf_parallel ) THEN
605 605        danz = 0
606            WRITE (id_string,'(I 4.4)') danz
606            WRITE (id_string,'(I6.6)') danz
607 607        INQUIRE ( &
608 608           FILE='PLOT3D_DATA'//TRIM( model_string )//'_'//TRIM( id_string ), &
…  …
626 626              FORM='UNFORMATTED')
627 627           danz = danz + 1
628               WRITE (id_string,'(I 4.4)') danz
628               WRITE (id_string,'(I6.6)') danz
629 629           INQUIRE ( &
630 630              FILE='PLOT3D_DATA'//TRIM( model_string )//'_'//TRIM(id_string), &
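combine_plot_fields discovers the per-PE files by counting upwards from 0 and probing with INQUIRE until the next file in the sequence is missing, so the only change needed here is the wider I6.6 suffix. A reduced sketch of that probing loop; the file prefix matches the PLOT3D case above, but the program itself is an illustration, not the full utility:

PROGRAM probe_pe_files_demo
!-- Counts per-PE output files PLOT3D_DATA_000000, PLOT3D_DATA_000001, ...
!-- by probing with INQUIRE until the next file in the sequence is missing.
   IMPLICIT NONE
   CHARACTER(LEN=6) :: id_string
   INTEGER          :: danz
   LOGICAL          :: found

   danz = 0
   WRITE ( id_string, '(I6.6)' )  danz
   INQUIRE ( FILE='PLOT3D_DATA_'//TRIM( id_string ), EXIST=found )

   DO WHILE ( found )
      danz = danz + 1
      WRITE ( id_string, '(I6.6)' )  danz
      INQUIRE ( FILE='PLOT3D_DATA_'//TRIM( id_string ), EXIST=found )
   END DO

   PRINT *, 'number of per-PE files found: ', danz
END PROGRAM probe_pe_files_demo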
palm/trunk/UTIL/combine_plot_fields_single_open.f90
r1310 r1468
20 20    ! Current revisions:
21 21    ! -----------------
22       ! Adapted for use on up to 6-digit processor cores (not tested)
22 23    !
23 24    ! Former revisions:
…  …
43 44    !-- Lokale Variablen
44 45       CHARACTER (LEN=2) :: modus
45          CHARACTER (LEN= 7) :: id_char
46          CHARACTER (LEN=9) :: id_char
46 47
47 48       INTEGER, PARAMETER :: spk = SELECTED_REAL_KIND( 6 )
…  …
67 68    !-- Pruefen, ob Basisdatei von PE0 vorhanden
68 69       danz = 0
69          WRITE (id_char,'(A2,''_'',I 4.4)') modus, danz
70          WRITE (id_char,'(A2,''_'',I6.6)') modus, danz
70 71       INQUIRE ( FILE='PLOT2D_'//id_char, EXIST=found )
71 72    !
…  …
74 75
75 76          danz = danz + 1
76             WRITE (id_char,'(A2,''_'',I 4.4)') modus, danz
77             WRITE (id_char,'(A2,''_'',I6.6)') modus, danz
77 78          INQUIRE ( FILE='PLOT2D_'//id_char, EXIST=found )
…  …
98 99    !
99 100   !-- Prozessordatei oeffnen
100            WRITE (id_char,'(A2,''_'',I 4.4)') modus, id
101            WRITE (id_char,'(A2,''_'',I6.6)') modus, id
101 102        OPEN ( 1, FILE='PLOT2D_'//id_char, FORM='UNFORMATTED', &
102 103               POSITION='ASIS' )
…  …
168 169  !-- Pruefen, ob Basisdatei von PE0 vorhanden
169 170     danz = 0
170         WRITE (id_char,'(I 4.4)') danz
171         WRITE (id_char,'(I6.6)') danz
171 172     INQUIRE ( FILE='PLOT3D_DATA_'//TRIM( id_char ), EXIST=found )
172 173
…  …
180 181
181 182        danz = danz + 1
182            WRITE (id_char,'(I 4.4)') danz
183            WRITE (id_char,'(I6.6)') danz
183 184        INQUIRE ( FILE='PLOT3D_DATA_'//TRIM( id_char ), EXIST=found )
…  …
209 210  !
210 211  !-- Prozessordatei oeffnen
211            WRITE (id_char,'(I 4.4)') id
212            WRITE (id_char,'(I6.6)') id
212 213        OPEN ( 1, FILE='PLOT3D_DATA_'//TRIM( id_char ), FORM='UNFORMATTED', &
213 214               POSITION='ASIS' )