#!/bin/ksh
# subjob - plot shell script    Version:  @(#)SUBJOB 1.0
# $Id: subjob 814 2012-01-31 18:31:48Z gryschka $
#
# Procedure for the automatic generation of batch jobs that are to run under
# NQS and whose result (dayfile) is to be sent back to the host on which the
# job was generated
#
# last change:
# 29/06/94 - Siggi  - started writing the original version
# 08/07/94 - Siggi  - original version completed (version 1.0)
# 06/02/98 - Siggi  - berte validated
# 27/01/01 - Siggi  - ground.yonsei.ac.kr validated, jobs to the T3E in Korea possible
# 08/02/01 - Siggi  - all subjob messages translated into English
# 25/05/02 - Siggi  - support for the LoadLeveler
# 30/05/02 - Siggi  - validation for the ibm machine in Seoul (nobel) as well as general adjustments for ibm machines
# 15/10/02 - Siggi  - new default job class (p_normal) for the IBM in Seoul, return transfer of the job protocol switched off for this machine
# 31/10/02 - Siggi  - berni validated
# 06/11/02 - Siggi  - new job classes on ibmb and ibmh
# 08/11/02 - Siggi  - quanero validated
# 11/12/02 - Siggi  - notification switched off for transfer jobs
# 23/01/03 - Siggi  - hostname nobel changed to nobela
# 06/02/03 - Siggi  - gregale validated
# 12/02/03 - Siggi  - orkan and maestro validated
# 21/02/03 - Siggi  - all nobel nodes in Seoul validated
# 12/03/03 - Siggi  - nec at DKRZ validated
# 13/03/03 - Siggi  - new nqs resource variable Memory
# 07/04/03 - Siggi  - processor request option -c on nech needs tasks per node
# 11/04/03 - Siggi  - network on ibms has attribute "shared"
# 31/07/03 - Siggi  - nqs2 on nech implemented (provisional: -h nech2), cxxl added to ibmh
# 29/08/03 - Siggi  - changes in job queues and communication system on ibms
# 24/10/03 - Siggi  - using alternate hanni address 130.75.4.2
# 30/10/03 - Siggi  - nech is not supported any more
# 10/11/03 - Siggi  - nech2 renamed to nech
# 20/11/03 - Siggi  - submit command on nech changed from qsub.test to qsub
# 29/03/04 - Siggi  - ground not supported any more, gfdl3 validated
# 31/03/04 - Siggi  - new option -N for node usage
# 12/04/04 - Siggi  - scp2 instead of scp used for transfer from decalpha due to error in ssh installation (otherwise a prompt for the password appears)
# 23/07/04 - Siggi  - changes due to the new berni configuration (federation switch)
# 01/09/04 - Gerald - new job-classes on hanni
# 08/09/04 - Siggi  - hanni IP address changed to 130.75.4.10
# 23/11/04 - Siggi  - new job class cdata on hanni and berni
# 03/12/04 - Siggi  - notification on ibm switched off in case of delete_dayfile = true, node usage in cdev set to shared
# 16/02/05 - Gerald - hababai validated
# 29/03/05 - Micha  - new job class channi on hanni
# 11/05/05 - Siggi  - ConsumableMemory is now required as resource keyword on ibms
# 24/05/05 - Siggi  - default queue on ibms changed from p_normal_1.3 to p_normal
# 30/06/05 - Siggi  - network changed for queue cdev from "us" to "ip"
# 12/07/05 - Siggi  - in network.mpi on ibmh/ibmb "csss" changed to "sn_all", new job class cexp
# 08/09/05 - Siggi  - IP-address of gfdl3 changed
# 31/10/05 - Siggi  - new job class pp on hurricane, serial jobs on hurricane (with -X0)
# 01/11/05 - Siggi  - missing queue for jobs submitted on nech (for nech) added
# 30/12/05 - Siggi  - change of IP addresses in subnet 130.75.105
# 09/02/06 - Siggi  - ibmy admitted
# 10/02/06 - Siggi  - scp2 changed to /bin/scp on decalpha
# 13/04/06 - Siggi  - ostria admitted
# 18/04/06 - Siggi  - new option -O for OpenMP usage
# 24/05/06 - Siggi  - lctit admitted, ftpjob renamed scpjob
# 25/07/06 - Siggi  - gfdl5 (ibmy) admitted for submitting jobs
# 27/09/06 - Siggi  - breg/hreg extended with berni/hanni
# 25/10/06 - Siggi  - data_limit set to 1.76 GByte on hanni and berni
# 28/11/06 - Siggi  - levanto admitted
# 13/02/07 - Siggi  - hpmuk related code removed
# 01/03/07 - Siggi  - adjustments for RIAM machines gate and NEC-SX8 (n-sx)
# 12/04/07 - Siggi  - option -f (filetransfer protocol) removed, scp only
# 27/07/07 - Siggi  - autan admitted
# 03/08/07 - Marcus - lcfimm admitted
# 08/10/07 - Siggi  - further job classes added for hanni (csoloh...)
# 15/10/07 - Siggi  - preliminary adjustments for lctit based on Jin's suggestions
# 19/10/07 - Marcus - add new optional argument -g group_number
# 19/10/07 - Siggi  - a ";" was missing in the last change done by Marcus
# 30/10/07 - Marcus - further adjustments for queues on lctit
# 15/05/08 - Siggi  - adjustments for lcxt4 (Bergen Center for Computational Science)
# 14/07/08 - Siggi  - adjustments for lcsgih
# 23/09/08 - Gerald - paesano admitted
# 02/10/08 - Siggi  - PBS adjustments for lcxt4
# 02/03/09 - Siggi  - adjustments for new NEC-SX9 at RIAM
# 16/04/09 - Marcus - adjustments for lcsgib and lcsgih
# 21/04/09 - Siggi  - adjustments for new IBM at DKRZ, which is now ibmh
# 18/05/09 - Siggi  - settings for serial jobs on lcsgi changed
# 24/06/09 - BjornM - adjustments for lcxt4 (loading modules manually)
# 08/07/09 - Siggi  - option -e added (email notification on lcsgih/b)
# 20/07/09 - Siggi  - on lcsgi, jobs for returning the job protocol are now run on the data nodes (feature=data)
# 25/08/09 - BjornM - adapted for lck
# 26/08/09 - Marcus - caurus admitted; optional qos feature hiprio on lcsgi
# 03/09/09 - Siggi  - PBS sgi feature directive only used if explicitly set in the config file by the user
# 16/10/09 - Carolin- adjustments for archiving on SGI-ICE of binary files; adding special1q
# 01/12/09 - BjornM - re-adjustments for lcxt4, added sno (130.75.105.113)
# 16/10/09 - Carolin- adding permq
# 01/02/10 - Siggi  - adapted for lcxt5m and lckyoto (Fujitsu HX600)
# 03/02/10 - Siggi  - bug in serial jobs removed
# 26/02/10 - BjornM - re-adjustments for lcxt4 (new modules, email notification)
# 01/03/10 - Siggi  - loading of modules controlled by environment variable module_calls
# 17/08/10 - BjornM - re-adjustments for lcxt4 (location of qsub)
# 25/08/10 - BjornM - account geofysisk replaced by guest for lcxt4
# 25/08/10 - Siggi  - new variable project_account in pbs-statements for lcxt4
# 08/12/10 - Siggi  - initialization of the module command changed for SGI-ICE/lcsgi, adjustments for Kyushu Univ. (lcrte, ibmku)
# 14/12/10 - Siggi  - adjustments for new Tsubame system at Tokyo institute of technology (lctit)
# 02/02/11 - Siggi  - further adjustments for Tsubame concerning OpenMP
# 06/03/11 - Siggi  - adjustments for ibmkisti
# 17/03/11 - Siggi  - adjustments for OpenMP usage on ibmkisti
# 03/04/11 - Micha  - added lckordi
# 17/08/11 - Siggi  - hicegate0 added
# 18/08/11 - Siggi  - workaround on lcsgi in order to avoid appends to file last_job_transfer_protocol
# 21/08/11 - Siggi  - inferno admitted
# 29/11/11 - Siggi  - adjustments for lcsgih/lcsgib queues, pingui admitted
# 21/12/11 - Theres - solano admitted
# 31/01/12 - Matthias - adjustments for ibmh


# VARIABLE DECLARATIONS + DEFAULT VALUES
delete_dayfile=false
email_notification=none
group_number=none
locat=normal
no_default_queue=none
no_submit=false
job_catalog="~/job_queue"
job_name=none
local_user=$LOGNAME
node_usage=shared
numprocs=0
punkte="..........................................................."
submcom=qsub
queue=default
remote_host=none
remote_user=""
verify=true

typeset -i   cputime=0  memory=0  Memory=0  minuten  resttime  sekunden  stunden
typeset -i   inumprocs  mpi_tasks=0  nodes=0  processes_per_node=0  tasks_per_node=0  threads_per_task=1
typeset -L20 spalte1
typeset -R40 spalte2
typeset -L60 spalte3


# ERROR HANDLING
# ON EXIT:
trap 'if [[ $locat != normal ]]
      then
         case  $locat  in
            (option)  printf "\n  --> available options can be displayed"
                      printf " by typing:"
                      printf "\n      \"subjob ?\" \n";;
            (ftpcopy|parameter|scp|verify)  printf "\n";;
            (*)       printf "\n  +++ unknown error"
                      printf "\n      please inform S. Raasch!\n"
         esac
         [[ -f $job_to_send ]]  &&  rm  $job_to_send
         printf "\n\n+++ SUBJOB killed \n\n"
      fi' exit


# ON TERMINAL BREAK:
trap '[[ -f $job_to_send ]]  &&  rm  $job_to_send
      printf "\n\n+++ SUBJOB killed \n\n"
      exit
     ' 2


# DETERMINE THE NAME OF THE LOCAL HOST
local_host=$(hostname)
# DEFINE HOST-SPECIFIC VARIABLES AND CHECK WHETHER THE LOCAL HOST
# IS ALLOWED AT ALL
# Note: one of the entries for "lck" or "lckordi" always has to be commented
# out, because the hostname (node*) is the same for both machines
case  $local_host  in
     (atmos)                 local_addres=172.20.25.35;   local_host=lcide;;
     (autan)                 local_addres=130.75.105.57;  local_host=lcmuk;;
     (bora)                  local_addres=130.75.105.103; local_host=lcmuk;;
     (bd1)                   local_addres=130.73.232.64;  local_host=lcsgib;;
     (bd2)                   local_addres=130.73.232.65;  local_host=lcsgib;;
     (bd3)                   local_addres=130.73.232.66;  local_host=lcsgib;;
     (bd4)                   local_addres=130.73.232.67;  local_host=lcsgib;;
     (b01*|bicegate1)        local_addres=130.73.232.102; local_host=lcsgib;;
     (bicegate2)             local_addres=130.73.232.103; local_host=lcsgib;;
     (blizzard1)             local_addres=136.172.40.15;  local_host=ibmh;;
     (breva)                 local_addres=130.75.105.98;  local_host=lcmuk;;
     (caurus)                local_addres=130.75.105.19;  local_host=lcmuk;;
     (climate*)              local_addres=165.132.26.68;  local_host=lcyon;;
     (compute-*.local)       local_addres=172.20.4.2;     local_host=lcfimm;;
     (cs*)                   local_addres=136.172.44.131; local_host=nech;;
     (elephanta)             local_addres=130.75.105.6;   local_host=lcmuk;;
     (fimm.bccs.uib.no)      local_addres=172.20.4.2;     local_host=lcfimm;;
     (node*)                 local_addres=165.132.26.61   local_host=lck;;
   # (node*)                 local_addres=210.219.61.8    local_host=lckordi;;
     (gaia*)                 local_addres=150.183.146.24; local_host=ibmkisti;;
     (gallego)               local_addres=130.75.105.10;  local_host=lcmuk;;
     (gfdl5)                 local_addres=165.132.26.58;  local_host=ibmy;;
     (gfdl3.yonsei.ac.kr)    local_addres=165.132.26.56;  local_host=decalpha;;
     (gregale)               local_addres=130.75.105.109; local_host=lcmuk;;
     (hababai)               local_addres=130.75.105.108; local_host=lcmuk;;
     (hexagon.bccs.uib.no)   local_addres=129.177.20.113; local_host=lcxt4;;
     (hd1)                   local_addres=130.75.4.104;   local_host=lcsgih;;
     (hd2)                   local_addres=130.75.4.105;   local_host=lcsgih;;
     (hd3)                   local_addres=130.75.4.106;   local_host=lcsgih;;
     (hd4)                   local_addres=130.75.4.107;   local_host=lcsgih;;
     (hicegate0)             local_addres=130.75.4.101;   local_host=lcsgih;;
     (h01*|hicegate1)        local_addres=130.75.4.102;   local_host=lcsgih;;
     (hicegate2)             local_addres=130.75.4.103;   local_host=lcsgih;;
     (hx*)                   local_addres=133.3.51.11;    local_host=lckyoto;;
     (inferno)               local_addres=130.75.105.5;   local_host=lcmuk;;
     (irifi)                 local_addres=130.75.105.104; local_host=lcmuk;;
     (sno)                   local_addres=130.75.105.113; local_host=lcmuk;;
     (levanto)               local_addres=130.75.105.45;  local_host=lcmuk;;
     (maestro)               local_addres=130.75.105.2;   local_host=lcmuk;;
     (meteo-login*)          local_addres=193.166.211.144;local_host=lcxt5m;;
     (nid*)                  local_addres=129.177.20.113; local_host=lcxt4;;
     (nobel*)                local_addres=150.183.5.101;  local_host=ibms;;
     (orkan)                 local_addres=130.75.105.3;   local_host=lcmuk;;
     (ostria)                local_addres=130.75.105.106; local_host=lcmuk;;
     (paesano)               local_addres=130.75.105.46;  local_host=lcmuk;;
     (pingui)                local_addres=134.106.74.118; local_host=lcfor;;
     (quanero)               local_addres=130.75.105.107; local_host=lcmuk;;
     (rte*)                  local_addres=133.5.185.60;   local_host=lcrte;;
     (scirocco)              local_addres=172.20.25.41;   local_host=lcmuk;;
     (solano)                local_addres=130.75.105.110; local_host=lcmuk;;
     (sun1|sun2)             local_addres=130.75.6.1;     local_host=unics;;
     (sx-*)                  local_addres=172.16.1.131;   local_host=necriam;;
     (r1*)                   local_addres=130.75.4.102;   local_host=lcsgih;;
     (r2*)                   local_addres=130.73.232.102; local_host=lcsgib;;
     (t2a*)                  local_addres=10.1.6.165;     local_host=lctit;;
     (vorias)                local_addres=172.20.25.43;   local_host=lcmuk;;
     (*.cc.kyushu-u.ac.jp)   local_addres=133.5.4.129;    local_host=ibmku;;
     (*)                     printf "\n  +++ \"$local_host\" unknown";
                             printf "\n      please inform S. Raasch!";
                             locat=parameter; exit;;
esac
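# Illustrative sketch (not part of the original script): a further local
# machine would be registered by adding one more pattern to the case list
# above. Hostname "mymachine" and address 130.75.105.99 are made-up
# placeholders; the assigned host class (here lcmuk) must be one that the
# rest of subjob already knows.
#
#    (mymachine)             local_addres=130.75.105.99;  local_host=lcmuk;;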
# BY DEFAULT, SET REMOTE HOST = LOCAL HOST
remote_host=$local_host

# READ THE PROCEDURE OPTIONS
while  getopts  :c:dDe:g:h:m:n:N:O:q:t:T:u:vX:  option
do
   case  $option  in
      (c)   job_catalog=$OPTARG;;
      (d)   delete_dayfile=true;;
      (D)   no_submit=true;;
      (e)   email_notification=$OPTARG;;
      (g)   group_number=$OPTARG;;
      (h)   remote_host=$OPTARG;;
      (m)   memory=$OPTARG;;
      (n)   job_name=$OPTARG;;
      (N)   node_usage=$OPTARG;;
      (O)   threads_per_task=$OPTARG;;
      (q)   no_default_queue=$OPTARG;;
      (t)   cputime=$OPTARG;;
      (T)   tasks_per_node=$OPTARG;;
      (u)   remote_user=$OPTARG;;
      (v)   verify=false;;
      (X)   numprocs=$OPTARG;;
      (\?)  printf "\n  +++ Option $OPTARG unknown \n";
            locat=option; exit;;
   esac
done

# GET THE NAME OF THE JOB FILE AS THE NEXT ARGUMENT
shift OPTIND-1; file_to_send=$1

# PRINT A SHORT DESCRIPTION OF HOW TO CALL THIS SCRIPT
if [ "$1" = "?" ]
then
  (printf "\n  *** subjob can be called as follows:\n"
   printf "\n      subjob -c.. -d -D -h.. -m.. -q.. -t.. -u.. -v  <job-file>\n"
   printf "\n      Description of available options:\n"
   printf "\n      Option  Description                         Default-Value"
   printf "\n        -c    job-input- and output-catalog       ~/job_queue"
   printf "\n        -d    no job-protocol will be created     ---"
   printf "\n        -D    only the job-file will be created   ---"
   printf "\n        -h    execution host, available hosts:    $remote_host"
   printf "\n              ibm, ibmh, ibmkisti, ibmku, ibms, ibmy, lc...,"
   printf "\n              lctit, nech, necriam, unics"
   printf "\n        -m    memory demand per process in MByte  ---"
   printf "\n        -n    jobname                             "
   printf "\n        -O    threads per task (for OpenMP usage) 1"
   printf "\n        -q    job-queue to be used                default"
   printf "\n        -t    allowed cpu-time in seconds         ---"
   printf "\n        -T    tasks per node (on parallel hosts)  ---"
   printf "\n        -u    username on execution host          from .netrc"
   printf "\n        -v    no prompt for confirmation          ---"
   printf "\n        -X    # of processors (on parallel hosts) 1"
   printf "\n "
   printf "\n      The only possible positional parameter is <job-file>:"
   printf "\n      The complete NQS-job must be provided here."
   printf "\n      <job-file>=? creates this outline\n\n") | more
   exit
fi

# CHECK WHETHER A JOB FILE WAS GIVEN AND WHETHER IT EXISTS
if [[ "$file_to_send" = "" ]]
then
   printf "\n  +++ job-file missing"
   locat=parameter; exit
else
   if [[ -f $file_to_send ]]
   then
      true
   else
      printf "\n  +++ job-file: "
      printf "\n           $file_to_send"
      printf "\n      does not exist"
      locat=parameter; exit
   fi
fi

# IF NO JOB NAME WAS GIVEN, THE JOB NAME IS SET TO THE NAME OF THE JOB FILE.
# PREREQUISITE: THE JOB FILE NAME MUST NOT CONTAIN ANY PATH
if [[ $job_name = none ]]
then
   job_name=$file_to_send
fi
if [[ $(echo $job_name | grep -c "/") != 0 ]]
then
   printf "\n  +++ job-file name: "
   printf "\n           $job_name"
   printf "\n      must not contain \"/\"-characters"
   locat=parameter; exit
fi
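# Illustrative example call (not part of the original script), assuming a job
# file "myjob" exists in ~/job_queue: it would be submitted to host ibmh with
# 64 processors, 32 tasks per node, 1000 MByte of memory per process and a
# cpu-time limit of 3600 seconds by:
#
#    subjob -h ibmh -X 64 -T 32 -m 1000 -t 3600 myjob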
# DEFINE HOST-SPECIFIC QUANTITIES, OR ABORT IN CASE OF AN INVALID HOST
# OR IF NO HOST WAS GIVEN
if [[ $remote_host = none ]]
then
   printf "\n  +++ host missing"
   locat=option; exit
else
   case  $remote_host  in
      (ibm)      queue=p690_standard; remote_addres=134.76.99.81;  submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
      (ibmh)     queue=no_class;      remote_addres=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
      (ibmkisti) queue=class.32plus;  remote_addres=150.183.146.24;submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
      (ibmku)    queue=s4;            remote_addres=133.5.4.129;   submcom=/usr/local/bin/llsubmit;;
      (ibms)     queue=p_normal;      remote_addres=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
      (ibmy)     queue=parallel;      remote_addres=165.132.26.58; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
      (lcfimm)   remote_addres=172.20.4.2;      submcom=/opt/torque/bin/qsub;;
      (lckyoto)  remote_addres=133.3.51.11;     submcom=/thin/local/bin/qsub;;
      (lcsgib)   queue=smallq; remote_addres=130.73.232.104; submcom=/opt/moab/bin/msub;;
      (lcsgih)   queue=smallq; remote_addres=130.75.4.101;   submcom=/opt/moab/bin/msub;;
      (lck)      remote_addres=165.132.26.61;   submcom=/usr/torque/bin/qsub;;
      (lckordi)  remote_addres=210.219.61.8;    submcom=/usr/torque/bin/qsub;;
      (lctit)    queue=S; remote_addres=10.1.6.165; submcom=/opt/pbs/tools/bin/t2sub;;
      (lcxt4)    remote_addres=129.177.20.113;  submcom=/opt/torque/2.4.9-snap.201005191035/bin/qsub;;
      (lcxt5m)   remote_addres=193.166.211.144; submcom=/opt/pbs/10.1.0.91350/bin/qsub;;
      (lcyon)    remote_addres=165.132.26.68;   submcom=/usr/torque/bin/qsub;;
      (nech)     qsubmem=memsz_job; qsubtime=cputim_job; remote_addres=136.172.44.147; submcom="/usr/local/bin/qsub";;
      (necriam)  qsubmem=memsz_job; qsubtime=cputim_job; remote_addres=172.16.1.131;   submcom="/usr/bin/nqsII/qsub";;
      (vpp)      qsubmem=m; qsubtime=t; queue=vpp;   remote_addres=130.75.4.130;;
      (unics)    qsubmem=d; qsubtime=t; queue=unics; remote_addres=130.75.6.1;;
      (*)        printf "\n  +++ hostname \"$remote_host\" not allowed";
                 locat=parameter; exit;;
   esac
fi
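# Illustrative sketch (not part of the original script): a new execution host
# would be added as a further branch of the case list above, setting at least
# remote_addres and submcom, plus a default queue where applicable. The name
# "lcfoo", the address 192.0.2.10 and the torque path are made-up placeholders:
#
#    (lcfoo)    queue=standard; remote_addres=192.0.2.10; submcom=/opt/torque/bin/qsub;;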
# IF GIVEN, CHECK WHETHER THE SPECIFIED QUEUE IS ALLOWED
if [[ $no_default_queue != none ]]
then
   error=false
   ndq=$no_default_queue
   case  $remote_host  in
      (ibm)           case $ndq in  (p690_express|p690_standard|p690_long)  error=false;;  (*)  error=true;;  esac;;
      (ibmh)          case $ndq in  (no_class)  error=false;;  (*)  error=true;;  esac;;
      (ibmkisti)      case $ndq in  (class.32plus|class.1-2|class.2-32)  error=false;;  (*)  error=true;;  esac;;
      (ibmku)         case $ndq in  (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s)  error=false;;  (*)  error=true;;  esac;;
      (ibms)          case $ndq in  (express|normal|p_express|p_normal|p_normal_1.3|p_normal_1.7|grand)  error=false;;  (*)  error=true;;  esac;;
      (ibmy)          case $ndq in  (parallel)  error=false;;  (*)  error=true;;  esac;;
      (lckyoto)       case $ndq in  (eh|ph)  error=false;;  (*)  error=true;;  esac;;
      (lcsgib|lcsgih) case $ndq in  (testq|serialq|smallq|mediumq|bigq|workq|dataq|permq|special1q)  error=false;;  (*)  error=true;;  esac;;
      (lctit)         case $ndq in  (G|L128|L256|L512H|S|S96|V)  error=false;;  (*)  error=true;;  esac;;
      (t3eb)          case $ndq in  (berte|p50|p100|p392|forfree|p25himem)  error=false;;  (*)  error=true;;  esac;;
      (necriam)       case $ndq in  (SP|SS|P6)  error=false;;  (*)  error=true;;  esac;;
      (t3eh)          case $ndq in  (para_t3e|em|k|l|lm|comp_t3e|c|p|ht)  error=false;;  (*)  error=true;;  esac;;
      (t3ej2|t3ej5)   case $ndq in  (low|normal|high)  error=false;;  (*)  error=true;;  esac;;
      (t3es)          case $ndq in  (batch|serial-4|pe4|p48|pe16|pe32|pe64|pe128)  error=false;;  (*)  error=true;;  esac;;
      (unics)         case $ndq in  (unics|ht)  error=false;;  (*)  error=true;;  esac;;
   esac
   if [[ $error = true ]]
   then
      printf "\n  +++ queue \"$no_default_queue\" on host \"$remote_host\" not allowed"
      locat=parameter; exit
   else
      queue=$no_default_queue
   fi
fi

# CHECK THE CPU TIME AND SPLIT IT INTO HOURS, MINUTES AND SECONDS
done=false
while [[ $done = false ]]
do
   if (( $cputime <= 0 ))
   then
      printf "\n  +++ wrong cpu-time or cpu-time missing"
      printf "\n  >>> Please type cpu-time in seconds as INTEGER:"
      printf "\n  >>> "
      read  cputime  1>/dev/null  2>&1
   else
      done=true
   fi
done
if [[ $remote_host = nech ]]
then
   if (( tasks_per_node != 0 ))
   then
      (( cputime = cputime * tasks_per_node ))
   elif [[ $numprocs != 0 ]]
   then
      (( cputime = cputime * numprocs ))
   fi
fi
(( stunden  = cputime / 3600 ))
(( resttime = cputime - stunden * 3600 ))
(( minuten  = resttime / 60 ))
(( sekunden = resttime - minuten * 60 ))
timestring=${stunden}:${minuten}:${sekunden}

# CHECK THE MEMORY DEMAND
done=false
while [[ $done = false ]]
do
   if (( memory <= 0 ))
   then
      printf "\n  +++ wrong memory demand or memory demand missing"
      printf "\n  >>> Please type memory in  MByte per process  as INTEGER:"
      printf "\n  >>> "
      read  memory  1>/dev/null  2>&1
   else
      done=true
   fi
done
if [[ $remote_host = nech  ||  $remote_host = necriam ]]
then
   if (( tasks_per_node != 0 ))
   then
      (( Memory = memory * tasks_per_node / 1000 ))
   elif [[ $numprocs != 0 ]]
   then
      (( Memory = memory * numprocs / 1000 ))
   else
      (( Memory = memory / 1000 ))
   fi
elif [[ $remote_host = lctit ]]
then
   (( Memory = memory * tasks_per_node / 1000 ))
fi

# MEMORY CALCULATION IN CASE OF OPENMP USAGE
if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
then
   (( memory = memory * threads_per_task ))
fi

# CALCULATE THE NUMBER OF NODES TO BE USED
if (( tasks_per_node != 0 ))
then
   (( nodes = numprocs / ( tasks_per_node * threads_per_task ) ))
fi

# Calculate number of processes per node
(( processes_per_node = tasks_per_node * threads_per_task ))

# Calculate number of MPI tasks
(( mpi_tasks = numprocs / threads_per_task ))

# DISPLAY HEADER
if [[ $verify = true ]]
then
   printf "\n\n"
   printf \
"#--------------------------------------------------------------# \n" spalte1=SUBJOB;spalte2=$(date) printf "| $spalte1$spalte2 | \n" printf "| | \n" printf "| values of parameters/options: | \n" spalte1=local_host$punkte; spalte2=$punkte$local_host printf "| $spalte1$spalte2 | \n" spalte1=remote_host$punkte; spalte2=$punkte$remote_host printf "| $spalte1$spalte2 | \n" spalte1=queue$punkte; spalte2=$punkte$queue printf "| $spalte1$spalte2 | \n" spalte1=memory$punkte; spalte2="$punkte$memory mb" printf "| $spalte1$spalte2 | \n" spalte1=cputime$punkte; spalte2="$punkte$cputime sec" printf "| $spalte1$spalte2 | \n" spalte1=job_name$punkte; spalte2="$punkte$job_name" printf "| $spalte1$spalte2 | \n" printf "#--------------------------------------------------------------# \n\n" # KONTROLLABFRAGE, OB ALLES O.K. antwort="dummy" while [[ $antwort != y && $antwort != Y && $antwort != n && $antwort != N ]] do read antwort?" >>> continue (y/n) ? " done if [[ $antwort = n || $antwort = N ]] then locat=verify; exit fi printf "\n" fi # ZUFALLSKENNUNG GENERIEREN UND JOBNAMEN AUF ZIELRECHNER BESTIMMEN kennung=$RANDOM job_on_remhost=${job_name}_${kennung}_$local_host job_to_send=job_to_send_$kennung if [[ $delete_dayfile = false ]] then remote_dayfile=${local_host}_${job_name}_result_$kennung local_dayfile=${remote_host}_${job_name} else remote_dayfile=/dev/null fi # Generate the batch job scripts (qsub/msub/LoadLeveler) if [[ $(echo $remote_host | cut -c1-3) = ibm && $numprocs != 0 ]] then # General LoadLeveler settings execute_in_shell="#!/bin/ksh" use_shell="# @ shell = /bin/ksh" consumable_memory="ConsumableMemory($memory mb)" class="# @ class = $queue" environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes" network_to_use="# @ network.mpi = sn_all,shared,us" data_limit="# @ data_limit = 1.76gb" image_size="# @ image_size = 50" wall_clock_limit="# @ wall_clock_limit = ${timestring},$timestring" if [[ $email_notification = none ]] then notify_user="" else notify_user="# @ notify_user = $email_notification" if [[ $delete_dayfile = true ]] then notification='# @ notification = never' fi fi if [[ $remote_host = ibmh ]] then data_limit="" network_to_use="" class="" environment="" rset="# @ rset = RSET_MCM_AFFINITY" task_affinity="# @ task_affinity = core(1)" elif [[ $remote_host = ibmkisti ]] then network_to_use="# @ network.MPI = sn_all,shared,US" wall_clock_limit="# @ wall_clock_limit = $timestring" if [[ $threads_per_task = 1 ]] then rset="# @ rset = RSET_MCM_AFFINITY" mcm_affinity_options="# @ mcm_affinity_options = mcm_mem_pref mcm_sni_none mcm_distribute" fi environment="" use_shell="" data_limit="" image_size="" elif [[ $remote_host = ibmku ]] then execute_in_shell="#!/usr/bin/ksh" use_shell="# @ shell = /usr/bin/ksh" consumable_memory="" environment="" network_to_use="# @ network.mpi = sn_all,shared,us" data_limit="" image_size="" elif [[ $remote_host = ibms ]] then network_to_use="# @ network.mpi = csss,shared,us" elif [[ $remote_host = ibmy ]] then consumable_memory="" network_to_use="" fi cat > $job_to_send << %%END%% $execute_in_shell $use_shell # @ job_type = parallel # @ job_name = $job_name # @ resources = ConsumableCpus($threads_per_task) $consumable_memory # @ output = $remote_dayfile # @ error = $remote_dayfile $wall_clock_limit $image_size $class $environment $network_to_use $data_limit $rset $mcm_affinity_options $task_affinity $notification $notify_user %%END%% if (( nodes > 0 )) then if [[ $remote_host != ibmkisti ]] then cat >> $job_to_send << %%END%% # 
@ node = $nodes # @ tasks_per_node = $processes_per_node # @ node_usage = $node_usage # @ queue %%END%% else cat >> $job_to_send << %%END%% # @ total_tasks = $mpi_tasks # @ blocking = unlimited # @ queue %%END%% fi else if [[ $remote_host != ibmy ]] then cat >> $job_to_send << %%END%% # @ blocking = unlimited # @ total_tasks = $numprocs # @ node_usage = $node_usage # @ queue %%END%% else cat >> $job_to_send << %%END%% # @ node = 1 # @ total_tasks = $numprocs # @ queue %%END%% fi fi # workaround because of silly job filter on ibmkisti if [[ $remote_host = ibmkisti && $threads_per_task != 1 ]] then echo "export OMP_NUM_THREADS=$threads_per_task" >> $job_to_send fi elif [[ $(echo $remote_host | cut -c1-3) = ibm && $numprocs = 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh # @ job_type = serial # @ node_usage = $node_usage # @ job_name = palm # @ wall_clock_limit = ${timestring},$timestring # @ resources = ConsumableCpus(1) ConsumableMemory(1 gb) # @ output = $remote_dayfile # @ error = $remote_dayfile $class $notification # @ queue %%END%% elif [[ $remote_host = lcfimm ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -l walltime=$timestring #PBS -l nodes=${nodes}:ppn=$processes_per_node #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile #PBS -j oe mpd & %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lck || $remote_host = lckordi ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=$numprocs #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe mpd & %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lcyon ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=$numprocs #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lcsgih || $remote_host = lcsgib ]] then if [[ "$sgi_feature" != "" ]] then feature_directive="#PBS -l feature=$sgi_feature" else feature_directive="" fi if [[ $queue = dataq || $queue = permq ]] then feature_directive="#PBS -l feature=data" fi if [[ $queue = testq || $queue = mediumq || $queue = bigq || $queue = workq || $queue = dataq || $queue = permq || $queue = serialq || $queue = special1q ]] then queue_directive="#PBS -q $queue" else queue_directive="" fi if [[ $email_notification = none ]] then email_directive="" else email_directive="#PBS -M $email_notification" fi if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/bash #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l nodes=$nodes:ppn=${processes_per_node} #PBS -l naccesspolicy=$node_usage #PBS -o $remote_dayfile #PBS -j oe $feature_directive $queue_directive $email_directive eval \`/sw/swdist/bin/modulesinit\` #. 
/usr/share/modules/init/bash $module_calls echo ld_library_path=\$LD_LIBRARY_PATH %%END%% else cat > $job_to_send << %%END%% #PBS -S /bin/bash #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -o $remote_dayfile #PBS -j oe $queue_directive $email_directive eval \`/sw/swdist/bin/modulesinit\` #. /usr/share/modules/init/bash $module_calls %%END%% fi elif [[ $remote_host = lcxt4 ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -j oe #PBS -l walltime=$timestring #PBS -l mppwidth=${numprocs} #PBS -l mppnppn=${processes_per_node} #PBS -m abe #PBS -o $remote_dayfile $email_directive . /opt/modules/default/init/ksh $module_calls %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -j oe #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe $email_directive #PBS -o $remote_dayfile . /opt/modules/default/init/ksh $module_calls %%END%% fi elif [[ $remote_host = lckyoto ]] then cat > $job_to_send << %%END%% #!/bin/ksh # @\$-o $remote_dayfile # @\$-eo -oi # @\$-lP 16 # @\$-lp 1 # @\$-lm 28gb -llm unlimited -ls unlimited # @\$-q $queue # @\$-Pvn abs_pack ##for intel? @\$-Pvn abs_unpack -Pvs unpack -Pvc unpack #. /thin/local/etc/setprofile/intel-11.0.sh #. /thin/local/etc/setprofile/mvapich2-1.4+intel-11.0.sh . ~/.myprofile #. /home2/t/t51254/palm/current_version/myprofile #. /thin/apps/pgi/mpi.sh # env # set -x %%END%% elif [[ $remote_host = lcxt5m ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -j oe #PBS -l walltime=$timestring #PBS -l mppwidth=${numprocs} #PBS -l mppnppn=${processes_per_node} #PBS -m abe #PBS -o $remote_dayfile . /opt/modules/default/init/ksh $module_calls %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -j oe #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile . 
/opt/modules/default/init/ksh $module_calls %%END%% fi elif [[ $remote_host = nech ]] then if (( nodes > 1 )) then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime #PBS -l ${qsubmem}=${Memory}gb #PBS -b $nodes #PBS -o $remote_dayfile #PBS -N palm #PBS -j o #PBS -T mpisx %%END%% elif [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime #PBS -l ${qsubmem}=${Memory}gb #PBS -o $remote_dayfile #PBS -N palm #PBS -j o %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l ${qsubmem}=${Memory}gb,${qsubtime}=$cputime #PBS -o $remote_dayfile #PBS -j o %%END%% fi elif [[ $remote_host = necriam ]] then if (( nodes > 1 )) then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -b $nodes #PBS -o $remote_dayfile #PBS -N $job_name #PBS -j o #PBS -v MPIPROGINV=YES %%END%% elif [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -q ${queue} #PBS -o $remote_dayfile #PBS -N $job_name #PBS -j o #PBS -v MPIPROGINV=YES %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -o $remote_dayfile #PBS -j o %%END%% fi elif [[ $remote_host = lctit ]] then cat > $job_to_send << %%END%% #!/bin/ksh $module_calls %%END%% # OPTIONEN FUER SUBMIT-KOMMANDO ZUSAMMENSTELLEN if [[ $tasks_per_node != $processes_per_node ]] then submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -o $remote_dayfile -j oe -q $queue " else submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -q $queue " fi else cat > $job_to_send << %%END%% # @\$-q ${queue} # @\$-l${qsubtime} $timestring # @\$-l${qsubmem} ${memory}mb # @\$-o $remote_dayfile # @\$-eo %%END%% fi # BEI RECHNUNG AUF REMOTE-MASCHINEN RUECKTRANSFER DES DAYFILES PER TRAP # BEI EXIT VERANLASSEN # VEKTORRECHNER MUSS EIGENEN JOB STARTEN, DA DORT NOHUP NICHT FUNKTIONIERT # AUF IBM IN SEOUL IST RUECKTRANSFER ZUR ZEIT GENERELL NICHT MOEGLICH if [[ $delete_dayfile = false && $remote_host != $local_host ]] then echo "set +vx" >> $job_to_send echo "trap '" >> $job_to_send echo "set +vx" >> $job_to_send if [[ $(echo $remote_host | cut -c1-3) = ibm || $(echo $remote_host | cut -c1-5) = lcsgi || $(echo $remote_host | cut -c1-3) = nec ]] then if [[ $remote_host = ibmh ]] then return_queue=c1 elif [[ $remote_host = ibmkisti ]] then return_queue=class.1-2 elif [[ $remote_host = ibmku ]] then return_queue=sdbg2 elif [[ $remote_host = ibms ]] then return_queue=p_normal elif [[ $remote_host = ibmy ]] then return_queue=serial elif [[ $remote_host = lcsgih || $remote_host = lcsgib ]] then return_queue=serialq elif [[ $remote_host = necriam ]] then return_queue=SP else return_queue=unknown fi if [[ $(echo $remote_host | cut -c1-3) = ibm ]] then if [[ $remote_host = ibmku ]] then echo "echo \"#!/usr/bin/ksh\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$kennung" >> $job_to_send else echo "echo \"#!/bin/ksh\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"# @ job_type = serial\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ job_name = transfer\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ wall_clock_limit = 00:10:00,00:10:00\" >> 
scpjob.$kennung " >> $job_to_send echo "echo \"# @ output = job_queue/last_job_transfer_protocol\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ error = job_queue/last_job_transfer_protocol\" >> scpjob.$kennung" >> $job_to_send if [[ $host != "ibmh" ]] then echo "echo \"# @ class = $return_queue\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"# @ image_size = 10\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ notification = never\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ queue\" >> scpjob.$kennung" >> $job_to_send echo "echo \" \" >> scpjob.$kennung" >> $job_to_send echo "echo \"set -x\" >> scpjob.$kennung" >> $job_to_send echo "echo \"batch_scp -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \\\"$job_catalog\\\" $local_dayfile\" >> scpjob.$kennung" >> $job_to_send if [[ $remote_host = ibmku ]] then echo "echo \"rm scpjob.$kennung\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"exit\" >> scpjob.$kennung" >> $job_to_send elif [[ $remote_host = nech ]] then echo "cd /pf/b/${remote_user}/job_queue" >> $job_to_send echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#PBS -l ${qsubmem}=1GB,${qsubtime}=100" >> $job_to_send echo "#PBS -o last_job_transfer_protocol" >> $job_to_send echo "#PBS -j o" >> $job_to_send echo " " >> $job_to_send echo "set -x" >> $job_to_send echo "cd /pf/b/${remote_user}/job_queue" >> $job_to_send echo "batch_scp -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $remote_host = necriam ]] then echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#PBS -q $return_queue" >> $job_to_send echo "#PBS -o last_job_transfer_protocol" >> $job_to_send echo "#PBS -j o" >> $job_to_send echo " " >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $(echo $remote_host | cut -c1-5) = lcsgi ]] then echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#!/bin/bash" >> $job_to_send echo "#PBS -N job_protocol_transfer" >> $job_to_send echo "#PBS -l walltime=00:30:00" >> $job_to_send echo "#PBS -l nodes=1:ppn=1" >> $job_to_send echo "#PBS -l feature=data" >> $job_to_send echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "#PBS -j oe" >> $job_to_send echo " " >> $job_to_send echo ". 
/usr/share/modules/init/bash" >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send else echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "# @\\\$-q $return_queue" >> $job_to_send echo "# @\\\$-l${qsubtime} 10" >> $job_to_send echo "# @\\\$-l${qsubmem} 10mb" >> $job_to_send if [[ $remote_host = t3ej2 || $remote_host = t3ej5 || $remote_host = t3es ]] then echo "# @\$-l mpp_p=0" >> $job_to_send fi echo '# @\$-lF 10mb' >> $job_to_send # echo '# @\$-o /dev/null' >> $job_to_send echo '# @\$-o job_queue/last_job_transfer_protocol' >> $job_to_send echo '# @\\\$-eo' >> $job_to_send echo " " >> $job_to_send if [[ $remote_host = t3ej2 || $remote_host = t3ej5 ]] then echo "set +vx" >> $job_to_send echo ". .profile" >> $job_to_send fi echo "set -x" >> $job_to_send echo "batch_scp -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile > /dev/null" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send fi if [[ $(echo $remote_host | cut -c1-3) = ibm ]] then echo "llsubmit scpjob.$kennung" >> $job_to_send elif [[ $(echo $remote_host | cut -c1-5) = lcsgi ]] then echo "rm -rf \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "chmod u+x scpjob.$kennung" >> $job_to_send echo "msub scpjob.$kennung" >> $job_to_send elif [[ $remote_host = t3eb || $remote_host = t3eh || $remote_host = t3ej2 || $remote_host = t3ej5 ]] then echo "qsub -J n scpjob.$kennung" >> $job_to_send elif [[ $remote_host = t3es ]] then echo "qsub -J n -s /bin/ksh scpjob.$kennung" >> $job_to_send else echo "qsub scpjob.$kennung" >> $job_to_send fi if [[ $remote_host != ibmku ]] then echo "rm scpjob.$kennung" >> $job_to_send fi if [[ $remote_host = nech ]] then echo "cd -" >> $job_to_send fi else # echo "ftpcopy -d $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "nohup ftpcopy -d -w 15 $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile > /dev/null &" >> $job_to_send fi echo "set -x" >> $job_to_send echo " ' exit" >> $job_to_send echo "set -x" >> $job_to_send fi # EIGENTLICHE JOB-DATEI AN QSUB-KOMMANDOS ANHAENGEN cat $file_to_send >> $job_to_send if [[ $remote_host = ibm ]] then echo " " >> $job_to_send echo "exit" >> $job_to_send fi # remove job file if [[ $remote_host = lctit || $remote_host = ibmku ]] then echo " " >> $job_to_send echo "rm ~/job_queue/$job_on_remhost" >> $job_to_send fi # USER-NAME AUF ZIELRECHNER AUS .NETRC-DATEI ERMITTELN if [[ -z $remote_user ]] then if [[ $remote_host = t3eb || $remote_host = t3eh || $remote_host = t3ej2 || $remote_host = t3ej5 || $remote_host = t3es || $remote_host = vpp ]] then grep $remote_addres ~/.netrc | read dum dum dum remote_user dum dum fi fi # JOB AUF ZIELRECHNER TRANSFERIEREN BZW. INS JOBVERZEICHNIS KOPIEREN if [[ $no_submit = false ]] then if [[ $remote_host != $local_host ]] then [[ $verify = true ]] && printf "\n >>> transfering job to \"$remote_host\"..." 
      if [[ $remote_host = ibms  ||  $remote_host = ibmy ]]    # ssh on ibms cannot handle "~/"
      then
         job_catalog_save=$job_catalog
         job_catalog=job_queue
      elif [[ $remote_host = nech ]]
      then
         job_catalog_save=$job_catalog
         job_catalog=/hpf/b/${remote_user}/job_queue
      fi
      if [[ $local_host = decalpha ]]
      then
         # USING SCP ON DECALPHA ERRONEOUSLY PROMPTS FOR A PASSWORD
         /bin/scp $job_to_send ${remote_user}@${remote_addres}:${job_catalog}/$job_on_remhost
      elif [[ $remote_host = nech ]]
      then
         # FILES CAN ONLY BE TRANSFERRED VIA THE DKRZ ARCHIVE SERVER
         scp $job_to_send ${remote_user}@136.172.44.205:${job_catalog}/$job_on_remhost
      else
         scp $job_to_send ${remote_user}@${remote_addres}:${job_catalog}/$job_on_remhost
      fi
      if [[ $? = 1 ]]
      then
         locat=scp; exit
      fi
      if [[ $remote_host = ibms ]]
      then
         job_catalog=$job_catalog_save
      fi
      [[ $verify = true ]]  &&  printf "\n >>> finished\n"
   else
      eval  job_catalog=$job_catalog
      cp  $job_to_send  ${job_catalog}/$job_on_remhost
   fi

   # START THE NQS / LOADLEVELER JOB
   if [[ $remote_host != $local_host ]]
   then
      [[ $verify = true ]]  &&  printf "\n >>> submitting job using \"qsub\"...\n"

      if [[ $(echo $remote_host | cut -c1-5) = lcsgi  &&  $prio = true ]]
      then
         printf "\n  >>> submit with HLRN qos-feature hiprio...\n"
         ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom -l qos=hiprio $job_on_remhost; rm $job_on_remhost"
      elif [[ $remote_host = ibmku ]]
      then
         ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
      else
         ssh  $remote_addres  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost"
      fi

      [[ $verify = true ]]  &&  printf " >>> o.k.\n"
   else
      cd  $job_catalog
      if [[ $(echo $local_host | cut -c1-5) = lcsgi  ||  $(echo $local_host | cut -c1-3) = ibm ]]
      then
         eval  $submcom  $job_on_remhost
      elif [[ $local_host = lcfimm  ||  $local_host = lctit  ||  $local_host = lcxt4  ||  $local_host = lck  ||  $local_host = lckordi  ||  $local_host = lcyon ]]
      then
         chmod  u+x  $job_on_remhost
         echo "$submcom  $job_on_remhost"
         eval  $submcom  $job_on_remhost
      elif [[ $local_host = nech ]]
      then
         if [[ $queue = default ]]
         then
            eval  $submcom  $job_on_remhost
         else
            eval  $submcom  -q $queue  $job_on_remhost
         fi
      else
         qsub  $job_on_remhost
      fi

      # Jobfile must not be deleted on lctit/ibmku!! This will be done
      # only at the end of the job.
      if [[ $local_host != lctit  &&  $local_host != ibmku ]]
      then
         rm  $job_on_remhost
      fi

      cd  -  > /dev/null
   fi
fi

# FINAL CLEAN-UP
if [[ $no_submit = false ]]
then
   rm  $job_to_send
fi

[[ $verify = true ]]  &&  printf "\n\n *** SUBJOB finished \n\n"
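# Illustrative example (not part of the original script): calling subjob with
# option -D (no_submit=true) only creates the batch-job file without
# submitting it, which allows the generated directives to be inspected first.
# "myjob" is a placeholder job file:
#
#    subjob -D -h lcsgih -X 8 -T 8 -m 1500 -t 600 myjob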