#!/bin/ksh

# subjob - script for automatic generation and submission of batch-job files
#          for various batch queuing systems
#--------------------------------------------------------------------------------#
# This file is part of PALM.
#
# PALM is free software: you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PALM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 1997-2012  Leibniz University Hannover
#--------------------------------------------------------------------------------#
#
# Current revisions:
# ------------------
#
#
# Former revisions:
# -----------------
# $Id: subjob 1204 2013-07-11 12:50:09Z raasch $
#
# 1202 2013-07-10 16:22:07Z witha
# adjustments for Forwind cluster (lcflow)
#
# 1199 2013-07-05 14:52:22Z raasch
# adjustments for CSC Helsinki (lccrayf)
#
# use of cluster/express queue enabled (ibmh)
# vinessa added (imuk)
#
# 1103 2013-02-20 02:15:53Z raasch
# bash compatibility adjustments (usage of OPTIND, output formatting with printf
# instead typeset -L/R),
# further adjustments for lckyuh
#
# 2013-02-10 01:47:43Z raasch
# adjustments for Kyushu-University computing center (lckyuh - hayaka)
# and for Forwind cluster (lcflow)
#
# 1094 2013-02-03 01:52:12Z raasch
# new option -P for explicit setting of ssh/scp port,
# decalpha parts (yonsei) removed
#
# 2013-02-02 07:06:13Z raasch
# adjustments for Kyushu-University computing center (lckyut - tatara)
# old changelog messages removed
#
# 1046 2012-11-09 14:38:45Z maronga
# code put under GPL (PALM 3.9)
#
# 08/07/94 - Siggi - first version finished
# 29/06/94 - Siggi - script development started
#--------------------------------------------------------------------------------#
# subjob - script for automatic generation and submission of batch-job files
#          for various batch queuing systems
#--------------------------------------------------------------------------------#


# VARIABLE DECLARATIONS + DEFAULT VALUES
delete_dayfile=false
email_notification=none
group_number=none
locat=normal
no_default_queue=none
no_submit=false
# NOTE(review): tilde inside quotes is deliberately NOT expanded locally;
# presumably the path is interpreted on the remote side -- confirm with callers
job_catalog="~/job_queue"
job_name=none
local_user=$LOGNAME
node_usage=shared
numprocs=0
# filler dots used for aligned two-column output later in the script
punkte="..........................................................."
submcom=qsub
queue=default
remote_host=none
remote_user=""
verify=true

# integer working variables: cpu time split into h/min/s, plus
# process/node bookkeeping for the generated batch directives
typeset -i cputime=0 memory=0 Memory=0 minuten resttime sekunden stunden
typeset -i inumprocs mpi_tasks=0 nodes=0 processes_per_node=0 tasks_per_node=0 threads_per_task=1


# ERROR HANDLING
# ON EXIT: report the reason stored in $locat and remove a half-written job file
trap 'if [[ $locat != normal ]]
      then
         case $locat in
            (option)  printf "\n  --> available options can be displayed"
                      printf " by typing:"
                      printf "\n      \"subjob ?\" \n";;
            (ftpcopy|parameter|scp|verify)  printf "\n";;
            (*)       printf "\n  +++ unknown error"
                      printf "\n      please inform S. Raasch!\n"
         esac
         [[ -f $job_to_send ]] && rm "$job_to_send"
         printf "\n\n+++ SUBJOB killed \n\n"
      fi' exit


# ON TERMINAL-BREAK (SIGINT): clean up the temporary job file and leave
trap '[[ -f $job_to_send ]] && rm "$job_to_send"
      printf "\n\n+++ SUBJOB killed \n\n"
      exit
     ' 2


# DETERMINE THE LOCAL HOSTNAME
local_host=$(hostname)


# DECLARE HOST-SPECIFIC VARIABLES
# CHECK WHETHER THE LOCAL HOST IS PERMITTED AT ALL
# Note: One of the entries for "lck" or "lckordi" always has to be
# commented out, because the hostname (node*) is the same for both machines
case $local_host in
    (ambiel-lx)             local_addres=134.106.74.48;  local_host=lcfor;;
    (atmos)                 local_addres=172.20.25.35;   local_host=lcide;;
    (autan)                 local_addres=130.75.105.57;  local_host=lcmuk;;
    (bora)                  local_addres=130.75.105.103; local_host=lcmuk;;
    (bd1)                   local_addres=130.73.232.64;  local_host=lcsgib;;
    (bd2)                   local_addres=130.73.232.65;  local_host=lcsgib;;
    (bd3)                   local_addres=130.73.232.66;  local_host=lcsgib;;
    (bd4)                   local_addres=130.73.232.67;  local_host=lcsgib;;
    (b01*|bicegate1)        local_addres=130.73.232.102; local_host=lcsgib;;
    (b04*)                  local_addres=133.5.4.33;     local_host=lckyuh;;
    (bicegate2)             local_addres=130.73.232.103; local_host=lcsgib;;
    (blizzard1)             local_addres=136.172.40.15;  local_host=ibmh;;
    (breva)                 local_addres=130.75.105.98;  local_host=lcmuk;;
    (buran)                 local_addres=130.75.105.58;  local_host=lcmuk;;
    (caurus)                local_addres=130.75.105.19;  local_host=lcmuk;;
    (climate*)              local_addres=165.132.26.68;  local_host=lcyon;;
    (clogin*)               local_addres=86.50.166.21;   local_host=lccrayf;;
    (compute-*.local)       local_addres=172.20.4.2;     local_host=lcfimm;;
    (cs*)                   local_addres=136.172.44.131; local_host=nech;;
    (elephanta)             local_addres=130.75.105.6;   local_host=lcmuk;;
    (fimm.bccs.uib.no)      local_addres=172.20.4.2;     local_host=lcfimm;;
    (flow01)                local_addres=10.141.255.71;  local_host=lcflow;;
    (flow02)                local_addres=10.141.255.72;  local_host=lcflow;;
    (node*)                 local_addres=165.132.26.61;  local_host=lck;;
#   (node*)                 local_addres=210.219.61.8;   local_host=lckordi;;
    (gaia*)                 local_addres=150.183.146.24; local_host=ibmkisti;;
    (gallego)               local_addres=130.75.105.10;  local_host=lcmuk;;
    (gfdl5)                 local_addres=165.132.26.58;  local_host=ibmy;;
    (gregale)               local_addres=130.75.105.109; local_host=lcmuk;;
    (hababai)               local_addres=130.75.105.108; local_host=lcmuk;;
    (hayaka*)               local_addres=133.5.4.33;     local_host=lckyuh;;
    (hexagon.bccs.uib.no)   local_addres=129.177.20.113; local_host=lcxe6;;
    (hd1)                   local_addres=130.75.4.104;   local_host=lcsgih;;
    (hd2)                   local_addres=130.75.4.105;   local_host=lcsgih;;
    (hd3)                   local_addres=130.75.4.106;   local_host=lcsgih;;
    (hd4)                   local_addres=130.75.4.107;   local_host=lcsgih;;
    (hicegate0)             local_addres=130.75.4.101;   local_host=lcsgih;;
    (h01*|hicegate1)        local_addres=130.75.4.102;   local_host=lcsgih;;
    (hicegate2)             local_addres=130.75.4.103;   local_host=lcsgih;;
    (hx*)                   local_addres=133.3.51.11;    local_host=lckyoto;;
    (inferno)               local_addres=130.75.105.5;   local_host=lcmuk;;
    (irifi)                 local_addres=130.75.105.104; local_host=lcmuk;;
    (sno)                   local_addres=130.75.105.113; local_host=lcmuk;;
    (levanto)               local_addres=130.75.105.45;  local_host=lcmuk;;
    (login*)                local_addres=118.128.66.223; local_host=lckiaps;;
    (maestro)               local_addres=130.75.105.2;   local_host=lcmuk;;
    (meller)                local_addres=134.106.74.155; local_host=lcfor;;
    (meteo-login*)          local_addres=193.166.211.144;local_host=lcxt5m;;
    (hexagon*)              local_addres=129.177.20.113; local_host=lcxe6;;
    (nobel*)                local_addres=150.183.5.101;  local_host=ibms;;
    (orkan)                 local_addres=130.75.105.3;   local_host=lcmuk;;
    (ostria)                local_addres=130.75.105.106; local_host=lcmuk;;
    (paesano)               local_addres=130.75.105.46;  local_host=lcmuk;;
    (pcj*)                  local_addres=172.31.120.1;   local_host=lckyut;;
    (pingui)                local_addres=134.106.74.118; local_host=lcfor;;
    (quanero)               local_addres=130.75.105.107; local_host=lcmuk;;
    (rte*)                  local_addres=133.5.185.60;   local_host=lcrte;;
    (r1*)                   local_addres=130.75.4.102;   local_host=lcsgih;;
    (r2*)                   local_addres=130.73.232.102; local_host=lcsgib;;
    (scirocco)              local_addres=172.20.25.41;   local_host=lcmuk;;
    (shiokaze-lx)           local_addres=134.106.74.123; local_host=lcfor;;
    (sisu-login*)           local_addres=86.50.166.21;   local_host=lccrayf;;
    (solano)                local_addres=130.75.105.110; local_host=lcmuk;;
    (sugoka*)               local_addres=172.31.120.1;   local_host=lckyut;;
    (sun1|sun2)             local_addres=130.75.6.1;     local_host=unics;;
    (sx-*)                  local_addres=172.16.1.131;   local_host=necriam;;
    (t2a*)                  local_addres=10.1.6.165;     local_host=lctit;;
    (urban*)                local_addres=147.46.30.151;  local_host=lcsb;;
    (vinessa)               local_addres=130.75.105.112; local_host=lcmuk;;
    (vorias)                local_addres=172.20.25.43;   local_host=lcmuk;;
    (*.cc.kyushu-u.ac.jp)   local_addres=133.5.4.129;    local_host=ibmku;;
    (*)                     printf "\n  +++ \"$local_host\" unknown";
                            printf "\n      please inform S. Raasch!";
                            locat=parameter; exit;;
esac


# SET REMOTE HOST = LOCAL HOST BY DEFAULT
remote_host=$local_host


# READ PROCEDURE OPTIONS
while getopts :c:dDe:g:h:m:n:N:O:P:q:t:T:u:vX: option
do
   case $option in
       (c)   job_catalog=$OPTARG;;
       (d)   delete_dayfile=true;;
       (D)   no_submit=true;;
       (e)   email_notification=$OPTARG;;
       (g)   group_number=$OPTARG;;
       (h)   remote_host=$OPTARG;;
       (m)   memory=$OPTARG;;
       (n)   job_name=$OPTARG;;
       (N)   node_usage=$OPTARG;;
       (O)   threads_per_task=$OPTARG;;
       (P)   scp_port=$OPTARG;;
       (q)   no_default_queue=$OPTARG;;
       (t)   cputime=$OPTARG;;
       (T)   tasks_per_node=$OPTARG;;
       (u)   remote_user=$OPTARG;;
       (v)   verify=false;;
       (X)   numprocs=$OPTARG;;
       (:)   printf "\n  +++ Option -$OPTARG requires an argument \n";
             locat=option; exit;;
       (\?)  printf "\n  +++ Option $OPTARG unknown \n";
             locat=option; exit;;
   esac
done


# GET THE JOB-FILE NAME AS THE NEXT ARGUMENT
(( to_shift = $OPTIND - 1 ))
shift $to_shift; file_to_send=$1


# A SHORT CALL DESCRIPTION IS PRINTED HERE
if [ "$1" = "?" ]
then
   (printf "\n  *** subjob can be called as follows:\n"
    printf "\n      subjob -c.. -d -D -h.. -m.. -q.. -t.. -u.. -v  <job-file> \n"
    printf "\n      Description of available options:\n"
    printf "\n      Option  Description                         Default-Value"
    printf "\n        -c    job-input- and output-catalog       ~/job_queue"
    printf "\n        -d    no job-protocol will be created     ---"
    printf "\n        -D    only the job-file will be created   ---"
    printf "\n        -h    execution host, available hosts:    $remote_host"
    printf "\n              ibm, ibmh, ibmkisti, ibmku, ibms, ibmy, lc...,"
    printf "\n              lckiaps, lctit, nech, necriam, unics"
    printf "\n        -m    memory demand per process in MByte  ---"
    printf "\n        -n    jobname                             <job-file>"
    printf "\n        -O    threads per task (for OpenMP usage) 1"
    printf "\n        -P    ssh/scp port                        default port"
    printf "\n        -q    job-queue to be used                default"
    printf "\n        -t    allowed cpu-time in seconds         ---"
    printf "\n        -T    tasks per node (on parallel hosts)  ---"
    printf "\n        -u    username on execution host          from .netrc"
    printf "\n        -v    no prompt for confirmation          ---"
    printf "\n        -X    # of processors (on parallel hosts) 1"
    printf "\n "
    printf "\n      The only possible positional parameter is <job-file>:"
    printf "\n      The complete NQS-job must be provided here."
    printf "\n      <job-file>=? creates this outline\n\n") | more
   exit
fi


# CHECK WHETHER A JOB FILE WAS GIVEN AND WHETHER IT EXISTS
if [[ "$file_to_send" = "" ]]
then
   printf "\n  +++ job-file missing"
   locat=parameter; exit
else
   if [[ -f $file_to_send ]]
   then
      true
   else
      printf "\n  +++ job-file: "
      printf "\n           $file_to_send"
      printf "\n      does not exist"
      locat=parameter; exit
   fi
fi


# IF NO JOB NAME WAS GIVEN, SET JOB NAME = JOB-FILE NAME.
# PRECONDITION: THE JOB-FILE NAME CONTAINS NO PATHS
if [[ $job_name = none ]]
then
   job_name=$file_to_send
fi
if [[ $(echo $job_name | grep -c "/") != 0 ]]
then
   printf "\n  +++ job-file name: "
   printf "\n           $job_name"
   printf "\n      must not contain \"/\"-characters"
   locat=parameter; exit
fi


# DECLARE HOST-SPECIFIC QUANTITIES
ABBRUCH BEI UNZULAESSIGEM HOST # ODER WENN HOST NICHT ANGEGEBEN WURDE if [[ $remote_host = none ]] then printf "\n +++ host missing" locat=option; exit else case $remote_host in (ibm) queue=p690_standard; remote_addres=134.76.99.81; submcom=/usr/lpp/LoadL/full/bin/llsubmit;; (ibmh) queue=cluster; remote_addres=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;; (ibmkisti) queue=class.32plus; remote_addres=150.183.146.24; submcom=/usr/lpp/LoadL/full/bin/llsubmit;; (ibmku) queue=s4; remote_addres=133.5.4.129; submcom=/usr/local/bin/llsubmit;; (ibms) queue=p_normal; remote_addres=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;; (ibmy) queue=parallel; remote_addres=165.132.26.58; submcom=/usr/lpp/LoadL/full/bin/llsubmit;; (lccrayf) queue=small; remote_addres=86.50.166.21; submcom=/opt/slurm/default/bin/sbatch;; (lcfimm) remote_addres=172.20.4.2; submcom=/opt/torque/bin/qsub;; (lcflow) remote_addres=10.140.1.71; submcom=/cm/shared/apps/sge/6.2u5p2/bin/lx26-amd64/qsub;; (lckyoto) remote_addres=133.3.51.11; submcom=/thin/local/bin/qsub;; (lcsgib) queue=smallq; remote_addres=130.73.232.104; submcom=/opt/moab/bin/msub;; (lcsgih) queue=smallq; remote_addres=130.75.4.101; submcom=/opt/moab/bin/msub;; (lck) remote_addres=165.132.26.61; submcom=/usr/torque/bin/qsub;; (lckiaps) remote_addres=118.128.66.223; submcom=/cm/shared/apps/pbspro/11.0.2.110766/bin/qsub;; (lckordi) remote_addres=210.219.61.8; submcom=/usr/torque/bin/qsub;; (lckyuh) remote_addres=133.5.4.33; submcom=/usr/bin/pjsub;; (lckyut) remote_addres=133.5.4.37; submcom=/usr/bin/pjsub;; (lcsb) remote_addres=147.46.30.151; submcom=/usr/torque/bin/qsub;; (lctit) queue=S; remote_addres=10.1.6.165; submcom=/opt/pbs/tools/bin/t2sub;; (lcxe6) remote_addres=129.177.20.113; submcom=/opt/torque/2.5.10/bin/qsub;; (lcxt5m) remote_addres=193.166.211.144; submcom=/opt/pbs/10.1.0.91350/bin/qsub;; (lcyon) remote_addres=165.132.26.68; submcom=/usr/torque/bin/qsub;; (nech) qsubmem=memsz_job; qsubtime=cputim_job; 
remote_addres=136.172.44.147; submcom="/usr/local/bin/qsub";; (necriam) qsubmem=memsz_job; qsubtime=cputim_job; remote_addres=172.16.1.131; submcom="/usr/bin/nqsII/qsub";; (vpp) qsubmem=m; qsubtime=t; queue=vpp; remote_addres=130.75.4.130;; (unics) qsubmem=d; qsubtime=t; queue=unics; remote_addres=130.75.6.1;; (*) printf "\n +++ hostname \"$remote_host\" not allowed"; locat=parameter; exit;; esac fi # EVTL. PRUEFEN, OB ANGEGEBENE QUEUE ZULAESSIG IST if [[ $no_default_queue != none ]] then error=false ndq=$no_default_queue case $remote_host in (ibm) case $ndq in (p690_express|p690_standard|p690_long) error=false;; (*) error=true;; esac;; (ibmh) case $ndq in (cluster|express) error=false;; (*) error=true;; esac;; (ibmkisti) case $ndq in (class.32plus|class.1-2|class.2-32) error=false;; (*) error=true;; esac;; (ibmku) case $ndq in (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s) error=false;; (*) error=true;; esac;; (ibms) case $ndq in (express|normal|p_express|p_normal|p_normal_1.3|p_normal_1.7|grand) error=false;; (*) error=true;; esac;; (ibmy) case $ndq in (parallel) error=false;; (*) error=true;; esac;; (lccrayf) case $ndq in (usup|test*|small|large) error=false;; (*) error=true;; esac;; (lcflow) case $ndq in (cfd_lom_long.q|cfd_him_long.q|cfd_lom_serl.q|cfd_lom_shrt.q|cfd_him_shrt.q) error=false;; (*) error=true;; esac;; (lckiaps) case $ndq in (express|normal) error=false;; (*) error=true;; esac;; (lckyoto) case $ndq in (eh|ph) error=false;; (*) error=true;; esac;; (lckyuh) case $ndq in (fx-dbg|fx-single|fx-small|fx-middle|fx-large) error=false;; (*) error=true;; esac;; (lckyut) case $ndq in (cx-dbg|cx-single|cx-small|cx-middle|cx-large) error=false;; (*) error=true;; esac;; (lcsgib|lcsgih) case $ndq in (testq|serialq|smallq|mediumq|bigq|workq|dataq|permq|special1q) error=false;; (*) error=true;; esac;; (lctit) case $ndq in (G|L128|L256|L512H|S|S96|V) error=false;; (*) error=true;; esac;; (t3eb) case $ndq in (berte|p50|p100|p392|forfree|p25himem) error=false;; (*) 
error=true;; esac;; (necriam) case $ndq in (SP|SS|P6) error=false;; (*) error=true;; esac;; (t3eh) case $ndq in (para_t3e|em|k|l|lm|comp_t3e|c|p|ht) error=false;; (*) error=true;; esac;; (t3ej2|t3ej5) case $ndq in (low|normal|high) error=false;; (*) error=true;; esac;; (t3es) case $ndq in (batch|serial-4|pe4|p48|pe16|pe32|pe64|pe128) error=false;; (*) error=true;; esac;; (unics) case $ndq in (unics|ht) error=false;; (*) error=true;; esac;; esac if [[ $error = true ]] then printf "\n +++ queue \"$no_default_queue\" on host \"$remote_host\" not allowed" locat=parameter; exit else queue=$no_default_queue fi fi # PRUEFEN DER CPU-ZEIT, ZEIT NACH STUNDEN, MINUTEN UND SEKUNDEN # AUFTEILEN done=false while [[ $done = false ]] do if (( $cputime <= 0 )) then printf "\n +++ wrong cpu-time or cpu-time missing" printf "\n >>> Please type cpu-time in seconds as INTEGER:" printf "\n >>> " read cputime 1>/dev/null 2>&1 else done=true fi done if [[ $remote_host = nech ]] then if (( tasks_per_node != 0 )) then (( cputime = cputime * tasks_per_node )) elif [[ $numprocs != 0 ]] then (( cputime = cputime * numprocs )) fi fi (( stunden = cputime / 3600 )) (( resttime = cputime - stunden * 3600 )) (( minuten = resttime / 60 )) (( sekunden = resttime - minuten * 60 )) timestring=${stunden}:${minuten}:${sekunden} # PRUEFEN DER KERNSPEICHERANFORDERUNG done=false while [[ $done = false ]] do if (( memory <= 0 )) then printf "\n +++ wrong memory demand or memory demand missing" printf "\n >>> Please type memory in MByte per process as INTEGER:" printf "\n >>> " read memory 1>/dev/null 2>&1 else done=true fi done if [[ $remote_host = nech || $remote_host = necriam ]] then if (( tasks_per_node != 0 )) then (( Memory = memory * tasks_per_node / 1000 )) elif [[ $numprocs != 0 ]] then (( Memory = memory * numprocs / 1000 )) else (( Memory = memory / 1000 )) fi elif [[ $remote_host = lctit ]] then (( Memory = memory * tasks_per_node / 1000 )) fi # SPEICHERBERECHNUNG BEI OPENMP-NUTZUNG if [[ $(echo 
$remote_host | cut -c1-3) = ibm ]] then (( memory = memory * threads_per_task )) fi # BERECHNUNG DER ANZAHL DER ZU VERWENDENDEN KNOTEN if (( tasks_per_node != 0 )) then (( nodes = numprocs / ( tasks_per_node * threads_per_task ) )) fi # Calculate number of processes per node (( processes_per_node = tasks_per_node * threads_per_task )) # Calculate number of MPI tasks (( mpi_tasks = numprocs / threads_per_task )) # Set port number option for calls of ssh/scp, subjob and batch_scp scripts if [[ "$scp_port" != "" ]] then PORTOPT="-P $scp_port" SSH_PORTOPT="-p $scp_port" fi # HEADER-AUSGABE if [[ $verify = true ]] then printf "\n\n" printf "#--------------------------------------------------------------# \n" spalte1=SUBJOB;spalte2=$(date) printf "| %-20s%40s | \n" "$spalte1" "$spalte2" printf "| | \n" printf "| values of parameters/options: | \n" spalte1=$(echo local_host$punkte | cut -c-20) spalte2=$punkte$local_host printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" spalte1=$(echo remote_host$punkte | cut -c-20) spalte2=$punkte$remote_host printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" spalte1=$(echo queue$punkte | cut -c-20) spalte2=$punkte$queue printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" spalte1=$(echo memory$punkte | cut -c-20) spalte2="$punkte$memory mb" printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" spalte1=$(echo cputime$punkte | cut -c-20) spalte2="$punkte$cputime sec" printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" spalte1=$(echo job_name$punkte | cut -c-20) spalte2="$punkte$job_name" printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}" printf "#--------------------------------------------------------------# \n\n" # KONTROLLABFRAGE, OB ALLES O.K. antwort="dummy" while [[ $antwort != y && $antwort != Y && $antwort != n && $antwort != N ]] do read antwort?" >>> continue (y/n) ? 
" done if [[ $antwort = n || $antwort = N ]] then locat=verify; exit fi printf "\n" fi # ZUFALLSKENNUNG GENERIEREN UND JOBNAMEN AUF ZIELRECHNER BESTIMMEN kennung=$RANDOM job_on_remhost=${job_name}_${kennung}_$local_host job_to_send=job_to_send_$kennung if [[ $delete_dayfile = false ]] then remote_dayfile=${local_host}_${job_name}_result_$kennung local_dayfile=${remote_host}_${job_name} else remote_dayfile=/dev/null fi # Generate the batch job scripts (qsub/msub/LoadLeveler) if [[ $(echo $remote_host | cut -c1-3) = ibm && $numprocs != 0 ]] then # General LoadLeveler settings execute_in_shell="#!/bin/ksh" use_shell="# @ shell = /bin/ksh" consumable_memory="ConsumableMemory($memory mb)" class="# @ class = $queue" environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes" network_to_use="# @ network.mpi = sn_all,shared,us" data_limit="# @ data_limit = 1.76gb" image_size="# @ image_size = 50" wall_clock_limit="# @ wall_clock_limit = ${timestring},$timestring" if [[ $email_notification = none ]] then notify_user="" else notify_user="# @ notify_user = $email_notification" if [[ $delete_dayfile = true ]] then notification='# @ notification = never' fi fi if [[ $remote_host = ibmh ]] then data_limit="" network_to_use="" class="# @ class = $queue" environment="" rset="# @ rset = RSET_MCM_AFFINITY" task_affinity="# @ task_affinity = core(1)" elif [[ $remote_host = ibmkisti ]] then network_to_use="# @ network.MPI = sn_all,shared,US" wall_clock_limit="# @ wall_clock_limit = $timestring" if [[ $threads_per_task = 1 ]] then rset="# @ rset = RSET_MCM_AFFINITY" mcm_affinity_options="# @ mcm_affinity_options = mcm_mem_pref mcm_sni_none mcm_distribute" fi environment="" use_shell="" data_limit="" image_size="" elif [[ $remote_host = ibmku ]] then execute_in_shell="#!/usr/bin/ksh" use_shell="# @ shell = /usr/bin/ksh" consumable_memory="" environment="" network_to_use="# @ network.mpi = sn_all,shared,us" data_limit="" image_size="" elif [[ $remote_host = 
ibms ]] then network_to_use="# @ network.mpi = csss,shared,us" elif [[ $remote_host = ibmy ]] then consumable_memory="" network_to_use="" fi cat > $job_to_send << %%END%% $execute_in_shell $use_shell # @ job_type = parallel # @ job_name = $job_name # @ resources = ConsumableCpus($threads_per_task) $consumable_memory # @ output = $remote_dayfile # @ error = $remote_dayfile $wall_clock_limit $image_size $class $environment $network_to_use $data_limit $rset $mcm_affinity_options $task_affinity $notification $notify_user %%END%% if (( nodes > 0 )) then if [[ $remote_host != ibmkisti ]] then cat >> $job_to_send << %%END%% # @ node = $nodes # @ tasks_per_node = $processes_per_node # @ node_usage = $node_usage # @ queue %%END%% else cat >> $job_to_send << %%END%% # @ total_tasks = $mpi_tasks # @ blocking = unlimited # @ queue %%END%% fi else if [[ $remote_host != ibmy ]] then cat >> $job_to_send << %%END%% # @ blocking = unlimited # @ total_tasks = $numprocs # @ node_usage = $node_usage # @ queue %%END%% else cat >> $job_to_send << %%END%% # @ node = 1 # @ total_tasks = $numprocs # @ queue %%END%% fi fi # workaround because of silly job filter on ibmkisti if [[ $remote_host = ibmkisti && $threads_per_task != 1 ]] then echo "export OMP_NUM_THREADS=$threads_per_task" >> $job_to_send fi elif [[ $(echo $remote_host | cut -c1-3) = ibm && $numprocs = 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh # @ job_type = serial # @ node_usage = $node_usage # @ job_name = palm # @ wall_clock_limit = ${timestring},$timestring # @ resources = ConsumableCpus(1) ConsumableMemory(1 gb) # @ output = $remote_dayfile # @ error = $remote_dayfile $class $notification # @ queue %%END%% elif [[ $remote_host = lccrayf ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/bash -l #SBATCH -J $job_name #SBATCH -t $timestring #SBATCH -N $nodes #SBATCH --ntasks-per-node=$processes_per_node #SBATCH -p $queue #SBATCH -o $remote_dayfile #SBATCH -e $remote_dayfile $init_cmds 
$module_calls %%END%% else cat > $job_to_send << %%END%% #!/bin/bash -l #SBATCH -J $job_name #SBATCH -t $timestring #SBATCH -l ncpus=1 #SBATCH -l pmem=${memory}mb #SBATCH -m abe #SBATCH -o $remote_dayfile #SBATCH -e $remote_dayfile $init_cmds $module_calls %%END%% fi elif [[ $remote_host = lcfimm ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -l walltime=$timestring #PBS -l nodes=${nodes}:ppn=$processes_per_node #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile #PBS -j oe mpd & %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lcflow ]] then if [ $memory -gt 1800 ]; then use_himem="" else use_himem="#" fi if [[ $numprocs != 0 ]] then pe_set="#$ -pe impi41 $numprocs" else pe_set="#$ -pe impi41 1" fi if [[ $queue = default ]] then queue_set="" else queue_set="#$ -q $queue" fi [[ "$disc_space" = "" ]] && disc_space=50 cat > $job_to_send << %%END%% #!/bin/bash #$ -S /bin/bash #$ -N $job_name #$ -cwd #$ -l h_rt=$timestring #$ -l h_vmem=${memory}M #$ -o $remote_dayfile #$ -j y $pe_set #$ -R y ${use_himem}#$ -l highmem=true #$ -l h_fsize=${disc_space}G $queue_set %%END%% elif [[ $remote_host = lck || $remote_host = lckordi || $remote_host = lcsb ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=$numprocs #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -l nodes=$nodes:ppn=${processes_per_node} #PBS -j oe mpd & %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lckiaps ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% 
#!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l select=1:ncpus=$numprocs #PBS -l pmem=${memory}mb #PBS -q $queue #PBS -o $remote_dayfile #PBS -j oe #PBS -V %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lcyon ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=$numprocs #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -o $remote_dayfile #PBS -j oe %%END%% fi elif [[ $remote_host = lcsgih || $remote_host = lcsgib ]] then if [[ "$sgi_feature" != "" ]] then feature_directive="#PBS -l feature=$sgi_feature" else feature_directive="" fi if [[ $queue = dataq || $queue = permq ]] then feature_directive="#PBS -l feature=data" fi if [[ $queue = testq || $queue = mediumq || $queue = bigq || $queue = workq || $queue = dataq || $queue = permq || $queue = serialq || $queue = special1q ]] then queue_directive="#PBS -q $queue" else queue_directive="" fi if [[ $email_notification = none ]] then email_directive="" else email_directive="#PBS -M $email_notification" fi if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/bash #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l nodes=$nodes:ppn=${processes_per_node} #PBS -l naccesspolicy=$node_usage #PBS -o $remote_dayfile #PBS -j oe $feature_directive $queue_directive $email_directive eval \`/sw/swdist/bin/modulesinit\` #. 
/usr/share/modules/init/bash $init_cmds $module_calls echo ld_library_path=\$LD_LIBRARY_PATH %%END%% else cat > $job_to_send << %%END%% #PBS -S /bin/bash #PBS -N $job_name #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -o $remote_dayfile #PBS -j oe $queue_directive $email_directive eval \`/sw/swdist/bin/modulesinit\` #. /usr/share/modules/init/bash $init_cmds $module_calls %%END%% fi elif [[ $remote_host = lcxe6 ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -j oe #PBS -l walltime=$timestring #PBS -l mppwidth=${numprocs} #PBS -l mppnppn=${processes_per_node} #PBS -m abe #PBS -o $remote_dayfile $email_directive $init_cmds $module_calls %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -A $project_account #PBS -j oe #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe $email_directive #PBS -o $remote_dayfile $init_cmds $module_calls %%END%% fi elif [[ $remote_host = lckyoto ]] then cat > $job_to_send << %%END%% #!/bin/ksh # @\$-o $remote_dayfile # @\$-eo -oi # @\$-lP 16 # @\$-lp 1 # @\$-lm 28gb -llm unlimited -ls unlimited # @\$-q $queue # @\$-Pvn abs_pack ##for intel? @\$-Pvn abs_unpack -Pvs unpack -Pvc unpack #. /thin/local/etc/setprofile/intel-11.0.sh #. /thin/local/etc/setprofile/mvapich2-1.4+intel-11.0.sh . ~/.myprofile #. /home2/t/t51254/palm/current_version/myprofile #. 
/thin/apps/pgi/mpi.sh # env # set -x %%END%% elif [[ $remote_host = lcxt5m ]] then if [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -j oe #PBS -l walltime=$timestring #PBS -l mppwidth=${numprocs} #PBS -l mppnppn=${processes_per_node} #PBS -m abe #PBS -o $remote_dayfile $init_cmds $module_calls %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -S /bin/ksh #PBS -N $job_name #PBS -j oe #PBS -l walltime=$timestring #PBS -l ncpus=1 #PBS -l pmem=${memory}mb #PBS -m abe #PBS -o $remote_dayfile $init_cmds $module_calls %%END%% fi elif [[ $remote_host = lckyuh ]] then cat > $job_to_send << %%END%% #!/bin/bash #PJM -L "rscgrp=$queue" #PJM -L "node=$nodes" #PJM --mpi "proc=$numprocs" #PJM -L "elapse=$timestring" #PJM -o $remote_dayfile #PJM -j #PJM -X #PJM --no-stging export LANG=en_US.UTF-8 %%END%% elif [[ $remote_host = lckyut ]] then cat > $job_to_send << %%END%% #!/bin/bash #PJM -L "rscgrp=$queue" #PJM -L "vnode=$numprocs" #PJM -L "vnode-core=1" #PJM -L "elapse=$timestring" #PJM --mpi proc=$numprocs #PJM -o $remote_dayfile #PJM -j #PJM -X #PJM --no-stging export LANG=en_US.UTF-8 %%END%% elif [[ $remote_host = nech ]] then if (( nodes > 1 )) then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime #PBS -l ${qsubmem}=${Memory}gb #PBS -b $nodes #PBS -o $remote_dayfile #PBS -N palm #PBS -j o #PBS -T mpisx %%END%% elif [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime #PBS -l ${qsubmem}=${Memory}gb #PBS -o $remote_dayfile #PBS -N palm #PBS -j o %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -l ${qsubmem}=${Memory}gb,${qsubtime}=$cputime #PBS -o $remote_dayfile #PBS -j o %%END%% fi elif [[ $remote_host = necriam ]] then if (( nodes > 1 )) then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -b $nodes #PBS -o $remote_dayfile #PBS -N $job_name #PBS -j o #PBS -v MPIPROGINV=YES 
%%END%% elif [[ $numprocs != 0 ]] then cat > $job_to_send << %%END%% #!/bin/ksh #PBS -q ${queue} #PBS -o $remote_dayfile #PBS -N $job_name #PBS -j o #PBS -v MPIPROGINV=YES %%END%% else cat > $job_to_send << %%END%% #!/bin/ksh #PBS -o $remote_dayfile #PBS -j o %%END%% fi elif [[ $remote_host = lctit ]] then cat > $job_to_send << %%END%% #!/bin/ksh $init_cmds $module_calls %%END%% # OPTIONEN FUER SUBMIT-KOMMANDO ZUSAMMENSTELLEN if [[ $tasks_per_node != $processes_per_node ]] then submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -o $remote_dayfile -j oe -q $queue " else submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -q $queue " fi else cat > $job_to_send << %%END%% # @\$-q ${queue} # @\$-l${qsubtime} $timestring # @\$-l${qsubmem} ${memory}mb # @\$-o $remote_dayfile # @\$-eo %%END%% fi # BEI RECHNUNG AUF REMOTE-MASCHINEN RUECKTRANSFER DES DAYFILES PER TRAP # BEI EXIT VERANLASSEN # VEKTORRECHNER MUSS EIGENEN JOB STARTEN, DA DORT NOHUP NICHT FUNKTIONIERT # AUF IBM IN SEOUL IST RUECKTRANSFER ZUR ZEIT GENERELL NICHT MOEGLICH if [[ $delete_dayfile = false && $remote_host != $local_host ]] then echo "set +vx" >> $job_to_send echo "trap '" >> $job_to_send echo "set +vx" >> $job_to_send if [[ $(echo $remote_host | cut -c1-3) = ibm || $(echo $remote_host | cut -c1-5) = lcsgi || $(echo $remote_host | cut -c1-3) = nec || $remote_host = lcflow || $remote_host = lckiaps || $remote_host = lckyu* ]] then if [[ $remote_host = ibmh ]] then return_queue=c1 elif [[ $remote_host = ibmkisti ]] then return_queue=class.1-2 elif [[ $remote_host = ibmku ]] then return_queue=sdbg2 elif [[ $remote_host = ibms ]] then return_queue=p_normal elif [[ $remote_host = ibmy ]] then return_queue=serial elif [[ $remote_host = 
lcsgih || $remote_host = lcsgib ]] then return_queue=serialq elif [[ $remote_host = necriam ]] then return_queue=SP elif [[ $remote_host = lckiaps ]] then return_queue=express elif [[ $remote_host = lckyuh ]] then return_queue=cx-single elif [[ $remote_host = lckyut ]] then return_queue=cx-single else return_queue=unknown fi if [[ $(echo $remote_host | cut -c1-3) = ibm ]] then if [[ $remote_host = ibmku ]] then echo "echo \"#!/usr/bin/ksh\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$kennung" >> $job_to_send else echo "echo \"#!/bin/ksh\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"# @ job_type = serial\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ job_name = transfer\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ wall_clock_limit = 00:10:00,00:10:00\" >> scpjob.$kennung " >> $job_to_send echo "echo \"# @ output = job_queue/last_job_transfer_protocol\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ error = job_queue/last_job_transfer_protocol\" >> scpjob.$kennung" >> $job_to_send if [[ $host != "ibmh" ]] then echo "echo \"# @ class = $return_queue\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"# @ image_size = 10\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ notification = never\" >> scpjob.$kennung" >> $job_to_send echo "echo \"# @ queue\" >> scpjob.$kennung" >> $job_to_send echo "echo \" \" >> scpjob.$kennung" >> $job_to_send echo "echo \"set -x\" >> scpjob.$kennung" >> $job_to_send echo "echo \"batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \\\"$job_catalog\\\" $local_dayfile\" >> scpjob.$kennung" >> $job_to_send if [[ $remote_host = ibmku ]] then echo "echo \"rm scpjob.$kennung\" >> scpjob.$kennung" >> $job_to_send fi echo "echo \"exit\" >> scpjob.$kennung" >> $job_to_send elif [[ $remote_host = nech ]] then echo "cd 
/pf/b/${remote_user}/job_queue" >> $job_to_send echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#PBS -l ${qsubmem}=1GB,${qsubtime}=100" >> $job_to_send echo "#PBS -o last_job_transfer_protocol" >> $job_to_send echo "#PBS -j o" >> $job_to_send echo " " >> $job_to_send echo "set -x" >> $job_to_send echo "cd /pf/b/${remote_user}/job_queue" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $remote_host = necriam ]] then echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#PBS -q $return_queue" >> $job_to_send echo "#PBS -o last_job_transfer_protocol" >> $job_to_send echo "#PBS -j o" >> $job_to_send echo " " >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $remote_host = lckyuh ]] then echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#!/bin/bash" >> $job_to_send echo "#PJM -L \"node=1\"" >> $job_to_send echo "#PJM -L \"rscgrp=$return_queue\"" >> $job_to_send echo "#PJM --no-stging" >> $job_to_send echo "#PJM -L \"elapse=30:00\"" >> $job_to_send echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "#PJM -j" >> $job_to_send echo " " >> $job_to_send echo "export LANG=en_US.UTF-8" >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $remote_host = lckyut ]] then echo "cat > scpjob.$kennung << %%END%%" >> 
$job_to_send echo "#!/bin/bash" >> $job_to_send echo "#PJM -L \"vnode=1\"" >> $job_to_send echo "#PJM -L \"rscgrp=$return_queue\"" >> $job_to_send echo "#PJM --no-stging" >> $job_to_send echo "#PJM -L \"elapse=30:00\"" >> $job_to_send echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "#PJM -j" >> $job_to_send echo " " >> $job_to_send echo "export LANG=en_US.UTF-8" >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres $remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $(echo $remote_host | cut -c1-5) = lcsgi ]] then echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "#!/bin/bash" >> $job_to_send echo "#PBS -N job_protocol_transfer" >> $job_to_send echo "#PBS -l walltime=00:30:00" >> $job_to_send echo "#PBS -l nodes=1:ppn=1" >> $job_to_send echo "#PBS -l feature=data" >> $job_to_send echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "#PBS -j oe" >> $job_to_send echo " " >> $job_to_send echo ". 
/usr/share/modules/init/bash" >> $job_to_send echo "set -x" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send elif [[ $remote_host = lcflow ]] then echo "cat > scpjob.${kennung}.tmp << %%END%%" >> $job_to_send echo "#!/bin/bash" >> $job_to_send echo "SGEPREFIX -S /bin/bash" >> $job_to_send echo "SGEPREFIX -N transfer_$job_name" >> $job_to_send echo "SGEPREFIX -cwd" >> $job_to_send echo "SGEPREFIX -l h_rt=01:00:00" >> $job_to_send echo "SGEPREFIX -l h_vmem=500M" >> $job_to_send echo "SGEPREFIX -l excl_flow=false" >> $job_to_send echo "SGEPREFIX -j y" >> $job_to_send echo "SGEPREFIX -o ${local_host}_${job_name}_scpjob_$kennung" >> $job_to_send echo " " >> $job_to_send echo "set -x" >> $job_to_send echo "export PALM_BIN=$PALM_BIN" | sed -e 's:'$HOME':$HOME:' >> $job_to_send echo "export PATH=\$PATH:\$PALM_BIN" >> $job_to_send echo "" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "rm -f scpjob.${kennung}" >> $job_to_send echo "%%END%%" >> $job_to_send echo "sed -e 's/SGEPREFIX/#$/g' scpjob.${kennung}.tmp > scpjob.${kennung}" >> $job_to_send echo "rm -f scpjob.${kennung}.tmp" >> $job_to_send else echo "cat > scpjob.$kennung << %%END%%" >> $job_to_send echo "# @\\\$-q $return_queue" >> $job_to_send echo "# @\\\$-l${qsubtime} 10" >> $job_to_send echo "# @\\\$-l${qsubmem} 10mb" >> $job_to_send if [[ $remote_host = t3ej2 || $remote_host = t3ej5 || $remote_host = t3es ]] then echo "# @\$-l mpp_p=0" >> $job_to_send fi echo '# @\$-lF 10mb' >> $job_to_send # echo '# @\$-o /dev/null' >> $job_to_send echo '# @\$-o job_queue/last_job_transfer_protocol' >> 
$job_to_send echo '# @\\\$-eo' >> $job_to_send echo " " >> $job_to_send if [[ $remote_host = t3ej2 || $remote_host = t3ej5 ]] then echo "set +vx" >> $job_to_send echo ". .profile" >> $job_to_send fi echo "set -x" >> $job_to_send echo "batch_scp $PORTOPT -d -w 10 -u $local_user $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile > /dev/null" >> $job_to_send echo "[[ \"\$for_subjob_to_do\" != \"\" ]] && eval \$for_subjob_to_do" >> $job_to_send echo "%%END%%" >> $job_to_send fi if [[ $(echo $remote_host | cut -c1-3) = ibm ]] then echo "llsubmit scpjob.$kennung" >> $job_to_send elif [[ $(echo $remote_host | cut -c1-5) = lcsgi ]] then echo "rm -rf \$HOME/job_queue/last_job_transfer_protocol" >> $job_to_send echo "chmod u+x scpjob.$kennung" >> $job_to_send echo "msub scpjob.$kennung" >> $job_to_send elif [[ $remote_host = t3eb || $remote_host = t3eh || $remote_host = t3ej2 || $remote_host = t3ej5 ]] then echo "qsub -J n scpjob.$kennung" >> $job_to_send elif [[ $remote_host = t3es ]] then echo "qsub -J n -s /bin/ksh scpjob.$kennung" >> $job_to_send elif [[ $remote_host = lckiaps ]] then echo "mv scpjob.$kennung $job_catalog" >> $job_to_send echo "ssh $SSH_PORTOPT ${remote_username}@${remote_addres} \"$submcom ${job_catalog}/scpjob.$kennung\" " >> $job_to_send echo "rm ${job_catalog}/scpjob.$kennung" >> $job_to_send elif [[ $remote_host = lckyu* ]] then echo "scp $PORTOPT scpjob.$kennung ${remote_username}@${remote_addres}:job_queue" >> $job_to_send echo "ssh $SSH_PORTOPT ${remote_username}@${remote_addres} \"cd job_queue; $submcom scpjob.$kennung; rm scpjob.$kennung\" " >> $job_to_send elif [[ $remote_host = lcflow ]] then echo "mv scpjob.$kennung $job_catalog" >> $job_to_send echo "/usr/bin/ssh ${remote_username}@${remote_addres} \"$init_cmds $module_calls cd $job_catalog; $submcom scpjob.$kennung\" " >> $job_to_send else echo "$submcom scpjob.$kennung" >> $job_to_send fi if [[ $remote_host != ibmku && $remote_host != lckiaps ]] then echo "rm 
scpjob.$kennung" >> $job_to_send fi if [[ $remote_host = nech ]] then echo "cd -" >> $job_to_send fi else # echo "ftpcopy -d $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile" >> $job_to_send echo "nohup ftpcopy -d -w 15 $local_addres ${job_catalog}/$remote_dayfile \"$job_catalog\" $local_dayfile > /dev/null &" >> $job_to_send fi echo "set -x" >> $job_to_send echo " ' exit" >> $job_to_send echo "set -x" >> $job_to_send fi # EIGENTLICHE JOB-DATEI AN QSUB-KOMMANDOS ANHAENGEN cat $file_to_send >> $job_to_send if [[ $remote_host = ibm ]] then echo " " >> $job_to_send echo "exit" >> $job_to_send fi # remove job file if [[ $remote_host = lctit || $remote_host = ibmku || $remote_host = lcflow ]] then echo " " >> $job_to_send echo "rm ~/job_queue/$job_on_remhost" >> $job_to_send fi # USER-NAME AUF ZIELRECHNER AUS .NETRC-DATEI ERMITTELN if [[ -z $remote_user ]] then if [[ $remote_host = t3eb || $remote_host = t3eh || $remote_host = t3ej2 || $remote_host = t3ej5 || $remote_host = t3es || $remote_host = vpp ]] then grep $remote_addres ~/.netrc | read dum dum dum remote_user dum dum fi fi # JOB AUF ZIELRECHNER TRANSFERIEREN BZW. INS JOBVERZEICHNIS KOPIEREN if [[ $no_submit = false ]] then if [[ $remote_host != $local_host ]] then [[ $verify = true ]] && printf "\n >>> transfering job to \"$remote_host\"..." if [[ $remote_host = ibms || $remote_host = ibmy ]] # ssh on ibms cannot handle "~/" then job_catalog_save=$job_catalog job_catalog=job_queue elif [[ $remote_host = nech ]] then job_catalog_save=$job_catalog job_catalog=/hpf/b/${remote_user}/job_queue fi if [[ $remote_host = nech ]] then # DATEIEN KOENNEN NUR UEBER DEN ARCHIVE-SERVER DES DKRZ # TRANSFERIERT WERDEN scp $PORTOPT $job_to_send ${remote_user}@136.172.44.205:${job_catalog}/$job_on_remhost else scp $PORTOPT $job_to_send ${remote_user}@${remote_addres}:${job_catalog}/$job_on_remhost fi if [[ $? 
= 1 ]] then locat=scp; exit fi if [[ $remote_host = ibms ]] then job_catalog=$job_catalog_save fi [[ $verify = true ]] && printf "\n >>> finished\n" else eval job_catalog=$job_catalog cp $job_to_send ${job_catalog}/$job_on_remhost fi # NQS- BZW. LOADLEVELER-JOB STARTEN if [[ $remote_host != $local_host ]] then [[ $verify = true ]] && printf "\n >>> submitting job using \"qsub\"...\n" if [[ $(echo $remote_host | cut -c1-5) = lcsgi && $prio = true ]] then printf "\n >>> submit with HLRN qos-feature hiprio...\n" ssh $SSH_PORTOPT $remote_addres -l $remote_user "cd $job_catalog; $submcom -l qos=hiprio $job_on_remhost; rm $job_on_remhost" elif [[ $remote_host = ibmku ]] then ssh $SSH_PORTOPT $remote_addres -l $remote_user "cd $job_catalog; $submcom $job_on_remhost" elif [[ $remote_host = lcflow ]] then /usr/bin/ssh $SSH_PORTOPT $remote_addres -l $remote_user "$init_cmds $module_calls cd $job_catalog; $submcom $job_on_remhost" else ssh $SSH_PORTOPT $remote_addres -l $remote_user "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost" fi [[ $verify = true ]] && printf " >>> o.k.\n" else cd $job_catalog if [[ $(echo $local_host | cut -c1-5) = lcsgi || $(echo $local_host | cut -c1-3) = ibm || $local_host = lccrayf ]] then eval $submcom $job_on_remhost elif [[ $local_host = lcfimm || $local_host = lctit || $localhost = lcxe6 || $localhost = lck || $localhost = lckordi || $localhost = lcyon || $localhost = lcsb || $localhost = lckyu* ]] then chmod u+x $job_on_remhost eval $submcom $job_on_remhost elif [[ $local_host = nech ]] then if [[ $queue = default ]] then eval $submcom $job_on_remhost else eval $submcom -q $queue $job_on_remhost fi else qsub $job_on_remhost fi # Jobfile must not be deleted on lctit/ibmku!! This will be done # only at the end of the job. 
if [[ $local_host != lctit && $local_host != ibmku && $local_host != lcflow ]] then rm $job_on_remhost fi cd - > /dev/null fi fi # ABSCHLUSSARBEITEN if [[ $no_submit = false ]] then rm -f $job_to_send fi [[ $verify = true ]] && printf "\n\n *** SUBJOB finished \n\n"