[1841]1#!/bin/bash
[1090]2
3# subjob - script for automatic generation and submission of batch-job files
4#          for various batch queuing systems
5
[1046]6#--------------------------------------------------------------------------------#
7# This file is part of PALM.
8#
9# PALM is free software: you can redistribute it and/or modify it under the terms
10# of the GNU General Public License as published by the Free Software Foundation,
11# either version 3 of the License, or (at your option) any later version.
12#
13# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
14# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
15# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
16#
17# You should have received a copy of the GNU General Public License along with
18# PALM. If not, see <http://www.gnu.org/licenses/>.
19#
[1310]20# Copyright 1997-2014  Leibniz Universitaet Hannover
[1046]21#--------------------------------------------------------------------------------#
22#
23# Current revisions:
[1090]24# ------------------
[1351]25#
[2188]26#
[1046]27# Former revisions:
28# -----------------
[169]29# $Id: subjob 2257 2017-06-07 14:07:05Z raasch $
[2257]30# adjustments for lceddy, removed lcflow-specific code
31#
32# 2188 2017-03-21 06:42:42Z raasch
[1623]33#
[2188]34# 2187 2017-03-21 06:41:25Z raasch
35# adjustment of compute node names for lckyuh
36#
[2186]37# 2184 2017-03-21 04:31:22Z raasch
38# bugfix: localhost renamed local_host
39#
[2150]40# 2148 2017-02-09 16:56:42Z scharf
41# added kuma and gharbi to the list of known hosts
42#
[2135]43# 2134 2017-02-02 07:33:46Z raasch
44# option -E added to msub commands on HLRN-III machines to allow output of more
45# job informations in the job protocol files
46#
[1945]47# 1944 2016-06-15 06:29:00Z raasch
48# adjustments for using HLRN ssh-keys
49#
[1941]50# 1940 2016-06-14 05:15:20Z raasch
51# adjustments for lckiaps
52#
[1867]53# 1866 2016-04-15 06:50:59Z raasch
54# adjusted for lcocean
55#
[1842]56# 1841 2016-04-07 19:14:06Z raasch
57# script now running under bash
58#
[1702]59# 1701 2015-11-02 07:43:04Z maronga
60# Bugfix: added missing init_cmds for lccrayh/lccrayb
61#
[1623]62# 1621 2015-07-17 11:39:33Z heinze
[1621]63# adjustments for Mistral at DKRZ Hamburg (lcbullhh)
[1200]64#
[1576]65# 1575 2015-03-27 09:56:27Z raasch
66# mpp2-queues added to lccrayh
67#
[1548]68# 1547 2015-01-29 15:09:12Z witha
69# adjustments for ForWind computing cluster (lcflow)
70#
[1546]71# 1545 2015-01-29 06:52:23Z heinze
72# local host name for blizzard further specified
73#
[1481]74# 1480 2014-10-17 14:41:49Z raasch
75# adjustments for 2nd stage of HLRNIII
76#
[1469]77# 1468 2014-09-24 14:06:57Z maronga
78# Typo removed (addres->address)
79# Adjustments for lcxe6
80#
[1453]81# 1452 2014-08-22 09:41:06Z heinze
82# local hosts for blizzard added
83#
[1451]84# 1450 2014-08-21 07:31:51Z heinze
85# HLRN-III (lccrayb): testq queue adjusted to mpp1testq
86#
[1443]87# 1442 2014-07-28 07:09:10Z raasch
88# HLRN-III (lccrayb/lccrayh) queues adjusted
89#
[1379]90# 1378 2014-04-28 06:04:58Z raasch
91# -et option added for lctit
92#
[1351]93# 1350 2014-04-04 13:01:30Z maronga
94# location of qsub updated for lcxe6
95#
[1290]96# 1289 2014-03-04 07:12:34Z raasch
97# German comments translated to English
98# fimm-, necriam-, scirocco-, ibmy-, and sgi-specific code removed
99#
[1280]100# 1279 2014-01-28 12:10:14Z raasch
101# node calculation modified due to changes in mrun (tasks_per_node no longer
102# has to be an integral divisor of numprocs)
103#
[1275]104# 1274 2014-01-09 13:14:54Z heinze
105# adjustments for lccrayh
106#
[1267]107# 1266 2013-12-11 12:07:34Z heinze
108# further adjustments for lccrayb (use msub instead of qsub)
109#
[1265]110# 1264 2013-12-09 12:46:09Z fricke
111# Bugfix: Using number of nodes instead of number of processors (lccrayb)
112#
[1263]113# 1262 2013-12-09 10:57:20Z fricke
114# further adjustments for lccrayb
115#
[1261]116# 1260 2013-12-04 12:48:04Z raasch
117# jaboticaba admitted
118#
[1256]119# 1255 2013-11-07 14:43:35Z raasch
120# further adjustments for lccrayb
121#
[1225]122# 1224 2013-09-16 07:27:23Z raasch
123# first adjustments for lccrayb
124#
[1203]125# 1202 2013-07-10 16:22:07Z witha
126# adjustments for Forwind cluster (lcflow)
127#
[1200]128# 1199 2013-07-05 14:52:22Z raasch
129# adjustments for CSC Helsinki (lccrayf)
130#
[1185]131# use of cluster/express queue enabled (ibmh)
132# vinessa added (imuk)
[1047]133#
[1104]134# 1103 2013-02-20 02:15:53Z raasch
135# bash compatibility adjustments (usage of OPTIND, output formatting with printf
136# instead typeset -L/R),
137# further adjustments for lckyuh
138#
[1100]139# 2013-02-10 01:47:43Z raasch
140# adjustments for Kyushu-University computing center (lckyuh - hayaka)
141# and for Forwind cluster (lcflow)
142#
[1096]143# 1094 2013-02-03 01:52:12Z raasch
144# new option -P for explicit setting of ssh/scp port,
145# decalpha parts (yonsei) removed
146#
[1091]147# 2013-02-02 07:06:13Z raasch
[1099]148# adjustments for Kyushu-University computing center (lckyut - tatara)
[1091]149# old changelog messages removed
150#
[1047]151# 1046 2012-11-09 14:38:45Z maronga
152# code put under GPL (PALM 3.9)
153#
[1090]154# 08/07/94 - Siggi - first version finished
155# 29/06/94 - Siggi - script development started
156#--------------------------------------------------------------------------------#
157# subjob - script for automatic generation and submission of batch-job files
158#          for various batch queuing systems
159#--------------------------------------------------------------------------------#
[1]160
161
[1289]162    # VARIABLE-DECLARATIONS AND DEFAULT VALUES
[352]163 delete_dayfile=false
[799]164 email_notification=none
[122]165 group_number=none
[1]166 locat=normal
167 no_default_queue=none
168 no_submit=false
169 job_catalog="~/job_queue"
170 job_name=none
171 local_user=$LOGNAME
172 node_usage=shared
[475]173 numprocs=0
[1]174 punkte="..........................................................."
175 submcom=qsub
176 queue=default
177 remote_host=none
178 remote_user=""
179 verify=true
180
181 typeset  -i   cputime=memory=Memory=0  minuten  resttime  sekunden  stunden
[1779]182 typeset  -i   numprocs  mpi_tasks=nodes=processes_per_node=0 tasks_per_node=threads_per_task=1
[1]183
184
185
[1289]186    # ERROR HANDLING
187    # IN CASE OF EXIT:
[1]188 trap 'if [[ $locat != normal ]]
189       then
190          case  $locat  in
191             (option)  printf "\n  --> available options can be displayed"
192                       printf " by typing:"
193                       printf "\n      \"subjob ?\" \n";;
194             (ftpcopy|parameter|scp|verify)  printf "\n";;
195             (*)       printf "\n  +++ unknown error"
196                       printf "\n      please inform S. Raasch!\n"
197          esac
198          [[ -f $job_to_send ]]  &&  rm  $job_to_send
199          printf "\n\n+++ SUBJOB killed \n\n"
200       fi' exit
201
202
[1289]203    # IN CASE OF TERMINAL-BREAK:
[1]204 trap '[[ -f $job_to_send ]]  &&  rm  $job_to_send
205       printf "\n\n+++ SUBJOB killed \n\n"
206       exit
207      ' 2
208
209
[1289]210    # DETERMINE NAME OF LOCAL HOST
[1]211 local_host=$(hostname)
212
[1289]213    # SET HOST-SPECIFIC VARIABLES (CHECK IF THE LOCAL HOST
214    # IS ADMITTED AT ALL)
215    # NOTE: ONE OF THE ENTRIES FOR "lck" OR "lckordi" ALWAYS HAS TO BE
216    # COMMENTED OUT, BECAUSE THE HOSTNAME (node*) IS THE SAME FOR BOTH MACHINES
[1]217 case  $local_host  in
[1468]218     (ambiel-lx)             local_address=134.106.74.48;  local_host=lcfor;;
219     (atmos)                 local_address=172.20.25.35;   local_host=lcide;;
220     (austru)                local_address=130.75.105.128; local_host=lcmuk;;
221     (autan)                 local_address=130.75.105.57;  local_host=lcmuk;;
222     (bora)                  local_address=130.75.105.103; local_host=lcmuk;;
[2187]223     (a0*|b0*)               local_address=133.5.4.33;     local_host=lckyuh;;
[1545]224     (blizzard1|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.15;  local_host=ibmh;;
225     (blizzard2|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.16;  local_host=ibmh;;
[1468]226     (blogin*|bxc*)          local_address=130.73.233.1;   local_host=lccrayb;;
227     (hlogin*|hxc*)          local_address=130.75.4.1;     local_host=lccrayh;;
228     (breva)                 local_address=130.75.105.98;  local_host=lcmuk;;
229     (buran)                 local_address=130.75.105.58;  local_host=lcmuk;;
230     (caurus)                local_address=130.75.105.19;  local_host=lcmuk;;
231     (climate*)              local_address=165.132.26.68;  local_host=lcyon;;
232     (clogin*)               local_address=86.50.166.21;   local_host=lccrayf;;
233     (cs*)                   local_address=136.172.44.131; local_host=nech;;
234     (elephanta)             local_address=130.75.105.6;   local_host=lcmuk;;
[2257]235     (hpcl*)                 local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
236     (cfd*)                  local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
[1468]237     (node*)                 local_address=165.132.26.61;  local_host=lck;;
238   #  (node*)                 local_address=210.219.61.8;   local_host=lckordi;;
239     (gaia*)                 local_address=150.183.146.24; local_host=ibmkisti;;
[2147]240     (gharbi)                local_address=130.75.105.47;  local_host=lcmuk;;
[1468]241     (gallego)               local_address=130.75.105.10;  local_host=lcmuk;;
242     (gregale)               local_address=130.75.105.109; local_host=lcmuk;;
243     (hababai)               local_address=130.75.105.108; local_host=lcmuk;;
244     (hayaka*)               local_address=133.5.4.33;     local_host=lckyuh;;
245     (hexagon.bccs.uib.no)   local_address=129.177.20.113; local_host=lcxe6;;
246     (hx*)                   local_address=133.3.51.11;    local_host=lckyoto;;
247     (inferno)               local_address=130.75.105.5;   local_host=lcmuk;;
248     (irifi)                 local_address=130.75.105.104; local_host=lcmuk;;
249     (jaboticaba)            local_address=150.163.25.181; local_host=lcbr;;
250     (sno)                   local_address=130.75.105.113; local_host=lcmuk;;
[2147]251     (kuma)                  local_address=130.75.105.115; local_host=lcmuk;;
[1468]252     (levanto)               local_address=130.75.105.45;  local_host=lcmuk;;
[1940]253     (login*)                local_address=118.128.66.201; local_host=lckiaps;;
[1468]254     (maestro)               local_address=130.75.105.2;   local_host=lcmuk;;
255     (meller)                local_address=134.106.74.155; local_host=lcfor;;
256     (meteo-login*)          local_address=193.166.211.144;local_host=lcxt5m;;
[1620]257     (mlogin1*|m1*)          local_address=136.172.50.13;  local_host=lcbullhh;;
[1468]258     (hexagon*)              local_address=129.177.20.113; local_host=lcxe6;;
259     (nobel*)                local_address=150.183.5.101;  local_host=ibms;;
[1866]260     (ocean)                 local_address="ocean";        local_host=lcocean;;
[1468]261     (orkan)                 local_address=130.75.105.3;   local_host=lcmuk;;
262     (ostria)                local_address=130.75.105.106; local_host=lcmuk;;
263     (paesano)               local_address=130.75.105.46;  local_host=lcmuk;;
264     (pcj*)                  local_address=172.31.120.1;   local_host=lckyut;;
265     (pingui)                local_address=134.106.74.118; local_host=lcfor;;
266     (quanero)               local_address=130.75.105.107; local_host=lcmuk;;
267     (rte*)                  local_address=133.5.185.60;   local_host=lcrte;;
[1866]268     (schultzl-Latitude-E6540)  local_address="schultzl-Latitude-E6540"; local_host=lcsch;;
[1468]269     (shiokaze-lx)           local_address=134.106.74.123; local_host=lcfor;;
270     (sisu-login*)           local_address=86.50.166.21;   local_host=lccrayf;;
271     (solano)                local_address=130.75.105.110; local_host=lcmuk;;
272     (sugoka*)               local_address=172.31.120.1;   local_host=lckyut;;
[1866]273     (tc*)                   local_address="ocean";        local_host=lcocean;;
[1468]274     (t2a*)                  local_address=10.1.6.165;     local_host=lctit;;
275     (urban*)                local_address=147.46.30.151;  local_host=lcsb;;
276     (vinessa)               local_address=130.75.105.112; local_host=lcmuk;;
277     (vorias)                local_address=172.20.25.43;   local_host=lcmuk;;
278     (*.cc.kyushu-u.ac.jp)   local_address=133.5.4.129;    local_host=ibmku;;
[1]279     (*)                     printf "\n  +++ \"$local_host\" unknown";
[1255]280                             printf "\n      please contact the PALM group at IMUK";
[1]281                             locat=parameter; exit;;
282 esac
283
284
285
[1289]286    # BY DEFAULT, THE REMOTE HOST IS THE LOCAL HOST
[1]287 remote_host=$local_host
288
289
290
291
[1289]292    # READ THE SHELLSCRIPT-OPTIONS
[1094]293 while  getopts  :c:dDe:g:h:m:n:N:O:P:q:t:T:u:vX:  option
[1]294 do
295   case  $option  in
296       (c)   job_catalog=$OPTARG;;
297       (d)   delete_dayfile=true;;
298       (D)   no_submit=true;;
[352]299       (e)   email_notification=$OPTARG;;
[125]300       (g)   group_number=$OPTARG;;
[1]301       (h)   remote_host=$OPTARG;;
302       (m)   memory=$OPTARG;;
303       (n)   job_name=$OPTARG;;
304       (N)   node_usage=$OPTARG;;
305       (O)   threads_per_task=$OPTARG;;
[1094]306       (P)   scp_port=$OPTARG;;
[1]307       (q)   no_default_queue=$OPTARG;;
308       (t)   cputime=$OPTARG;;
309       (T)   tasks_per_node=$OPTARG;;
310       (u)   remote_user=$OPTARG;;
311       (v)   verify=false;;
312       (X)   numprocs=$OPTARG;;
313       (\?)  printf "\n  +++ Option $OPTARG unknown \n";
314             locat=option; exit;;
315   esac
316 done
317
318
[1289]319    # GET THE NAME OF THE JOBFILE AS NEXT ARGUMENT
[1103]320 (( to_shift = $OPTIND - 1 ))
321 shift $to_shift; file_to_send=$1
[1]322
323
[1289]324    # OUTPUT OF SHORT DESCRIPTION OF SCRIPT-OPTIONS
[1]325 if [ "$1" = "?" ]
326 then
327   (printf "\n  *** subjob can be called as follows:\n"
328    printf "\n      subjob -c.. -d -D -e.. -g.. -h.. -m.. -n.. -N.. -O.. -P.. -q.. -t.. -T.. -u.. -v -X..  <jobfile>\n"
329    printf "\n      Description of available options:\n"
330    printf "\n      Option  Description                         Default-Value"
331    printf "\n        -c    job-input- and output-catalog       ~/job_queue"
332    printf "\n        -d    no job-protocol will be created     ---"
333    printf "\n        -D    only the job-file will be created   ---"
       printf "\n        -e    email address for notifications     none"
       printf "\n        -g    group number (project account)      none"
334    printf "\n        -h    execution host, available hosts:    $remote_host"
[1289]335    printf "\n              ibm, ibmh, ibmkisti, ibmku, ibms, lc...,"
336    printf "\n              lckiaps, lctit, nech"
[1]337    printf "\n        -m    memory demand per process in MByte  ---"
338    printf "\n        -n    jobname                             <jobfile>"
       printf "\n        -N    node usage (shared/not_shared)      shared"
339    printf "\n        -O    threads per task (for OpenMP usage) 1"
[1094]340    printf "\n        -P    ssh/scp port                        default port"
[1]341    printf "\n        -q    job-queue to be used                default"
342    printf "\n        -t    allowed cpu-time in seconds         ---"
343    printf "\n        -T    tasks per node (on parallel hosts)  ---"
344    printf "\n        -u    username on execution host          from .netrc"
345    printf "\n        -v    no prompt for confirmation          ---"
346    printf "\n        -X    # of processors (on parallel hosts) 1"
347    printf "\n "
348    printf "\n      The only possible positional parameter is <jobfile>:"
349    printf "\n      The complete NQS-job must be provided here."
350    printf "\n      <jobfile>=? creates this outline\n\n") | more
351    exit
352 fi
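
    # Illustrative call (hypothetical values), based on the options listed above:
    #   subjob -h lccrayh -q mpp1q -X 240 -T 24 -t 86400 -m 2000 example_job
    # would request 240 processors, 24 tasks per node, 86400 s (24 h) cpu-time
    # and 2000 MByte memory per process for the job-file "example_job"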
353
354
355
[1289]356    # CHECK, IF JOB-FILE HAS BEEN GIVEN AS ARGUMENT AND IF THE FILE ITSELF EXISTS
[1]357 if [[ "$file_to_send" = "" ]]
358 then
359    printf "\n  +++ job-file missing"
360    locat=parameter; exit
361 else
362    if [[ -f $file_to_send ]]
363    then
364       true
365    else
366       printf "\n  +++ job-file: "
367       printf "\n           $file_to_send"
368       printf "\n      does not exist"
369       locat=parameter; exit
370    fi
371 fi
372
373
374
[1289]375    # IF NO JOBNAME HAS BEEN GIVEN, JOBNAME IS SET TO THE NAME OF THE JOB-FILE,
376    # PROVIDED THAT THE JOB-FILE NAME DOES NOT CONTAIN ANY PATH
[1]377 if [[ $job_name = none ]]
378 then
379    job_name=$file_to_send
380 fi
381 if [[ $(echo $job_name | grep -c "/") != 0 ]]
382 then
383    printf "\n  +++ job-file name: "
384    printf "\n           $job_name"
385    printf "\n      must not contain \"/\"-characters"
386    locat=parameter; exit
387 fi
388
389
390
391
[1289]392    # SET HOST-SPECIFIC QUANTITIES, OR TERMINATE IN CASE OF UNKNOWN HOST,
393    # OR IF NO HOST HAS BEEN GIVEN
[1]394 if [[ $remote_host = none ]]
395 then
396    printf "\n  +++ host missing"
397    locat=option; exit
398 else
399    case  $remote_host  in
[1468]400        (ibm)     queue=p690_standard; remote_address=134.76.99.81; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
401        (ibmh)    queue=cluster; remote_address=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
402        (ibmkisti) queue=class.32plus; remote_address=150.183.146.24; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
403        (ibmku)   queue=s4; remote_address=133.5.4.129; submcom=/usr/local/bin/llsubmit;;
404        (ibms)    queue=p_normal; remote_address=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
[1620]405        (lcbullhh)    queue=compute; remote_address=136.172.50.13; submcom=/usr/bin/sbatch;;
[2134]406        (lccrayb) queue=mpp1testq; remote_address=130.73.233.1; submcom="/opt/moab/default/bin/msub -E";;
407        (lccrayh) queue=mpp1testq; remote_address=130.75.4.1; submcom="/opt/moab/default/bin/msub -E";;
[1468]408        (lccrayf) queue=small; remote_address=86.50.166.21; submcom=/opt/slurm/default/bin/sbatch;;
[2257]409        (lceddy)  remote_address=eddy.hpc.uni-oldenburg.de; submcom=sbatch;;
[1468]410        (lckyoto) remote_address=133.3.51.11; submcom=/thin/local/bin/qsub;;
411        (lck)     remote_address=165.132.26.61; submcom=/usr/torque/bin/qsub;;
[1940]412        (lckiaps) remote_address=118.128.66.201; submcom=/opt/pbs/default/bin/qsub;;
[1468]413        (lckordi) remote_address=210.219.61.8; submcom=/usr/torque/bin/qsub;;
414        (lckyuh)  remote_address=133.5.4.33; submcom=/usr/bin/pjsub;;
415        (lckyut)  remote_address=133.5.4.37; submcom=/usr/bin/pjsub;;
[1866]416        (lcocean) remote_address="ocean"; submcom=qsub;;
[1468]417        (lcsb)    remote_address=147.46.30.151; submcom=/usr/torque/bin/qsub;;
418        (lctit)   queue=S; remote_address=10.1.6.165; submcom=/opt/pbs/tools/bin/t2sub;;
419        (lcxe6)   remote_address=129.177.20.113; submcom=/opt/torque/default/bin/qsub;;
420        (lcxt5m)  remote_address=193.166.211.144; submcom=/opt/pbs/10.1.0.91350/bin/qsub;;
421        (lcyon)   remote_address=165.132.26.68; submcom=/usr/torque/bin/qsub;;
422        (nech)    qsubmem=memsz_job; qsubtime=cputim_job; remote_address=136.172.44.147; submcom="/usr/local/bin/qsub";;
[251]423        (*)       printf "\n  +++ hostname \"$remote_host\" not allowed";
424                  locat=parameter; exit;;
[1]425    esac
426 fi
427
428
[1289]429    # CHECK, IF A VALID QUEUE HAS BEEN GIVEN
[1]430 if [[ $no_default_queue != none ]]
431 then
432    error=false
433    ndq=$no_default_queue
434    case  $remote_host  in
435        (ibm)    case  $ndq  in
436                     (p690_express|p690_standard|p690_long)  error=false;;
437                     (*)                                     error=true;;
438                 esac;;
439        (ibmh)   case  $ndq  in
[1184]440                     (cluster|express)  error=false;;
[1]441                     (*)                                     error=true;;
442                 esac;;
[693]443        (ibmkisti)   case  $ndq  in
444                     (class.32plus|class.1-2|class.2-32)  error=false;;
445                     (*)                                     error=true;;
446                 esac;;
[622]447        (ibmku)  case  $ndq  in
448                     (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s)    error=false;;
449                     (*)                                     error=true;;
450                 esac;;
[1]451        (ibms)   case  $ndq  in
452                     (express|normal|p_express|p_normal|p_normal_1.3|p_normal_1.7|grand)     error=false;;
453                     (*)                                     error=true;;
454                 esac;;
[1620]455        (lcbullhh) case  $ndq  in
[2147]456                     (compute|compute2|shared)  error=false;;
[1620]457                     (*)                                     error=true;;
458                 esac;;
[1224]459        (lccrayb) case  $ndq  in
[1480]460                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1224]461                     (*)                                     error=true;;
462                 esac;;
[1274]463        (lccrayh) case  $ndq  in
[1575]464                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1274]465                     (*)                                     error=true;;
466                 esac;;
[1197]467        (lccrayf) case  $ndq  in
468                     (usup|test*|small|large)                error=false;;
469                     (*)                                     error=true;;
470                 esac;;
[2257]471        (lceddy) case  $ndq  in
472                     (eddy.p|cfdh.p|cfdl.p|carl.p|mpcs.p|mpcl.p|mpcb.p|all_nodes.p)  error=false;;
[1099]473                     (*)                                     error=true;;
474                 esac;;
[1040]475        (lckiaps) case  $ndq  in
[1940]476                     (express|normal|normal20|quickq)        error=false;;
[1040]477                     (*)                                     error=true;;
478                 esac;;
[440]479        (lckyoto) case  $ndq  in
480                     (eh|ph)                                 error=false;;
481                     (*)                                     error=true;;
482                 esac;;
[1099]483        (lckyuh) case  $ndq  in
484                     (fx-dbg|fx-single|fx-small|fx-middle|fx-large)  error=false;;
485                     (*)                                     error=true;;
486                 esac;;
[1090]487        (lckyut) case  $ndq  in
488                     (cx-dbg|cx-single|cx-small|cx-middle|cx-large)  error=false;;
489                     (*)                                     error=true;;
490                 esac;;
[1]491        (lctit)  case  $ndq  in
[635]492                     (G|L128|L256|L512H|S|S96|V)             error=false;;
[1]493                     (*)                                     error=true;;
494                 esac;;
495        (t3eb)   case  $ndq  in
496                     (berte|p50|p100|p392|forfree|p25himem)  error=false;;
497                     (*)    error=true;;
498                 esac;;
499        (t3eh)   case  $ndq  in
500                     (para_t3e|em|k|l|lm|comp_t3e|c|p|ht)  error=false;;
501                     (*)    error=true;;
502                 esac;;
503        (t3ej2|t3ej5)  case  $ndq  in
504                     (low|normal|high)  error=false;;
505                     (*)    error=true;;
506                 esac;;
507        (t3es)  case  $ndq  in
508                     (batch|serial-4|pe4|p48|pe16|pe32|pe64|pe128)  error=false;;
509                     (*)    error=true;;
510                 esac;;
511    esac
512    if [[ $error = true ]]
513    then
514       printf "\n  +++ queue \"$no_default_queue\" on host \"$remote_host\" not allowed"
515       locat=parameter; exit
516    else
517       queue=$no_default_queue
518    fi
519 fi
520
521
522
[1289]523    # CHECK THE CPU-TIME
524    # SPLIT TIME INTO HOURS, MINUTES, AND SECONDS
[1]525 done=false
526 while [[ $done = false ]]
527 do
528    if (( $cputime <= 0 ))
529    then
530       printf "\n  +++ wrong cpu-time or cpu-time missing"
531       printf "\n  >>> Please type cpu-time in seconds as INTEGER:"
532       printf "\n  >>> "
533       read  cputime  1>/dev/null  2>&1
534    else
535       done=true
536    fi
537 done
538 if [[ $remote_host = nech ]]
539 then
540    if (( tasks_per_node != 0 ))
541    then
542       (( cputime = cputime * tasks_per_node ))
543    elif [[ $numprocs != 0 ]]
544    then
545       (( cputime = cputime * numprocs ))
546    fi
547 fi
548 (( stunden  = cputime / 3600 ))
549 (( resttime = cputime - stunden * 3600 ))
550 (( minuten  = resttime / 60 ))
551 (( sekunden = resttime - minuten * 60 ))
552 timestring=${stunden}:${minuten}:${sekunden}
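
    # Example: for cputime=86523 the split above yields stunden=24, minuten=2,
    # sekunden=3, i.e. timestring=24:2:3 (no zero-padding is applied)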
553
554
555
[1289]556    # CHECK THE MEMORY DEMAND
[1]557 done=false
558 while [[ $done = false ]]
559 do
560    if (( memory <= 0 ))
561    then
562       printf "\n  +++ wrong memory demand or memory demand missing"
563       printf "\n  >>> Please type memory in  MByte per process  as INTEGER:"
564       printf "\n  >>> "
565       read  memory  1>/dev/null  2>&1
566    else
567       done=true
568    fi
569 done
570
[1289]571 if [[ $remote_host = nech ]]
[1]572 then
573    if (( tasks_per_node != 0 ))
574    then
575       (( Memory = memory * tasks_per_node / 1000 ))
576    elif [[ $numprocs != 0 ]]
577    then
578       (( Memory = memory * numprocs / 1000 ))
579    else
580       (( Memory = memory / 1000 ))
581    fi
[635]582 elif [[ $remote_host = lctit ]]
583 then
584    (( Memory = memory * tasks_per_node / 1000 ))
[1]585 fi
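
    # Example (nech): memory=2000 (MByte per process) and tasks_per_node=24
    # give Memory = 2000 * 24 / 1000 = 48, the value (in gb) used below for
    # the ${qsubmem} resource request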
586
587
[1289]588    # MEMORY DEMAND IN CASE OF OPENMP-USAGE ON IBM-SYSTEMS
[1]589 if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
590 then
591    (( memory = memory * threads_per_task ))
592 fi
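
    # Example (ibm hosts): with -m 1500 and -O 4 the memory demand per task
    # becomes memory = 1500 * 4 = 6000 MByte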
593
594
[1289]595    # CALCULATE NUMBER OF REQUIRED NODES
[1]596 if (( tasks_per_node != 0 ))
597 then
[1279]598    (( nodes = ( numprocs - 1 ) / ( tasks_per_node * threads_per_task ) + 1 ))
[1]599 fi
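
    # The formula rounds up, so tasks_per_node does not have to divide numprocs
    # evenly. Example: numprocs=250, tasks_per_node=24, threads_per_task=1
    # gives nodes = (250-1)/24 + 1 = 10 + 1 = 11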
600
[1094]601
[1289]602    # CALCULATE NUMBER OF PROCESSES PER NODE
[622]603 (( processes_per_node = tasks_per_node * threads_per_task ))
[1]604
[1094]605
[1289]606    # CALCULATE NUMBER OF MPI TASKS
[696]607 (( mpi_tasks = numprocs / threads_per_task ))
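
    # Example (hybrid MPI/OpenMP): numprocs=240, tasks_per_node=6 and
    # threads_per_task=4 give processes_per_node = 6 * 4 = 24,
    # mpi_tasks = 240 / 4 = 60 and nodes = 10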
[1]608
[696]609
[1289]610    # SET PORT NUMBER OPTION FOR CALLS OF ssh/scp, subjob AND batch_scp SCRIPTS
[1094]611 if [[ "$scp_port" != "" ]]
612 then
613    PORTOPT="-P $scp_port"
614    SSH_PORTOPT="-p $scp_port"
615 fi
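
    # Example: "-P 2222" results in PORTOPT="-P 2222" (used for scp/batch_scp)
    # and SSH_PORTOPT="-p 2222" (used for ssh); otherwise both remain empty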
616
617
[1289]618    # HEADER-OUTPUT
[1]619 if [[ $verify = true ]]
620 then
621    printf "\n\n"
622    printf "#--------------------------------------------------------------# \n"
623    spalte1=SUBJOB;spalte2=$(date)
[1103]624    printf "| %-20s%40s | \n" "$spalte1" "$spalte2"
[1]625    printf "|                                                              | \n"
626    printf "| values of parameters/options:                                | \n"
[1103]627    spalte1=$(echo local_host$punkte | cut -c-20)
628    spalte2=$punkte$local_host
629    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
630    spalte1=$(echo remote_host$punkte | cut -c-20)
631    spalte2=$punkte$remote_host
632    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
633    spalte1=$(echo queue$punkte | cut -c-20)
634    spalte2=$punkte$queue
635    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
636    spalte1=$(echo memory$punkte | cut -c-20)
637    spalte2="$punkte$memory mb"
638    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
639    spalte1=$(echo cputime$punkte | cut -c-20)
640    spalte2="$punkte$cputime sec"
641    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
642    spalte1=$(echo job_name$punkte | cut -c-20)
643    spalte2="$punkte$job_name"
644    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
[1]645    printf "#--------------------------------------------------------------# \n\n"
646
647
[1289]648       # PROMPT FOR CONFIRMATION
[1]649    antwort="dummy"
650    while [[ $antwort != y  &&  $antwort != Y  &&  $antwort != n  &&  $antwort != N ]]
651    do
652       read -p " >>> continue (y/n) ? "  antwort
653    done
654    if [[ $antwort = n  ||  $antwort = N ]]
655    then
656       locat=verify; exit
657    fi
658    printf "\n"
659 fi
660
[1289]661    # GENERATE RANDOM IDENTIFIER, AND DETERMINE THE JOBNAME ON THE TARGET HOST
662 identifier=$RANDOM
663 job_on_remhost=${job_name}_${identifier}_$local_host
664 job_to_send=job_to_send_$identifier
[1]665 if [[ $delete_dayfile = false ]]
666 then
[1289]667    remote_dayfile=${local_host}_${job_name}_result_$identifier
[1]668    local_dayfile=${remote_host}_${job_name}
669 else
670    remote_dayfile=/dev/null
671 fi
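
    # Example: job_name=example_job, identifier=12345 (taken from $RANDOM) and
    # local_host=lcmuk result in job_on_remhost=example_job_12345_lcmuk,
    # job_to_send=job_to_send_12345 and remote_dayfile=lcmuk_example_job_result_12345
    # (unless option -d was given)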
672
673
[1289]674    # GENERATE THE BATCH-JOB SCRIPTS (FOR QUEUEING-SYSTEMS qsub/msub/LoadLeveler)
[1]675 if [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs != 0 ]]
676 then
677
[1289]678       # GENERAL LOADLEVELER SETTINGS
[622]679    execute_in_shell="#!/bin/ksh"
680    use_shell="# @ shell = /bin/ksh"
681    consumable_memory="ConsumableMemory($memory mb)"
682    class="# @ class = $queue"
683    environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes"
684    network_to_use="# @ network.mpi = sn_all,shared,us"
685    data_limit="# @ data_limit = 1.76gb"
686    image_size="# @ image_size = 50"
[693]687    wall_clock_limit="# @ wall_clock_limit = ${timestring},$timestring"
[312]688
[693]689    if [[ $email_notification = none ]]
690    then
691       notify_user=""
692    else
693       notify_user="# @ notify_user = $email_notification"
694       if [[ $delete_dayfile = true ]]
695       then
696          notification='# @ notification = never'
697       fi
698    fi
[622]699
[312]700    if [[ $remote_host = ibmh ]]
[1]701    then
[312]702       data_limit=""
703       network_to_use=""
[1184]704       class="# @ class = $queue"
[312]705       environment=""
[814]706       rset="# @ rset = RSET_MCM_AFFINITY"
707       task_affinity="# @ task_affinity = core(1)"
[693]708    elif [[ $remote_host = ibmkisti ]]
709    then
710       network_to_use="# @ network.MPI = sn_all,shared,US"
711       wall_clock_limit="# @ wall_clock_limit = $timestring"
[696]712       if [[ $threads_per_task = 1 ]]
713       then
714          rset="# @ rset = RSET_MCM_AFFINITY"
715          mcm_affinity_options="# @ mcm_affinity_options = mcm_mem_pref mcm_sni_none mcm_distribute"
716       fi
[693]717       environment=""
718       use_shell=""
719       data_limit=""
720       image_size=""
[622]721    elif [[ $remote_host = ibmku ]]
722    then
723       execute_in_shell="#!/usr/bin/ksh"
724       use_shell="# @ shell = /usr/bin/ksh"
725       consumable_memory=""
726       environment=""
727       network_to_use="# @ network.mpi = sn_all,shared,us"
728       data_limit=""
729       image_size=""
730    elif [[ $remote_host = ibms ]]
731    then
732       network_to_use="# @ network.mpi = csss,shared,us"
[1]733    fi
734
735    cat > $job_to_send << %%END%%
[622]736$execute_in_shell
737$use_shell
[1]738
739# @ job_type = parallel
[693]740# @ job_name = $job_name
[1]741# @ resources = ConsumableCpus($threads_per_task) $consumable_memory
742# @ output = $remote_dayfile
743# @ error = $remote_dayfile
[693]744$wall_clock_limit
[622]745$image_size
[312]746$class
747$environment
[1]748$network_to_use
749$data_limit
[693]750$rset
751$mcm_affinity_options
[814]752$task_affinity
[1]753$notification
[693]754$notify_user
[1]755
756%%END%%
757
758    if (( nodes > 0 ))
759    then
760
[693]761       if [[ $remote_host != ibmkisti ]]
762       then
763
764          cat >> $job_to_send << %%END%%
[1]765# @ node = $nodes
[622]766# @ tasks_per_node = $processes_per_node
[1]767# @ node_usage = $node_usage
768# @ queue
769
770%%END%%
771
[693]772       else
773
774          cat >> $job_to_send << %%END%%
[696]775# @ total_tasks = $mpi_tasks
[693]776# @ blocking = unlimited
777# @ queue
778
779%%END%%
780
781       fi
782
[1]783    else
784
[1289]785       cat >> $job_to_send << %%END%%
[1]786# @ blocking = unlimited
787# @ total_tasks = $numprocs
788# @ node_usage = $node_usage
789# @ queue
790
791%%END%%
792
793    fi
794
[1289]795       # WORKAROUND BECAUSE OF SILLY JOB FILTER ON ibmkisti
[696]796    if [[ $remote_host = ibmkisti  &&  $threads_per_task != 1 ]]
797    then
798       echo  "export OMP_NUM_THREADS=$threads_per_task"  >>  $job_to_send
799    fi
800
[1]801 elif [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs = 0 ]]
802 then
803
804    cat > $job_to_send << %%END%%
805#!/bin/ksh
806
807# @ job_type = serial
808# @ node_usage = $node_usage
809# @ job_name = palm
810# @ wall_clock_limit = ${timestring},$timestring
811# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)
812# @ output = $remote_dayfile
813# @ error = $remote_dayfile
[312]814$class
[1]815$notification
816
817# @ queue
818
819%%END%%
820
[1620]821 elif [[ $remote_host = lcbullhh ]]
822 then
823    if [[ $numprocs != 0 ]]
824    then
825       cat > $job_to_send << %%END%%
826#!/bin/bash -l
827#SBATCH -J $job_name
828#SBATCH -t $timestring
829#SBATCH -N $nodes
830#SBATCH --ntasks-per-node=$processes_per_node
831#SBATCH -p $queue
832#SBATCH -o $remote_dayfile
833#SBATCH -e $remote_dayfile
834#SBATCH -A $project_account
835
836$init_cmds
837$module_calls
838
839%%END%%
840
841    else
842       cat > $job_to_send << %%END%%
843#!/bin/bash -l
844#SBATCH -J $job_name
845#SBATCH -t $timestring
846#SBATCH -l ncpus=1
847#SBATCH -l pmem=${memory}mb
848#SBATCH -m abe
849#SBATCH -o $remote_dayfile
850#SBATCH -e $remote_dayfile
851#SBATCH -A $project_account
852
853$init_cmds
854$module_calls
855
856%%END%%
857
858    fi
859
[1274]860 elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1224]861 then
862
[1480]863    if [[ "$feature" != "" ]]
864    then
865       featuredir="#PBS -l feature=$feature"
866    fi
867
[1224]868    if [[ $numprocs != 0 ]]
869    then
870       cat > $job_to_send << %%END%%
[1255]871#!/bin/bash
[1224]872#PBS -N $job_name
873#PBS -l walltime=$timestring
[1264]874#PBS -l nodes=$nodes:ppn=$processes_per_node
[1224]875#PBS -o $remote_dayfile
876#PBS -j oe
877#PBS -q $queue
[1480]878$featuredir
[1224]879
[1701]880$init_cmds
[1224]881$module_calls
882
883%%END%%
884
885    else
886
887       continue
888
889    fi
890
[1197]891 elif [[ $remote_host = lccrayf ]]
892 then
893
894    if [[ $numprocs != 0 ]]
895    then
896       cat > $job_to_send << %%END%%
897#!/bin/bash -l
898#SBATCH -J $job_name
899#SBATCH -t $timestring
900#SBATCH -N $nodes
901#SBATCH --ntasks-per-node=$processes_per_node
902#SBATCH -p $queue
903#SBATCH -o $remote_dayfile
904#SBATCH -e $remote_dayfile
905
906$init_cmds
907$module_calls
908
909%%END%%
910
911    else
912       cat > $job_to_send << %%END%%
913#!/bin/bash -l
914#SBATCH -J $job_name
915#SBATCH -t $timestring
916#SBATCH -l ncpus=1
917#SBATCH -l pmem=${memory}mb
918#SBATCH -m abe
919#SBATCH -o $remote_dayfile
920#SBATCH -e $remote_dayfile
921
922$init_cmds
923$module_calls
924
925%%END%%
926
927    fi
928
[2257]929
930
931 elif [[ $remote_host = lceddy ]]
[1099]932 then
[2257]933    cat > $job_to_send << %%END%%
[1099]934#!/bin/bash
[2257]935#SBATCH -J $job_name
936#SBATCH -t $timestring
937#SBATCH -n $numprocs
938#SBATCH -N $nodes
939#SBATCH --cpus-per-task 1
940#SBATCH -p $queue
941#SBATCH -o $remote_dayfile
942#SBATCH -e $remote_dayfile
943#SBATCH --mem-per-cpu $memory
944#SBATCH --exclusive
[1099]945
[2257]946module load PALMDependencies/.gcc
947
948$init_cmds
949$module_calls
950
[1099]951%%END%%
952
[1021]953 elif [[ $remote_host = lck || $remote_host = lckordi || $remote_host = lcsb ]]
[368]954 then
955
956    if [[ $numprocs != 0 ]]
957    then
958       cat > $job_to_send << %%END%%
959#!/bin/ksh
960#PBS -N $job_name
961#PBS -l walltime=$timestring
962#PBS -l ncpus=$numprocs
963#PBS -l pmem=${memory}mb
964#PBS -o $remote_dayfile
[1021]965#PBS -l nodes=$nodes:ppn=${processes_per_node}
[368]966#PBS -j oe
967
968mpd &
969
970%%END%%
971
972    else
973       cat > $job_to_send << %%END%%
974#!/bin/ksh
975#PBS -N $job_name
976#PBS -l walltime=$timestring
977#PBS -l ncpus=1
978#PBS -l pmem=${memory}mb
979#PBS -o $remote_dayfile
980#PBS -j oe
981
982%%END%%
983
984    fi
985
[1040]986 elif [[ $remote_host = lckiaps ]]
987 then
988
989    if [[ $numprocs != 0 ]]
990    then
991       cat > $job_to_send << %%END%%
[1940]992#!/bin/bash
[1040]993#PBS -N $job_name
994#PBS -l walltime=$timestring
995#PBS -l select=1:ncpus=$numprocs
996#PBS -l pmem=${memory}mb
997#PBS -q $queue
998#PBS -o $remote_dayfile
999#PBS -j oe
1000#PBS -V
1001
1002%%END%%
1003
1004    else
1005       cat > $job_to_send << %%END%%
[1940]1006#!/bin/bash
[1040]1007#PBS -N $job_name
1008#PBS -l walltime=$timestring
1009#PBS -l ncpus=1
1010#PBS -l pmem=${memory}mb
1011#PBS -o $remote_dayfile
1012#PBS -j oe
1013
1014%%END%%
1015
1016    fi
1017
[693]1018 elif [[ $remote_host = lcyon ]]
1019 then
1020
1021    if [[ $numprocs != 0 ]]
1022    then
1023       cat > $job_to_send << %%END%%
1024#!/bin/ksh
1025#PBS -N $job_name
1026#PBS -l walltime=$timestring
1027#PBS -l ncpus=$numprocs
1028#PBS -l pmem=${memory}mb
1029#PBS -o $remote_dayfile
1030#PBS -j oe
1031
1032%%END%%
1033
1034    else
1035       cat > $job_to_send << %%END%%
1036#!/bin/ksh
1037#PBS -N $job_name
1038#PBS -l walltime=$timestring
1039#PBS -l ncpus=1
1040#PBS -l pmem=${memory}mb
1041#PBS -o $remote_dayfile
1042#PBS -j oe
1043
1044%%END%%
1045
1046    fi
1047
[892]1048 elif [[ $remote_host = lcxe6 ]]
[164]1049 then
1050
1051    if [[ $numprocs != 0 ]]
1052    then
1053       cat > $job_to_send << %%END%%
[799]1054#!/bin/ksh
[164]1055#PBS -S /bin/ksh
1056#PBS -N $job_name
[552]1057#PBS -A $project_account
[206]1058#PBS -j oe
[164]1059#PBS -l walltime=$timestring
1060#PBS -l mppwidth=${numprocs}
[622]1061#PBS -l mppnppn=${processes_per_node}
[164]1062#PBS -m abe
1063#PBS -o $remote_dayfile
[492]1064$email_directive
[164]1065
[892]1066$init_cmds
[493]1067$module_calls
[343]1068
[164]1069%%END%%
1070
[108]1071    else
1072       cat > $job_to_send << %%END%%
1073#!/bin/ksh
[168]1074#PBS -S /bin/ksh
[108]1075#PBS -N $job_name
[552]1076#PBS -A $project_account
[206]1077#PBS -j oe
[108]1078#PBS -l walltime=$timestring
1079#PBS -l ncpus=1
1080#PBS -l pmem=${memory}mb
1081#PBS -m abe
[492]1082$email_directive
[108]1083#PBS -o $remote_dayfile
1084
[892]1085$init_cmds
[493]1086$module_calls
[343]1087
[108]1088%%END%%
1089
1090    fi
1091
[440]1092 elif [[ $remote_host = lckyoto ]]
[437]1093 then
1094
[440]1095       cat > $job_to_send << %%END%%
[799]1096#!/bin/ksh
[440]1097# @\$-o $remote_dayfile
1098# @\$-eo -oi
1099# @\$-lP 16
[799]1100# @\$-lp 1
[440]1101# @\$-lm 28gb  -llm unlimited -ls unlimited
1102# @\$-q $queue
1103# @\$-Pvn abs_pack
1104##for intel? @\$-Pvn abs_unpack -Pvs unpack -Pvc unpack
1105#. /thin/local/etc/setprofile/intel-11.0.sh
1106#. /thin/local/etc/setprofile/mvapich2-1.4+intel-11.0.sh
1107. ~/.myprofile
1108#. /home2/t/t51254/palm/current_version/myprofile
1109#. /thin/apps/pgi/mpi.sh
1110#
1111env
1112#
1113set -x
1114
1115%%END%%
1116
1117 elif [[ $remote_host = lcxt5m ]]
1118 then
1119
[437]1120    if [[ $numprocs != 0 ]]
1121    then
1122       cat > $job_to_send << %%END%%
[799]1123#!/bin/ksh
[437]1124#PBS -S /bin/ksh
1125#PBS -N $job_name
1126#PBS -j oe
1127#PBS -l walltime=$timestring
1128#PBS -l mppwidth=${numprocs}
[622]1129#PBS -l mppnppn=${processes_per_node}
[437]1130#PBS -m abe
1131#PBS -o $remote_dayfile
1132
[892]1133$init_cmds
[493]1134$module_calls
[437]1135
1136%%END%%
1137
1138    else
1139       cat > $job_to_send << %%END%%
1140#!/bin/ksh
1141#PBS -S /bin/ksh
1142#PBS -N $job_name
1143#PBS -j oe
1144#PBS -l walltime=$timestring
1145#PBS -l ncpus=1
1146#PBS -l pmem=${memory}mb
1147#PBS -m abe
1148#PBS -o $remote_dayfile
1149
[892]1150$init_cmds
[493]1151$module_calls
[437]1152
1153%%END%%
1154
1155    fi
1156
[1099]1157 elif [[ $remote_host = lckyuh ]]
1158 then
1159    cat > $job_to_send << %%END%%
1160#!/bin/bash
1161#PJM -L "rscgrp=$queue"
1162#PJM -L "node=$nodes"
1163#PJM --mpi "proc=$numprocs"
1164#PJM -L "elapse=$timestring"
1165#PJM -o $remote_dayfile
1166#PJM -j
1167#PJM -X
1168#PJM --no-stging
1169
1170export LANG=en_US.UTF-8
1171%%END%%
1172
[1090]1173 elif [[ $remote_host = lckyut ]]
1174 then
1175    cat > $job_to_send << %%END%%
1176#!/bin/bash
[1099]1177#PJM -L "rscgrp=$queue"
[1090]1178#PJM -L "vnode=$numprocs"
1179#PJM -L "vnode-core=1"
1180#PJM -L "elapse=$timestring"
1181#PJM --mpi proc=$numprocs
1182#PJM -o $remote_dayfile
1183#PJM -j
[1099]1184#PJM -X
1185#PJM --no-stging
[1090]1186
1187export LANG=en_US.UTF-8
1188%%END%%
1189
[1866]1190 elif [[ $remote_host = lcocean ]]
1191 then
1192   cat > $job_to_send << %%END%%
1193#!/bin/bash
1194#$ -cwd
1195#$ -V
1196#$ -N $job_name
1197#$ -pe orte $numprocs
1198#$ -o $remote_dayfile
1199#$ -j y
1200#$ -R y
1201$init_cmds
1202$module_calls
1203
1204%%END%%
1205
[1]1206 elif [[ $remote_host = nech ]]
1207 then
1208
1209    if (( nodes > 1 ))
1210    then
1211       cat > $job_to_send << %%END%%
1212#!/bin/ksh
[622]1213#PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime
[1]1214#PBS -l ${qsubmem}=${Memory}gb
1215#PBS -b $nodes
1216#PBS -o $remote_dayfile
1217#PBS -N palm
1218#PBS -j o
1219#PBS -T mpisx
1220
1221%%END%%
1222
1223    elif [[ $numprocs != 0 ]]
1224    then
1225       cat > $job_to_send << %%END%%
1226#!/bin/ksh
[622]1227#PBS -l cpunum_prc=$processes_per_node,cputim_job=$cputime
[1]1228#PBS -l ${qsubmem}=${Memory}gb
1229#PBS -o $remote_dayfile
1230#PBS -N palm
1231#PBS -j o
1232
1233%%END%%
1234
1235    else
1236       cat > $job_to_send << %%END%%
1237#!/bin/ksh
1238#PBS -l ${qsubmem}=${Memory}gb,${qsubtime}=$cputime
1239#PBS -o $remote_dayfile
1240#PBS -j o
1241
1242%%END%%
1243
1244    fi
1245
1246 elif [[ $remote_host = lctit ]]
1247 then
1248    cat > $job_to_send << %%END%%
[635]1249#!/bin/ksh
[892]1250$init_cmds
[678]1251$module_calls
1252
[1]1253%%END%%
1254
[1289]1255       # SET OPTIONS FOR SUBMIT-COMMAND
[678]1256    if [[ $tasks_per_node != $processes_per_node ]]
1257    then
[1378]1258       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1259    else
[1378]1260       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1261    fi
[1]1262
1263 else
1264
1265    cat > $job_to_send << %%END%%
1266# @\$-q ${queue}
1267# @\$-l${qsubtime} $timestring
1268# @\$-l${qsubmem} ${memory}mb
1269# @\$-o $remote_dayfile
1270# @\$-eo
1271
1272%%END%%
1273
1274 fi
1275
1276
[1289]1277    # IN CASE OF JOBS EXECUTING ON REMOTE HOSTS, THE TRANSFER OF THE DAYFILE
1278    # TO THE LOCAL HOST WILL BE INITIATED BY A TRAP ON EXIT
1279    # (NO TRANSFER IS POSSIBLE ON THE IBM IN SEOUL)
[1]1280 if [[ $delete_dayfile = false  &&  $remote_host != $local_host ]]
1281 then
1282    echo "set +vx"                              >>  $job_to_send
1283    echo "trap '"                               >>  $job_to_send
1284    echo "set +vx"                              >>  $job_to_send
[2257]1285    if [[ $(echo $remote_host | cut -c1-3) = ibm  ||  $remote_host = lcbullhh  ||  $remote_host = lccrayb  ||  $remote_host = lccrayh  ||  $(echo $remote_host | cut -c1-3) = nec  ||  $remote_host = lckiaps  ||  $remote_host = lckyu* || $remote_host = lcxe6  ||  $remote_host = lcocean ]]
[1]1286    then
[622]1287       if [[ $remote_host = ibmh ]]
[1]1288       then
1289          return_queue=c1
[693]1290       elif [[ $remote_host = ibmkisti ]]
1291       then
1292          return_queue=class.1-2
[622]1293       elif [[ $remote_host = ibmku ]]
1294       then
1295          return_queue=sdbg2
[1]1296       elif [[ $remote_host = ibms ]]
1297       then
1298          return_queue=p_normal
[1620]1299       elif [[ $remote_host = lcbullhh ]]
1300       then
1301          return_queue=shared
[1274]1302       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1303       then
1304          return_queue=dataq
[1468]1305       elif [[ $remote_host = lcxe6 ]]
1306       then
1307          return_queue=debug
[1040]1308       elif [[ $remote_host = lckiaps ]]
1309       then
1310          return_queue=express
[1099]1311       elif [[ $remote_host = lckyuh ]]
1312       then
1313          return_queue=cx-single
[1090]1314       elif [[ $remote_host = lckyut ]]
1315       then
1316          return_queue=cx-single
[1]1317       else
1318          return_queue=unknown
1319       fi
1320
1321       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1322       then
1323
[622]1324          if [[ $remote_host = ibmku ]]
1325          then
[1289]1326             echo "echo \"#!/usr/bin/ksh\" >> scpjob.$identifier"            >>  $job_to_send
1327             echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$identifier"  >>  $job_to_send
[622]1328          else
[1289]1329             echo "echo \"#!/bin/ksh\" >> scpjob.$identifier"                >>  $job_to_send
[622]1330          fi
[1289]1331          echo "echo \"# @ job_type = serial\" >> scpjob.$identifier"    >>  $job_to_send
1332          echo "echo \"# @ job_name = transfer\" >> scpjob.$identifier"  >>  $job_to_send
1333          echo "echo \"# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)\" >> scpjob.$identifier"  >>  $job_to_send
1334          echo "echo \"# @ wall_clock_limit = 00:10:00,00:10:00\" >> scpjob.$identifier "  >>  $job_to_send
1335          echo "echo \"# @ output = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
1336          echo "echo \"# @ error = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
[312]1337          if [[ $remote_host != "ibmh" ]]
1338          then
[1289]1339             echo "echo \"# @ class = $return_queue\" >> scpjob.$identifier"  >>  $job_to_send
[312]1340          fi
[1289]1341          echo "echo \"# @ image_size = 10\" >> scpjob.$identifier"      >>  $job_to_send
1342          echo "echo \"# @ notification = never\" >> scpjob.$identifier" >>  $job_to_send
[1]1343
[1289]1344          echo "echo \"# @ queue\" >> scpjob.$identifier"                >>  $job_to_send
1345          echo "echo \" \" >> scpjob.$identifier"                        >>  $job_to_send
[1]1346
[1289]1347          echo "echo \"set -x\" >> scpjob.$identifier"                   >>  $job_to_send
[1468]1348          echo "echo \"batch_scp  $PORTOPT  -d  -w 10  -u $local_user  $local_address  ${job_catalog}/$remote_dayfile  \\\"$job_catalog\\\"  $local_dayfile\" >> scpjob.$identifier"  >>  $job_to_send
[622]1349          if [[ $remote_host = ibmku ]]
1350          then
[1289]1351             echo "echo \"rm  scpjob.$identifier\" >> scpjob.$identifier"   >>  $job_to_send
[622]1352          fi
[1289]1353          echo "echo \"exit\" >> scpjob.$identifier"                     >>  $job_to_send
[1]1354
1355       elif [[ $remote_host = nech ]]
1356       then
1357          echo "cd /pf/b/${remote_user}/job_queue" >>  $job_to_send
[1289]1358          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1]1359          echo "#PBS -l ${qsubmem}=1GB,${qsubtime}=100"  >>  $job_to_send
1360          echo "#PBS -o last_job_transfer_protocol"      >>  $job_to_send
1361          echo "#PBS -j o"                         >>  $job_to_send
1362          echo " "                                 >>  $job_to_send
1363          echo "set -x"                            >>  $job_to_send
1364          echo "cd /pf/b/${remote_user}/job_queue" >>  $job_to_send
[1468]1365          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1]1366          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1367          echo "%%END%%"                           >>  $job_to_send
1368
[1620]1369       elif [[ $remote_host = lcbullhh ]]
1370       then
1371          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
1372          echo "#!/bin/bash"                             >>  $job_to_send
1373          echo "#SBATCH --job-name=job_protocol_transfer" >>  $job_to_send
1374          echo "#SBATCH -t 00:20:00"                     >>  $job_to_send
1375          echo "#SBATCH -N 1"                            >>  $job_to_send
1376          echo "#SBATCH -n 1"                            >>  $job_to_send
1377          echo "#SBATCH -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1378          echo "#SBATCH -o $remote_dayfile"              >>  $job_to_send
1379          echo "#SBATCH -e $remote_dayfile"              >>  $job_to_send
1380          echo "#SBATCH -A $project_account"             >>  $job_to_send
1381          echo "#SBATCH -p $return_queue"                >>  $job_to_send
1382          echo " "                                       >>  $job_to_send
1383          echo "set -x"                                  >>  $job_to_send
1384          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1385          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1386          echo "%%END%%"                                 >>  $job_to_send
1387
[1099]1388       elif [[ $remote_host = lckyuh ]]
1389       then
[1289]1390          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1099]1391          echo "#!/bin/bash"                       >>  $job_to_send
1392          echo "#PJM -L \"node=1\""                >>  $job_to_send
1393          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1394          echo "#PJM --no-stging"                  >>  $job_to_send
1395          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1396          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1397          echo "#PJM -j"                           >>  $job_to_send
1398          echo " "                                 >>  $job_to_send
1399          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1400          echo "set -x"                            >>  $job_to_send
[1468]1401          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1099]1402          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1403          echo "%%END%%"                           >>  $job_to_send
1404
[1090]1405       elif [[ $remote_host = lckyut ]]
1406       then
[1289]1407          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1090]1408          echo "#!/bin/bash"                       >>  $job_to_send
1409          echo "#PJM -L \"vnode=1\""               >>  $job_to_send
1410          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1411          echo "#PJM --no-stging"                  >>  $job_to_send
1412          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1413          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1414          echo "#PJM -j"                           >>  $job_to_send
1415          echo " "                                 >>  $job_to_send
1416          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1417          echo "set -x"                            >>  $job_to_send
[1468]1418          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1090]1419          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1420          echo "%%END%%"                           >>  $job_to_send
1421
[1274]1422       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1423       then
[1289]1424          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
[1255]1425          echo "#!/bin/bash"                             >>  $job_to_send
1426          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1427          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
[1262]1428          echo "#PBS -l nodes=1:ppn=1"                   >>  $job_to_send
[1255]1429          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1430          echo "#PBS -j oe"                              >>  $job_to_send
1431          echo " "                                       >>  $job_to_send
1432          echo "set -x"                                  >>  $job_to_send
[1468]1433          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1255]1434          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1435          echo "%%END%%"                                 >>  $job_to_send
1436
[1866]1437       elif [[ $remote_host = lcocean ]]
1438       then
1439          echo "cat > scpjob.${identifier}.tmp << %%END%%"                  >>  $job_to_send
1440          echo "#!/bin/bash"                                             >>  $job_to_send
1441          echo "SGEPREFIX -S /bin/bash"                                  >>  $job_to_send
1442          echo "SGEPREFIX -N transfer_$job_name"                         >>  $job_to_send
1443          echo "SGEPREFIX -cwd"                                          >>  $job_to_send
1444          echo "SGEPREFIX -j y"                                          >>  $job_to_send
1445          echo "SGEPREFIX -o ${local_host}_${job_name}_scpjob_$identifier"  >>  $job_to_send 
1446          echo " "                                                       >>  $job_to_send 
1447          echo "set -x"                                                  >>  $job_to_send 
1448          echo "export PALM_BIN=$PALM_BIN" | sed -e 's:'$HOME':$HOME:'   >>  $job_to_send
1449          echo "export PATH=\$PATH:\$PALM_BIN"                           >>  $job_to_send
1450          echo ""                                 >>  $job_to_send         
1451          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1452          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1453          echo "rm -f scpjob.${identifier}"                                 >>  $job_to_send         
1454          echo "%%END%%"                                                 >>  $job_to_send
1455          echo "sed -e 's/SGEPREFIX/#$/g' scpjob.${identifier}.tmp > scpjob.${identifier}" >>  $job_to_send         
1456          echo "rm -f scpjob.${identifier}.tmp"                             >>  $job_to_send         
1457
[1468]1458       elif [[ $remote_host = lcxe6 ]]
1459       then
1460          echo "cat > scpjob.${identifier}  << %%END%%"  >>  $job_to_send
1461          echo "#!/bin/ksh"                              >>  $job_to_send
1462          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1463          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
1464          echo "#PBS -A $project_account"                >>  $job_to_send
1465          echo "#PBS -l mppwidth=1"                      >>  $job_to_send
1466          echo "#PBS -l mppnppn=1"                       >>  $job_to_send
1467          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1468          echo "#PBS -j oe"                              >>  $job_to_send
1469          echo " "                                       >>  $job_to_send
1470          echo "set -x"                                  >>  $job_to_send
1471          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1472          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1473          echo "%%END%%"                                 >>  $job_to_send
[1]1474       else
1475
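          # fallback for the remaining hosts (NQS-type queuing, e.g. the t3e
          # systems): the transfer job is written with "# @$-..." directives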
[1289]1476          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1]1477          echo "# @\\\$-q $return_queue"           >>  $job_to_send
1478          echo "# @\\\$-l${qsubtime} 10"           >>  $job_to_send
1479          echo "# @\\\$-l${qsubmem} 10mb"          >>  $job_to_send
1480          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5  ||  $remote_host = t3es ]]
1481          then
1482             echo '# @\$-l mpp_p=0'                >>  $job_to_send
1483          fi
1484          echo '# @\$-lF 10mb'                     >>  $job_to_send
1485          echo '# @\$-o job_queue/last_job_transfer_protocol'    >>  $job_to_send
1486          echo '# @\$-eo'                            >>  $job_to_send
1487          echo " "                                 >>  $job_to_send
1488          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1489          then
1490             echo "set +vx"                        >>  $job_to_send
1491             echo ". .profile"                     >>  $job_to_send
1492          fi
1493          echo "set -x"                            >>  $job_to_send
[1468]1494          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null"  >>  $job_to_send
[1]1495          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1496          echo "%%END%%"                           >>  $job_to_send
[1620]1497
[1]1498       fi
1499
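    # ADD THE COMMAND THAT SUBMITS THE GENERATED TRANSFER JOB (scpjob.<identifier>),
    # USING THE SCHEDULER-SPECIFIC SUBMIT COMMAND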
1500       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1501       then
[1289]1502          echo "llsubmit  scpjob.$identifier"      >>  $job_to_send
[1620]1503       elif [[ $remote_host = lcbullhh ]]
1504       then
1505          echo "sbatch  scpjob.$identifier"               >>  $job_to_send
[1274]1506       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1507       then
[2134]1508          echo "msub -E -q $return_queue  scpjob.$identifier"               >>  $job_to_send
[1]1509       elif [[ $remote_host = t3eb  ||  $remote_host = t3eh  ||  $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1510       then
[1289]1511          echo "qsub -J n  scpjob.$identifier"     >>  $job_to_send
[1]1512       elif [[ $remote_host = t3es ]]
1513       then
[1289]1514          echo "qsub -J n  -s /bin/ksh  scpjob.$identifier"     >>  $job_to_send
[1043]1515       elif [[ $remote_host = lckiaps ]]
1516       then
[1289]1517          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1518          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"$submcom ${job_catalog}/scpjob.$identifier\" "  >>  $job_to_send
[1289]1519          echo "rm  ${job_catalog}/scpjob.$identifier"          >>  $job_to_send
[1099]1520       elif [[ $remote_host = lckyu* ]]
[1090]1521       then
[1468]1522          echo "scp $PORTOPT scpjob.$identifier  ${remote_username}@${remote_address}:job_queue"           >>  $job_to_send
1523          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"cd job_queue; $submcom scpjob.$identifier; rm scpjob.$identifier\" "  >>  $job_to_send
[2257]1524       elif [[ $remote_host = lcocean ]]
[1099]1525       then
[1289]1526          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1527          echo "/usr/bin/ssh ${remote_username}@${remote_address}  \"$init_cmds $module_calls cd $job_catalog; $submcom scpjob.$identifier\" "  >>  $job_to_send
[1]1528       else
[1289]1529          echo "$submcom  scpjob.$identifier"      >>  $job_to_send
[1]1530       fi
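       # the local copy of the transfer job is kept on ibmku and lckiaps
       # (on lckiaps it has already been moved into the job directory and
       # removed above)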
[1043]1531       if [[ $remote_host != ibmku  &&  $remote_host != lckiaps ]]
[622]1532       then
[1289]1533          echo "rm  scpjob.$identifier"            >>  $job_to_send
[622]1534       fi
[1]1535       if [[ $remote_host = nech ]]
1536       then
1537          echo "cd -"                           >>  $job_to_send
1538       fi
1539    else
[1468]1540#       echo "ftpcopy  -d  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1289]1541       # ??? does this still work at all ???
[1468]1542       echo "nohup  ftpcopy  -d  -w 15  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null  &"  >>  $job_to_send
[1]1543    fi
1544    echo "set -x"                               >>  $job_to_send
1545    echo "     ' exit"                          >>  $job_to_send
1546    echo "set -x"                               >>  $job_to_send
1547 fi
1548
1549
[1289]1550    # APPEND THE JOB-FILE (CREATED BY mrun) TO THE JOB-DIRECTIVES GENERATED ABOVE
[1]1551 cat  $file_to_send  >>  $job_to_send
[69]1552
[1]1553 if [[ $remote_host = ibm ]]
1554 then
1555    echo " "         >>  $job_to_send
1556    echo "exit"      >>  $job_to_send
1557 fi
[635]1558
[1289]1559    # REMOVE JOB-FILE
[2257]1560 if [[ $remote_host = lctit  ||  $remote_host = ibmku ]]
[69]1561 then
1562    echo " "                               >>  $job_to_send
1563    echo "rm ~/job_queue/$job_on_remhost"  >>  $job_to_send
1564 fi
[1]1565
1566
[1289]1567    # TRANSFER JOB TO THE TARGET HOST (JOB-DIRECTORY)
[1]1568 if [[ $no_submit = false ]]
1569 then
1570    if [[ $remote_host != $local_host ]]
1571    then
1572       [[ $verify = true ]]  &&  printf "\n >>> transferring job to \"$remote_host\"..."
[1289]1573       if [[ $remote_host = ibms ]]    # ssh on ibms cannot handle "~/"
[1]1574       then
[82]1575          job_catalog_save=$job_catalog
1576          job_catalog=job_queue
1577       elif [[ $remote_host = nech ]]
1578       then
1579          job_catalog_save=$job_catalog
1580          job_catalog=/hpf/b/${remote_user}/job_queue
1581       fi
[1096]1582       if [[ $remote_host = nech ]]
[82]1583       then
[1289]1584             # FILES CAN ONLY BE TRANSFERRED VIA DKRZ'S ARCHIVE-SERVER
[1094]1585          scp  $PORTOPT  $job_to_send  ${remote_user}@136.172.44.205:${job_catalog}/$job_on_remhost
[1]1586       else
[1944]1587          scp  $ssh_key  $PORTOPT  $job_to_send  ${remote_user}@${remote_address}:${job_catalog}/$job_on_remhost
[1]1588       fi
[82]1589       if [[ $? != 0 ]]
1590       then
1591          locat=scp; exit
1592       fi
1593       if [[ $remote_host = ibms ]]
1594       then
1595          job_catalog=$job_catalog_save
1596       fi
[1]1597       [[ $verify = true ]]  &&  printf "\n >>> finished\n"
1598    else
1599       eval  job_catalog=$job_catalog
1600       cp  $job_to_send  ${job_catalog}/$job_on_remhost
1601    fi
1602
1603
1604
[1289]1605       # START NQS- / LOADLEVELER-JOB
[1]1606    if [[ $remote_host != $local_host ]]
1607    then
1608       [[ $verify = true ]]  &&  printf "\n >>> submitting job using \"$submcom\"...\n"
[635]1609
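       # submit on the remote host via ssh; except on ibmku the job file is
       # removed there immediately after submission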
[1289]1610       if [[ $remote_host = ibmku ]]
[1]1611       then
[1468]1612          ssh  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
[1]1613       else
[1944]1614          ssh  $ssh_key  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost"
[82]1615       fi
[1]1616
1617       [[ $verify = true ]]  &&  printf " >>> o.k.\n"
1618    else
1619       cd  $job_catalog
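       # local submission: on some hosts the job file first has to be made
       # executable, on nech a non-default queue is passed explicitly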
[2257]1620       if [[ $(echo $local_host | cut -c1-3) = ibm  ||  $(echo $local_host | cut -c1-6) = lccray || $local_host = lceddy ]]
[1]1621       then
1622          eval  $submcom  $job_on_remhost
[2184]1623       elif [[  $local_host = lctit  ||  $local_host = lcxe6  ||  $local_host = lck  || $local_host = lckordi ||  $local_host = lcyon || $local_host = lcsb  ||  $local_host = lckyu* ]]
[108]1624       then
[635]1625          chmod  u+x  $job_on_remhost
[108]1626          eval  $submcom  $job_on_remhost
[1]1627       elif [[ $local_host = nech ]]
1628       then
1629          if [[ $queue = default ]]
1630          then
[799]1631             eval  $submcom  $job_on_remhost
[1]1632          else
[799]1633             eval  $submcom  -q $queue  $job_on_remhost
[1]1634          fi
[1620]1635       elif [[ $local_host = lcbullhh ]]
1636       then
1637          if [[ $queue = default ]]
1638          then
1639             eval  $submcom  $job_on_remhost
1640          fi
[1]1641       else
1642          qsub  $job_on_remhost
1643       fi
[622]1644
[2257]1645          # JOBFILE MUST NOT BE DELETED ON lctit/ibmku. THIS WILL BE DONE
[1289]1646          # AT THE END OF THE JOB
[2257]1647       if [[ $local_host != lctit  &&  $local_host != ibmku ]]
[622]1648       then
1649          rm  $job_on_remhost
1650       fi
[1]1651       cd  -  > /dev/null
1652    fi
1653 fi
1654
[1289]1655    # FINAL ACTIONS
[1]1656 if [[ $no_submit = false ]]
1657 then
[1099]1658    rm  -f $job_to_send
[1]1659 fi
[1266]1660 [[ $verify = true ]]  &&  printf "\n\n *** SUBJOB finished \n\n"