source: palm/trunk/SCRIPTS/subjob @ 2282

Last change on this file since 2282 was 2266, checked in by raasch, 7 years ago

bugfix for calculating cpu-time per gridpoint, nech related parts removed from subjob script

[1841]1#!/bin/bash
[1090]2
3# subjob - script for automatic generation and submission of batch-job files
4#          for various batch queuing systems
5
[1046]6#--------------------------------------------------------------------------------#
7# This file is part of PALM.
8#
9# PALM is free software: you can redistribute it and/or modify it under the terms
10# of the GNU General Public License as published by the Free Software Foundation,
11# either version 3 of the License, or (at your option) any later version.
12#
13# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
14# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
15# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
16#
17# You should have received a copy of the GNU General Public License along with
18# PALM. If not, see <http://www.gnu.org/licenses/>.
19#
[1310]20# Copyright 1997-2014  Leibniz Universitaet Hannover
[1046]21#--------------------------------------------------------------------------------#
22#
23# Current revisions:
[1090]24# ------------------
[1351]25#
[2188]26#
[1046]27# Former revisions:
28# -----------------
[169]29# $Id: subjob 2266 2017-06-09 09:27:21Z schwenkel $
[2266]30# nech related parts removed
31#
32# 2257 2017-06-07 14:07:05Z witha
[2257]33# adjustments for lceddy, removed lcflow-specific code
34#
35# 2188 2017-03-21 06:42:42Z raasch
[1623]36#
[2188]37# 2187 2017-03-21 06:41:25Z raasch
38# adjustment of compute node names for lckyuh
39#
[2186]40# 2184 2017-03-21 04:31:22Z raasch
41# bugfix: localhost renamed local_host
42#
[2150]43# 2148 2017-02-09 16:56:42Z scharf
44# added kuma and gharbi to the list of known hosts
45#
[2135]46# 2134 2017-02-02 07:33:46Z raasch
47# option -E added to msub commands on HLRN-III machines to allow output of more
48# job informations in the job protocol files
49#
[1945]50# 1944 2016-06-15 06:29:00Z raasch
51# adjustments for using HLRN ssh-keys
52#
[1941]53# 1940 2016-06-14 05:15:20Z raasch
54# adjustments for lckiaps
55#
[1867]56# 1866 2016-04-15 06:50:59Z raasch
57# adjusted for lcocean
58#
[1842]59# 1841 2016-04-07 19:14:06Z raasch
60# script now running under bash
61#
[1702]62# 1701 2015-11-02 07:43:04Z maronga
63# Bugfix: added missing init_cmds for lccrayh/lccrayb
64#
[1623]65# 1621 2015-07-17 11:39:33Z heinze
[1621]66# adjustments for Mistral at DKRZ Hamburg (lcbullhh)
[1200]67#
[1576]68# 1575 2015-03-27 09:56:27Z raasch
69# mpp2-queues added to lccrayh
70#
[1548]71# 1547 2015-01-29 15:09:12Z witha
72# adjustments for ForWind computing cluster (lcflow)
73#
[1546]74# 1545 2015-01-29 06:52:23Z heinze
75# local host name for blizzard further specified
76#
[1481]77# 1480 2014-10-17 14:41:49Z raasch
78# adjustments for 2nd stage of HLRNIII
79#
[1469]80# 1468 2014-09-24 14:06:57Z maronga
81# Typo removed (addres->address)
82# Adjustments for lcxe6
83#
[1453]84# 1452 2014-08-22 09:41:06Z heinze
85# local hosts for blizzard added
86#
[1451]87# 1450 2014-08-21 07:31:51Z heinze
88# HLRN-III (lccrayb): testq queue adjusted to mpp1testq
89#
[1443]90# 1442 2014-07-28 07:09:10Z raasch
91# HLRN-III (lccrayb/lccrayh) queues adjusted
92#
[1379]93# 1378 2014-04-28 06:04:58Z raasch
94# -et option added for lctit
95#
[1351]96# 1350 2014-04-04 13:01:30Z maronga
97# location of qsub updated for lcxe6
98#
[1290]99# 1289 2014-03-04 07:12:34Z raasch
100# German comments translated to English
101# fimm-, necriam-, scirocco-, ibmy-, and sgi-specific code removed
102#
[1280]103# 1279 2014-01-28 12:10:14Z raasch
104# node calculation modified due to changes in mrun (tasks_per_node no longer
105# needs to be an integral divisor of numprocs)
106#
[1275]107# 1274 2014-01-09 13:14:54Z heinze
108# adjustments for lccrayh
109#
[1267]110# 1266 2013-12-11 12:07:34Z heinze
111# further adjustments for lccrayb (use msub instead of qsub)
112#
[1265]113# 1264 2013-12-09 12:46:09Z fricke
114# Bugfix: Using number of nodes instead of number of processors (lccrayb)
115#
[1263]116# 1262 2013-12-09 10:57:20Z fricke
117# further adjustments for lccrayb
118#
[1261]119# 1260 2013-12-04 12:48:04Z raasch
120# jaboticaba admitted
121#
[1256]122# 1255 2013-11-07 14:43:35Z raasch
123# further adjustments for lccrayb
124#
[1225]125# 1224 2013-09-16 07:27:23Z raasch
126# first adjustments for lccrayb
127#
[1203]128# 1202 2013-07-10 16:22:07Z witha
129# adjustments for Forwind cluster (lcflow)
130#
[1200]131# 1199 2013-07-05 14:52:22Z raasch
132# adjustments for CSC Helsinki (lccrayf)
133#
[1185]134# use of cluster/express queue enabled (ibmh)
135# vinessa added (imuk)
[1047]136#
[1104]137# 1103 2013-02-20 02:15:53Z raasch
138# bash compatibility adjustments (usage of OPTIND, output formatting with printf
139# instead of typeset -L/R),
140# further adjustments for lckyuh
141#
[1100]142# 2013-02-10 01:47:43Z raasch
143# adjustments for Kyushu-University computing center (lckyuh - hayaka)
144# and for Forwind cluster (lcflow)
145#
[1096]146# 1094 2013-02-03 01:52:12Z raasch
147# new option -P for explicit setting of ssh/scp port,
148# decalpha parts (yonsei) removed
149#
[1091]150# 2013-02-02 07:06:13Z raasch
[1099]151# adjustments for Kyushu-University computing center (lckyut - tatara)
[1091]152# old changelog messages removed
153#
[1047]154# 1046 2012-11-09 14:38:45Z maronga
155# code put under GPL (PALM 3.9)
156#
[1090]157# 08/07/94 - Siggi - first version finished
158# 29/06/94 - Siggi - script development started
159#--------------------------------------------------------------------------------#
160# subjob - script for automatic generation and submission of batch-job files
161#          for various batch queuing systems
162#--------------------------------------------------------------------------------#
[1]163
164
[1289]165    # VARIABLE-DECLARATIONS AND DEFAULT VALUES
[352]166 delete_dayfile=false
[799]167 email_notification=none
[122]168 group_number=none
[1]169 locat=normal
170 no_default_queue=none
171 no_submit=false
172 job_catalog="~/job_queue"
173 job_name=none
174 local_user=$LOGNAME
175 node_usage=shared
[475]176 numprocs=0
[1]177 punkte="..........................................................."
178 submcom=qsub
179 queue=default
180 remote_host=none
181 remote_user=""
182 verify=true
183
184 typeset  -i   cputime=memory=Memory=0  minuten  resttime  sekunden  stunden
[1779]185 typeset  -i   numprocs  mpi_tasks=nodes=processes_per_node=0 tasks_per_node=threads_per_task=1
[1]186
187
188
[1289]189    # ERROR HANDLING
190    # IN CASE OF EXIT:
[1]191 trap 'if [[ $locat != normal ]]
192       then
193          case  $locat  in
194             (option)  printf "\n  --> available options can be displayed"
195                       printf " by typing:"
196                       printf "\n      \"subjob ?\" \n";;
197             (ftpcopy|parameter|scp|verify)  printf "\n";;
198             (*)       printf "\n  +++ unknown error"
199                       printf "\n      please inform S. Raasch!\n"
200          esac
201          [[ -f $job_to_send ]]  &&  rm  $job_to_send
202          printf "\n\n+++ SUBJOB killed \n\n"
203       fi' exit
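    # Note: the trap above is a cleanup handler that bash runs on every exit;
    # $locat records the stage at which the script aborted. Minimal sketch of
    # the same pattern (hypothetical messages, for illustration only):
    #    locat=normal
    #    trap '[[ $locat != normal ]]  &&  echo "aborted at stage: $locat"' exit
    #    locat=parameter; exit    # handler prints "aborted at stage: parameter"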
204
205
[1289]206    # IN CASE OF TERMINAL-BREAK:
[1]207 trap '[[ -f $job_to_send ]]  &&  rm  $job_to_send
208       printf "\n\n+++ SUBJOB killed \n\n"
209       exit
210      ' 2
211
212
[1289]213    # DETERMINE NAME OF LOCAL HOST
[1]214 local_host=$(hostname)
215
[1289]216    # SET HOST-SPECIFIC VARIABLES (CHECK, IF LOCAL HOST
217    # IS ADMITTED AT ALL)
218    # NOTE: ONE OF THE ENTRIES FOR "lck" OR "lckordi" ALWAYS HAS TO BE
219    # COMMENTED OUT, BECAUSE THE HOSTNAME (node*) IS THE SAME FOR BOTH MACHINES
[1]220 case  $local_host  in
[1468]221     (ambiel-lx)             local_address=134.106.74.48;  local_host=lcfor;;
222     (atmos)                 local_address=172.20.25.35;   local_host=lcide;;
223     (austru)                local_address=130.75.105.128; local_host=lcmuk;;
224     (autan)                 local_address=130.75.105.57;  local_host=lcmuk;;
225     (bora)                  local_address=130.75.105.103; local_host=lcmuk;;
[2187]226     (a0*|b0*)               local_address=133.5.4.33;     local_host=lckyuh;;
[1545]227     (blizzard1|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.15;  local_host=ibmh;;
228     (blizzard2|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.16;  local_host=ibmh;;
[1468]229     (blogin*|bxc*)          local_address=130.73.233.1;   local_host=lccrayb;;
230     (hlogin*|hxc*)          local_address=130.75.4.1;     local_host=lccrayh;;
231     (breva)                 local_address=130.75.105.98;  local_host=lcmuk;;
232     (buran)                 local_address=130.75.105.58;  local_host=lcmuk;;
233     (caurus)                local_address=130.75.105.19;  local_host=lcmuk;;
234     (climate*)              local_address=165.132.26.68;  local_host=lcyon;;
235     (clogin*)               local_address=86.50.166.21;   local_host=lccrayf;;
236     (elephanta)             local_address=130.75.105.6;   local_host=lcmuk;;
[2257]237     (hpcl*)                 local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
238     (cfd*)                  local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
[1468]239     (node*)                 local_address=165.132.26.61   local_host=lck;;
240   #  (node*)                 local_address=210.219.61.8    local_host=lckordi;;
241     (gaia*)                 local_address=150.183.146.24; local_host=ibmkisti;;
[2147]242     (gharbi)                local_address=130.75.105.47;  local_host=lcmuk;;
[1468]243     (gallego)               local_address=130.75.105.10;  local_host=lcmuk;;
244     (gregale)               local_address=130.75.105.109; local_host=lcmuk;;
245     (hababai)               local_address=130.75.105.108; local_host=lcmuk;;
246     (hayaka*)               local_address=133.5.4.33;     local_host=lckyuh;;
247     (hexagon.bccs.uib.no)   local_address=129.177.20.113; local_host=lcxe6;;
248     (hx*)                   local_address=133.3.51.11;    local_host=lckyoto;;
249     (inferno)               local_address=130.75.105.5;   local_host=lcmuk;;
250     (irifi)                 local_address=130.75.105.104; local_host=lcmuk;;
251     (jaboticaba)            local_address=150.163.25.181; local_host=lcbr;;
252     (sno)                   local_address=130.75.105.113; local_host=lcmuk;;
[2147]253     (kuma)                  local_address=130.75.105.115; local_host=lcmuk;;
[1468]254     (levanto)               local_address=130.75.105.45;  local_host=lcmuk;;
[1940]255     (login*)                local_address=118.128.66.201; local_host=lckiaps;;
[1468]256     (maestro)               local_address=130.75.105.2;   local_host=lcmuk;;
257     (meller)                local_address=134.106.74.155; local_host=lcfor;;
258     (meteo-login*)          local_address=193.166.211.144;local_host=lcxt5m;;
[1620]259     (mlogin1*|m1*)          local_address=136.172.50.13;  local_host=lcbullhh;;
[1468]260     (hexagon*)              local_address=129.177.20.113; local_host=lcxe6;;
261     (nobel*)                local_address=150.183.5.101;  local_host=ibms;;
[1866]262     (ocean)                 local_address="ocean";        local_host=lcocean;;
[1468]263     (orkan)                 local_address=130.75.105.3;   local_host=lcmuk;;
264     (ostria)                local_address=130.75.105.106; local_host=lcmuk;;
265     (paesano)               local_address=130.75.105.46;  local_host=lcmuk;;
266     (pcj*)                  local_address=172.31.120.1;   local_host=lckyut;;
267     (pingui)                local_address=134.106.74.118; local_host=lcfor;;
268     (quanero)               local_address=130.75.105.107; local_host=lcmuk;;
269     (rte*)                  local_address=133.5.185.60;   local_host=lcrte;;
[1866]270     (schultzl-Latitude-E6540)  local_address="schultzl-Latitude-E6540"; local_host=lcsch;;
[1468]271     (shiokaze-lx)           local_address=134.106.74.123; local_host=lcfor;;
272     (sisu-login*)           local_address=86.50.166.21;   local_host=lccrayf;;
273     (solano)                local_address=130.75.105.110; local_host=lcmuk;;
274     (sugoka*)               local_address=172.31.120.1;   local_host=lckyut;;
[1866]275     (tc*)                   local_address="ocean";        local_host=lcocean;;
[1468]276     (t2a*)                  local_address=10.1.6.165;     local_host=lctit;;
277     (urban*)                local_address=147.46.30.151   local_host=lcsb;;
278     (vinessa)               local_address=130.75.105.112; local_host=lcmuk;;
279     (vorias)                local_address=172.20.25.43;   local_host=lcmuk;;
280     (*.cc.kyushu-u.ac.jp)   local_address=133.5.4.129;    local_host=ibmku;;
[1]281     (*)                     printf "\n  +++ \"$local_host\" unknown";
[1255]282                             printf "\n      please contact the PALM group at IMUK";
[1]283                             locat=parameter; exit;;
284 esac
285
286
287
[1289]288    # BY DEFAULT, THE REMOTE HOST IS THE LOCAL HOST
[1]289 remote_host=$local_host
290
291
292
293
[1289]294    # READ THE SHELLSCRIPT-OPTIONS
[1094]295 while  getopts  :c:dDe:g:h:m:n:N:O:P:q:t:T:u:vX:  option
[1]296 do
297   case  $option  in
298       (c)   job_catalog=$OPTARG;;
299       (d)   delete_dayfile=true;;
300       (D)   no_submit=true;;
[352]301       (e)   email_notification=$OPTARG;;
[125]302       (g)   group_number=$OPTARG;;
[1]303       (h)   remote_host=$OPTARG;;
304       (m)   memory=$OPTARG;;
305       (n)   job_name=$OPTARG;;
306       (N)   node_usage=$OPTARG;;
307       (O)   threads_per_task=$OPTARG;;
[1094]308       (P)   scp_port=$OPTARG;;
[1]309       (q)   no_default_queue=$OPTARG;;
310       (t)   cputime=$OPTARG;;
311       (T)   tasks_per_node=$OPTARG;;
312       (u)   remote_user=$OPTARG;;
313       (v)   verify=false;;
314       (X)   numprocs=$OPTARG;;
315       (\?)  printf "\n  +++ Option $OPTARG unknown \n";
316             locat=option; exit;;
317   esac
318 done
319
320
[1289]321    # GET THE NAME OF THE JOBFILE AS NEXT ARGUMENT
[1103]322 (( to_shift = $OPTIND - 1 ))
323 shift $to_shift; file_to_send=$1
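    # Note: after getopts has finished, OPTIND points at the first non-option
    # argument, so shifting by OPTIND-1 leaves the job file in $1.
    # Illustration with hypothetical arguments (not executed):
    #    subjob -h lccrayh -X 16 myjob   -->  OPTIND=5, shift 4, $1=myjob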
[1]324
325
[1289]326    # OUTPUT OF SHORT DESCRIPTION OF SCRIPT-OPTIONS
[1]327 if [ "$1" = "?" ]
328 then
329   (printf "\n  *** subjob can be called as follows:\n"
330    printf "\n      subjob -c.. -d -D -e.. -g.. -h.. -m.. -n.. -N.. -O.. -P.. -q.. -t.. -T.. -u.. -v -X..  <jobfile>\n"
331    printf "\n      Description of available options:\n"
332    printf "\n      Option  Description                         Default-Value"
333    printf "\n        -c    job-input- and output-catalog       ~/job_queue"
334    printf "\n        -d    no job-protocol will be created     ---"
335    printf "\n        -D    only the job-file will be created   ---"
    printf "\n        -e    email notification address          none"
    printf "\n        -g    group number (used on lctit)        none"
336    printf "\n        -h    execution host, available hosts:    $remote_host"
[1289]337    printf "\n              ibm, ibmh, ibmkisti, ibmku, ibms, lc...,"
[2266]338    printf "\n              lckiaps, lctit"
[1]339    printf "\n        -m    memory demand per process in MByte  ---"
340    printf "\n        -n    jobname                             <jobfile>"
    printf "\n        -N    node usage (shared/not_shared)      shared"
341    printf "\n        -O    threads per task (for OpenMP usage) 1"
[1094]342    printf "\n        -P    ssh/scp port                        default port"
[1]343    printf "\n        -q    job-queue to be used                default"
344    printf "\n        -t    allowed cpu-time in seconds         ---"
345    printf "\n        -T    tasks per node (on parallel hosts)  ---"
346    printf "\n        -u    username on execution host          from .netrc"
347    printf "\n        -v    no prompt for confirmation          ---"
348    printf "\n        -X    # of processors (on parallel hosts) 1"
349    printf "\n "
350    printf "\n      The only possible positional parameter is <jobfile>:"
351    printf "\n      The complete NQS-job must be provided here."
352    printf "\n      <jobfile>=? creates this outline\n\n") | more
353    exit
354 fi
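    # Example call (hypothetical resources, for illustration only):
    #    subjob -h lccrayh -q mpp1q -X 240 -T 24 -t 3600 -m 2000 jobfile
    # submits "jobfile" to host lccrayh, queue mpp1q, for 240 PEs with
    # 24 tasks per node, 3600 s cpu-time and 2000 MByte memory per process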
355
356
357
[1289]358    # CHECK, IF JOB-FILE HAS BEEN GIVEN AS ARGUMENT AND IF THE FILE ITSELF EXISTS
[1]359 if [[ "$file_to_send" = "" ]]
360 then
361    printf "\n  +++ job-file missing"
362    locat=parameter; exit
363 else
364    if [[ -f $file_to_send ]]
365    then
366       true
367    else
368       printf "\n  +++ job-file: "
369       printf "\n           $file_to_send"
370       printf "\n      does not exist"
371       locat=parameter; exit
372    fi
373 fi
374
375
376
[1289]377    # IF NO JOBNAME HAS BEEN GIVEN, JOBNAME IS SET TO THE NAME OF THE JOB-FILE,
378    # PROVIDED THAT THE JOB-FILE NAME DOES NOT CONTAIN ANY PATH
[1]379 if [[ $job_name = none ]]
380 then
381    job_name=$file_to_send
382 fi
383 if [[ $(echo $job_name | grep -c "/") != 0 ]]
384 then
385    printf "\n  +++ job-file name: "
386    printf "\n           $job_name"
387    printf "\n      must not contain \"/\"-characters"
388    locat=parameter; exit
389 fi
390
391
392
393
[1289]394    # SET HOST-SPECIFIC QUANTITIES, OR TERMINATE IN CASE OF UNKNOWN HOST,
395    # OR IF NO HOST HAS BEEN GIVEN
[1]396 if [[ $remote_host = none ]]
397 then
398    printf "\n  +++ host missing"
399    locat=option; exit
400 else
401    case  $remote_host  in
[1468]402        (ibm)     queue=p690_standard; remote_address=134.76.99.81; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
403        (ibmh)    queue=cluster; remote_address=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
404        (ibmkisti) queue=class.32plus; remote_address=150.183.146.24; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
405        (ibmku)   queue=s4; remote_address=133.5.4.129; submcom=/usr/local/bin/llsubmit;;
406        (ibms)    queue=p_normal; remote_address=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
[1620]407        (lcbullhh)    queue=compute; remote_address=136.172.50.13; submcom=/usr/bin/sbatch;;
[2134]408        (lccrayb) queue=mpp1testq; remote_address=130.73.233.1; submcom="/opt/moab/default/bin/msub -E";;
409        (lccrayh) queue=mpp1testq; remote_address=130.75.4.1; submcom="/opt/moab/default/bin/msub -E";;
[1468]410        (lccrayf) queue=small; remote_address=86.50.166.21; submcom=/opt/slurm/default/bin/sbatch;;
[2257]411        (lceddy)  remote_address=eddy.hpc.uni-oldenburg.de; submcom=sbatch;;
[1468]412        (lckyoto) remote_address=133.3.51.11; submcom=/thin/local/bin/qsub;;
413        (lck)     remote_address=165.132.26.61; submcom=/usr/torque/bin/qsub;;
[1940]414        (lckiaps) remote_address=118.128.66.201; submcom=/opt/pbs/default/bin/qsub;;
[1468]415        (lckordi) remote_address=210.219.61.8; submcom=/usr/torque/bin/qsub;;
416        (lckyuh)  remote_address=133.5.4.33; submcom=/usr/bin/pjsub;;
417        (lckyut)  remote_address=133.5.4.37; submcom=/usr/bin/pjsub;;
[1866]418        (lcocean) remote_address="ocean"; submcom=qsub;;
[1468]419        (lcsb)    remote_address=147.46.30.151; submcom=/usr/torque/bin/qsub;;
420        (lctit)   queue=S; remote_address=10.1.6.165; submcom=/opt/pbs/tools/bin/t2sub;;
421        (lcxe6)   remote_address=129.177.20.113; submcom=/opt/torque/default/bin/qsub;;
422        (lcxt5m)  remote_address=193.166.211.144; submcom=/opt/pbs/10.1.0.91350/bin/qsub;;
423        (lcyon)   remote_address=165.132.26.68; submcom=/usr/torque/bin/qsub;;
[251]424        (*)       printf "\n  +++ hostname \"$remote_host\" not allowed";
425                  locat=parameter; exit;;
[1]426    esac
427 fi
428
429
[1289]430    # CHECK, IF A VALID QUEUE HAS BEEN GIVEN
[1]431 if [[ $no_default_queue != none ]]
432 then
433    error=false
434    ndq=$no_default_queue
435    case  $remote_host  in
436        (ibm)    case  $ndq  in
437                     (p690_express|p690_standard|p690_long)  error=false;;
438                     (*)                                     error=true;;
439                 esac;;
440        (ibmh)   case  $ndq  in
[1184]441                     (cluster|express)  error=false;;
[1]442                     (*)                                     error=true;;
443                 esac;;
[693]444        (ibmkisti)   case  $ndq  in
445                     (class.32plus|class.1-2|class.2-32)  error=false;;
446                     (*)                                     error=true;;
447                 esac;;
[622]448        (ibmku)  case  $ndq  in
449                     (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s)    error=false;;
450                     (*)                                     error=true;;
451                 esac;;
[1]452        (ibms)   case  $ndq  in
453                     (express|normal|p_express|p_normal|p_normal_1.3|p_normal_1.7|grand)     error=false;;
454                     (*)                                     error=true;;
455                 esac;;
[1620]456        (lcbullhh) case  $ndq  in
[2147]457                     (compute|compute2|shared)  error=false;;
[1620]458                     (*)                                     error=true;;
459                 esac;;
[1224]460        (lccrayb) case  $ndq  in
[1480]461                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1224]462                     (*)                                     error=true;;
463                 esac;;
[1274]464        (lccrayh) case  $ndq  in
[1575]465                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1274]466                     (*)                                     error=true;;
467                 esac;;
[1197]468        (lccrayf) case  $ndq  in
469                     (usup|test*|small|large)                error=false;;
470                     (*)                                     error=true;;
471                 esac;;
[2257]472        (lceddy) case  $ndq  in
473                     (eddy.p|cfdh.p|cfdl.p|carl.p|mpcs.p|mpcl.p|mpcb.p|all_nodes.p)  error=false;;
[1099]474                     (*)                                     error=true;;
475                 esac;;
[1040]476        (lckiaps) case  $ndq  in
[1940]477                     (express|normal|normal20|quickq)        error=false;;
[1040]478                     (*)                                     error=true;;
479                 esac;;
[440]480        (lckyoto) case  $ndq  in
481                     (eh|ph)                                 error=false;;
482                     (*)                                     error=true;;
483                 esac;;
[1099]484        (lckyuh) case  $ndq  in
485                     (fx-dbg|fx-single|fx-small|fx-middle|fx-large)  error=false;;
486                     (*)                                     error=true;;
487                 esac;;
[1090]488        (lckyut) case  $ndq  in
489                     (cx-dbg|cx-single|cx-small|cx-middle|cx-large)  error=false;;
490                     (*)                                     error=true;;
491                 esac;;
[1]492        (lctit)  case  $ndq  in
[635]493                     (G|L128|L256|L512H|S|S96|V)             error=false;;
[1]494                     (*)                                     error=true;;
495                 esac;;
496        (t3eb)   case  $ndq  in
497                     (berte|p50|p100|p392|forfree|p25himem)  error=false;;
498                     (*)    error=true;;
499                 esac;;
500        (t3eh)   case  $ndq  in
501                     (para_t3e|em|k|l|lm|comp_t3e|c|p|ht)  error=false;;
502                     (*)    error=true;;
503                 esac;;
504        (t3ej2|t3ej5)  case  $ndq  in
505                     (low|normal|high)  error=false;;
506                     (*)    error=true;;
507                 esac;;
508        (t3es)  case  $ndq  in
509                     (batch|serial-4|pe4|p48|pe16|pe32|pe64|pe128)  error=false;;
510                     (*)    error=true;;
511                 esac;;
512    esac
513    if [[ $error = true ]]
514    then
515       printf "\n  +++ queue \"$no_default_queue\" on host \"$remote_host\" not allowed"
516       locat=parameter; exit
517    else
518       queue=$no_default_queue
519    fi
520 fi
521
522
523
[1289]524    # CHECK THE CPU-TIME
525    # SPLIT TIME INTO HOURS, MINUTES, AND SECONDS
[1]526 done=false
527 while [[ $done = false ]]
528 do
529    if (( $cputime <= 0 ))
530    then
531       printf "\n  +++ wrong cpu-time or cpu-time missing"
532       printf "\n  >>> Please type cpu-time in seconds as INTEGER:"
533       printf "\n  >>> "
534       read  cputime  1>/dev/null  2>&1
535    else
536       done=true
537    fi
538 done
539 (( stunden  = cputime / 3600 ))
540 (( resttime = cputime - stunden * 3600 ))
541 (( minuten  = resttime / 60 ))
542 (( sekunden = resttime - minuten * 60 ))
543 timestring=${stunden}:${minuten}:${sekunden}
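    # Example (hypothetical value): cputime=10000 s gives stunden=2, minuten=46,
    # sekunden=40, i.e. timestring=2:46:40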
544
545
546
[1289]547    # CHECK THE MEMORY DEMAND
[1]548 done=false
549 while [[ $done = false ]]
550 do
551    if (( memory <= 0 ))
552    then
553       printf "\n  +++ wrong memory demand or memory demand missing"
554       printf "\n  >>> Please type memory in  MByte per process  as INTEGER:"
555       printf "\n  >>> "
556       read  memory  1>/dev/null  2>&1
557    else
558       done=true
559    fi
560 done
561
[2266]562 if [[ $remote_host = lctit ]]
[1]563 then
[635]564    (( Memory = memory * tasks_per_node / 1000 ))
[1]565 fi
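    # Example (hypothetical values): memory=2000 MByte per process and
    # tasks_per_node=12 give Memory=24, i.e. 24 GByte per node requested
    # in the t2sub call for lctit further below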
566
567
[1289]568    # MEMORY DEMAND IN CASE OF OPENMP-USAGE ON IBM-SYSTEMS
[1]569 if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
570 then
571    (( memory = memory * threads_per_task ))
572 fi
573
574
[1289]575    # CALCULATE NUMBER OF REQUIRED NODES
[1]576 if (( tasks_per_node != 0 ))
577 then
[1279]578    (( nodes = ( numprocs - 1 ) / ( tasks_per_node * threads_per_task ) + 1 ))
[1]579 fi
580
[1094]581
[1289]582    # CALCULATE NUMBER OF PROCESSES PER NODE
[622]583 (( processes_per_node = tasks_per_node * threads_per_task ))
[1]584
[1094]585
[1289]586    # CALCULATE NUMBER OF MPI TASKS
[696]587 (( mpi_tasks = numprocs / threads_per_task ))
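    # Example (hypothetical values): numprocs=240, tasks_per_node=24 and
    # threads_per_task=2 give nodes=(240-1)/(24*2)+1=5, processes_per_node=48
    # and mpi_tasks=120; the "-1 ... +1" form rounds the node count up whenever
    # numprocs is not an integral multiple of tasks_per_node*threads_per_task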
[1]588
[696]589
[1289]590    # SET PORT NUMBER OPTION FOR CALLS OF ssh/scp, subjob AND batch_scp SCRIPTS
[1094]591 if [[ "$scp_port" != "" ]]
592 then
593    PORTOPT="-P $scp_port"
594    SSH_PORTOPT="-p $scp_port"
595 fi
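    # Example (hypothetical port): "subjob -P 2222 ..." results in
    # PORTOPT="-P 2222" for scp/batch_scp and SSH_PORTOPT="-p 2222" for ssh,
    # since scp expects an upper-case and ssh a lower-case port option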
596
597
[1289]598    # HEADER-OUTPUT
[1]599 if [[ $verify = true ]]
600 then
601    printf "\n\n"
602    printf "#--------------------------------------------------------------# \n"
603    spalte1=SUBJOB;spalte2=$(date)
[1103]604    printf "| %-20s%40s | \n" "$spalte1" "$spalte2"
[1]605    printf "|                                                              | \n"
606    printf "| values of parameters/options:                                | \n"
[1103]607    spalte1=$(echo local_host$punkte | cut -c-20)
608    spalte2=$punkte$local_host
609    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
610    spalte1=$(echo remote_host$punkte | cut -c-20)
611    spalte2=$punkte$remote_host
612    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
613    spalte1=$(echo queue$punkte | cut -c-20)
614    spalte2=$punkte$queue
615    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
616    spalte1=$(echo memory$punkte | cut -c-20)
617    spalte2="$punkte$memory mb"
618    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
619    spalte1=$(echo cputime$punkte | cut -c-20)
620    spalte2="$punkte$cputime sec"
621    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
622    spalte1=$(echo job_name$punkte | cut -c-20)
623    spalte2="$punkte$job_name"
624    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
[1]625    printf "#--------------------------------------------------------------# \n\n"
626
627
[1289]628       # QUERY CHECK
[1]629    antwort="dummy"
630    while [[ $antwort != y  &&  $antwort != Y  &&  $antwort != n  &&  $antwort != N ]]
631    do
632       read -p " >>> continue (y/n) ? " antwort
633    done
634    if [[ $antwort = n  ||  $antwort = N ]]
635    then
636       locat=verify; exit
637    fi
638    printf "\n"
639 fi
640
[1289]641    # GENERATE RANDOM IDENTIFIER, AND DETERMINE THE JOBNAME ON THE TARGET HOST
642 identifier=$RANDOM
643 job_on_remhost=${job_name}_${identifier}_$local_host
644 job_to_send=job_to_send_$identifier
[1]645 if [[ $delete_dayfile = false ]]
646 then
[1289]647    remote_dayfile=${local_host}_${job_name}_result_$identifier
[1]648    local_dayfile=${remote_host}_${job_name}
649 else
650    remote_dayfile=/dev/null
651 fi
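    # Example (hypothetical values): job_name=example_run, identifier=12345,
    # local_host=lcmuk and remote_host=lccrayh give
    #    job_on_remhost = example_run_12345_lcmuk
    #    remote_dayfile = lcmuk_example_run_result_12345
    #    local_dayfile  = lccrayh_example_run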
652
653
[1289]654    # GENERATE THE BATCH-JOB SCRIPTS (FOR QUEUEING-SYSTEMS qsub/msub/LoadLeveler)
[1]655 if [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs != 0 ]]
656 then
657
[1289]658       # GENERAL LOADLEVELER SETTINGS
[622]659    execute_in_shell="#!/bin/ksh"
660    use_shell="# @ shell = /bin/ksh"
661    consumable_memory="ConsumableMemory($memory mb)"
662    class="# @ class = $queue"
663    environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes"
664    network_to_use="# @ network.mpi = sn_all,shared,us"
665    data_limit="# @ data_limit = 1.76gb"
666    image_size="# @ image_size = 50"
[693]667    wall_clock_limit="# @ wall_clock_limit = ${timestring},$timestring"
[312]668
[693]669    if [[ $email_notification = none ]]
670    then
671       notify_user=""
672    else
673       notify_user="# @ notify_user = $email_notification"
674       if [[ $delete_dayfile = true ]]
675       then
676          notification='# @ notification = never'
677       fi
678    fi
[622]679
[312]680    if [[ $remote_host = ibmh ]]
[1]681    then
[312]682       data_limit=""
683       network_to_use=""
[1184]684       class="# @ class = $queue"
[312]685       environment=""
[814]686       rset="# @ rset = RSET_MCM_AFFINITY"
687       task_affinity="# @ task_affinity = core(1)"
[693]688    elif [[ $remote_host = ibmkisti ]]
689    then
690       network_to_use="# @ network.MPI = sn_all,shared,US"
691       wall_clock_limit="# @ wall_clock_limit = $timestring"
[696]692       if [[ $threads_per_task = 1 ]]
693       then
694          rset="# @ rset = RSET_MCM_AFFINITY"
695          mcm_affinity_options="# @ mcm_affinity_options = mcm_mem_pref mcm_sni_none mcm_distribute"
696       fi
[693]697       environment=""
698       use_shell=""
699       data_limit=""
700       image_size=""
[622]701    elif [[ $remote_host = ibmku ]]
702    then
703       execute_in_shell="#!/usr/bin/ksh"
704       use_shell="# @ shell = /usr/bin/ksh"
705       consumable_memory=""
706       environment=""
707       network_to_use="# @ network.mpi = sn_all,shared,us"
708       data_limit=""
709       image_size=""
710    elif [[ $remote_host = ibms ]]
711    then
712       network_to_use="# @ network.mpi = csss,shared,us"
[1]713    fi
714
715    cat > $job_to_send << %%END%%
[622]716$execute_in_shell
717$use_shell
[1]718
719# @ job_type = parallel
[693]720# @ job_name = $job_name
[1]721# @ resources = ConsumableCpus($threads_per_task) $consumable_memory
722# @ output = $remote_dayfile
723# @ error = $remote_dayfile
[693]724$wall_clock_limit
[622]725$image_size
[312]726$class
727$environment
[1]728$network_to_use
729$data_limit
[693]730$rset
731$mcm_affinity_options
[814]732$task_affinity
[1]733$notification
[693]734$notify_user
[1]735
736%%END%%
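    # For illustration only: with hypothetical settings (remote_host=ibmh,
    # queue=cluster, threads_per_task=1, memory=1500, cputime=3600) the
    # heredoc above expands to LoadLeveler directives like
    #    # @ resources = ConsumableCpus(1) ConsumableMemory(1500 mb)
    #    # @ wall_clock_limit = 1:0:0,1:0:0
    #    # @ class = cluster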
737
738    if (( nodes > 0 ))
739    then
740
[693]741       if [[ $remote_host != ibmkisti ]]
742       then
743
744          cat >> $job_to_send << %%END%%
[1]745# @ node = $nodes
[622]746# @ tasks_per_node = $processes_per_node
[1]747# @ node_usage = $node_usage
748# @ queue
749
750%%END%%
751
[693]752       else
753
754          cat >> $job_to_send << %%END%%
[696]755# @ total_tasks = $mpi_tasks
[693]756# @ blocking = unlimited
757# @ queue
758
759%%END%%
760
761       fi
762
[1]763    else
764
[1289]765       cat >> $job_to_send << %%END%%
[1]766# @ blocking = unlimited
767# @ total_tasks = $numprocs
768# @ node_usage = $node_usage
769# @ queue
770
771%%END%%
772
773    fi
774
[1289]775       # WORKAROUND BECAUSE OF SILLY JOB FILTER ON ibmkisti
[696]776    if [[ $remote_host = ibmkisti  &&  $threads_per_task != 1 ]]
777    then
778       echo  "export OMP_NUM_THREADS=$threads_per_task"  >>  $job_to_send
779    fi
780
[1]781 elif [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs = 0 ]]
782 then
783
784    cat > $job_to_send << %%END%%
785#!/bin/ksh
786
787# @ job_type = serial
788# @ node_usage = $node_usage
789# @ job_name = palm
790# @ wall_clock_limit = ${timestring},$timestring
791# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)
792# @ output = $remote_dayfile
793# @ error = $remote_dayfile
[312]794$class
[1]795$notification
796
797# @ queue
798
799%%END%%
800
[1620]801 elif [[ $remote_host = lcbullhh ]]
802 then
803    if [[ $numprocs != 0 ]]
804    then
805       cat > $job_to_send << %%END%%
806#!/bin/bash -l
807#SBATCH -J $job_name
808#SBATCH -t $timestring
809#SBATCH -N $nodes
810#SBATCH --ntasks-per-node=$processes_per_node
811#SBATCH -p $queue
812#SBATCH -o $remote_dayfile
813#SBATCH -e $remote_dayfile
814#SBATCH -A $project_account
815
816$init_cmds
817$module_calls
818
819%%END%%
820
821    else
822       cat > $job_to_send << %%END%%
823#!/bin/bash -l
824#SBATCH -J $job_name
825#SBATCH -t $timestring
826#SBATCH -l ncpus=1
827#SBATCH -l pmem=${memory}mb
828#SBATCH -m abe
829#SBATCH -o $remote_dayfile
830#SBATCH -e $remote_dayfile
831#SBATCH -A $project_account
832
833$init_cmds
834$module_calls
835
836%%END%%
837
838    fi
839
[1274]840 elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1224]841 then
842
[1480]843    if [[ "$feature" != "" ]]
844    then
845       featuredir="#PBS -l feature=$feature"
846    fi
847
[1224]848    if [[ $numprocs != 0 ]]
849    then
850       cat > $job_to_send << %%END%%
[1255]851#!/bin/bash
[1224]852#PBS -N $job_name
853#PBS -l walltime=$timestring
[1264]854#PBS -l nodes=$nodes:ppn=$processes_per_node
[1224]855#PBS -o $remote_dayfile
856#PBS -j oe
857#PBS -q $queue
[1480]858$featuredir
[1224]859
[1701]860$init_cmds
[1224]861$module_calls
862
863%%END%%
864
865    else
866
867       continue
868
869    fi
870
[1197]871 elif [[ $remote_host = lccrayf ]]
872 then
873
874    if [[ $numprocs != 0 ]]
875    then
876       cat > $job_to_send << %%END%%
877#!/bin/bash -l
878#SBATCH -J $job_name
879#SBATCH -t $timestring
880#SBATCH -N $nodes
881#SBATCH --ntasks-per-node=$processes_per_node
882#SBATCH -p $queue
883#SBATCH -o $remote_dayfile
884#SBATCH -e $remote_dayfile
885
886$init_cmds
887$module_calls
888
889%%END%%
890
891    else
892       cat > $job_to_send << %%END%%
893#!/bin/bash -l
894#SBATCH -J $job_name
895#SBATCH -t $timestring
896#SBATCH -l ncpus=1
897#SBATCH -l pmem=${memory}mb
898#SBATCH -m abe
899#SBATCH -o $remote_dayfile
900#SBATCH -e $remote_dayfile
901
902$init_cmds
903$module_calls
904
905%%END%%
906
907    fi
908
[2257]909
910
911 elif [[ $remote_host = lceddy ]]
[1099]912 then
[2257]913    cat > $job_to_send << %%END%%
[1099]914#!/bin/bash
[2257]915#SBATCH -J $job_name
916#SBATCH -t $timestring
917#SBATCH -n $numprocs
918#SBATCH -N $nodes
919#SBATCH --cpus-per-task 1
920#SBATCH -p $queue
921#SBATCH -o $remote_dayfile
922#SBATCH -e $remote_dayfile
923#SBATCH --mem-per-cpu $memory
924#SBATCH --exclusive
[1099]925
[2257]926module load PALMDependencies/.gcc
927
928$init_cmds
929$module_calls
930
[1099]931%%END%%
932
[1021]933 elif [[ $remote_host = lck || $remote_host = lckordi || $remote_host = lcsb ]]
[368]934 then
935
936    if [[ $numprocs != 0 ]]
937    then
938       cat > $job_to_send << %%END%%
939#!/bin/ksh
940#PBS -N $job_name
941#PBS -l walltime=$timestring
942#PBS -l ncpus=$numprocs
943#PBS -l pmem=${memory}mb
944#PBS -o $remote_dayfile
[1021]945#PBS -l nodes=$nodes:ppn=${processes_per_node}
[368]946#PBS -j oe
947
948mpd &
949
950%%END%%
951
952    else
953       cat > $job_to_send << %%END%%
954#!/bin/ksh
955#PBS -N $job_name
956#PBS -l walltime=$timestring
957#PBS -l ncpus=1
958#PBS -l pmem=${memory}mb
959#PBS -o $remote_dayfile
960#PBS -j oe
961
962%%END%%
963
964    fi
965
[1040]966 elif [[ $remote_host = lckiaps ]]
967 then
968
969    if [[ $numprocs != 0 ]]
970    then
971       cat > $job_to_send << %%END%%
[1940]972#!/bin/bash
[1040]973#PBS -N $job_name
974#PBS -l walltime=$timestring
975#PBS -l select=1:ncpus=$numprocs
976#PBS -l pmem=${memory}mb
977#PBS -q $queue
978#PBS -o $remote_dayfile
979#PBS -j oe
980#PBS -V
981
982%%END%%
983
984    else
985       cat > $job_to_send << %%END%%
[1940]986#!/bin/bash
[1040]987#PBS -N $job_name
988#PBS -l walltime=$timestring
989#PBS -l ncpus=1
990#PBS -l pmem=${memory}mb
991#PBS -o $remote_dayfile
992#PBS -j oe
993
994%%END%%
995
996    fi
997
[693]998 elif [[ $remote_host = lcyon ]]
999 then
1000
1001    if [[ $numprocs != 0 ]]
1002    then
1003       cat > $job_to_send << %%END%%
1004#!/bin/ksh
1005#PBS -N $job_name
1006#PBS -l walltime=$timestring
1007#PBS -l ncpus=$numprocs
1008#PBS -l pmem=${memory}mb
1009#PBS -o $remote_dayfile
1010#PBS -j oe
1011
1012%%END%%
1013
1014    else
1015       cat > $job_to_send << %%END%%
1016#!/bin/ksh
1017#PBS -N $job_name
1018#PBS -l walltime=$timestring
1019#PBS -l ncpus=1
1020#PBS -l pmem=${memory}mb
1021#PBS -o $remote_dayfile
1022#PBS -j oe
1023
1024%%END%%
1025
1026    fi
1027
[892]1028 elif [[ $remote_host = lcxe6 ]]
[164]1029 then
1030
1031    if [[ $numprocs != 0 ]]
1032    then
1033       cat > $job_to_send << %%END%%
[799]1034#!/bin/ksh
[164]1035#PBS -S /bin/ksh
1036#PBS -N $job_name
[552]1037#PBS -A $project_account
[206]1038#PBS -j oe
[164]1039#PBS -l walltime=$timestring
1040#PBS -l mppwidth=${numprocs}
[622]1041#PBS -l mppnppn=${processes_per_node}
[164]1042#PBS -m abe
1043#PBS -o $remote_dayfile
[492]1044$email_directive
[164]1045
[892]1046$init_cmds
[493]1047$module_calls
[343]1048
[164]1049%%END%%
1050
[108]1051    else
1052       cat > $job_to_send << %%END%%
1053#!/bin/ksh
[168]1054#PBS -S /bin/ksh
[108]1055#PBS -N $job_name
[552]1056#PBS -A $project_account
[206]1057#PBS -j oe
[108]1058#PBS -l walltime=$timestring
1059#PBS -l ncpus=1
1060#PBS -l pmem=${memory}mb
1061#PBS -m abe
[492]1062$email_directive
[108]1063#PBS -o $remote_dayfile
1064
[892]1065$init_cmds
[493]1066$module_calls
[343]1067
[108]1068%%END%%
1069
1070    fi
1071
[440]1072 elif [[ $remote_host = lckyoto ]]
[437]1073 then
1074
[440]1075       cat > $job_to_send << %%END%%
[799]1076#!/bin/ksh
[440]1077# @\$-o $remote_dayfile
1078# @\$-eo -oi
1079# @\$-lP 16
[799]1080# @\$-lp 1
[440]1081# @\$-lm 28gb  -llm unlimited -ls unlimited
1082# @\$-q $queue
1083# @\$-Pvn abs_pack
1084##for intel? @\$-Pvn abs_unpack -Pvs unpack -Pvc unpack
1085#. /thin/local/etc/setprofile/intel-11.0.sh
1086#. /thin/local/etc/setprofile/mvapich2-1.4+intel-11.0.sh
1087. ~/.myprofile
1088#. /home2/t/t51254/palm/current_version/myprofile
1089#. /thin/apps/pgi/mpi.sh
1090#
1091env
1092#
1093set -x
1094
1095%%END%%
1096
1097 elif [[ $remote_host = lcxt5m ]]
1098 then
1099
[437]1100    if [[ $numprocs != 0 ]]
1101    then
1102       cat > $job_to_send << %%END%%
[799]1103#!/bin/ksh
[437]1104#PBS -S /bin/ksh
1105#PBS -N $job_name
1106#PBS -j oe
1107#PBS -l walltime=$timestring
1108#PBS -l mppwidth=${numprocs}
[622]1109#PBS -l mppnppn=${processes_per_node}
[437]1110#PBS -m abe
1111#PBS -o $remote_dayfile
1112
[892]1113$init_cmds
[493]1114$module_calls
[437]1115
1116%%END%%
1117
1118    else
1119       cat > $job_to_send << %%END%%
1120#!/bin/ksh
1121#PBS -S /bin/ksh
1122#PBS -N $job_name
1123#PBS -j oe
1124#PBS -l walltime=$timestring
1125#PBS -l ncpus=1
1126#PBS -l pmem=${memory}mb
1127#PBS -m abe
1128#PBS -o $remote_dayfile
1129
[892]1130$init_cmds
[493]1131$module_calls
[437]1132
1133%%END%%
1134
1135    fi
1136
[1099]1137 elif [[ $remote_host = lckyuh ]]
1138 then
1139    cat > $job_to_send << %%END%%
1140#!/bin/bash
1141#PJM -L "rscgrp=$queue"
1142#PJM -L "node=$nodes"
1143#PJM --mpi "proc=$numprocs"
1144#PJM -L "elapse=$timestring"
1145#PJM -o $remote_dayfile
1146#PJM -j
1147#PJM -X
1148#PJM --no-stging
1149
1150export LANG=en_US.UTF-8
1151%%END%%
1152
[1090]1153 elif [[ $remote_host = lckyut ]]
1154 then
1155    cat > $job_to_send << %%END%%
1156#!/bin/bash
[1099]1157#PJM -L "rscgrp=$queue"
[1090]1158#PJM -L "vnode=$numprocs"
1159#PJM -L "vnode-core=1"
1160#PJM -L "elapse=$timestring"
1161#PJM --mpi proc=$numprocs
1162#PJM -o $remote_dayfile
1163#PJM -j
[1099]1164#PJM -X
1165#PJM --no-stging
[1090]1166
1167export LANG=en_US.UTF-8
1168%%END%%
1169
[1866]1170 elif [[ $remote_host = lcocean ]]
1171 then
1172   cat > $job_to_send << %%END%%
1173#!/bin/bash
1174#$ -cwd
1175#$ -V
1176#$ -N $job_name
1177#$ -pe orte $numprocs
1178#$ -o $remote_dayfile
1179#$ -j y
1180#$ -R y
1181$init_cmds
1182$module_calls
1183
1184%%END%%
1185
[1]1186 elif [[ $remote_host = lctit ]]
1187 then
1188    cat > $job_to_send << %%END%%
[635]1189#!/bin/ksh
[892]1190$init_cmds
[678]1191$module_calls
1192
[1]1193%%END%%
1194
[1289]1195       # SET OPTIONS FOR SUBMIT-COMMAND
[678]1196    if [[ $tasks_per_node != $processes_per_node ]]
1197    then
[1378]1198       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1199    else
[1378]1200       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1201    fi
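    # For illustration only (all values hypothetical): with group_number=abc123,
    # job_name=jobfile, nodes=2, processes_per_node=12, tasks_per_node=12,
    # Memory=24 and timestring=1:0:0 the assembled command takes the form
    #    t2sub -W group_list=abc123 -N jobfile -l walltime=1:0:0
    #          -l select=2:ncpus=12:mpiprocs=12:mem=24gb -l place=scatter
    #          -o <remote_dayfile> -j oe -et 1 -q S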
[1]1202
1203 else
1204
1205    cat > $job_to_send << %%END%%
1206# @\$-q ${queue}
1207# @\$-l${qsubtime} $timestring
1208# @\$-l${qsubmem} ${memory}mb
1209# @\$-o $remote_dayfile
1210# @\$-eo
1211
1212%%END%%
1213
1214 fi
1215
1216
[1289]1217    # IN CASE OF JOBS EXECUTING ON REMOTE-HOSTS, THE TRANSFER OF THE DAYFILES
1218    # TO THE LOCAL HOSTS WILL BE INITIATED BY TRAP ON EXIT
1219    # NO TRANSFER POSSIBLE ON IBM IN SEOUL
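    # The echo commands below append a trap to the generated batch job itself:
    # on exit, that job writes a small follow-up job (scpjob.<identifier>) which
    # calls batch_scp to copy the job protocol back to the local host, and then
    # submits it. Sketch of the core of such a transfer job (hypothetical
    # placeholders, queueing directives differ from host to host):
    #    set -x
    #    batch_scp -d -w 10 -u <local_user> <local_address> <remote_dayfile> <job_catalog> <local_dayfile>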
[1]1220 if [[ $delete_dayfile = false  &&  $remote_host != $local_host ]]
1221 then
1222    echo "set +vx"                              >>  $job_to_send
1223    echo "trap '"                               >>  $job_to_send
1224    echo "set +vx"                              >>  $job_to_send
[2257]1225    if [[ $(echo $remote_host | cut -c1-3) = ibm  ||  $remote_host = lcbullhh  ||  $remote_host = lccrayb  ||  $remote_host = lccrayh  ||  $(echo $remote_host | cut -c1-3) = nec  ||  $remote_host = lckiaps  ||  $remote_host = lckyu* || $remote_host = lcxe6  ||  $remote_host = lcocean ]]
[1]1226    then
[622]1227       if [[ $remote_host = ibmh ]]
[1]1228       then
1229          return_queue=c1
[693]1230       elif [[ $remote_host = ibmkisti ]]
1231       then
1232          return_queue=class.1-2
[622]1233       elif [[ $remote_host = ibmku ]]
1234       then
1235          return_queue=sdbg2
[1]1236       elif [[ $remote_host = ibms ]]
1237       then
1238          return_queue=p_normal
[1620]1239       elif [[ $remote_host = lcbullhh ]]
1240       then
1241          return_queue=shared
[1274]1242       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1243       then
1244          return_queue=dataq
[1468]1245       elif [[ $remote_host = lcxe6 ]]
1246       then
1247          return_queue=debug
[1040]1248       elif [[ $remote_host = lckiaps ]]
1249       then
1250          return_queue=express
[1099]1251       elif [[ $remote_host = lckyuh ]]
1252       then
1253          return_queue=cx-single
[1090]1254       elif [[ $remote_host = lckyut ]]
1255       then
1256          return_queue=cx-single
[1]1257       else
1258          return_queue=unknown
1259       fi
1260
1261       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1262       then
1263
[622]1264          if [[ $remote_host = ibmku ]]
1265          then
[1289]1266             echo "echo \"#!/usr/bin/ksh\" >> scpjob.$identifier"            >>  $job_to_send
1267             echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$identifier"  >>  $job_to_send
[622]1268          else
[1289]1269             echo "echo \"#!/bin/ksh\" >> scpjob.$identifier"                >>  $job_to_send
[622]1270          fi
[1289]1271          echo "echo \"# @ job_type = serial\" >> scpjob.$identifier"    >>  $job_to_send
1272          echo "echo \"# @ job_name = transfer\" >> scpjob.$identifier"  >>  $job_to_send
1273          echo "echo \"# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)\" >> scpjob.$identifier"  >>  $job_to_send
1274          echo "echo \"# @ wall_clock_limit = 00:10:00,00:10:00\" >> scpjob.$identifier "  >>  $job_to_send
1275          echo "echo \"# @ output = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
1276          echo "echo \"# @ error = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
[312]1277          if [[ $remote_host != "ibmh" ]]
1278          then
[1289]1279             echo "echo \"# @ class = $return_queue\" >> scpjob.$identifier"  >>  $job_to_send
[312]1280          fi
[1289]1281          echo "echo \"# @ image_size = 10\" >> scpjob.$identifier"      >>  $job_to_send
1282          echo "echo \"# @ notification = never\" >> scpjob.$identifier" >>  $job_to_send
[1]1283
[1289]1284          echo "echo \"# @ queue\" >> scpjob.$identifier"                >>  $job_to_send
1285          echo "echo \" \" >> scpjob.$identifier"                        >>  $job_to_send
[1]1286
[1289]1287          echo "echo \"set -x\" >> scpjob.$identifier"                   >>  $job_to_send
[1468]1288          echo "echo \"batch_scp  $PORTOPT  -d  -w 10  -u $local_user  $local_address  ${job_catalog}/$remote_dayfile  \\\"$job_catalog\\\"  $local_dayfile\" >> scpjob.$identifier"  >>  $job_to_send
[622]1289          if [[ $remote_host = ibmku ]]
1290          then
[1289]1291             echo "echo \"rm  scpjob.$identifier\" >> scpjob.$identifier"   >>  $job_to_send
[622]1292          fi
[1289]1293          echo "echo \"exit\" >> scpjob.$identifier"                     >>  $job_to_send
[1]1294
[1620]1295       elif [[ $remote_host = lcbullhh ]]
1296       then
1297          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
1298          echo "#!/bin/bash"                             >>  $job_to_send
1299          echo "#SBATCH --job-name=job_protocol_transfer" >>  $job_to_send
1300          echo "#SBATCH -t 00:20:00"                     >>  $job_to_send
1301          echo "#SBATCH -N 1"                            >>  $job_to_send
1302          echo "#SBATCH -n 1"                            >>  $job_to_send
1303          echo "#SBATCH -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1304          echo "#SBATCH -o $remote_dayfile"              >>  $job_to_send
1305          echo "#SBATCH -e $remote_dayfile"              >>  $job_to_send
1306          echo "#SBATCH -A $project_account"             >>  $job_to_send
1307          echo "#SBATCH -p $return_queue"                >>  $job_to_send
1308          echo " "                                       >>  $job_to_send
1309          echo "set -x"                                  >>  $job_to_send
1310          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1311          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1312          echo "%%END%%"                                 >>  $job_to_send
1313
[1099]1314       elif [[ $remote_host = lckyuh ]]
1315       then
[1289]1316          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1099]1317          echo "#!/bin/bash"                       >>  $job_to_send
1318          echo "#PJM -L \"node=1\""                >>  $job_to_send
1319          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1320          echo "#PJM --no-stging"                  >>  $job_to_send
1321          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1322          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1323          echo "#PJM -j"                           >>  $job_to_send
1324          echo " "                                 >>  $job_to_send
1325          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1326          echo "set -x"                            >>  $job_to_send
[1468]1327          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1099]1328          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1329          echo "%%END%%"                           >>  $job_to_send
1330
[1090]1331       elif [[ $remote_host = lckyut ]]
1332       then
[1289]1333          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1090]1334          echo "#!/bin/bash"                       >>  $job_to_send
1335          echo "#PJM -L \"vnode=1\""               >>  $job_to_send
1336          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1337          echo "#PJM --no-stging"                  >>  $job_to_send
1338          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1339          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1340          echo "#PJM -j"                           >>  $job_to_send
1341          echo " "                                 >>  $job_to_send
1342          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1343          echo "set -x"                            >>  $job_to_send
[1468]1344          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1090]1345          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1346          echo "%%END%%"                           >>  $job_to_send
1347
[1274]1348       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1349       then
[1289]1350          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
[1255]1351          echo "#!/bin/bash"                             >>  $job_to_send
1352          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1353          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
[1262]1354          echo "#PBS -l nodes=1:ppn=1"                   >>  $job_to_send
[1255]1355          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1356          echo "#PBS -j oe"                              >>  $job_to_send
1357          echo " "                                       >>  $job_to_send
1358          echo "set -x"                                  >>  $job_to_send
[1468]1359          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1255]1360          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1361          echo "%%END%%"                                 >>  $job_to_send
1362
[1866]1363       elif [[ $remote_host = lcocean ]]
1364       then
1365          echo "cat > scpjob.${identifier}.tmp << %%END%%"                  >>  $job_to_send
1366          echo "#!/bin/bash"                                             >>  $job_to_send
1367          echo "SGEPREFIX -S /bin/bash"                                  >>  $job_to_send
1368          echo "SGEPREFIX -N transfer_$job_name"                         >>  $job_to_send
1369          echo "SGEPREFIX -cwd"                                          >>  $job_to_send
1370          echo "SGEPREFIX -j y"                                          >>  $job_to_send
1371          echo "SGEPREFIX -o ${local_host}_${job_name}_scpjob_$identifier"  >>  $job_to_send 
1372          echo " "                                                       >>  $job_to_send 
1373          echo "set -x"                                                  >>  $job_to_send 
1374          echo "export PALM_BIN=$PALM_BIN" | sed -e 's:'$HOME':$HOME:'   >>  $job_to_send
1375          echo "export PATH=\$PATH:\$PALM_BIN"                           >>  $job_to_send
1376          echo ""                                 >>  $job_to_send         
1377          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1378          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1379          echo "rm -f scpjob.${identifier}"                                 >>  $job_to_send         
1380          echo "%%END%%"                                                 >>  $job_to_send
1381          echo "sed -e 's/SGEPREFIX/#$/g' scpjob.${identifier}.tmp > scpjob.${identifier}" >>  $job_to_send         
1382          echo "rm -f scpjob.${identifier}.tmp"                             >>  $job_to_send         
1383
[1468]1384       elif [[ $remote_host = lcxe6 ]]
1385       then
1386          echo "cat > scpjob.${identifier}  << %%END%%"  >>  $job_to_send
1387          echo "#!/bin/ksh"                              >>  $job_to_send
1388          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1389          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
1390          echo "#PBS -A $project_account"                >>  $job_to_send
1391          echo "#PBS -l mppwidth=1"                      >>  $job_to_send
1392          echo "#PBS -l mppnppn=1"                       >>  $job_to_send
1393          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1394          echo "#PBS -j oe"                              >>  $job_to_send
1395          echo " "                                       >>  $job_to_send
1396          echo "set -x"                                  >>  $job_to_send
1397          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1398          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1399          echo "%%END%%"                                 >>  $job_to_send
[1]1400       else
1401
[1289]1402          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1]1403          echo "# @\\\$-q $return_queue"           >>  $job_to_send
1404          echo "# @\\\$-l${qsubtime} 10"           >>  $job_to_send
1405          echo "# @\\\$-l${qsubmem} 10mb"          >>  $job_to_send
1406          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5  ||  $remote_host = t3es ]]
1407          then
1408             echo "# @\$-l mpp_p=0"                >>  $job_to_send
1409          fi
1410          echo '# @\$-lF 10mb'                     >>  $job_to_send
1411          echo '# @\$-o job_queue/last_job_transfer_protocol'    >>  $job_to_send
1412          echo '# @\\\$-eo'                          >>  $job_to_send
1413          echo " "                                 >>  $job_to_send
1414          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1415          then
1416             echo "set +vx"                        >>  $job_to_send
1417             echo ". .profile"                     >>  $job_to_send
1418          fi
1419          echo "set -x"                            >>  $job_to_send
[1468]1420          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null"  >>  $job_to_send
[1]1421          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1422          echo "%%END%%"                           >>  $job_to_send
[1620]1423
[1]1424       fi
1425
1426       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1427       then
[1289]1428          echo "llsubmit  scpjob.$identifier"      >>  $job_to_send
[1620]1429       elif [[ $remote_host = lcbullhh ]]
1430       then
1431          echo "sbatch  scpjob.$identifier"               >>  $job_to_send
[1274]1432       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1433       then
[2134]1434          echo "msub -E -q $return_queue  scpjob.$identifier"               >>  $job_to_send
[1]1435       elif [[ $remote_host = t3eb  ||  $remote_host = t3eh  ||  $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1436       then
[1289]1437          echo "qsub -J n  scpjob.$identifier"     >>  $job_to_send
[1]1438       elif [[ $remote_host = t3es ]]
1439       then
[1289]1440          echo "qsub -J n  -s /bin/ksh  scpjob.$identifier"     >>  $job_to_send
[1043]1441       elif [[ $remote_host = lckiaps ]]
1442       then
[1289]1443          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1444          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"$submcom ${job_catalog}/scpjob.$identifier\" "  >>  $job_to_send
[1289]1445          echo "rm  ${job_catalog}/scpjob.$identifier"          >>  $job_to_send
[1099]1446       elif [[ $remote_host = lckyu* ]]
[1090]1447       then
[1468]1448          echo "scp $PORTOPT scpjob.$identifier  ${remote_username}@${remote_address}:job_queue"           >>  $job_to_send
1449          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"cd job_queue; $submcom scpjob.$identifier; rm scpjob.$identifier\" "  >>  $job_to_send
[2257]1450       elif [[ $remote_host = lcocean ]]
[1099]1451       then
[1289]1452          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1453          echo "/usr/bin/ssh ${remote_username}@${remote_address}  \"$init_cmds $module_calls cd $job_catalog; $submcom scpjob.$identifier\" "  >>  $job_to_send
[1]1454       else
[1289]1455          echo "$submcom  scpjob.$identifier"      >>  $job_to_send
[1]1456       fi
[1043]1457       if [[ $remote_host != ibmku  &&  $remote_host != lckiaps ]]
[622]1458       then
[1289]1459          echo "rm  scpjob.$identifier"            >>  $job_to_send
[622]1460       fi
[1]1461    else
[1468]1462#       echo "ftpcopy  -d  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1289]1463       # ??? does this still work at all ???
[1468]1464       echo "nohup  ftpcopy  -d  -w 15  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null  &"  >>  $job_to_send
[1]1465    fi
1466    echo "set -x"                               >>  $job_to_send
1467    echo "     ' exit"                          >>  $job_to_send
1468    echo "set -x"                               >>  $job_to_send
1469 fi
1470
1471
[1289]1472    # APPEND THE JOB-FILE (CREATED BY mrun) TO THE JOB-DIRECTIVES GENERATED ABOVE
[1]1473 cat  $file_to_send  >>  $job_to_send
[69]1474
[1]1475 if [[ $remote_host = ibm ]]
1476 then
1477    echo " "         >>  $job_to_send
1478    echo "exit"      >>  $job_to_send
1479 fi
[635]1480
[1289]1481    # REMOVE JOB-FILE
[2257]1482 if [[ $remote_host = lctit  ||  $remote_host = ibmku ]]
[69]1483 then
1484    echo " "                               >>  $job_to_send
1485    echo "rm ~/job_queue/$job_on_remhost"  >>  $job_to_send
1486 fi
[1]1487
1488
[1289]1489    # TRANSFER JOB TO THE TARGET HOST (JOB-DIRECTORY)
[1]1490 if [[ $no_submit = false ]]
1491 then
1492    if [[ $remote_host != $local_host ]]
1493    then
1494       [[ $verify = true ]]  &&  printf "\n >>> transferring job to \"$remote_host\"..."
[1289]1495       if [[ $remote_host = ibms ]]    # ssh on ibms cannot handle "~/"
[1]1496       then
[82]1497          job_catalog_save=$job_catalog
1498          job_catalog=job_queue
1499       fi
[2266]1500       scp  $ssh_key  $PORTOPT  $job_to_send  ${remote_user}@${remote_address}:${job_catalog}/$job_on_remhost
[82]1501       if [[ $? = 1 ]]
1502       then
1503          locat=scp; exit
1504       fi
1505       if [[ $remote_host = ibms ]]
1506       then
1507          job_catalog=$job_catalog_save
1508       fi
[1]1509       [[ $verify = true ]]  &&  printf "\n >>> finished\n"
1510    else
1511       eval  job_catalog=$job_catalog
1512       cp  $job_to_send  ${job_catalog}/$job_on_remhost
1513    fi
1514
1515
1516
[1289]1517       # START NQS- / LOADLEVELER-JOB
[1]1518    if [[ $remote_host != $local_host ]]
1519    then
1520       [[ $verify = true ]]  &&  printf "\n >>> submitting job using \"$submcom\"...\n"
[635]1521
[1289]1522       if [[ $remote_host = ibmku ]]
[1]1523       then
[1468]1524          ssh  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
[1]1525       else
[1944]1526          ssh  $ssh_key  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost"
[82]1527       fi
[1]1528
1529       [[ $verify = true ]]  &&  printf " >>> o.k.\n"
1530    else
1531       cd  $job_catalog
[2257]1532       if [[ $(echo $local_host | cut -c1-3) = ibm  ||  $(echo $local_host | cut -c1-6) = lccray || $local_host = lceddy ]]
[1]1533       then
1534          eval  $submcom  $job_on_remhost
[2184]1535       elif [[  $local_host = lctit  ||  $local_host = lcxe6  ||  $local_host = lck  || $local_host = lckordi ||  $local_host = lcyon || $local_host = lcsb  ||  $local_host = lckyu* ]]
[108]1536       then
[635]1537          chmod  u+x  $job_on_remhost
[108]1538          eval  $submcom  $job_on_remhost
[1620]1539       elif [[ $local_host = lcbullhh ]]
1540       then
1541          if [[ $queue = default ]]
1542          then
1543             eval  $submcom  $job_on_remhost
1544          fi
[1]1545       else
1546          qsub  $job_on_remhost
1547       fi
[622]1548
[2257]1549          # JOBFILE MUST NOT BE DELETED ON lctit/ibmku. THIS WILL BE DONE
[1289]1550          # AT THE END OF THE JOB
[2257]1551       if [[ $local_host != lctit  &&  $local_host != ibmku ]]
[622]1552       then
1553          rm  $job_on_remhost
1554       fi
[1]1555       cd  -  > /dev/null
1556    fi
1557 fi
1558
[1289]1559    # FINAL ACTIONS
[1]1560 if [[ $no_submit = false ]]
1561 then
[1099]1562    rm  -f $job_to_send
[1]1563 fi
[1266]1564 [[ $verify = true ]]  &&  printf "\n\n *** SUBJOB finished \n\n"