source: palm/trunk/SCRIPTS/subjob @ 2563

Last change on this file since 2563 was 2365, checked in by kanani, 7 years ago

Vertical nesting implemented (SadiqHuq)

  • Property svn:keywords set to Id Rev
File size: 59.7 KB
[1841]1#!/bin/bash
[1090]2
3# subjob - script for automatic generation and submission of batch-job files
4#          for various batch queuing systems
5
[1046]6#--------------------------------------------------------------------------------#
7# This file is part of PALM.
8#
9# PALM is free software: you can redistribute it and/or modify it under the terms
10# of the GNU General Public License as published by the Free Software Foundation,
11# either version 3 of the License, or (at your option) any later version.
12#
13# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
14# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
15# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
16#
17# You should have received a copy of the GNU General Public License along with
18# PALM. If not, see <http://www.gnu.org/licenses/>.
19#
[1310]20# Copyright 1997-2014  Leibniz Universitaet Hannover
[1046]21#--------------------------------------------------------------------------------#
22#
23# Current revisions:
[1090]24# ------------------
[1351]25#
[2188]26#
[1046]27# Former revisions:
28# -----------------
[169]29# $Id: subjob 2365 2017-08-21 14:59:59Z Giersch $
[2365]30# Added lckea & lckeam. KIT/IMK-IFU Garmisch cluster. LRZ (SadiqHuq)
31#
32# 2295 2017-06-27 14:25:52Z raasch
[2295]33# adjustments for using lcgeohu (cirrus @ HUB)
34#
35# 2266 2017-06-09 09:27:21Z raasch
[2266]36# nech related parts removed
37#
38# 2257 2017-06-07 14:07:05Z witha
[2257]39# adjustments for lceddy, removed lcflow-specific code
40#
41# 2188 2017-03-21 06:42:42Z raasch
[1623]42#
[2188]43# 2187 2017-03-21 06:41:25Z raasch
44# adjustment of compute node names for lckyuh
45#
[2186]46# 2184 2017-03-21 04:31:22Z raasch
47# bugfix: localhost renamed local_host
48#
[2150]49# 2148 2017-02-09 16:56:42Z scharf
50# added kuma and gharbi to the list of known hosts
51#
[2135]52# 2134 2017-02-02 07:33:46Z raasch
53# option -E added to msub commands on HLRN-III machines to allow output of more
54# job informations in the job protocol files
55#
[1945]56# 1944 2016-06-15 06:29:00Z raasch
57# adjustments for using HLRN ssh-keys
58#
[1941]59# 1940 2016-06-14 05:15:20Z raasch
60# adjustments for lckiaps
61#
[1867]62# 1866 2016-04-15 06:50:59Z raasch
63# adjusted for lcocean
64#
[1842]65# 1841 2016-04-07 19:14:06Z raasch
66# script now running under bash
67#
[1702]68# 1701 2015-11-02 07:43:04Z maronga
69# Bugfix: added missing init_cmds for lccrayh/lccrayb
70#
[1623]71# 1621 2015-07-17 11:39:33Z heinze
[1621]72# adjustments for Mistral at DKRZ Hamburg (lcbullhh)
[1200]73#
[1576]74# 1575 2015-03-27 09:56:27Z raasch
75# mpp2-queues added to lccrayh
76#
[1548]77# 1547 2015-01-29 15:09:12Z witha
78# adjustments for ForWind computing cluster (lcflow)
79#
[1546]80# 1545 2015-01-29 06:52:23Z heinze
81# local host name for blizzard further specified
82#
[1481]83# 1480 2014-10-17 14:41:49Z raasch
84# adjustments for 2nd stage of HLRNIII
85#
[1469]86# 1468 2014-09-24 14:06:57Z maronga
87# Typo removed (addres->address)
88# Adjustments for lcxe6
89#
[1453]90# 1452 2014-08-22 09:41:06Z heinze
91# local hosts for blizzard added
92#
[1451]93# 1450 2014-08-21 07:31:51Z heinze
94# HLRN-III (lccrayb): testq queue adjusted to mpp1testq
95#
[1443]96# 1442 2014-07-28 07:09:10Z raasch
97# HLRN-III (lccrayb/lccrayh) queues adjusted
98#
[1379]99# 1378 2014-04-28 06:04:58Z raasch
100# -et option added for lctit
101#
[1351]102# 1350 2014-04-04 13:01:30Z maronga
103# location of qsub updated for lcxe6
104#
[1290]105# 1289 2014-03-04 07:12:34Z raasch
106# German comments translated to English
107# fimm-, necriam-, scirocco-, ibmy-, and sgi-specific code removed
108#
[1280]109# 1279 2014-01-28 12:10:14Z raasch
110# node calculation modified due to changes in mrun (tasks_per_node no longer
111# has to be an integral divisor of numprocs)
112#
[1275]113# 1274 2014-01-09 13:14:54Z heinze
114# adjustments for lccrayh
115#
[1267]116# 1266 2013-12-11 12:07:34Z heinze
117# further adjustments for lccrayb (use msub instead of qsub)
118#
[1265]119# 1264 2013-12-09 12:46:09Z fricke
120# Bugfix: Using number of nodes instead of number of processors (lccrayb)
121#
[1263]122# 1262 2013-12-09 10:57:20Z fricke
123# further adjustments for lccrayb
124#
[1261]125# 1260 2013-12-04 12:48:04Z raasch
126# jaboticaba admitted
127#
[1256]128# 1255 2013-11-07 14:43:35Z raasch
129# further adjustments for lccrayb
130#
[1225]131# 1224 2013-09-16 07:27:23Z raasch
132# first adjustments for lccrayb
133#
[1203]134# 1202 2013-07-10 16:22:07Z witha
135# adjustments for Forwind cluster (lcflow)
136#
[1200]137# 1199 2013-07-05 14:52:22Z raasch
138# adjustments for CSC Helsinki (lccrayf)
139#
[1185]140# use of cluster/express queue enabled (ibmh)
141# vinessa added (imuk)
[1047]142#
[1104]143# 1103 2013-02-20 02:15:53Z raasch
144# bash compatibility adjustments (usage of OPTIND, output formatting with printf
145# instead of typeset -L/R),
146# further adjustments for lckyuh
147#
[1100]148# 2013-02-10 01:47:43Z raasch
149# adjustments for Kyushu-University computing center (lckyuh - hayaka)
150# and for Forwind cluster (lcflow)
151#
[1096]152# 1094 2013-02-03 01:52:12Z raasch
153# new option -P for explicit setting of ssh/scp port,
154# decalpha parts (yonsei) removed
155#
[1091]156# 2013-02-02 07:06:13Z raasch
[1099]157# adjustments for Kyushu-University computing center (lckyut - tatara)
[1091]158# old changelog messages removed
159#
[1047]160# 1046 2012-11-09 14:38:45Z maronga
161# code put under GPL (PALM 3.9)
162#
[1090]163# 08/07/94 - Siggi - first version finished
164# 29/06/94 - Siggi - script development started
165#--------------------------------------------------------------------------------#
166# subjob - script for automatic generation and submission of batch-job files
167#          for various batch queuing systems
168#--------------------------------------------------------------------------------#
[1]169
170
[1289]171    # VARIABLE-DECLARATIONS AND DEFAULT VALUES
[352]172 delete_dayfile=false
[799]173 email_notification=none
[122]174 group_number=none
[1]175 locat=normal
176 no_default_queue=none
177 no_submit=false
178 job_catalog="~/job_queue"
179 job_name=none
180 local_user=$LOGNAME
181 node_usage=shared
[475]182 numprocs=0
[1]183 punkte="..........................................................."
184 submcom=qsub
185 queue=default
186 remote_host=none
187 remote_user=""
188 verify=true
189
190 typeset  -i   cputime=memory=Memory=0  minuten  resttime  sekunden  stunden
[1779]191 typeset  -i   numprocs  mpi_tasks=nodes=processes_per_node=0 tasks_per_node=threads_per_task=1
[1]192
193
194
[1289]195    # ERROR HANDLING
196    # IN CASE OF EXIT:
[1]197 trap 'if [[ $locat != normal ]]
198       then
199          case  $locat  in
200             (option)  printf "\n  --> available options can be displayed"
201                       printf " by typing:"
202                       printf "\n      \"subjob ?\" \n";;
203             (ftpcopy|parameter|scp|verify)  printf "\n";;
204             (*)       printf "\n  +++ unknown error"
205                       printf "\n      please inform S. Raasch!\n"
206          esac
207          [[ -f $job_to_send ]]  &&  rm  $job_to_send
208          printf "\n\n+++ SUBJOB killed \n\n"
209       fi' exit
210
211
[1289]212    # IN CASE OF TERMINAL-BREAK:
[1]213 trap '[[ -f $job_to_send ]]  &&  rm  $job_to_send
214       printf "\n\n+++ SUBJOB killed \n\n"
215       exit
216      ' 2
217
218
[1289]219    # DETERMINE NAME OF LOCAL HOST
[1]220 local_host=$(hostname)
221
[1289]222    # SET HOST-SPECIFIC VARIABLES (CHECK, IF LOCAL HOST
223    # IS ADMITTED AT ALL)
224    # NOTE: ONE OF THE ENTRIES FOR "lck" OR "lckordi" ALWAYS HAS TO BE
225    # COMMENTED OUT, BECAUSE THE HOSTNAME (node*) IS THE SAME FOR BOTH MACHINES
[1]226 case  $local_host  in
[1468]227     (ambiel-lx)             local_address=134.106.74.48;  local_host=lcfor;;
228     (atmos)                 local_address=172.20.25.35;   local_host=lcide;;
229     (austru)                local_address=130.75.105.128; local_host=lcmuk;;
230     (autan)                 local_address=130.75.105.57;  local_host=lcmuk;;
231     (bora)                  local_address=130.75.105.103; local_host=lcmuk;;
[2187]232     (a0*|b0*)               local_address=133.5.4.33;     local_host=lckyuh;;
[1545]233     (blizzard1|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.15;  local_host=ibmh;;
234     (blizzard2|p0*|p1*|p2*|p3*|p4*|p5*|p6*|p7*|p8*|p9*)   local_address=136.172.40.16;  local_host=ibmh;;
[1468]235     (blogin*|bxc*)          local_address=130.73.233.1;   local_host=lccrayb;;
236     (hlogin*|hxc*)          local_address=130.75.4.1;     local_host=lccrayh;;
237     (breva)                 local_address=130.75.105.98;  local_host=lcmuk;;
238     (buran)                 local_address=130.75.105.58;  local_host=lcmuk;;
239     (caurus)                local_address=130.75.105.19;  local_host=lcmuk;;
240     (climate*)              local_address=165.132.26.68;  local_host=lcyon;;
241     (clogin*)               local_address=86.50.166.21;   local_host=lccrayf;;
242     (elephanta)             local_address=130.75.105.6;   local_host=lcmuk;;
[2257]243     (hpcl*)                 local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
244     (cfd*)                  local_address=eddy.hpc.uni-oldenburg.de; local_host=lceddy;;
[1468]245     (node*)                 local_address=165.132.26.61;  local_host=lck;;
246   #  (node*)                 local_address=210.219.61.8    local_host=lckordi;;
247     (gaia*)                 local_address=150.183.146.24; local_host=ibmkisti;;
[2147]248     (gharbi)                local_address=130.75.105.47;  local_host=lcmuk;;
[1468]249     (gallego)               local_address=130.75.105.10;  local_host=lcmuk;;
250     (gregale)               local_address=130.75.105.109; local_host=lcmuk;;
251     (hababai)               local_address=130.75.105.108; local_host=lcmuk;;
252     (hayaka*)               local_address=133.5.4.33;     local_host=lckyuh;;
253     (hexagon.bccs.uib.no)   local_address=129.177.20.113; local_host=lcxe6;;
254     (hx*)                   local_address=133.3.51.11;    local_host=lckyoto;;
255     (inferno)               local_address=130.75.105.5;   local_host=lcmuk;;
256     (irifi)                 local_address=130.75.105.104; local_host=lcmuk;;
[2365]257   # (i*)                    local_address=129.187.11.197; local_host=ibmmuc;;
[1468]258     (jaboticaba)            local_address=150.163.25.181; local_host=lcbr;;
[2365]259     (kea*)                  local_address=172.27.80.109;  local_host=lckeal;;
[1468]260     (sno)                   local_address=130.75.105.113; local_host=lcmuk;;
[2147]261     (kuma)                  local_address=130.75.105.115; local_host=lcmuk;;
[1468]262     (levanto)               local_address=130.75.105.45;  local_host=lcmuk;;
[1940]263     (login*)                local_address=118.128.66.201; local_host=lckiaps;;
[2365]264   # (login*)                local_address=129.187.11.197; local_host=ibmmuc;;
265     (lm*)                   local_address=129.187.11.197; local_host=ibmmuc;;
266     (lx*)                   local_address=129.187.20.240; local_host=lclrz;;
267     (mpp2*)                 local_address=129.187.20.105; local_host=lclrz;;
[1468]268     (maestro)               local_address=130.75.105.2;   local_host=lcmuk;;
269     (meller)                local_address=134.106.74.155; local_host=lcfor;;
270     (meteo-login*)          local_address=193.166.211.144;local_host=lcxt5m;;
[1620]271     (mlogin1*|m1*)          local_address=136.172.50.13;  local_host=lcbullhh;;
[1468]272     (hexagon*)              local_address=129.177.20.113; local_host=lcxe6;;
273     (nobel*)                local_address=150.183.5.101;  local_host=ibms;;
[1866]274     (ocean)                 local_address="ocean";        local_host=lcocean;;
[1468]275     (orkan)                 local_address=130.75.105.3;   local_host=lcmuk;;
276     (ostria)                local_address=130.75.105.106; local_host=lcmuk;;
277     (paesano)               local_address=130.75.105.46;  local_host=lcmuk;;
278     (pcj*)                  local_address=172.31.120.1;   local_host=lckyut;;
279     (pingui)                local_address=134.106.74.118; local_host=lcfor;;
280     (quanero)               local_address=130.75.105.107; local_host=lcmuk;;
281     (rte*)                  local_address=133.5.185.60;   local_host=lcrte;;
[1866]282     (schultzl-Latitude-E6540)  local_address="schultzl-Latitude-E6540"; local_host=lcsch;;
[1468]283     (shiokaze-lx)           local_address=134.106.74.123; local_host=lcfor;;
284     (sisu-login*)           local_address=86.50.166.21;   local_host=lccrayf;;
285     (solano)                local_address=130.75.105.110; local_host=lcmuk;;
286     (sugoka*)               local_address=172.31.120.1;   local_host=lckyut;;
[1866]287     (tc*)                   local_address="ocean";        local_host=lcocean;;
[1468]288     (t2a*)                  local_address=10.1.6.165;     local_host=lctit;;
[2365]289     (uc1n*)                 local_address=129.13.82.89;   local_host=lcbwuni;;
[1468]290     (urban*)                local_address=147.46.30.151;  local_host=lcsb;;
291     (vinessa)               local_address=130.75.105.112; local_host=lcmuk;;
292     (vorias)                local_address=172.20.25.43;   local_host=lcmuk;;
293     (*.cc.kyushu-u.ac.jp)   local_address=133.5.4.129;    local_host=ibmku;;
[2295]294     (*.cluster)             local_address=192.168.1.254;  local_host=lcgeohu;;
[1]295     (*)                     printf "\n  +++ \"$local_host\" unknown";
[1255]296                             printf "\n      please contact the PALM group at IMUK";
[1]297                             locat=parameter; exit;;
298 esac
299
300
301
[1289]302    # BY DEFAULT, THE REMOTE HOST IS THE LOCAL HOST
[1]303 remote_host=$local_host
304
305
306
307
[1289]308    # READ THE SHELLSCRIPT-OPTIONS
[1094]309 while  getopts  :c:dDe:g:h:m:n:N:O:P:q:t:T:u:vX:  option
[1]310 do
311   case  $option  in
312       (c)   job_catalog=$OPTARG;;
313       (d)   delete_dayfile=true;;
314       (D)   no_submit=true;;
[352]315       (e)   email_notification=$OPTARG;;
[125]316       (g)   group_number=$OPTARG;;
[1]317       (h)   remote_host=$OPTARG;;
318       (m)   memory=$OPTARG;;
319       (n)   job_name=$OPTARG;;
320       (N)   node_usage=$OPTARG;;
321       (O)   threads_per_task=$OPTARG;;
[1094]322       (P)   scp_port=$OPTARG;;
[1]323       (q)   no_default_queue=$OPTARG;;
324       (t)   cputime=$OPTARG;;
325       (T)   tasks_per_node=$OPTARG;;
326       (u)   remote_user=$OPTARG;;
327       (v)   verify=false;;
328       (X)   numprocs=$OPTARG;;
329       (\?)  printf "\n  +++ Option $OPTARG unknown \n";
330             locat=option; exit;;
331   esac
332 done
333
334
[1289]335    # GET THE NAME OF THE JOBFILE AS NEXT ARGUMENT
[1103]336 (( to_shift = $OPTIND - 1 ))
337 shift $to_shift; file_to_send=$1
[1]338
339
[1289]340    # OUTPUT OF SHORT DESCRIPTION OF SCRIPT-OPTIONS
[1]341 if [ "$1" = "?" ]
342 then
343   (printf "\n  *** subjob can be called as follows:\n"
344    printf "\n      subjob -c.. -d -D -e.. -g.. -h.. -m.. -n.. -N.. -O.. -P.. -q.. -t.. -T.. -u.. -v -X..  <jobfile>\n"
345    printf "\n      Description of available options:\n"
346    printf "\n      Option  Description                         Default-Value"
347    printf "\n        -c    job-input- and output-catalog       ~/job_queue"
348    printf "\n        -d    no job-protocol will be created     ---"
349    printf "\n        -D    only the job-file will be created   ---"
       printf "\n        -e    email address for notifications     none"
       printf "\n        -g    group number (used on lctit)        none"
350    printf "\n        -h    execution host, available hosts:    $remote_host"
[1289]351    printf "\n              ibm, ibmh, ibmkisti, ibmku, ibms, lc...,"
[2266]352    printf "\n              lckiaps, lctit"
[1]353    printf "\n        -m    memory demand per process in MByte  ---"
354    printf "\n        -n    jobname                             <jobfile>"
       printf "\n        -N    node usage (shared/not_shared)      shared"
355    printf "\n        -O    threads per task (for OpenMP usage) 1"
[1094]356    printf "\n        -P    ssh/scp port                        default port"
[1]357    printf "\n        -q    job-queue to be used                default"
358    printf "\n        -t    allowed cpu-time in seconds         ---"
359    printf "\n        -T    tasks per node (on parallel hosts)  ---"
360    printf "\n        -u    username on execution host          from .netrc"
361    printf "\n        -v    no prompt for confirmation          ---"
362    printf "\n        -X    # of processors (on parallel hosts) 1"
363    printf "\n "
364    printf "\n      The only possible positional parameter is <jobfile>:"
365    printf "\n      The complete NQS-job must be provided here."
366    printf "\n      <jobfile>=? creates this outline\n\n") | more
367    exit
368 fi
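
    # EXAMPLE CALL (ILLUSTRATIVE ONLY, HOST, QUEUE AND VALUES ARE ARBITRARY):
    # the following call would request 192 processors on lccrayh with 24 tasks
    # per node, 7200 s cpu-time, 2000 MByte memory per process, and queue mpp1q
    # for the job-file "example_job":
    #
    #    subjob  -h lccrayh  -X 192  -T 24  -t 7200  -m 2000  -q mpp1q  example_job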
369
370
371
[1289]372    # CHECK, IF JOB-FILE HAS BEEN GIVEN AS ARGUMENT AND IF THE FILE ITSELF EXISTS
[1]373 if [[ "$file_to_send" = "" ]]
374 then
375    printf "\n  +++ job-file missing"
376    locat=parameter; exit
377 else
378    if [[ -f $file_to_send ]]
379    then
380       true
381    else
382       printf "\n  +++ job-file: "
383       printf "\n           $file_to_send"
384       printf "\n      does not exist"
385       locat=parameter; exit
386    fi
387 fi
388
389
390
[1289]391    # IF NO JOBNAME HAS BEEN GIVEN, JOBNAME IS SET TO THE NAME OF THE JOB-FILE,
392    # PROVIDED THAT THE JOB-FILE NAME DOES NOT CONTAIN ANY PATH
[1]393 if [[ $job_name = none ]]
394 then
395    job_name=$file_to_send
396 fi
397 if [[ $(echo $job_name | grep -c "/") != 0 ]]
398 then
399    printf "\n  +++ job-file name: "
400    printf "\n           $job_name"
401    printf "\n      must not contain \"/\"-characters"
402    locat=parameter; exit
403 fi
404
405
406
407
[1289]408    # SET HOST-SPECIFIC QUANTITIES, OR TERMINATE IN CASE OF UNKNOWN HOST,
409    # OR IF NO HOST HAS BEEN GIVEN
[1]410 if [[ $remote_host = none ]]
411 then
412    printf "\n  +++ host missing"
413    locat=option; exit
414 else
415    case  $remote_host  in
[1468]416        (ibm)     queue=p690_standard; remote_address=134.76.99.81; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
417        (ibmh)    queue=cluster; remote_address=136.172.40.15; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
418        (ibmkisti) queue=class.32plus; remote_address=150.183.146.24; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
419        (ibmku)   queue=s4; remote_address=133.5.4.129; submcom=/usr/local/bin/llsubmit;;
420        (ibms)    queue=p_normal; remote_address=150.183.5.101; submcom=/usr/lpp/LoadL/full/bin/llsubmit;;
[2365]421        (ibmmuc)  remote_address=129.187.11.197; submcom=/usr/bin/llsubmit;;
422        (lcbwuni) queue=develop; remote_address=129.13.82.89; submcom=/opt/moab/bin/msub;;
[1620]423        (lcbullhh)    queue=compute; remote_address=136.172.50.13; submcom=/usr/bin/sbatch;;
[2134]424        (lccrayb) queue=mpp1testq; remote_address=130.73.233.1; submcom="/opt/moab/default/bin/msub -E";;
425        (lccrayh) queue=mpp1testq; remote_address=130.75.4.1; submcom="/opt/moab/default/bin/msub -E";;
[1468]426        (lccrayf) queue=small; remote_address=86.50.166.21; submcom=/opt/slurm/default/bin/sbatch;;
[2257]427        (lceddy)  remote_address=eddy.hpc.uni-oldenburg.de; submcom=sbatch;;
[2295]428        (lcgeohu) remote_address=cirrus.geo.hu-berlin.de; submcom=sbatch;;
[1468]429        (lckyoto) remote_address=133.3.51.11; submcom=/thin/local/bin/qsub;;
430        (lck)     remote_address=165.132.26.61; submcom=/usr/torque/bin/qsub;;
[2365]431        (lckeal)  queue=ivy; remote_address=172.27.80.109; submcom=/usr/bin/sbatch;;
[1940]432        (lckiaps) remote_address=118.128.66.201; submcom=/opt/pbs/default/bin/qsub;;
[1468]433        (lckordi) remote_address=210.219.61.8; submcom=/usr/torque/bin/qsub;;
434        (lckyuh)  remote_address=133.5.4.33; submcom=/usr/bin/pjsub;;
435        (lckyut)  remote_address=133.5.4.37; submcom=/usr/bin/pjsub;;
[2365]436        (lclrz)   remote_address=129.187.20.240; submcom=/usr/bin/sbatch;;
[1866]437        (lcocean) remote_address="ocean"; submcom=qsub;;
[1468]438        (lcsb)    remote_address=147.46.30.151; submcom=/usr/torque/bin/qsub;;
439        (lctit)   queue=S; remote_address=10.1.6.165; submcom=/opt/pbs/tools/bin/t2sub;;
440        (lcxe6)   remote_address=129.177.20.113; submcom=/opt/torque/default/bin/qsub;;
441        (lcxt5m)  remote_address=193.166.211.144; submcom=/opt/pbs/10.1.0.91350/bin/qsub;;
442        (lcyon)   remote_address=165.132.26.68; submcom=/usr/torque/bin/qsub;;
[251]443        (*)       printf "\n  +++ hostname \"$remote_host\" not allowed";
444                  locat=parameter; exit;;
[1]445    esac
446 fi
447
448
[1289]449    # CHECK, IF A VALID QUEUE HAS BEEN GIVEN
[1]450 if [[ $no_default_queue != none ]]
451 then
452    error=false
453    ndq=$no_default_queue
454    case  $remote_host  in
455        (ibm)    case  $ndq  in
456                     (p690_express|p690_standard|p690_long)  error=false;;
457                     (*)                                     error=true;;
458                 esac;;
459        (ibmh)   case  $ndq  in
[1184]460                     (cluster|express)  error=false;;
[1]461                     (*)                                     error=true;;
462                 esac;;
[693]463        (ibmkisti)   case  $ndq  in
464                     (class.32plus|class.1-2|class.2-32)  error=false;;
465                     (*)                                     error=true;;
466                 esac;;
[622]467        (ibmku)  case  $ndq  in
468                     (sdbg1|sdbg2|sdbg4|s4|s16|s32|s32-s)    error=false;;
469                     (*)                                     error=true;;
470                 esac;;
[1]471        (ibms)   case  $ndq  in
472                     (express|normal|p_express|p_normal|p_normal_1.3|p_normal_1.7|grand)     error=false;;
473                     (*)                                     error=true;;
474                 esac;;
[2365]475        (ibmmuc*) case  $ndq  in
476                     (test|micro|general|large|fat|fattest|special|tmp1|tmp2) error=false;;
477                     (*)                                     error=true;;
478                 esac;;
[1620]479        (lcbullhh) case  $ndq  in
[2147]480                     (compute|compute2|shared)  error=false;;
[1620]481                     (*)                                     error=true;;
482                 esac;;
[2365]483        (lcbwuni) case  $ndq  in
484                     (develop|singlenode|multinode|verylong|fat) error=false;;
485                     (*)                                     error=true;;
486                 esac;;
[1224]487        (lccrayb) case  $ndq  in
[1480]488                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1224]489                     (*)                                     error=true;;
490                 esac;;
[1274]491        (lccrayh) case  $ndq  in
[1575]492                     (dataq|mpp1q|mpp1testq|mpp2q|mpp2testq|smp1q|smp1testq|specialm1q)   error=false;;
[1274]493                     (*)                                     error=true;;
494                 esac;;
[1197]495        (lccrayf) case  $ndq  in
496                     (usup|test*|small|large)                error=false;;
497                     (*)                                     error=true;;
498                 esac;;
[2257]499        (lceddy) case  $ndq  in
500                     (eddy.p|cfdh.p|cfdl.p|carl.p|mpcs.p|mpcl.p|mpcb.p|all_nodes.p)  error=false;;
[1099]501                     (*)                                     error=true;;
502                 esac;;
[1040]503        (lckiaps) case  $ndq  in
[1940]504                     (express|normal|normal20|quickq)        error=false;;
[1040]505                     (*)                                     error=true;;
506                 esac;;
[440]507        (lckyoto) case  $ndq  in
508                     (eh|ph)                                 error=false;;
509                     (*)                                     error=true;;
510                 esac;;
[1099]511        (lckyuh) case  $ndq  in
512                     (fx-dbg|fx-single|fx-small|fx-middle|fx-large)  error=false;;
513                     (*)                                     error=true;;
514                 esac;;
[1090]515        (lckyut) case  $ndq  in
516                     (cx-dbg|cx-single|cx-small|cx-middle|cx-large)  error=false;;
517                     (*)                                     error=true;;
518                 esac;;
[2365]519        (lclrz) case  $ndq  in
520                     (mpp1|mpp2|iuv2|myri)                   error=false;;
521                     (*)                                     error=true;;
522                 esac;;
[1]523        (lctit)  case  $ndq  in
[635]524                     (G|L128|L256|L512H|S|S96|V)             error=false;;
[1]525                     (*)                                     error=true;;
526                 esac;;
527        (t3eb)   case  $ndq  in
528                     (berte|p50|p100|p392|forfree|p25himem)  error=false;;
529                     (*)    error=true;;
530                 esac;;
531        (t3eh)   case  $ndq  in
532                     (para_t3e|em|k|l|lm|comp_t3e|c|p|ht)  error=false;;
533                     (*)    error=true;;
534                 esac;;
535        (t3ej2|t3ej5)  case  $ndq  in
536                     (low|normal|high)  error=false;;
537                     (*)    error=true;;
538                 esac;;
539        (t3es)  case  $ndq  in
540                     (batch|serial-4|pe4|p48|pe16|pe32|pe64|pe128)  error=false;;
541                     (*)    error=true;;
542                 esac;;
543    esac
544    if [[ $error = true ]]
545    then
546       printf "\n  +++ queue \"$no_default_queue\" on host \"$remote_host\" not allowed"
547       locat=parameter; exit
548    else
549       queue=$no_default_queue
550    fi
551 fi
552
553
554
[1289]555    # CHECK THE CPU-TIME
556    # SPLIT TIME INTO HOURS, MINUTES, AND SECONDS
[1]557 done=false
558 while [[ $done = false ]]
559 do
560    if (( $cputime <= 0 ))
561    then
562       printf "\n  +++ wrong cpu-time or cpu-time missing"
563       printf "\n  >>> Please type cpu-time in seconds as INTEGER:"
564       printf "\n  >>> "
565       read  cputime  1>/dev/null  2>&1
566    else
567       done=true
568    fi
569 done
570 (( stunden  = cputime / 3600 ))
571 (( resttime = cputime - stunden * 3600 ))
572 (( minuten  = resttime / 60 ))
573 (( sekunden = resttime - minuten * 60 ))
574 timestring=${stunden}:${minuten}:${sekunden}
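    # EXAMPLE: cputime=10000 s gives stunden=2, minuten=46, sekunden=40, i.e.
    # timestring=2:46:40 (note that values below ten are not zero-padded)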
575
576
577
[1289]578    # CHECK THE MEMORY DEMAND
[1]579 done=false
580 while [[ $done = false ]]
581 do
582    if (( memory <= 0 ))
583    then
584       printf "\n  +++ wrong memory demand or memory demand missing"
585       printf "\n  >>> Please type memory in  MByte per process  as INTEGER:"
586       printf "\n  >>> "
587       read  memory  1>/dev/null  2>&1
588    else
589       done=true
590    fi
591 done
592
[2266]593 if [[ $remote_host = lctit ]]
[1]594 then
[635]595    (( Memory = memory * tasks_per_node / 1000 ))
[1]596 fi
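    # EXAMPLE: memory=1500 (MByte per process) and tasks_per_node=16 give
    # Memory=24, i.e. 24 gb per node in the t2sub call assembled further below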
597
598
[1289]599    # MEMORY DEMAND IN CASE OF OPENMP-USAGE ON IBM-SYSTEMS
[1]600 if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
601 then
602    (( memory = memory * threads_per_task ))
603 fi
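    # EXAMPLE: memory=1000 and threads_per_task=4 give memory=4000, i.e. the
    # ConsumableMemory request in the LoadLeveler directives further below then
    # refers to one MPI task including all of its OpenMP threads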
604
605
[1289]606    # CALCULATE NUMBER OF REQUIRED NODES
[1]607 if (( tasks_per_node != 0 ))
608 then
[1279]609    (( nodes = ( numprocs - 1 ) / ( tasks_per_node * threads_per_task ) + 1 ))
[1]610 fi
611
[1094]612
[1289]613    # CALCULATE NUMBER OF PROCESSES PER NODE
[622]614 (( processes_per_node = tasks_per_node * threads_per_task ))
[1]615
[1094]616
[1289]617    # CALCULATE NUMBER OF MPI TASKS
[696]618 (( mpi_tasks = numprocs / threads_per_task ))
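    # EXAMPLE: numprocs=240, tasks_per_node=24, threads_per_task=1 give nodes=10,
    # processes_per_node=24 and mpi_tasks=240; a hybrid run with tasks_per_node=12
    # and threads_per_task=2 gives nodes=10, processes_per_node=24 and mpi_tasks=120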
[1]619
[696]620
[1289]621    # SET PORT NUMBER OPTION FOR CALLS OF ssh/scp, subjob AND batch_scp SCRIPTS
[1094]622 if [[ "$scp_port" != "" ]]
623 then
624    PORTOPT="-P $scp_port"
625    SSH_PORTOPT="-p $scp_port"
626 fi
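    # EXAMPLE: calling subjob with "-P 22" results in PORTOPT="-P 22"
    # (scp/batch_scp syntax) and SSH_PORTOPT="-p 22" (ssh syntax)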
627
628
[1289]629    # HEADER-OUTPUT
[1]630 if [[ $verify = true ]]
631 then
632    printf "\n\n"
633    printf "#--------------------------------------------------------------# \n"
634    spalte1=SUBJOB;spalte2=$(date)
[1103]635    printf "| %-20s%40s | \n" "$spalte1" "$spalte2"
[1]636    printf "|                                                              | \n"
637    printf "| values of parameters/options:                                | \n"
[1103]638    spalte1=$(echo local_host$punkte | cut -c-20)
639    spalte2=$punkte$local_host
640    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
641    spalte1=$(echo remote_host$punkte | cut -c-20)
642    spalte2=$punkte$remote_host
643    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
644    spalte1=$(echo queue$punkte | cut -c-20)
645    spalte2=$punkte$queue
646    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
647    spalte1=$(echo memory$punkte | cut -c-20)
648    spalte2="$punkte$memory mb"
649    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
650    spalte1=$(echo cputime$punkte | cut -c-20)
651    spalte2="$punkte$cputime sec"
652    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
653    spalte1=$(echo job_name$punkte | cut -c-20)
654    spalte2="$punkte$job_name"
655    printf "| %-20s%40s | \n" "$spalte1" "${spalte2: -40}"
[1]656    printf "#--------------------------------------------------------------# \n\n"
657
658
[1289]659       # QUERY CHECK
[1]660    antwort="dummy"
661    while [[ $antwort != y  &&  $antwort != Y  &&  $antwort != n  &&  $antwort != N ]]
662    do
663       read -p " >>> continue (y/n) ? "  antwort
664    done
665    if [[ $antwort = n  ||  $antwort = N ]]
666    then
667       locat=verify; exit
668    fi
669    printf "\n"
670 fi
671
[1289]672    # GENERATE RANDOM IDENTIFIER, AND DETERMINE THE JOBNAME ON THE TARGET HOST
673 identifier=$RANDOM
674 job_on_remhost=${job_name}_${identifier}_$local_host
675 job_to_send=job_to_send_$identifier
[1]676 if [[ $delete_dayfile = false ]]
677 then
[1289]678    remote_dayfile=${local_host}_${job_name}_result_$identifier
[1]679    local_dayfile=${remote_host}_${job_name}
680 else
681    remote_dayfile=/dev/null
682 fi
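
    # EXAMPLE (ILLUSTRATIVE NAMES): job_name=example_job submitted from host bora
    # (local_host=lcmuk) with identifier=12345 gives
    # job_on_remhost=example_job_12345_lcmuk and
    # remote_dayfile=lcmuk_example_job_result_12345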
683
684
[1289]685    # GENERATE THE BATCH-JOB SCRIPTS (FOR QUEUEING-SYSTEMS qsub/msub/LoadLeveler)
[1]686 if [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs != 0 ]]
687 then
688
[1289]689       # GENERAL LOADLEVELER SETTINGS
[622]690    execute_in_shell="#!/bin/ksh"
691    use_shell="# @ shell = /bin/ksh"
692    consumable_memory="ConsumableMemory($memory mb)"
693    class="# @ class = $queue"
694    environment="# @ environment = OMP_NUM_THREADS=$threads_per_task; MP_SHARED_MEMORY=yes"
695    network_to_use="# @ network.mpi = sn_all,shared,us"
696    data_limit="# @ data_limit = 1.76gb"
697    image_size="# @ image_size = 50"
[693]698    wall_clock_limit="# @ wall_clock_limit = ${timestring},$timestring"
[312]699
[693]700    if [[ $email_notification = none ]]
701    then
702       notify_user=""
703    else
704       notify_user="# @ notify_user = $email_notification"
705       if [[ $delete_dayfile = true ]]
706       then
707          notification='# @ notification = never'
708       fi
709    fi
[622]710
[312]711    if [[ $remote_host = ibmh ]]
[1]712    then
[312]713       data_limit=""
714       network_to_use=""
[1184]715       class="# @ class = $queue"
[312]716       environment=""
[814]717       rset="# @ rset = RSET_MCM_AFFINITY"
718       task_affinity="# @ task_affinity = core(1)"
[693]719    elif [[ $remote_host = ibmkisti ]]
720    then
721       network_to_use="# @ network.MPI = sn_all,shared,US"
722       wall_clock_limit="# @ wall_clock_limit = $timestring"
[696]723       if [[ $threads_per_task = 1 ]]
724       then
725          rset="# @ rset = RSET_MCM_AFFINITY"
726          mcm_affinity_options="# @ mcm_affinity_options = mcm_mem_pref mcm_sni_none mcm_distribute"
727       fi
[693]728       environment=""
729       use_shell=""
730       data_limit=""
731       image_size=""
[622]732    elif [[ $remote_host = ibmku ]]
733    then
734       execute_in_shell="#!/usr/bin/ksh"
735       use_shell="# @ shell = /usr/bin/ksh"
736       consumable_memory=""
737       environment=""
738       network_to_use="# @ network.mpi = sn_all,shared,us"
739       data_limit=""
740       image_size=""
741    elif [[ $remote_host = ibms ]]
742    then
743       network_to_use="# @ network.mpi = csss,shared,us"
[1]744    fi
745
746    cat > $job_to_send << %%END%%
[622]747$execute_in_shell
748$use_shell
[1]749
750# @ job_type = parallel
[693]751# @ job_name = $job_name
[1]752# @ resources = ConsumableCpus($threads_per_task) $consumable_memory
753# @ output = $remote_dayfile
754# @ error = $remote_dayfile
[693]755$wall_clock_limit
[622]756$image_size
[312]757$class
758$environment
[1]759$network_to_use
760$data_limit
[693]761$rset
762$mcm_affinity_options
[814]763$task_affinity
[1]764$notification
[693]765$notify_user
[1]766
767%%END%%
768
769    if (( nodes > 0 ))
770    then
771
[2365]772       if [[ $remote_host == ibmmuc* ]]
[693]773       then
[2365]774       cat > $job_to_send << %%END%%
[2365]776#!/bin/bash
777# @ job_type = parallel
778# @ job_name = $job_name
779# @ output = $remote_dayfile
780# @ error = $remote_dayfile
781# @ wall_clock_limit = $timestring
782$class
783$mcm_affinity_options
784$task_affinity
785$notify_user
786# @ network.MPI = sn_all,not_shared,us
787# @ notification = always
788# @ energy_policy_tag = table_kit_ifu
789# @ minimize_time_to_solution = yes
790# @ node = $nodes
791# @ total_tasks = $numprocs
792# @ node_topology = island
793# @ island_count = 1,2
794# @ environment = LD_LIBRARY_PATH=/lrz/sys/libraries/netcdf/4.2.1.1_impi4/lib:/lrz/sys/libraries/hdf5/1.8.15/ibmmpi/lib:/lrz/sys/libraries/fftw/3.3.3/avx/lib/
795# @ queue
796
797%%END%%
798
799       elif [[ $remote_host != ibmkisti ]]
800       then
801
[693]802          cat >> $job_to_send << %%END%%
[1]803# @ node = $nodes
[622]804# @ tasks_per_node = $processes_per_node
[1]805# @ node_usage = $node_usage
806# @ queue
807
808%%END%%
809
[693]810       else
811
812          cat >> $job_to_send << %%END%%
[696]813# @ total_tasks = $mpi_tasks
[693]814# @ blocking = unlimited
815# @ queue
816
817%%END%%
818
819       fi
820
[1]821    else
822
[1289]823       cat >> $job_to_send << %%END%%
[1]824# @ blocking = unlimited
825# @ total_tasks = $numprocs
826# @ node_usage = $node_usage
827# @ queue
828
829%%END%%
830
831    fi
832
[1289]833       # WORKAROUND BECAUSE OF SILLY JOB FILTER ON ibmkisti
[696]834    if [[ $remote_host = ibmkisti  &&  $threads_per_task != 1 ]]
835    then
836       echo  "export OMP_NUM_THREADS=$threads_per_task"  >>  $job_to_send
837    fi
838
[1]839 elif [[ $(echo $remote_host | cut -c1-3) = ibm  &&  $numprocs = 0 ]]
840 then
841
842    cat > $job_to_send << %%END%%
843#!/bin/ksh
844
845# @ job_type = serial
846# @ node_usage = $node_usage
847# @ job_name = palm
848# @ wall_clock_limit = ${timestring},$timestring
849# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)
850# @ output = $remote_dayfile
851# @ error = $remote_dayfile
[312]852$class
[1]853$notification
854
855# @ queue
856
857%%END%%
858
[1620]859 elif [[ $remote_host = lcbullhh ]]
860 then
861    if [[ $numprocs != 0 ]]
862    then
863       cat > $job_to_send << %%END%%
864#!/bin/bash -l
865#SBATCH -J $job_name
866#SBATCH -t $timestring
867#SBATCH -N $nodes
868#SBATCH --ntasks-per-node=$processes_per_node
869#SBATCH -p $queue
870#SBATCH -o $remote_dayfile
871#SBATCH -e $remote_dayfile
872#SBATCH -A $project_account
873
874$init_cmds
875$module_calls
876
877%%END%%
878
879    else
880       cat > $job_to_send << %%END%%
881#!/bin/bash -l
882#SBATCH -J $job_name
883#SBATCH -t $timestring
884#SBATCH -l ncpus=1
885#SBATCH -l pmem=${memory}mb
886#SBATCH -m abe
887#SBATCH -o $remote_dayfile
888#SBATCH -e $remote_dayfile
889#SBATCH -A $project_account
890
891$init_cmds
892$module_calls
893
894%%END%%
895
896    fi
897
[1274]898 elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1224]899 then
900
[1480]901    if [[ "$feature" != "" ]]
902    then
903       featuredir="#PBS -l feature=$feature"
904    fi
905
[1224]906    if [[ $numprocs != 0 ]]
907    then
908       cat > $job_to_send << %%END%%
[1255]909#!/bin/bash
[1224]910#PBS -N $job_name
911#PBS -l walltime=$timestring
[1264]912#PBS -l nodes=$nodes:ppn=$processes_per_node
[1224]913#PBS -o $remote_dayfile
914#PBS -j oe
915#PBS -q $queue
[1480]916$featuredir
[1224]917
[1701]918$init_cmds
[1224]919$module_calls
920
921%%END%%
922
923    else
924
925       true
926
927    fi
928
[1197]929 elif [[ $remote_host = lccrayf ]]
930 then
931
932    if [[ $numprocs != 0 ]]
933    then
934       cat > $job_to_send << %%END%%
935#!/bin/bash -l
936#SBATCH -J $job_name
937#SBATCH -t $timestring
938#SBATCH -N $nodes
939#SBATCH --ntasks-per-node=$processes_per_node
940#SBATCH -p $queue
941#SBATCH -o $remote_dayfile
942#SBATCH -e $remote_dayfile
943
944$init_cmds
945$module_calls
946
947%%END%%
948
949    else
950       cat > $job_to_send << %%END%%
951#!/bin/bash -l
952#SBATCH -J $job_name
953#SBATCH -t $timestring
954#SBATCH -l ncpus=1
955#SBATCH -l pmem=${memory}mb
956#SBATCH -m abe
957#SBATCH -o $remote_dayfile
958#SBATCH -e $remote_dayfile
959
960$init_cmds
961$module_calls
962
963%%END%%
964
965    fi
966
[2257]967
968
969 elif [[ $remote_host = lceddy ]]
[1099]970 then
[2257]971    cat > $job_to_send << %%END%%
[1099]972#!/bin/bash
[2257]973#SBATCH -J $job_name
974#SBATCH -t $timestring
975#SBATCH -n $numprocs
976#SBATCH -N $nodes
977#SBATCH --cpus-per-task 1
978#SBATCH -p $queue
979#SBATCH -o $remote_dayfile
980#SBATCH -e $remote_dayfile
981#SBATCH --mem-per-cpu $memory
982#SBATCH --exclusive
[1099]983
[2257]984module load PALMDependencies/.gcc
985
986$init_cmds
987$module_calls
988
[1099]989%%END%%
990
[1021]991 elif [[ $remote_host = lck || $remote_host = lckordi || $remote_host = lcsb ]]
[368]992 then
993
994    if [[ $numprocs != 0 ]]
995    then
996       cat > $job_to_send << %%END%%
997#!/bin/ksh
998#PBS -N $job_name
999#PBS -l walltime=$timestring
1000#PBS -l ncpus=$numprocs
1001#PBS -l pmem=${memory}mb
1002#PBS -o $remote_dayfile
[1021]1003#PBS -l nodes=$nodes:ppn=${processes_per_node}
[368]1004#PBS -j oe
1005
1006mpd &
1007
1008%%END%%
1009
1010    else
1011       cat > $job_to_send << %%END%%
1012#!/bin/ksh
1013#PBS -N $job_name
1014#PBS -l walltime=$timestring
1015#PBS -l ncpus=1
1016#PBS -l pmem=${memory}mb
1017#PBS -o $remote_dayfile
1018#PBS -j oe
1019
1020%%END%%
1021
1022    fi
1023
[2295]1024 elif [[ $remote_host = lcgeohu ]]
1025 then
1026
1027    if [[ $email_notification = none ]]
1028    then
1029       notify_user=""
1030    else
1031       notify_user="#SBATCH --mail-type=ALL"
1032    fi
1033
1034    if [[ $numprocs != 0 ]]
1035    then
1036       cat > $job_to_send << %%END%%
1037#!/bin/bash
1038#SBATCH --job-name=$job_name
1039#SBATCH --ntasks=$processes_per_node
1040#SBATCH --time=$timestring
1041#SBATCH --output=$remote_dayfile
1042#SBATCH --error=$remote_dayfile
1043#SBATCH --qos=short
1044$notify_user
1045
1046$init_cmds
1047$module_calls
1048
1049%%END%%
1050
1051    else
1052       cat > $job_to_send << %%END%%
1053#!/bin/bash
1054#SBATCH --job-name=$job_name
1055#SBATCH --ntasks=$processes_per_node
1056#SBATCH --output=$remote_dayfile
1057#SBATCH --error=$remote_dayfile
1058#SBATCH -l ncpus=1
1059
1060$init_cmds
1061$module_calls
1062
1063%%END%%
1064
1065    fi
1066
[1040]1067 elif [[ $remote_host = lckiaps ]]
1068 then
1069
1070    if [[ $numprocs != 0 ]]
1071    then
1072       cat > $job_to_send << %%END%%
[1940]1073#!/bin/bash
[1040]1074#PBS -N $job_name
1075#PBS -l walltime=$timestring
1076#PBS -l select=1:ncpus=$numprocs
1077#PBS -l pmem=${memory}mb
1078#PBS -q $queue
1079#PBS -o $remote_dayfile
1080#PBS -j oe
1081#PBS -V
1082
1083%%END%%
1084
1085    else
1086       cat > $job_to_send << %%END%%
[1940]1087#!/bin/bash
[1040]1088#PBS -N $job_name
1089#PBS -l walltime=$timestring
1090#PBS -l ncpus=1
1091#PBS -l pmem=${memory}mb
1092#PBS -o $remote_dayfile
1093#PBS -j oe
1094
1095%%END%%
1096
1097    fi
1098
[693]1099 elif [[ $remote_host = lcyon ]]
1100 then
1101
1102    if [[ $numprocs != 0 ]]
1103    then
1104       cat > $job_to_send << %%END%%
1105#!/bin/ksh
1106#PBS -N $job_name
1107#PBS -l walltime=$timestring
1108#PBS -l ncpus=$numprocs
1109#PBS -l pmem=${memory}mb
1110#PBS -o $remote_dayfile
1111#PBS -j oe
1112
1113%%END%%
1114
1115    else
1116       cat > $job_to_send << %%END%%
1117#!/bin/ksh
1118#PBS -N $job_name
1119#PBS -l walltime=$timestring
1120#PBS -l ncpus=1
1121#PBS -l pmem=${memory}mb
1122#PBS -o $remote_dayfile
1123#PBS -j oe
1124
1125%%END%%
1126
1127    fi
1128
[892]1129 elif [[ $remote_host = lcxe6 ]]
[164]1130 then
1131
1132    if [[ $numprocs != 0 ]]
1133    then
1134       cat > $job_to_send << %%END%%
[799]1135#!/bin/ksh
[164]1136#PBS -S /bin/ksh
1137#PBS -N $job_name
[552]1138#PBS -A $project_account
[206]1139#PBS -j oe
[164]1140#PBS -l walltime=$timestring
1141#PBS -l mppwidth=${numprocs}
[622]1142#PBS -l mppnppn=${processes_per_node}
[164]1143#PBS -m abe
1144#PBS -o $remote_dayfile
[492]1145$email_directive
[164]1146
[892]1147$init_cmds
[493]1148$module_calls
[343]1149
[164]1150%%END%%
1151
[108]1152    else
1153       cat > $job_to_send << %%END%%
1154#!/bin/ksh
[168]1155#PBS -S /bin/ksh
[108]1156#PBS -N $job_name
[552]1157#PBS -A $project_account
[206]1158#PBS -j oe
[108]1159#PBS -l walltime=$timestring
1160#PBS -l ncpus=1
1161#PBS -l pmem=${memory}mb
1162#PBS -m abe
[492]1163$email_directive
[108]1164#PBS -o $remote_dayfile
1165
[892]1166$init_cmds
[493]1167$module_calls
[343]1168
[108]1169%%END%%
1170
1171    fi
1172
[440]1173 elif [[ $remote_host = lckyoto ]]
[437]1174 then
1175
[440]1176       cat > $job_to_send << %%END%%
[799]1177#!/bin/ksh
[440]1178# @\$-o $remote_dayfile
1179# @\$-eo -oi
1180# @\$-lP 16
[799]1181# @\$-lp 1
[440]1182# @\$-lm 28gb  -llm unlimited -ls unlimited
1183# @\$-q $queue
1184# @\$-Pvn abs_pack
1185##for intel? @\$-Pvn abs_unpack -Pvs unpack -Pvc unpack
1186#. /thin/local/etc/setprofile/intel-11.0.sh
1187#. /thin/local/etc/setprofile/mvapich2-1.4+intel-11.0.sh
1188. ~/.myprofile
1189#. /home2/t/t51254/palm/current_version/myprofile
1190#. /thin/apps/pgi/mpi.sh
1191#
1192env
1193#
1194set -x
1195
1196%%END%%
1197
1198 elif [[ $remote_host = lcxt5m ]]
1199 then
1200
[437]1201    if [[ $numprocs != 0 ]]
1202    then
1203       cat > $job_to_send << %%END%%
[799]1204#!/bin/ksh
[437]1205#PBS -S /bin/ksh
1206#PBS -N $job_name
1207#PBS -j oe
1208#PBS -l walltime=$timestring
1209#PBS -l mppwidth=${numprocs}
[622]1210#PBS -l mppnppn=${processes_per_node}
[437]1211#PBS -m abe
1212#PBS -o $remote_dayfile
1213
[892]1214$init_cmds
[493]1215$module_calls
[437]1216
1217%%END%%
1218
1219    else
1220       cat > $job_to_send << %%END%%
1221#!/bin/ksh
1222#PBS -S /bin/ksh
1223#PBS -N $job_name
1224#PBS -j oe
1225#PBS -l walltime=$timestring
1226#PBS -l ncpus=1
1227#PBS -l pmem=${memory}mb
1228#PBS -m abe
1229#PBS -o $remote_dayfile
1230
[892]1231$init_cmds
[493]1232$module_calls
[437]1233
1234%%END%%
1235
1236    fi
1237
[1099]1238 elif [[ $remote_host = lckyuh ]]
1239 then
1240    cat > $job_to_send << %%END%%
1241#!/bin/bash
1242#PJM -L "rscgrp=$queue"
1243#PJM -L "node=$nodes"
1244#PJM --mpi "proc=$numprocs"
1245#PJM -L "elapse=$timestring"
1246#PJM -o $remote_dayfile
1247#PJM -j
1248#PJM -X
1249#PJM --no-stging
1250
1251export LANG=en_US.UTF-8
1252%%END%%
1253
[1090]1254 elif [[ $remote_host = lckyut ]]
1255 then
1256    cat > $job_to_send << %%END%%
1257#!/bin/bash
[1099]1258#PJM -L "rscgrp=$queue"
[1090]1259#PJM -L "vnode=$numprocs"
1260#PJM -L "vnode-core=1"
1261#PJM -L "elapse=$timestring"
1262#PJM --mpi proc=$numprocs
1263#PJM -o $remote_dayfile
1264#PJM -j
[1099]1265#PJM -X
1266#PJM --no-stging
[1090]1267
1268export LANG=en_US.UTF-8
1269%%END%%
1270
[1866]1271 elif [[ $remote_host = lcocean ]]
1272 then
1273   cat > $job_to_send << %%END%%
1274#!/bin/bash
1275#$ -cwd
1276#$ -V
1277#$ -N $job_name
1278#$ -pe orte $numprocs
1279#$ -o $remote_dayfile
1280#$ -j y
1281#$ -R y
1282$init_cmds
1283$module_calls
1284
1285%%END%%
1286
[1]1287 elif [[ $remote_host = lctit ]]
1288 then
1289    cat > $job_to_send << %%END%%
[635]1290#!/bin/ksh
[892]1291$init_cmds
[678]1292$module_calls
1293
[1]1294%%END%%
1295
[1289]1296       # SET OPTIONS FOR SUBMIT-COMMAND
[678]1297    if [[ $tasks_per_node != $processes_per_node ]]
1298    then
[1378]1299       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1300    else
[1378]1301       submcom="$submcom -W group_list=$group_number -N $job_name -l walltime=$timestring -l select=$nodes:ncpus=$processes_per_node:mpiprocs=$tasks_per_node:mem=${Memory}gb -l place=scatter -o $remote_dayfile -j oe -et 1 -q $queue "
[678]1302    fi
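
    # EXAMPLE (ILLUSTRATIVE VALUES): for queue S, nodes=2, processes_per_node=16,
    # tasks_per_node=16, Memory=24 and timestring=2:46:40 the submit command is
    # assembled as:
    #    /opt/pbs/tools/bin/t2sub  -W group_list=<group_number>  -N <job_name>
    #       -l walltime=2:46:40  -l select=2:ncpus=16:mpiprocs=16:mem=24gb
    #       -l place=scatter  -o <remote_dayfile>  -j oe  -et 1  -q S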
[1]1303
[2365]1304 elif [[ $remote_host = lclrz ]]
1305 then
1306       cat > $job_to_send << %%END%%
1307#!/bin/bash
1308#SBATCH -J $job_name
1309#SBATCH -t $timestring
1310#SBATCH -N $nodes
1311#SBATCH --ntasks-per-node=$processes_per_node
1312#SBATCH --get-user-env
1313#SBATCH -o $remote_dayfile
1314#SBATCH -e $remote_dayfile
1315#SBATCH --mail-user=${email_notification}
1316#SBATCH --clusters=$queue
1317
1318$init_cmds
1319
1320$module_calls
1321
1322%%END%%
1323
1324 elif [[ $remote_host = lckea* ]]
1325 then
1326       keal_tasks_per_core=1
1327
1328       if [[ $queue = haswell || $queue = ivy* ]]
1329       then
1330          if (( tasks_per_node > 20 ))
1331          then
1332             keal_tasks_per_core=2
1333          fi
1334       fi
1335
1336       cat > $job_to_send << %%END%%
1337#!/bin/bash
1338#SBATCH -J $job_name
1339#SBATCH -t $timestring
1340#SBATCH -N $nodes
1341#SBATCH --ntasks-per-node=$processes_per_node
1342#SBATCH --ntasks-per-core=$keal_tasks_per_core
1343#SBATCH --mem-per-cpu=${memory}mb
1344#SBATCH --get-user-env
1345#SBATCH -o $remote_dayfile
1346#SBATCH -e $remote_dayfile
1347#SBATCH --mail-user=${email_notification}
1348#SBATCH --mail-type=ALL
1349#SBATCH --partition=$queue
1350
1351export MV2_ENABLE_AFFINITY=0
1352
1353$init_cmds
1354
1355$module_calls
1356
1357%%END%%
1358
1359 elif [[ $remote_host = lcbwuni ]]
1360 then
1361    if [[ $email_notification = none ]]
1362    then
1363       email_directive=""
1364    else
1365       email_directive="#PBS -M $email_notification"
1366    fi
1367       cat > $job_to_send << %%END%%
1368#!/bin/ksh
1369#PBS -N $job_name
1370#PBS -l walltime=$timestring
1371#PBS -l nodes=${nodes}:ppn=$processes_per_node
1372#PBS -l pmem=${memory}mb
1373#PBS -m abe
1374#PBS -o $remote_dayfile
1375#PBS -j oe
1376#PBS -q $queue
1377$email_directive
1378%%END%%
1379
[1]1380 else
1381
1382    cat > $job_to_send << %%END%%
1383# @\$-q ${queue}
1384# @\$-l${qsubtime} $timestring
1385# @\$-l${qsubmem} ${memory}mb
1386# @\$-o $remote_dayfile
1387# @\$-eo
1388
1389%%END%%
1390
1391 fi
1392
1393
[1289]1394    # IN CASE OF JOBS EXECUTING ON REMOTE-HOSTS, THE TRANSFER OF THE DAYFILES
1395    # TO THE LOCAL HOSTS WILL BE INITIATED BY TRAP ON EXIT
1396    # NO TRANSFER POSSIBLE ON IBM IN SEOUL
[1]1397 if [[ $delete_dayfile = false  &&  $remote_host != $local_host ]]
1398 then
1399    echo "set +vx"                              >>  $job_to_send
1400    echo "trap '"                               >>  $job_to_send
1401    echo "set +vx"                              >>  $job_to_send
[2295]1402    if [[ $(echo $remote_host | cut -c1-3) = ibm  ||  $remote_host = lcbullhh  ||  $remote_host = lccrayb  ||  $remote_host = lccrayh  ||  $(echo $remote_host | cut -c1-3) = nec  ||  $remote_host = lckiaps  ||  $remote_host = lckyu* || $remote_host = lcxe6  ||  $remote_host = lcocean  || $remote_host = lcgeohu ]]
[1]1403    then
[622]1404       if [[ $remote_host = ibmh ]]
[1]1405       then
1406          return_queue=c1
[693]1407       elif [[ $remote_host = ibmkisti ]]
1408       then
1409          return_queue=class.1-2
[622]1410       elif [[ $remote_host = ibmku ]]
1411       then
1412          return_queue=sdbg2
[1]1413       elif [[ $remote_host = ibms ]]
1414       then
1415          return_queue=p_normal
[1620]1416       elif [[ $remote_host = lcbullhh ]]
1417       then
1418          return_queue=shared
[1274]1419       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1420       then
1421          return_queue=dataq
[1468]1422       elif [[ $remote_host = lcxe6 ]]
1423       then
1424          return_queue=debug
[1040]1425       elif [[ $remote_host = lckiaps ]]
1426       then
1427          return_queue=express
[1099]1428       elif [[ $remote_host = lckyuh ]]
1429       then
1430          return_queue=cx-single
[1090]1431       elif [[ $remote_host = lckyut ]]
1432       then
1433          return_queue=cx-single
[1]1434       else
1435          return_queue=unknown
1436       fi
1437
1438       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1439       then
1440
[622]1441          if [[ $remote_host = ibmku ]]
1442          then
[1289]1443             echo "echo \"#!/usr/bin/ksh\" >> scpjob.$identifier"            >>  $job_to_send
1444             echo "echo \"# @ shell = /usr/bin/ksh\" >> scpjob.$identifier"  >>  $job_to_send
[622]1445          else
[1289]1446             echo "echo \"#!/bin/ksh\" >> scpjob.$identifier"                >>  $job_to_send
[622]1447          fi
[1289]1448          echo "echo \"# @ job_type = serial\" >> scpjob.$identifier"    >>  $job_to_send
1449          echo "echo \"# @ job_name = transfer\" >> scpjob.$identifier"  >>  $job_to_send
1450          echo "echo \"# @ resources = ConsumableCpus(1) ConsumableMemory(1 gb)\" >> scpjob.$identifier"  >>  $job_to_send
1451          echo "echo \"# @ wall_clock_limit = 00:10:00,00:10:00\" >> scpjob.$identifier "  >>  $job_to_send
1452          echo "echo \"# @ output = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
1453          echo "echo \"# @ error = job_queue/last_job_transfer_protocol\" >> scpjob.$identifier"  >>  $job_to_send
[312]1454          if [[ $remote_host != "ibmh" ]]
1455          then
[1289]1456             echo "echo \"# @ class = $return_queue\" >> scpjob.$identifier"  >>  $job_to_send
[312]1457          fi
[1289]1458          echo "echo \"# @ image_size = 10\" >> scpjob.$identifier"      >>  $job_to_send
1459          echo "echo \"# @ notification = never\" >> scpjob.$identifier" >>  $job_to_send
[1]1460
[1289]1461          echo "echo \"# @ queue\" >> scpjob.$identifier"                >>  $job_to_send
1462          echo "echo \" \" >> scpjob.$identifier"                        >>  $job_to_send
[1]1463
[1289]1464          echo "echo \"set -x\" >> scpjob.$identifier"                   >>  $job_to_send
[1468]1465          echo "echo \"batch_scp  $PORTOPT  -d  -w 10  -u $local_user  $local_address  ${job_catalog}/$remote_dayfile  \\\"$job_catalog\\\"  $local_dayfile\" >> scpjob.$identifier"  >>  $job_to_send
[622]1466          if [[ $remote_host = ibmku ]]
1467          then
[1289]1468             echo "echo \"rm  scpjob.$identifier\" >> scpjob.$identifier"   >>  $job_to_send
[622]1469          fi
[1289]1470          echo "echo \"exit\" >> scpjob.$identifier"                     >>  $job_to_send
[1]1471
[1620]1472       elif [[ $remote_host = lcbullhh ]]
1473       then
1474          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
1475          echo "#!/bin/bash"                             >>  $job_to_send
1476          echo "#SBATCH --job-name=job_protocol_transfer" >>  $job_to_send
1477          echo "#SBATCH -t 00:20:00"                     >>  $job_to_send
1478          echo "#SBATCH -N 1"                            >>  $job_to_send
1479          echo "#SBATCH -n 1"                            >>  $job_to_send
1480          echo "#SBATCH -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1481          echo "#SBATCH -o $remote_dayfile"              >>  $job_to_send
1482          echo "#SBATCH -e $remote_dayfile"              >>  $job_to_send
1483          echo "#SBATCH -A $project_account"             >>  $job_to_send
1484          echo "#SBATCH -p $return_queue"                >>  $job_to_send
1485          echo " "                                       >>  $job_to_send
1486          echo "set -x"                                  >>  $job_to_send
1487          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1488          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1489          echo "%%END%%"                                 >>  $job_to_send
1490
[1099]1491       elif [[ $remote_host = lckyuh ]]
1492       then
[1289]1493          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1099]1494          echo "#!/bin/bash"                       >>  $job_to_send
1495          echo "#PJM -L \"node=1\""                >>  $job_to_send
1496          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1497          echo "#PJM --no-stging"                  >>  $job_to_send
1498          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1499          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1500          echo "#PJM -j"                           >>  $job_to_send
1501          echo " "                                 >>  $job_to_send
1502          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1503          echo "set -x"                            >>  $job_to_send
[1468]1504          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1099]1505          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1506          echo "%%END%%"                           >>  $job_to_send
1507
[1090]1508       elif [[ $remote_host = lckyut ]]
1509       then
[1289]1510          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1090]1511          echo "#!/bin/bash"                       >>  $job_to_send
1512          echo "#PJM -L \"vnode=1\""               >>  $job_to_send
1513          echo "#PJM -L \"rscgrp=$return_queue\""  >>  $job_to_send
1514          echo "#PJM --no-stging"                  >>  $job_to_send
1515          echo "#PJM -L \"elapse=30:00\""          >>  $job_to_send
1516          echo "#PJM -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1517          echo "#PJM -j"                           >>  $job_to_send
1518          echo " "                                 >>  $job_to_send
1519          echo "export LANG=en_US.UTF-8"           >>  $job_to_send
1520          echo "set -x"                            >>  $job_to_send
[1468]1521          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  $remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1090]1522          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1523          echo "%%END%%"                           >>  $job_to_send
1524
[1274]1525       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1526       then
[1289]1527          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
[1255]1528          echo "#!/bin/bash"                             >>  $job_to_send
1529          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1530          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
[1262]1531          echo "#PBS -l nodes=1:ppn=1"                   >>  $job_to_send
[1255]1532          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1533          echo "#PBS -j oe"                              >>  $job_to_send
1534          echo " "                                       >>  $job_to_send
1535          echo "set -x"                                  >>  $job_to_send
[1468]1536          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1255]1537          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1538          echo "%%END%%"                                 >>  $job_to_send
1539
[1866]1540       elif [[ $remote_host = lcocean ]]
1541       then
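          # SGE DIRECTIVES ARE FIRST WRITTEN WITH THE PLACEHOLDER "SGEPREFIX", WHICH IS
          # REPLACED BY "#$" VIA sed ONLY AFTER THE HERE-DOCUMENT HAS BEEN EXPANDED ON THE REMOTE HOST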
1542          echo "cat > scpjob.${identifier}.tmp << %%END%%"                  >>  $job_to_send
1543          echo "#!/bin/bash"                                             >>  $job_to_send
1544          echo "SGEPREFIX -S /bin/bash"                                  >>  $job_to_send
1545          echo "SGEPREFIX -N transfer_$job_name"                         >>  $job_to_send
1546          echo "SGEPREFIX -cwd"                                          >>  $job_to_send
1547          echo "SGEPREFIX -j y"                                          >>  $job_to_send
1548          echo "SGEPREFIX -o ${local_host}_${job_name}_scpjob_$identifier"  >>  $job_to_send 
1549          echo " "                                                       >>  $job_to_send 
1550          echo "set -x"                                                  >>  $job_to_send 
1551          echo "export PALM_BIN=$PALM_BIN" | sed -e 's:'$HOME':$HOME:'   >>  $job_to_send
1552          echo "export PATH=\$PATH:\$PALM_BIN"                           >>  $job_to_send
1553          echo ""                                 >>  $job_to_send         
1554          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1555          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1556          echo "rm -f scpjob.${identifier}"                                 >>  $job_to_send         
1557          echo "%%END%%"                                                 >>  $job_to_send
1558          echo "sed -e 's/SGEPREFIX/#$/g' scpjob.${identifier}.tmp > scpjob.${identifier}" >>  $job_to_send         
1559          echo "rm -f scpjob.${identifier}.tmp"                             >>  $job_to_send         
1560
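       # ON lcxe6 THE TRANSFER RUNS AS A SINGLE-PE PBS JOB UNDER THE PROJECT ACCOUNT
       # GIVEN BY $project_account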
[1468]1561       elif [[ $remote_host = lcxe6 ]]
1562       then
1563          echo "cat > scpjob.${identifier}  << %%END%%"  >>  $job_to_send
1564          echo "#!/bin/ksh"                              >>  $job_to_send
1565          echo "#PBS -N job_protocol_transfer"           >>  $job_to_send
1566          echo "#PBS -l walltime=00:30:00"               >>  $job_to_send
1567          echo "#PBS -A $project_account"                >>  $job_to_send
1568          echo "#PBS -l mppwidth=1"                      >>  $job_to_send
1569          echo "#PBS -l mppnppn=1"                       >>  $job_to_send
1570          echo "#PBS -o \$HOME/job_queue/last_job_transfer_protocol"  >>  $job_to_send
1571          echo "#PBS -j oe"                              >>  $job_to_send
1572          echo " "                                       >>  $job_to_send
1573          echo "set -x"                                  >>  $job_to_send
1574          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1575          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1576          echo "%%END%%"                                 >>  $job_to_send
[2295]1577
1578       elif [[ $remote_host = lcgeohu ]]
1579       then
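          # ON lcgeohu (cirrus @ HUB) THE PROTOCOL TRANSFER RUNS AS A SEPARATE SLURM (sbatch) JOB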
1580          echo "cat > scpjob.$identifier << %%END%%"        >>  $job_to_send
1581          echo "#!/bin/bash"                             >>  $job_to_send
1582          echo "#SBATCH --job-name=job_protocol_transfer" >>  $job_to_send
1583          echo "#SBATCH -t 00:20:00"                     >>  $job_to_send
1584          echo "#SBATCH -N 1"                            >>  $job_to_send
1585          echo "#SBATCH -n 1"                            >>  $job_to_send
1586          echo "#SBATCH -o \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1587          echo "#SBATCH -e \$HOME/job_queue/last_job_transfer_protocol"      >>  $job_to_send
1588          echo " "                                       >>  $job_to_send
1589          echo "set -x"                                  >>  $job_to_send
1590          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
1591          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1592          echo "%%END%%"                                 >>  $job_to_send
1593
[1]1594       else
1595
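          # DEFAULT CASE: OLD NQS-STYLE DIRECTIVES (E.G. FOR THE T3E MACHINES)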
[1289]1596          echo "cat > scpjob.$identifier << %%END%%"  >>  $job_to_send
[1]1597          echo "# @\\\$-q $return_queue"           >>  $job_to_send
1598          echo "# @\\\$-l${qsubtime} 10"           >>  $job_to_send
1599          echo "# @\\\$-l${qsubmem} 10mb"          >>  $job_to_send
1600          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5  ||  $remote_host = t3es ]]
1601          then
1602             echo "# @\\\$-l mpp_p=0"              >>  $job_to_send
1603          fi
1604          echo '# @\$-lF 10mb'                     >>  $job_to_send
1605          echo '# @\$-o job_queue/last_job_transfer_protocol'    >>  $job_to_send
1606          echo '# @\$-eo'                          >>  $job_to_send
1607          echo " "                                 >>  $job_to_send
1608          if [[ $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1609          then
1610             echo "set +vx"                        >>  $job_to_send
1611             echo ". .profile"                     >>  $job_to_send
1612          fi
1613          echo "set -x"                            >>  $job_to_send
[1468]1614          echo "batch_scp  $PORTOPT  -d  -w 10  -u $local_user $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null"  >>  $job_to_send
[1]1615          echo "[[ \"\$for_subjob_to_do\" != \"\" ]]  &&  eval \$for_subjob_to_do"  >>  $job_to_send
1616          echo "%%END%%"                           >>  $job_to_send
[1620]1617
[1]1618       fi
1619
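       # SUBMIT THE PROTOCOL-TRANSFER JOB (scpjob) WITH THE SUBMIT COMMAND OF THE
       # RESPECTIVE QUEUING SYSTEM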
1620       if [[ $(echo $remote_host | cut -c1-3) = ibm ]]
1621       then
[1289]1622          echo "llsubmit  scpjob.$identifier"      >>  $job_to_send
[1620]1623       elif [[ $remote_host = lcbullhh ]]
1624       then
1625          echo "sbatch  scpjob.$identifier"               >>  $job_to_send
[1274]1626       elif [[ $remote_host = lccrayb || $remote_host = lccrayh ]]
[1255]1627       then
[2134]1628          echo "msub -E -q $return_queue  scpjob.$identifier"               >>  $job_to_send
[1]1629       elif [[ $remote_host = t3eb  ||  $remote_host = t3eh  ||  $remote_host = t3ej2  ||  $remote_host = t3ej5 ]]
1630       then
[1289]1631          echo "qsub -J n  scpjob.$identifier"     >>  $job_to_send
[1]1632       elif [[ $remote_host = t3es ]]
1633       then
[1289]1634          echo "qsub -J n  -s /bin/ksh  scpjob.$identifier"     >>  $job_to_send
[1043]1635       elif [[ $remote_host = lckiaps ]]
1636       then
[1289]1637          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1638          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"$submcom ${job_catalog}/scpjob.$identifier\" "  >>  $job_to_send
[1289]1639          echo "rm  ${job_catalog}/scpjob.$identifier"          >>  $job_to_send
[1099]1640       elif [[ $remote_host = lckyu* ]]
[1090]1641       then
[1468]1642          echo "scp $PORTOPT scpjob.$identifier  ${remote_username}@${remote_address}:job_queue"           >>  $job_to_send
1643          echo "ssh $SSH_PORTOPT ${remote_username}@${remote_address}  \"cd job_queue; $submcom scpjob.$identifier; rm scpjob.$identifier\" "  >>  $job_to_send
[2257]1644       elif [[ $remote_host = lcocean ]]
[1099]1645       then
[1289]1646          echo "mv  scpjob.$identifier  $job_catalog"           >>  $job_to_send
[1468]1647          echo "/usr/bin/ssh ${remote_username}@${remote_address}  \"$init_cmds $module_calls cd $job_catalog; $submcom scpjob.$identifier\" "  >>  $job_to_send
[1]1648       else
[1289]1649          echo "$submcom  scpjob.$identifier"      >>  $job_to_send
[1]1650       fi
[1043]1651       if [[ $remote_host != ibmku  &&  $remote_host != lckiaps ]]
[622]1652       then
[1289]1653          echo "rm  scpjob.$identifier"            >>  $job_to_send
[622]1654       fi
[1]1655    else
[1468]1656#       echo "ftpcopy  -d  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile"  >>  $job_to_send
[1289]1657       # ??? does this still work at all ???
[1468]1658       echo "nohup  ftpcopy  -d  -w 15  $local_address  ${job_catalog}/$remote_dayfile  \"$job_catalog\"  $local_dayfile  >  /dev/null  &"  >>  $job_to_send
[1]1659    fi
1660    echo "set -x"                               >>  $job_to_send
1661    echo "     ' exit"                          >>  $job_to_send
1662    echo "set -x"                               >>  $job_to_send
1663 fi
1664
1665
[1289]1666    # APPEND THE JOB-FILE (CREATED BY mrun) TO THE JOB-DIRECTIVES GENERATED ABOVE
[1]1667 cat  $file_to_send  >>  $job_to_send
[69]1668
[1]1669 if [[ $remote_host = ibm ]]
1670 then
1671    echo " "         >>  $job_to_send
1672    echo "exit"      >>  $job_to_send
1673 fi
[635]1674
[1289]1675    # REMOVE JOB-FILE
[2257]1676 if [[ $remote_host = lctit  ||  $remote_host = ibmku ]]
[69]1677 then
1678    echo " "                               >>  $job_to_send
1679    echo "rm ~/job_queue/$job_on_remhost"  >>  $job_to_send
1680 fi
[1]1681
1682
[1289]1683    # TRANSFER JOB TO THE TARGET HOST (JOB-DIRECTORY)
[1]1684 if [[ $no_submit = false ]]
1685 then
1686    if [[ $remote_host != $local_host ]]
1687    then
1688       [[ $verify = true ]]  &&  printf "\n >>> transferring job to \"$remote_host\"..."
[1289]1689       if [[ $remote_host = ibms ]]    # ssh on ibms cannot handle "~/"
[1]1690       then
[82]1691          job_catalog_save=$job_catalog
1692          job_catalog=job_queue
[2295]1693       elif [[ $remote_host = lcgeohu ]]
1694       then
1695          job_catalog_save=$job_catalog
1696          job_catalog=/home/${remote_user}/job_queue
[82]1697       fi
[2266]1698       scp  $ssh_key  $PORTOPT  $job_to_send  ${remote_user}@${remote_address}:${job_catalog}/$job_on_remhost
[82]1699       if [[ $? = 1 ]]
1700       then
1701          locat=scp; exit
1702       fi
[2295]1703       if [[ $remote_host = ibms  ||  $remote_host = lcgeohu ]]
[82]1704       then
1705          job_catalog=$job_catalog_save
1706       fi
[1]1707       [[ $verify = true ]]  &&  printf "\n >>> finished\n"
1708    else
1709       eval  job_catalog=$job_catalog
1710       cp  $job_to_send  ${job_catalog}/$job_on_remhost
1711    fi
1712
1713
1714
[1289]1715       # SUBMIT THE JOB TO THE QUEUING SYSTEM (NQS, LOADLEVELER, PBS, SLURM, ETC.)
[1]1716    if [[ $remote_host != $local_host ]]
1717    then
1718       [[ $verify = true ]]  &&  printf "\n >>> submitting job using \"$submcom\"...\n"
[635]1719
[1289]1720       if [[ $remote_host = ibmku ]]
[1]1721       then
[1468]1722          ssh  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
[2295]1723       elif [[ $remote_host = lcgeohu ]]
1724       then
1725          ssh  $ssh_key  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost"
[1]1726       else
[1944]1727          ssh  $ssh_key  $SSH_PORTOPT $remote_address  -l $remote_user  "cd $job_catalog; $submcom $job_on_remhost; rm $job_on_remhost"
[82]1728       fi
[1]1729
1730       [[ $verify = true ]]  &&  printf " >>> o.k.\n"
1731    else
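       # JOB RUNS ON THE LOCAL HOST: SUBMIT DIRECTLY WITH THE LOCAL SUBMIT COMMAND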
1732       cd  $job_catalog
[2295]1733       if [[ $(echo $local_host | cut -c1-3) = ibm  ||  $(echo $local_host | cut -c1-6) = lccray || $local_host = lceddy || $local_host = lcgeohu ]]
[1]1734       then
1735          eval  $submcom  $job_on_remhost
[2184]1736       elif [[  $local_host = lctit  ||  $local_host = lcxe6  ||  $local_host = lck  || $local_host = lckordi ||  $local_host = lcyon || $local_host = lcsb  ||  $local_host = lckyu* ]]
[108]1737       then
[635]1738          chmod  u+x  $job_on_remhost
[108]1739          eval  $submcom  $job_on_remhost
[1620]1740       elif [[ $local_host = lcbullhh ]]
1741       then
1742          if [[ $queue = default ]]
1743          then
1744             eval  $submcom  $job_on_remhost
1745          fi
[2365]1746       elif [[ $local_host = lclrz || $local_host = lckea* ]]
1747       then
1748          eval  $submcom  $job_on_remhost
1749       elif [[ $local_host = lcbwuni ]]
1750       then
1751          msub  -q $queue $job_on_remhost
[1]1752       else
1753          qsub  $job_on_remhost
1754       fi
[622]1755
[2257]1756          # JOBFILE MUST NOT BE DELETED ON lctit/ibmku/lcgeohu. THIS WILL BE DONE
[1289]1757          # AT THE END OF THE JOB
[2295]1758       if [[ $local_host != lctit  &&  $local_host != ibmku  &&  $local_host != lcgeohu ]]
[622]1759       then
1760          rm  $job_on_remhost
1761       fi
[1]1762       cd  -  > /dev/null
1763    fi
1764 fi
1765
[1289]1766    # FINAL ACTIONS
[1]1767 if [[ $no_submit = false ]]
1768 then
[1099]1769    rm  -f $job_to_send
[1]1770 fi
[1266]1771 [[ $verify = true ]]  &&  printf "\n\n *** SUBJOB finished \n\n"