#!/bin/ksh
# mrun - script for running PALM jobs
#--------------------------------------------------------------------------------#
# This file is part of PALM.
#
# PALM is free software: you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PALM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 1997-2012 Leibniz University Hannover
#--------------------------------------------------------------------------------#
#
# Current revisions:
# ------------------
#
#
# Former revisions:
# -----------------
# $Id: mrun 1185 2013-06-17 14:07:59Z heinze $
# use of cluster/express queue enabled (ibmh)
#
# 1124 2013-04-09 15:46:52Z raasch
# variable "memory" is exported via typeset option -x, because otherwise an unknown
# side effect may lead to data loss while getopts is reading the script-option arguments
#
# 1122 2013-04-09 08:37:16Z heinze
# Bugfix: change type of variable last_char
#
# 1119 2013-04-05 15:11:19Z raasch
# Bugfix for setting -T option for subjob
#
# 1108 2013-03-05 07:03:32Z raasch
# bugfix for coupled runs on lckyut/lckyuh
#
# 1106 2013-03-04 05:31:38Z raasch
# --stdin argument for mpiexec on lckyuh
# -y and -Y settings output to header
#
# 1103 2013-02-20 02:15:53Z raasch
# default script runs again under ksh, because of unsolved problems with read
# from stdin: when bash script is called from a ksh, message "read error: 0:
# Resource temporarily unavailable" appears and script does not stop,
# further bash compatibility adjustments,
# shebang line replaced by /bin/bash when running jobs on lckyuh; no restarts
# on lckyuh, but mrun does not terminate and issues a warning instead
#
# 1101 2013-02-17 10:20:21Z raasch
# script now running under bash instead of ksh, which required small adjustments
# (output formatting with printf instead "typeset -L/-R", print replaced by echo,
# read from stdin),
# cross compilername on lckyuh compute nodes replaced by real compiler name
#
# 1099 2013-02-10 01:47:43Z raasch
# adjustments for Kyushu-University computing center (lckyuh - hayaka)
# and for Forwind cluster (lcflow)
# small further adjustments for lckyut
#
# 1094 2013-02-03 01:52:12Z raasch
# explicit ssh/scp port can be set in config file with environment variable
# scp_port. This port is handled to all ssh/scp/batch_scp calls.
# decalpha parts (yonsei) removed
#
# 2013-02-02 07:06:13Z raasch
# adjustments for Kyushu-University computing center (lckyut - tatara)
#
# 1083 2013-01-04 10:22:09Z maronga
# bugfix in parameter file check (read %cpp_options was missing)
#
# 1069 2012-11-28 16:18:43Z maronga
# bugfix: coupling mode was always set to mpi2, typos removed
#
# 1058 2012-11-21 07:00:35Z raasch
# Intel inspector (inspxe) is given the number of PEs instead of the number of
# nodes
#
# 1046 2012-11-09 14:38:45Z maronga
# code put under GPL (PALM 3.9)
#
# 21/03/94 - Siggi - first version finished
# 03/03/94 - Siggi - script development started
#
#--------------------------------------------------------------------------------#
# mrun - script for running PALM jobs
#--------------------------------------------------------------------------------#
# VARIABLE DECLARATIONS + DEFAULT VALUES
set +o allexport # PREVENTED AS A PRECAUTION, SINCE OTHERWISE STAGEOUT
# MIGHT NOT WORK (TOO MANY ARGUMENTS PROBLEM)
set +o noclobber # EXISTING FILES MAY BE OVERWRITTEN
AddFilenames=""
additional_conditions=""
add_source_path=""
afname=""
archive_save=true
archive_system=none
check_namelist_files=true
combine_plot_fields=true
compiler_name=""
cond1=""
cond2=""
config_file=.mrun.config
coupled_dist=""
coupled_mode="mpi1"
cpp_opts=""
cpp_options=""
cpumax=0
cpurest=0
delete_temporary_catalog=true
do_batch=false
do_compile=true
do_remote=false
do_stagein=true
do_stageout=true
do_trace=false
email_notification="none"
exclude=""
executable=""
execution_error=false
fimm=false
fname=test
fromhost=""
global_revision=""
group_number=none
host=""
host_file=""
hp=""
ignore_archive_error=false
input_list=""
interpreted_config_file=""
job_catalog="~/job_queue"
job_on_file=""
keep_data_from_previous_run=false
link_local_input=false
link_local_output=false
localhost_realname=$(hostname)
local_compile=false
local_dvrserver_running=.FALSE.
locat=normal
mainprog=""
makefile=""
max_par_io_str=""
mc=$0
while [[ $(echo $mc | grep -c "/") != 0 ]]
do
mc=`echo $mc | cut -f2- -d"/"`
done
module_calls=""
mrun_script_name=$mc
netcdf_inc=""
netcdf_lib=""
netcdf_support=false
node_usage=default
numprocs=""
numprocs_atmos=0
numprocs_ocean=0
OOPT=""
openmp=false
output_list=""
package_list=""
punkte="..........................................................."
queue=none
read_from_config=""
restart_run=false
if [[ `hostname` = rte10 ]]
then
return_addres=133.5.185.60
echo "+++ WARNING: fixed return_addres = $return_addres is used !!!!!"
elif [[ `hostname` = climate0 ]]
then
return_addres=165.132.26.68
echo "+++ WARNING: fixed return_addres = $return_addres is used !!!!!"
elif [[ `hostname` = urban00 ]]
then
return_addres=147.46.30.151
echo "+++ WARNING: fixed return_addres = $return_addres is used !!!!!"
else
return_addres=$(nslookup `hostname` 2>&1 | grep "Address:" | tail -1 | awk '{print $2}')
fi
return_password=""
return_username=$LOGNAME
remotecall=false
remote_username=""
run_coupled_model=false
run_mode=""
scirocco=false
store_on_archive_system=false
striche=" ----------------------------------------------------------------------------"
silent=false
source_list=""
source_path=SOURCE
tasks_per_node=""
threads_per_task=1
tmpcreate=false
tmp_data_catalog=""
transfer_problems=false
usern=$LOGNAME
use_openmp=false
version="MRUN 2.0 Rev$Rev: 1185 $"
working_directory=`pwd`
TOPT=""
XOPT=""
zeit=$( date | cut -c 12-19 )
typeset -i iec=0 iic=0 iin=0 ioc=0 iout=0 stagein_anz=0 stageout_anz=0
typeset -x -i memory=0 # has to be exported here, otherwise an unknown side
# effect may cause data loss when getopts is reading the
# script-option arguments
typeset -i cputime i ii iia iii iio icycle inode ival jobges jobsek last_char_int maxcycle minuten nodes pes sekunden tp1
# EXPORT INDIVIDUAL VARIABLES FOR THE MAIN PROGRAM
export cpurest fname host localhost return_addres return_username remotecall tasks_per_node
# THE FOLLOWING VARIABLES MUST BE EXPORTED FOR THE INTERPRETATION OF THE
# CONFIGURATION FILE
export afname config_file cpp_opts cpumax do_batch do_trace fname fromhost
export group_number input_list numprocs output_list queue run_mode
# ERROR HANDLING
# ON EXIT:
trap 'rm -rf $working_directory/tmp_mrun
if [[ $locat != localhost ]]
then
# if [[ ! -f ${mrun_path}/statistik/mrun_statistik ]]
# then
# cat > ${mrun_path}/statistik/mrun_statistik << %STATEND%
#MRUN-calls on $localhost
#
#date and time user localhost remotehost termination mrun-command
#--------------------------------------------------------------------------------------------------------------------
#%STATEND%
# chmod 666 ${mrun_path}/statistik/mrun_statistik
# fi
#
# # ENTRY INTO THE STATISTICS FILE
# string1=`date` #-L35
# string2=$usern #-L12
# string3=$localhost_realname #-L12
# string4=$host #-L12
# string5=$locat #-L12
# if [[ "$job_on_file" = "" && $locat != control_c && $locat != user_abort ]]
# then
# if [[ $do_batch = true ]]
# then
# printf "$string1$string2$string3$string4$string5$mrun_com \n" >> ${mrun_path}/statistik/mrun_statistik
# else
# printf "$string1$string2$string3$string4$string5$mc \n" >> ${mrun_path}/statistik/mrun_statistik
# fi
# fi
echo " " > /dev/null
fi
if [[ $locat != normal && $locat != control_c && $locat != local_compile ]]
then
# EXECUTE ERROR COMMANDS, IF ANY
(( i = 0 ))
while (( i < iec ))
do
(( i = i + 1 ))
printf "\n *** Execution of ERROR-command:\n"
printf " >>> ${err_command[$i]}\n"
eval ${err_command[$i]}
done
if [[ -n $interpreted_config_file ]]
then
rm -rf $interpreted_config_file
fi
if [[ -n .mrun_environment ]]
then
rm -rf .mrun_environment
fi
if [[ $tmpcreate = true ]]
then
printf "\n *** Contents of \"$TEMPDIR\":\n"
ls -al; cd
[[ $delete_temporary_catalog = true ]] && rm -rf $TEMPDIR
fi
if [[ "$dvrserver_id" != "" ]]
then
echo "+++ killing dvrserver_id=$dvrserver_id"
kill $dvrserver_id
fi
if [[ -f ~/job_queue/JOBINFO.$QSUB_REQID ]]
then
rm -rf ~/job_queue/JOBINFO.$QSUB_REQID
fi
printf "\n\n+++ MRUN killed \n\n"
elif [[ $locat != control_c ]]
then
printf "\n\n --> all actions finished\n\n"
printf " Bye, bye $usern !!\n\n"
fi' exit
# ON TERMINAL BREAK (^C):
trap 'rm -rf $working_directory/tmp_mrun
rm -rf $working_directory/tmp_check_namelist_files
[[ $tmpcreate = true ]] && (cd; rm -rf $TEMPDIR)
if [[ -f ~/job_queue/JOBINFO.$QSUB_REQID ]]
then
rm -rf ~/job_queue/JOBINFO.$QSUB_REQID
fi
if [[ "$dvrserver_id" != "" ]]
then
echo "+++ killing dvrserver_id=$dvrserver_id"
kill $dvrserver_id
fi
printf "\n+++ MRUN killed by \"^C\" \n\n"
locat=control_c
exit
' 2
# CHECK IF THE PATH FOR THE PALM BINARIES (SCRIPTS+UTILITY-PROGRAMS) HAS
# BEEN SET
if [[ "$PALM_BIN" = "" ]]
then
printf "\n +++ environment variable PALM_BIN has not been set"
printf "\n please set it to the directory where the PALM scripts are located"
locat=palm_bin; exit
fi
export PATH=$PALM_BIN:$PATH
# READ THE SHELL SCRIPT OPTIONS AND REBUILD THE MRUN COMMAND, IN CASE IT IS
# NEEDED FOR SUBSEQUENT JOBS
while getopts :a:AbBc:Cd:D:Fg:G:h:H:i:IkK:m:M:n:o:O:p:P:q:r:R:s:St:T:u:U:vw:xX:yY:zZ option
do
case $option in
(a) afname=$OPTARG;;
(A) store_on_archive_system=true; mc="$mc -A";;
(b) do_batch=true; mc="$mc -b";;
(B) delete_temporary_catalog=false; mc="$mc -B";;
(c) config_file=$OPTARG; mc="$mc -c$OPTARG";;
(C) restart_run=true; mc="$mc -C";;
(d) fname=$OPTARG; mc="$mc -d$OPTARG";;
(D) cpp_opts="$cpp_opts $OPTARG"; mc="$mc -D'$OPTARG'";;
(F) job_on_file="-D"; mc="$mc -F";;
(g) group_number=$OPTARG; mc="$mc -g$OPTARG";;
(G) global_revision=$OPTARG; mc="$mc -G'$OPTARG'";;
(h) host=$OPTARG; mc="$mc -h$OPTARG";;
(H) fromhost=$OPTARG; mc="$mc -H$OPTARG";;
(i) input_list=$OPTARG; mc="$mc -i'$OPTARG'";;
(I) ignore_archive_error=true; mc="$mc -I";;
(k) keep_data_from_previous_run=true; mc="$mc -k";;
(K) additional_conditions="$OPTARG"; mc="$mc -K'$OPTARG'";;
(m) memory=$OPTARG; mc="$mc -m$OPTARG";;
(M) makefile=$OPTARG; mc="$mc -M$OPTARG";;
(n) node_usage=$OPTARG; mc="$mc -n$OPTARG";;
(o) output_list=$OPTARG; mc="$mc -o'$OPTARG'";;
(O) use_openmp=true; threads_per_task=$OPTARG; mc="$mc -O$OPTARG";;
(p) package_list=$OPTARG; mc="$mc -p'$OPTARG'";;
(P) return_password=$OPTARG; mc="$mc -P$OPTARG";;
(q) queue=$OPTARG; mc="$mc -q$OPTARG";;
(r) run_mode=$OPTARG; mc="$mc -r'$OPTARG'";;
(R) remotecall=true;return_addres=$OPTARG; mc="$mc -R$OPTARG";;
(s) source_list=$OPTARG; mc="$mc -s'$OPTARG'";;
(S) read_from_config=false; mc="$mc -S";;
(t) cpumax=$OPTARG; mc="$mc -t$OPTARG";;
(T) mrun_tasks_per_node=$OPTARG; mc="$mc -T$OPTARG";;
(u) remote_username=$OPTARG; mc="$mc -u$OPTARG";;
(U) return_username=$OPTARG; mc="$mc -U$OPTARG";;
(v) silent=true; mc="$mc -v";;
(w) max_par_io_str=$OPTARG; mc="$mc -w$OPTARG";;
(x) do_trace=true;set -x; mc="$mc -x";;
(X) numprocs=$OPTARG; mc="$mc -X$OPTARG";;
(y) ocean_file_appendix=true; mc="$mc -y";;
(Y) run_coupled_model=true; coupled_dist=$OPTARG; mc="$mc -Y'$OPTARG'";;
(z) check_namelist_files=false; mc="$mc -z";;
(Z) combine_plot_fields=false; mc="$mc -Z";;
(\?) printf "\n +++ unknown option $OPTARG \n"
printf "\n --> type \"$0 ?\" for available options \n"
locat=parameter;exit;;
esac
done
# READ POSITIONAL PARAMETER, IF GIVEN
# CURRENTLY THE ONLY PARAMETER IS ? (= SHORT INFO)
(( to_shift = $OPTIND - 1 ))
shift $to_shift
# A SHORT DESCRIPTION OF THE CALL SYNTAX IS PRINTED HERE
if [[ "$1" = "?" ]]
then
(printf "\n *** mrun can be called as follows:\n"
printf "\n $mrun_script_name -b -c.. -d.. -D.. -f.. -F -h.. -i.. -I -K.. -m.. -o.. -p.. -r.. -R -s.. -t.. -T.. -v -x -X.. -y -Y.. -z -Z \n"
printf "\n Description of available options:\n"
printf "\n Option Description Default-Value"
printf "\n -a base name of input files equiv. -d"
printf "\n -A archiving when using file-attribute fl"
printf "\n -b batch-job on local machine ---"
printf "\n -B do not delete temporary directory at end ---"
printf "\n -c configuration file .mrun.config"
printf "\n -d base name of files attached to program test"
printf "\n -D preprocessor(cpp)-directives \"\" "
printf "\n -F create remote job file only ---"
printf "\n -h execution host $localhost_realname"
printf "\n -i INPUT control list \"\" "
printf "\n -I archiving errors of previous batch-jobs"
printf "\n will be ignored"
printf "\n -k keep data from previous run"
printf "\n -K additional conditions for controling"
printf "\n usage of conditional code and"
printf "\n env-variables in configuration file \"\" "
printf "\n -m memory demand in MB (batch-jobs) 0 MB"
printf "\n -M Makefile name Makefile"
printf "\n -n node usage (shared/not_shared) depending on -h"
printf "\n -o OUTPUT control list \"\" "
printf "\n -O use OpenMP ---"
printf "\n -p software package list \"\" "
printf "\n -q queue \"$queue\" "
printf "\n -r run control list (combines -i -o) \"\" "
printf "\n -s filenames of routines to be compiled \"\" "
printf "\n must end with .f, .f90, .F, or .c !"
printf "\n use \"..\" for more than one file and wildcards"
printf "\n -s LM compiles all locally modified files"
printf "\n -S config file interpreted by shellscript ---"
printf "\n -t allowed cpu-time in seconds (batch) 0"
printf "\n -T tasks per node depending on -h"
printf "\n -u username on remote machine \"\" "
printf "\n -v no prompt for confirmation ---"
printf "\n -w maximum parallel io streams as given by -X"
printf "\n -x tracing of mrun for debug purposes ---"
printf "\n -X # of processors (on parallel machines) 1"
printf "\n -y add appendix \"_O\" to all local output"
printf "\n files (ocean precursor runs followed by"
printf "\n coupled atmosphere-ocean runs) ---"
printf "\n -Y run coupled model, \"#1 #2\" with"
printf "\n #1 atmosphere and #2 ocean processors \"#/2 #/2\" depending on -X"
printf "\n -z disable a priori parameter file check ---"
printf "\n -Z skip combine_plot_fields at the end of "
printf "\n the simulation ---"
printf "\n "
printf "\n Possible values of positional parameter :"
printf "\n \"?\" - this outline \n\n") | more
exit
elif [[ "$1" != "" ]]
then
printf "\n +++ positional parameter $1 unknown \n"
locat=parameter; exit
fi
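# A TYPICAL INTERACTIVE CALL LOOKS LIKE THE FOLLOWING LINE (ILLUSTRATIVE
# EXAMPLE ONLY; RUN IDENTIFIER, HOST IDENTIFIER AND PE NUMBERS ARE NOT TAKEN
# FROM THIS SCRIPT):
#    mrun -d example_cbl -h lcmuk -K parallel -X 8 -T 8 -r "d3#"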
# SHORT STARTUP MESSAGE
printf "\n*** $version "
printf "\n will be executed. Please wait ..."
# CHECK IF THE CONFIGURATION FILE EXISTS
if [[ ! -f $config_file ]]
then
printf "\n\n +++ configuration file: "
printf "\n $config_file"
printf "\n does not exist"
locat=connect; exit
fi
# DETERMINE THE HOST IDENTIFIER (localhost) FROM THE CONFIGURATION FILE
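# A MATCHING LINE IN THE CONFIGURATION FILE HAS THE FORM (ILLUSTRATIVE EXAMPLE;
# HOSTNAME AND IDENTIFIER DEPEND ON THE LOCAL INSTALLATION):
#    %host_identifier   <hostname as printed by "hostname">   <identifier, e.g. lcmuk>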
line=""
grep "%host_identifier" $config_file > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
HOSTNAME=`echo $line | cut -d" " -s -f2`
host_identifier=`echo $line | cut -d" " -s -f3`
if [[ $localhost_realname = $HOSTNAME ]]
then
localhost=$host_identifier
break
fi
fi
done < tmp_mrun
if [[ "$localhost" = "" ]]
then
printf "\n\n +++ no host identifier found in configuration file \"$config_file\""
printf "\n for local host \"$localhost_realname\"."
printf "\n Please add line"
printf "\n \"\%host_identifier $localhost_realname \""
printf "\n to the configuration file."
locat=localhost; exit
fi
# SET HOST SPECIFIC VARIABLES
case $localhost_realname in
(r1*|r2*|h01*|b01*) archive_system=tivoli;;
(cs*) archive_system=ut;;
(fimm.bccs.uib.no) fimm=true;;
(gate|n-sx) PATH=$PALM_BIN:$PATH:/usr/bin/nqsII;;
(scirocco) scirocco=true;;
esac
# SET THE BASE NAME OF THE INPUT FILES EQUAL TO THE GENERAL BASE NAME,
# UNLESS THE USER HAS SPECIFIED OTHERWISE
[[ "$afname" = "" ]] && afname=$fname
# ADD THE RUN MODE TO THE I/O LISTS, IF GIVEN
if [[ "$run_mode" != "" ]]
then
input_list="$input_list $run_mode"
output_list="$output_list $run_mode"
fi
# STORE THE NAME OF THE HOST FROM WHICH THE JOB IS STARTED,
# BUT ONLY IF NO VALUE HAS ALREADY BEEN ASSIGNED VIA OPTION -H
# (MRUN ALWAYS DOES THIS WHEN IT STARTS BATCH JOBS ITSELF)
if [[ "$fromhost" = "" ]]
then
fromhost=$localhost
fi
# CHECK IF THE RUN SHALL BE CARRIED OUT ON A REMOTE MACHINE.
# THE VALUE OF do_remote IS NEEDED FOR THE FILE CONNECTIONS.
# IF THE RUN IS CARRIED OUT ON A REMOTE MACHINE, IT IS ALSO CLEAR
# THAT A BATCH JOB HAS TO BE STARTED
if [[ -n $host && "$host" != $localhost ]]
then
do_batch=true
do_remote=true
case $host in
(ibm|ibmh|ibmkisti|ibmku|ibms|ibmy|nech|necriam|lcflow|lckyoto|lcsgib|lcsgih|unics|lcxe6|lcxt5m|lck|lckiaps|lckordi|lckyuh|lckyut|lcsb) true;;
(*) printf "\n"
printf "\n +++ sorry: execution of batch jobs on remote host \"$host\""
printf "\n is not available"
locat=nqs; (( iec = 0 )); exit;;
esac
else
host=$localhost
fi
# EVALUATE ADDITIONAL CONDITIONS (OPTION -K)
if [[ -n $additional_conditions ]]
then
# echo $additional_conditions | cut -d" " -f1-3 | read cond1 cond2 dummy
cond1=`echo $additional_conditions | cut -d" " -f1`
cond2=`echo $additional_conditions | cut -d" " -s -f2`
dummy=`echo $additional_conditions | cut -d" " -s -f3`
if [[ -n $dummy ]]
then
printf "\n +++ more than 2 additional conditions given for Option \"-K\""
locat=options; exit
fi
block=_$cond1
[[ -n $cond2 ]] && block=${block}_$cond2
fi
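# EXAMPLE: "-K parallel" YIELDS block="_parallel"; "-K 'parallel debug'" WOULD
# YIELD block="_parallel_debug" ("debug" IS AN ILLUSTRATIVE CONDITION NAME ONLY;
# THE CONDITIONS MUST MATCH THE BLOCKS USED IN THE CONFIGURATION FILE)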
# EVALUATE THE COUPLING PROPERTIES (OPTION -Y) AND DETERMINE coupled_mode
if [[ $run_coupled_model = true ]]
then
if [[ -n $coupled_dist ]]
then
numprocs_atmos=`echo $coupled_dist | cut -d" " -s -f1`
numprocs_ocean=`echo $coupled_dist | cut -d" " -s -f2`
if (( $numprocs_ocean + $numprocs_atmos != $numprocs ))
then
printf "\n +++ number of processors does not fit to specification by \"-Y\"."
printf "\n PEs (total) : $numprocs"
printf "\n PEs (atmosphere): $numprocs_atmos"
printf "\n PEs (ocean) : $numprocs_ocean"
locat=coupling; exit
fi
else
(( numprocs_ocean = $numprocs / 2 ))
(( numprocs_atmos = $numprocs / 2 ))
fi
coupled_dist=`echo "$numprocs_atmos $numprocs_ocean"`
# GET coupled_mode FROM THE CONFIG FILE
line=""
grep "%cpp_options.*-D__mpi2.*$host" $config_file > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" && $(echo $line | cut -d" " -s -f4) = $cond1 && $(echo $line | cut -d" " -s -f4) = $cond2 ]]
then
coupled_mode="mpi2"
fi
done < tmp_mrun
fi
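# EXAMPLE: "mrun ... -X 16 -Y '8 8'" ASSIGNS 8 PES TO THE ATMOSPHERE AND 8 PES
# TO THE OCEAN; WITHOUT AN EXPLICIT DISTRIBUTION THE PES ARE SPLIT EVENLY
# (ILLUSTRATIVE NUMBERS ONLY)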
# CHECK IF DATA ARCHIVING FAILED IN A PREVIOUS RUN (JOB CHAIN)
if [[ -f ~/job_queue/ARCHIVE_ERROR_$fname ]]
then
if [[ $ignore_archive_error = false ]]
then
printf "\n +++ data archiving of previous run failed"
printf "\n see directory \~/job_queue on remote machine"
locat=archive; exit
else
printf "\n +++ warning: data archiving in a previous run failed"
printf "\n MRUN continues, trying to get backup copy"
fi
fi
# SAVE THE VALUES OF THE MRUN OPTIONS SO THAT THEY CAN LATER OVERRIDE
# THE VALUES GIVEN IN THE CONFIGURATION FILE
mrun_memory=$memory
mrun_group_number=$group_number
mrun_cpumax=$cpumax
mrun_numprocs=$numprocs
# READ AND INTERPRET THE CONFIGURATION FILE FROM WITHIN THE SHELL SCRIPT
# (TEMPORARILY MANDATORY ON LINUX MACHINES)
if [[ "$read_from_config" = false ]]
then
[[ $silent = false ]] && printf "\n Reading the configuration file... "
while read zeile
do
[[ $silent = false ]] && printf "."
# FIRST REPLACE ANY ENVIRONMENT VARIABLES BY THEIR VALUES
eval zeile=\"$zeile\"
# INTERPRET THE LINE
if [[ "$(echo $zeile)" = "" ]]
then
# EMPTY LINE, NO ACTION
continue
elif [[ "$(echo $zeile | cut -c1)" = "#" ]]
then
# LINE IS A COMMENT LINE
true
elif [[ "$(echo $zeile | cut -c1)" = "%" ]]
then
# LINE DEFINES AN ENVIRONMENT VARIABLE
zeile=$(echo $zeile | cut -c2-)
# echo $zeile | cut -d" " -f1-5 | read var value for_host for_cond1 for_cond2
var=`echo $zeile | cut -d" " -f1`
value=`echo $zeile | cut -d" " -s -f2`
for_host=`echo $zeile | cut -d" " -s -f3`
for_cond1=`echo $zeile | cut -d" " -s -f4`
for_cond2=`echo $zeile | cut -d" " -s -f5`
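# A TYPICAL ENVIRONMENT VARIABLE LINE IN THE CONFIGURATION FILE LOOKS LIKE
# (ILLUSTRATIVE EXAMPLE; VARIABLE NAME, VALUE AND HOST IDENTIFIER DEPEND ON
# THE LOCAL INSTALLATION):
#    %fopts   -O3:-cpp:-r8   lcmuk parallel
# I.E. THE FIELDS ARE: NAME, VALUE (":"-SEPARATED LIST), HOST, COND1, COND2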
if [[ "$for_host" = "" || ( "$for_host" = $host && "$for_cond1" = "$cond1" && "$for_cond2" = "$cond2" ) || $(echo "$input_list$output_list"|grep -c "$for_host") != 0 ]]
then
# FOR COMPILER, CPP, OR LINKER OPTIONS REPLACE ":" BY " ".
# "::" IS REPLACED BY ":".
value=`echo $value | sed 's/::/%DUM%/g' | sed 's/:/ /g' | sed 's/%DUM%/:/g'`
# THE ENVIRONMENT VARIABLE IS ASSIGNED THE VALUE FROM THE
# CONFIGURATION FILE ONLY IF IT HAS NOT ALREADY RECEIVED A VALUE
# VIA THE CORRESPONDING SCRIPT OPTION. SUCH VARIABLES WOULD THEN
# HAVE THE VALUE "" OR, IN THE INTEGER CASE, THE VALUE 0.
# GENERAL RULE: SCRIPT OPTION OVERRIDES CONFIGURATION FILE
if [[ "$(eval echo \$$var)" = "" || "$(eval echo \$$var)" = "0" ]]
then
eval $var=\$value
# OPTIONALLY PRINT THE ENVIRONMENT VARIABLES TO THE SCREEN
if [[ $do_trace = true ]]
then
printf "\n*** ENVIRONMENT-VARIABLE $var = $value"
fi
fi
# IF THE ENVIRONMENT VARIABLE host IS DEFINED, EVALUATE IT IMMEDIATELY.
# THE VALUE OF do_remote IS NEEDED FOR THE FILE CONNECTIONS.
# IF THE RUN IS CARRIED OUT ON A REMOTE MACHINE, IT IS ALSO CLEAR
# THAT A BATCH JOB HAS TO BE STARTED
if [[ $var = host ]]
then
if [[ -n $host && "$host" != $localhost ]]
then
do_batch=true
do_remote=true
case $host in
(ibm|ibmh|ibmkisti|ibmku|ibms|ibmy|lcflow|lckyoto|lcsgib|lcsgih|nech|necriam|unics|lcxe6|lcxt5m|lck|lckiaps|lckordi|lckyuh|lckyut|lcsb) true;;
(*) printf "\n +++ sorry: execution of batch jobs on remote host \"$host\""
printf "\n is not available"
locat=nqs; exit;;
esac
else
host=$localhost
fi
fi
# USER-DEFINED ENVIRONMENT VARIABLES MUST ALWAYS BE EXPORTED,
# BECAUSE THEY MAY BE NEEDED BY PROGRAMS CALLED FURTHER BELOW
export $var
fi
elif [[ "$(echo $zeile | cut -c1-3)" = "EC:" ]]
then
# LINE DEFINES AN ERROR COMMAND
(( iec = iec + 1 ))
zeile=$(echo $zeile | cut -c4-)
err_command[$iec]="$zeile"
elif [[ "$(echo $zeile | cut -c1-3)" = "IC:" ]]
then
# LINE DEFINES AN INPUT COMMAND
(( iic = iic + 1 ))
zeile=$(echo $zeile | cut -c4-)
in_command[$iic]="$zeile"
elif [[ "$(echo $zeile | cut -c1-3)" = "OC:" ]]
then
# LINE DEFINES AN OUTPUT COMMAND
(( ioc = ioc + 1 ))
zeile=$(echo $zeile | cut -c4-)
out_command[$ioc]="$zeile"
else
# LINE DEFINES A FILE CONNECTION. READ THE FILE ATTRIBUTES
# s2a: in/out field
# s2b: loc field (optional)
# s2c: tr/ar field (optional)
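# A FILE CONNECTION LINE LOOKS LIKE, E.G. (ILLUSTRATIVE EXAMPLE; LOCAL NAME,
# ACTIVATION STRING AND PATH DEPEND ON THE LOCAL SETUP):
#    PARIN   in:job   d3#   ~/palm/current_version/JOBS/$fname/INPUT   _p3d
# I.E. THE FIELDS ARE: LOCAL NAME (s1), ATTRIBUTES (s2), ACTIVATION STRING(S)
# (s3), PATH (s4), SUFFIX (s5), EXTENSION (s6, OPTIONAL)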
# echo $zeile | cut -d" " -f1-2 | read s1 s2
s1=`echo $zeile | cut -d" " -f1`
s2=`echo $zeile | cut -d" " -s -f2`
s2a=$(echo $s2 | cut -d":" -f1)
if [[ $(echo $s2 | grep -c ":") = 0 ]]
then
s2b=""
s2c=""
else
# echo $s2 | cut -d":" -f2-3 | sed 's/:/ /g' | read s2b s2c
s2b=`echo $s2 | cut -d":" -f2 | sed 's/:/ /g'`
s2c=`echo $s2 | cut -d":" -s -f3 | sed 's/:/ /g'`
fi
# echo $zeile | cut -d" " -f3-6 | read s3 s4 s5 s6
s3=`echo $zeile | cut -d" " -f3`
s4=`echo $zeile | cut -d" " -s -f4`
s5=`echo $zeile | cut -d" " -s -f5`
s6=`echo $zeile | cut -d" " -s -f6`
# STORE THE FILE CONNECTION IF IT IS LISTED IN THE INPUT OR OUTPUT
# LIST. VARIABLE S3 MAY ALSO CONTAIN A LIST (FIELD SEPARATOR ":").
# THE FILE CONNECTION IS NOT STORED AND CHECKED IF THE PROGRAM RUNS
# ON A REMOTE MACHINE AND THE FILE ONLY HAS TO BE PRESENT LOCALLY
# (I.E. s2b = loc)
IFSALT="$IFS"; IFS="$IFS:"
if [[ "$s2a" = in && ! ( $do_remote = true && ( "$s2b" = loc || "$s2b" = locopt ) ) ]]
then
found=false
for actual in $input_list
do
for formal in $s3
do
[[ $actual = $formal || "$formal" = "-" ]] && found=true
done
done
if [[ $found = true ]]
then
(( iin = iin + 1 ))
localin[$iin]=$s1; transin[$iin]=$s2b; actionin[$iin]=$s2c;
typein[$iin]=$s3; pathin[$iin]=$s4; endin[$iin]=$s5;
extin[$iin]=$s6
fi
elif [[ "$s2a" = out && ! ( $do_remote = true && "$s2b" = loc ) ]]
then
found=false
for actual in $output_list
do
for formal in $s3
do
[[ $actual = $formal || "$formal" = "-" ]] && found=true
done
done
if [[ $found = true ]]
then
(( iout = iout + 1 ))
localout[$iout]=$s1; actionout[$iout]=$s2c; typeout[$iout]=$s3;
pathout[$iout]=$s4; endout[$iout]=$s5; extout[$iout]=$s6
fi
elif [[ "$s2a" != in && "$s2a" != out ]]
then
printf "\n +++ I/O-attribute in configuration file $config_file has the invalid"
printf "\n value \"$s2\". Only \"in\" and \"out\" are allowed!"
locat=connect; exit
fi
IFS="$IFSALT"
fi
done < $config_file
else
# INTERPRET THE CONFIGURATION FILE BY MEANS OF A FORTRAN 90 PROGRAM
[[ $silent = false ]] && printf "..."
export cond1 cond2 config_file do_remote do_trace input_list localhost output_list
export interpreted_config_file=.icf.$RANDOM
# PROVIDE THE ENVIRONMENT VARIABLES FOR INTERPRET_CONFIG VIA A
# NAMELIST FILE
cat > .mrun_environment << %%END%%
&mrun_environment cond1 = '$cond1', cond2 = '$cond2',
config_file = '$config_file', do_remote = '$do_remote',
do_trace = '$do_trace', host = '$host',
input_list = '$input_list', icf = '$interpreted_config_file',
localhost = '$localhost', output_list = '$output_list' /
%%END%%
if [[ $localhost_realname = "sx-fep" ]]
then
/home/COAR/NC/raasch/pub/interpret_config_necriam.x
else
if [[ "$host" != $localhost ]]
then
# REMOTE JOB FROM LOCAL HOST: JUST TAKE THE FIRST EXECUTABLE FOUND
interpret_config_executable=`ls -1 ${PALM_BIN}/interpret_config*.x 2>/dev/null`
if [[ $? != 0 ]]
then
printf "\n\n +++ no interpret_config found"
printf "\n run \"mbuild -u -h ...\" to generate utilities for this host"
locat=interpret_config; exit
fi
interpret_config_executable=`echo $interpret_config_executable | cut -d" " -f1`
$interpret_config_executable
else
# CHECK, IF THERE IS AN EXECUTABLE FOR THE BLOCK
if [[ ! -f ${PALM_BIN}/interpret_config${block}.x ]]
then
printf "\n\n +++ no interpret_config found for given block \"$cond1 $cond2\""
printf "\n run \"mbuild -u -h ...\" to generate utilities for this block"
locat=interpret_config; exit
else
interpret_config${block}.x
fi
fi
fi
rm .mrun_environment
# EXECUTE THE GENERATED SHELL COMMANDS IN THIS SHELL
chmod u+x $interpreted_config_file
export PATH=$PATH:.
. $interpreted_config_file
rm $interpreted_config_file
fi
# OPTION VALUES OVERRIDE THE CONFIGURATION FILE
[[ $mrun_memory != 0 ]] && memory=$mrun_memory
[[ "$mrun_group_number" != "none" ]] && group_number=$mrun_group_number
[[ $mrun_cpumax != 0 ]] && cpumax=$mrun_cpumax
[[ "$mrun_numprocs" != "" ]] && numprocs=$mrun_numprocs
[[ "$max_par_io_str" != "" ]] && maximum_parallel_io_streams=$max_par_io_str
[[ "$mrun_tasks_per_node" != "" ]] && tasks_per_node=$mrun_tasks_per_node
# DETERMINE THE SOURCE CODE DIRECTORY ON THE LOCAL MACHINE FROM THE
# CONFIGURATION FILE (OTHERWISE IT MIGHT BE THE DIRECTORY OF THE REMOTE
# HOST GIVEN WITH OPTION -h).
# FOR BATCH JOBS THE PROGRAM PARTS TO BE COMPILED ARE ALREADY COMPLETE
if [[ "$SOURCES_COMPLETE" = "" ]]
then
# FIRST CHECK IF A GLOBAL SOURCE PATH HAS BEEN DEFINED FOR ALL HOSTS
source_path=""
line=""
grep "%source_path" $config_file > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
if [[ "$(echo $line | cut -d" " -f3)" = "" ]]
then
global_source_path=`echo $line | cut -d" " -f2`
fi
fi
done < tmp_mrun
line=""
found=false
grep " $localhost" $config_file | grep "%source_path" > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
if [[ $found = true ]]
then
printf "\n\n +++ more than one source path found in configuration file"
printf "\n for local host \"$localhost\" "
locat=source_path; exit
fi
source_path=`echo $line | cut -d" " -f2`
found=true
fi
done < tmp_mrun
rm tmp_mrun
if [[ "$source_path" = "" ]]
then
if [[ "$global_source_path" != "" ]]
then
source_path=$global_source_path
else
printf "\n\n +++ no source path found in configuration file"
printf "\n for local host \"$localhost\" "
locat=source_path; exit
fi
fi
eval source_path=$source_path
if [[ ! -d $source_path ]]
then
printf "\n\n +++ source path \"$source_path\" on local host"
printf "\n \"$localhost\" does not exist"
locat=source_path; exit
fi
fi
# DETERMINE THE GLOBAL REVISION NUMBER (RESTART RUNS RECEIVE IT VIA
# OPTION -G)
if [[ "$global_revision" = "" && $host != "ibmkisti" ]]
then
global_revision=`svnversion $source_path 2>/dev/null`
global_revision="Rev: $global_revision"
fi
# CHECK AGAIN IF THE RUN SHALL BE CARRIED OUT ON A REMOTE MACHINE
# (THE HOST MAY HAVE BEEN SET DIFFERENTLY IN THE CONFIGURATION FILE).
# THE VALUE OF do_remote IS NEEDED FOR THE FILE CONNECTIONS.
# IF THE RUN IS CARRIED OUT ON A REMOTE MACHINE, IT IS ALSO CLEAR
# THAT A BATCH JOB HAS TO BE STARTED
if [[ -n $host && "$host" != $localhost ]]
then
do_batch=true
do_remote=true
case $host in
(ibm|ibmh|ibmkisti|ibmku|ibms|ibmy|lcflow|lckyoto|lcsgib|lcsgih|nech|necriam|unics|lcxe6|lcxt5m|lck|lckiaps|lckordi|lckyuh|lckyut|lcsb) true;;
(*) printf "\n"
printf "\n +++ sorry: execution of batch jobs on remote host \"$host\""
printf "\n is not available"
locat=nqs; (( iec = 0 )); exit;;
esac
else
host=$localhost
fi
# CHECK SOME PROCESSOR NUMBER SETTINGS FOR RUNS ON PARALLEL MACHINES
if [[ "$cond1" = parallel || "$cond2" = parallel ]]
then
# CHECK IF THE NUMBER OF PES TO BE USED HAS BEEN GIVEN
if [[ ! -n $numprocs ]]
then
printf "\n"
printf "\n +++ option \"-K parallel\" requires additional specification"
printf "\n of the number of processors to be used by"
printf "\n mrun-option \"-X\" or by environment-variable"
printf "\n \"numprocs\" in the configuration file"
locat=numprocs; (( iec = 0 )); exit
fi
# CHECK IF THE NUMBER OF TASKS PER NODE HAS BEEN GIVEN (OTHERWISE SET
# THE DEFAULT VALUE) AND IF IT IS AN INTEGRAL DIVISOR OF THE TOTAL
# NUMBER OF PROCESSORS
if [[ "$tasks_per_node" = "" && $host != lcflow && $host != lcxt5m ]]
then
printf "\n"
printf "\n +++ option \"-T\" (tasks per node) is missing"
printf "\n set -T option or define tasks_per_node in the config file"
locat=tasks_per_node; (( iec = 0 )); exit
fi
if [[ $host != lcflow && $host != lcxt5m ]]
then
(( ival = $tasks_per_node ))
(( pes = numprocs ))
# if [[ $(echo $package_list | grep -c dvrp_graphics+1PE) = 1 ]]
# then
# (( pes = pes - 1 ))
# fi
(( ii = pes / ival ))
if (( pes - ii * ival > 0 ))
then
printf "\n"
printf "\n +++ tasks per node (option \"-T\") must be an integral"
printf "\n divisor of the total number of processors (option \"-X\")"
printf "\n values of this mrun-call: \"-T $tasks_per_node\" \"-X $numprocs\""
locat=tasks_per_node; (( iec = 0 )); exit
fi
fi
# IBMY HAS ONLY ONE NODE
if [[ $host = ibmy ]]
then
if [[ "$tasks_per_node" != "" && "$tasks_per_node" != "$numprocs" ]]
then
printf "\n"
printf "\n +++ on ibmy, tasks per node (option \"-T\") must be equal to the"
printf "\n total number of processors (option \"-X\")"
printf "\n values of this mrun-call: \"-T $tasks_per_node\" \"-X $numprocs\""
locat=tasks_per_node; (( iec = 0 )); exit
fi
fi
# SETTINGS FOR SUBJOB-COMMAND
OOPT="-O $threads_per_task"
# DETERMINE THE TOTAL NUMBER OF NODES
if [[ "$tasks_per_node" != "" ]]
then
TOPT="-T $tasks_per_node"
(( nodes = numprocs / ( tasks_per_node * threads_per_task ) ))
fi
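# EXAMPLE: "-X 64 -T 8" TOGETHER WITH "-O 2" (2 OPENMP THREADS PER TASK)
# GIVES nodes = 64 / ( 8 * 2 ) = 4 (ILLUSTRATIVE NUMBERS ONLY)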
# CHECK IF NODE USAGE HAS AN ALLOWED VALUE, OR SET THE DEFAULT
if [[ $node_usage = default ]]
then
if [[ $host = ibms ]]
then
node_usage=shared
elif [[ $(echo $host | cut -c1-5) = lcsgi ]]
then
node_usage=singlejob
else
node_usage=not_shared
fi
fi
if [[ $node_usage != shared && $node_usage != not_shared && $node_usage != singlejob && "$(echo $node_usage | cut -c1-3)" != "sla" ]]
then
printf "\n"
printf "\n +++ node usage (option \"-n\") is only allowed to be set"
printf "\n \"shared\" or \"not_shared\""
locat=tasks_per_node; (( iec = 0 )); exit
fi
fi
# CHECK IF THE HOSTFILE EXISTS
if [[ -n $hostfile ]]
then
if [[ ! -f $hostfile ]]
then
printf "\n"
printf "\n +++ hostfile \"$hostfile\" does not exist"
locat=hostfile; exit
fi
fi
# CHECK IF THE .rhosts FILE EXISTS; CREATE OR EXTEND IT IF NECESSARY
if [[ $host = ibmy && $do_remote = false ]]
then
if [[ ! -f $HOME/.rhosts ]]
then
echo "gfdl5.yonsei.ac.kr" > $HOME/.rhosts
printf "\n\n *** file:"
printf "\n $HOME/.rhosts"
printf "\n was created\n"
fi
if [[ $(grep -c gfdl5.yonsei.ac.kr $HOME/.rhosts) = 0 ]]
then
echo "gfdl5.yonsei.ac.kr" >> $HOME/.rhosts
printf "\n\n *** file:"
printf "\n $HOME/.rhosts"
printf "\n was extended by the name of the current host\n"
fi
fi
# Set default value for the maximum number of parallel io streams
if [[ "$maximum_parallel_io_streams" = "" ]]
then
maximum_parallel_io_streams=$numprocs
fi
# Set port number option for calls of ssh/scp, subjob and batch_scp scripts
if [[ "$scp_port" != "" ]]
then
PORTOPT="-P $scp_port"
SSH_PORTOPT="-p $scp_port"
fi
# IF NOT GIVEN, SET THE DEFAULT QUEUE ON THE TARGET MACHINE
if [[ $queue = none ]]
then
case $host in
(ibmh) queue=cluster;;
(ibmkisti) queue=class.32plus;;
(ibmy) queue=parallel;;
(lckiaps) queue=normal;;
(lckyoto) queue=ph;;
(lckyuh) queue=fx-single;;
(lckyut) queue=cx-single;;
(lcsgib) queue=testq;;
(lcsgih) queue=testq;;
(lctit) queue=S;;
(nech) queue=none;;
(necriam) queue=SP;;
(unics) queue=unics;;
esac
fi
# BUILD THE COMPLETE FILE NAMES OF THE INPUT FILES, CHECK THAT THE INPUT
# FILES EXIST, AND DETERMINE THE HIGHEST CYCLE NUMBER, IF ANY
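# NOTE: CYCLE NUMBERS FOLLOW THE SCHEME <filename>.<cycle>[.<extension>],
# E.G. example_cbl_d3d.2.nc WOULD BE CYCLE 2 OF A NETCDF FILE (ILLUSTRATIVE
# NAME ONLY); THE HIGHEST EXISTING CYCLE IS SELECTED AS INPUT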
(( i = 0 ))
while (( i < iin ))
do
(( i = i + 1 ))
(( maxcycle = 0 ))
# BUILD THE FILE NAME (A FIXED FILE NAME MAY HAVE BEEN GIVEN)
if [[ "${actionin[$i]}" = di ]]
then
remotepathin[$i]=${pathin[$i]}/${endin[$i]} # EVALUATE THE REMOTE PATH
# ONLY ON THE REMOTE MACHINE
eval filename=${pathin[$i]}/${endin[$i]}
else
remotepathin[$i]=${pathin[$i]}/${afname}${endin[$i]} # EVALUATE THE REMOTE PATH
# ONLY ON THE REMOTE MACHINE
eval filename=${pathin[$i]}/${afname}${endin[$i]}
fi
eval pathname=${pathin[$i]}
# SAVE INPUT FILE NAME FOR PARAMETER FILE CHECK
if [[ ("${transin[$i]}" = job) && (-f $filename) ]]
then
filename_input=$filename
fi
# CHECK FOR EXISTENCE
if [[ $(ls $filename* 2>&1 | grep -c "not found") = 1 || \
$(ls $filename* 2>&1 | grep -c "nicht gefunden") = 1 || \
$(ls $filename* 2>&1 | grep -c "No such file") = 1 || \
$(ls $filename* 2>&1 | grep -c "does not exist") = 1 ]]
then
# FILES WITH AN EXTENSION (E.G. NC) DO NOT NECESSARILY HAVE TO EXIST,
# SO DO NOT ABORT IN SUCH CASES. THIS IS ONLY A TEMPORARY SOLUTION
# (OCT 05). INPUT FILES THAT ARE NOT STRICTLY REQUIRED SHOULD GET A
# SPECIAL OPTION IN THE FILE CONNECTION STATEMENT (E.G. inopt?)
if [[ "${transin[$i]}" != "locopt" ]]
then
printf "\n\n +++ INPUT-file: "
if [[ "${extin[$i]}" = "" ]]
then
printf "\n $filename"
else
printf "\n $filename.${extin[$i]}"
fi
printf "\n does not exist\n"
locat=input; exit
else
transin[$i]="unavailable"
fi
else
# DETERMINE THE CYCLE NUMBER
ls -1 -d $filename > filelist 2>/dev/null
ls -1 -d $filename.* >> filelist 2>/dev/null
while read zeile
do
cycle=$(echo $zeile | cut -f2 -d".")
if [[ "$cycle" = "$zeile" ]]
then
(( icycle = 0 ))
elif [[ "$cycle" = "${extin[$i]}" ]]
then
(( icycle = 0 ))
else
(( icycle = $cycle ))
fi
if (( icycle > maxcycle ))
then
(( maxcycle = icycle ))
file_to_be_used=$zeile
fi
done <filelist
rm filelist
# APPEND THE CYCLE NUMBER TO THE FILE NAME
if (( maxcycle > 0 ))
then
if [[ "${extin[$i]}" != " " && "${extin[$i]}" != "" ]]
then
filename=${filename}.$maxcycle.${extin[$i]}
else
filename=${filename}.$maxcycle
fi
else
if [[ "${extin[$i]}" != " " && "${extin[$i]}" != "" ]]
then
filename=${filename}.${extin[$i]}
fi
fi
# STORE THE FILE NAME WITHOUT PATH BUT WITH CYCLE NUMBER, SINCE IT MAY
# BE NEEDED LATER WHEN RESTORING FROM THE ARCHIVE SYSTEM
absnamein[$i]=$filename
if (( maxcycle > 0 ))
then
if [[ "${actionin[$i]}" = di ]]
then
frelin[$i]=${endin[$i]}.$maxcycle
else
frelin[$i]=${afname}${endin[$i]}.$maxcycle
fi
else
if [[ "${actionin[$i]}" = di ]]
then
frelin[$i]=${endin[$i]}
else
frelin[$i]=${afname}${endin[$i]}
fi
fi
fi
done
# BUILD THE COMPLETE FILE NAMES (WITHOUT $ OR ~) OF THE OUTPUT FILES,
# CHECK WHETHER THE OUTPUT FILES EXIST AND, IF SO, DETERMINE THE HIGHEST
# CYCLE NUMBER, OR, IF THEY DO NOT EXIST, CHECK WHETHER THEY CAN BE CREATED.
# THESE ACTIONS ARE NOT CARRIED OUT IF THE PROGRAM RUNS ON A REMOTE
# MACHINE AND THE FILE IS TO BE TRANSFERRED AFTERWARDS!
(( i = 0 ))
while (( i < iout ))
do
(( i = i + 1 ))
if [[ ! ( $fromhost != $localhost && ( "${actionout[$i]}" = tr || "${actionout[$i]}" = tra || "${actionout[$i]}" = trpe ) ) ]]
then
if [[ "${actionout[$i]}" = tr ]]
then
actionout[$i]=""
elif [[ "${actionout[$i]}" = trpe ]]
then
actionout[$i]=pe
elif [[ "${actionout[$i]}" = tra ]]
then
actionout[$i]=a
fi
(( maxcycle = 0 ))
eval filename=${pathout[$i]}/${fname}${endout[$i]}
eval catalogname=${pathout[$i]}
if [[ $(ls $filename* 2>&1 | grep -c "not found") = 1 || \
$(ls $filename* 2>&1 | grep -c "nicht gefunden") = 1 || \
$(ls $filename* 2>&1 | grep -c "No such file") = 1 || \
$(ls $filename* 2>&1 | grep -c "does not exist") = 1 ]]
then
# OUTPUT FILE DOES NOT EXIST. CHECK IF IT CAN BE CREATED.
if cat /dev/null > $filename
then
rm $filename
else
# CHECK IF THE DIRECTORY EXISTS AND CREATE IT IF NECESSARY
if [[ ! -d $catalogname ]]
then
if mkdir -p $catalogname
then
printf "\n\n *** directory:"
printf "\n $catalogname"
printf "\n was created\n"
else
printf "\n\n +++ OUTPUT-file:"
printf "\n $filename"
printf "\n cannot be created, because directory does not exist"
printf "\n and cannot be created either"
printf "\n"
locat=output ; exit
fi 2>/dev/null
else
printf "\n\n +++ OUTPUT-file:"
printf "\n $filename"
printf "\n cannot be created, although directory exists"
printf "\n"
locat=output ; exit
fi
fi 2>/dev/null
else
# DETERMINE THE CYCLE NUMBER
ls -1 -d $filename > filelist 2>/dev/null
ls -1 -d $filename.* >> filelist 2>/dev/null
while read zeile
do
cycle=$(echo $zeile | cut -f2 -d".")
if [[ "$cycle" = "$zeile" || "$cycle" = ${extout[$i]} ]]
then
(( icycle = 1 ))
else
(( icycle = $cycle + 1 ))
fi
if (( icycle > maxcycle ))
then
(( maxcycle = icycle ))
fi
done <filelist
rm filelist
# APPEND THE CYCLE NUMBER TO THE FILE NAME AND CHECK IF THE FILE CAN BE CREATED
if [[ "${actionout[$i]}" != a ]]
then
if (( maxcycle > 0 ))
then
filename=${filename}.$maxcycle
if cat /dev/null > $filename
then
rm $filename
else
printf "\n +++ OUTPUT-file:"
printf "\n $filename"
printf "\n cannot be created"
locat=output ; exit
fi
fi
else
(( maxcycle = maxcycle - 1 ))
if (( maxcycle > 0 ))
then
filename=${filename}.$maxcycle
fi
fi
fi
# STORE THE FILE NAME WITHOUT PATH BUT WITH CYCLE NUMBER, SINCE IT MAY BE
# NEEDED LATER WHEN STORING ON THE ARCHIVE SYSTEM OR FOR THE FILE
# OUTPUT_FILE_CONNECTIONS
pathout[$i]=$filename
if (( maxcycle > 0 ))
then
frelout[$i]=${fname}${endout[$i]}.$maxcycle
else
frelout[$i]=${fname}${endout[$i]}
fi
fi
done
# THE DVR PACKAGE REQUIRES A CORRESPONDING LIBRARY
if [[ $(echo $package_list | grep -c dvrp_graphics) != 0 ]]
then
if [[ "$dvr_inc" = "" ]]
then
printf "\n\n +++ no value for \"dvr_inc\" given in configuration file"
printf "\n This is required for the dvrp_graphics package.\n"
locat=dvr; exit
fi
if [[ "$dvr_lib" = "" ]]
then
printf "\n\n +++ no value for \"dvr_lib\" given in configuration file"
printf "\n This is required for the dvrp_graphics package.\n"
locat=dvr; exit
fi
fi
# CHECK WHETHER A MAIN PROGRAM OR ONLY AN EXECUTABLE HAS BEEN
# SPECIFIED (IN THE LATTER CASE NO COMPILATION IS REQUIRED)
if [[ "$mainprog" = "" && "$executable" = "" ]]
then
printf "\n +++ neither main program nor executable defined"
locat=source; exit
elif [[ "$mainprog" != "" && "$executable" != "" ]]
then
printf "\n +++ main program as well as executable defined"
locat=source; exit
elif [[ "$mainprog" = "" && "$executable" != "" ]]
then
do_compile=false
fi
# CREATE THE SOURCE DIRECTORY FOR COLLECTING THE PROGRAM PARTS TO BE
# COMPILED. IN CASE OF BATCH JOBS, THE MRUN SCRIPT AND THE CONFIGURATION
# FILE WILL LATER BE COPIED THERE AS WELL
if [[ $restart_run != true && "$SOURCES_COMPLETE" = "" ]]
then
rm -rf SOURCES_FOR_RUN_$fname
mkdir SOURCES_FOR_RUN_$fname
fi
# COLLECT ALL PROGRAM PARTS TO BE COMPILED.
# FOR BATCH JOBS THIS IS NOT NECESSARY, BECAUSE THE PROGRAM PARTS HAVE
# ALREADY BEEN DETERMINED BY THE MRUN CALL THAT GENERATED THE BATCH JOB
# AND ARE STORED IN THE DIRECTORY SOURCES_FOR_RUN_...
if [[ $do_compile = true && "$SOURCES_COMPLETE" = "" ]]
then
[[ "$source_list" = LM ]] && source_list=LOCALLY_MODIFIED
if [[ "$source_list" = LOCALLY_MODIFIED ]]
then
# DETERMINE THE MODIFIED FILES OF THE SVN WORKING COPY
source_list=""
cd $source_path
# CHECK IF THE DIRECTORY IS UNDER SVN CONTROL AT ALL
if [[ ! -d .svn ]]
then
printf "\n\n +++ source directory"
printf "\n \"$source_path\" "
printf "\n is not under control of \"subversion\"."
printf "\n Please do not use mrun-option \"-s LOCALLY_MODIFIED\"\n"
fi
# LIST ALL MODIFIED SOURCE CODE FILES
Filenames=""
svn status > tmp_mrun
while read line
do
firstc=`echo $line | cut -c1`
if [[ $firstc = M || $firstc = "?" ]]
then
Name=`echo "$line" | cut -c8-`
extension=`echo $Name | cut -d. -f2`
if [[ "$extension" = f90 || "$extension" = F90 || "$extension" = f || "$extension" = F || "$extension" = c ]]
then
Filenames="$Filenames "$Name
fi
fi
done < tmp_mrun
# COPY THE FILES TO SOURCES_FOR_RUN_...
for dateiname in $Filenames
do
cp $dateiname $working_directory/SOURCES_FOR_RUN_$fname
source_list=$source_list"$dateiname "
done
cd - > /dev/null
# COPY THE FILES GIVEN WITH OPTION -s TO SOURCES_FOR_RUN_...
# FOR AUTOMATIC RESTART RUNS ALL FILES ARE ALREADY PRESENT THERE
elif [[ "$source_list" != "" && $restart_run != true ]]
then
cd $source_path
for filename in $source_list
do
# THE SOURCE CODE FILE MUST NOT CONTAIN ANY PATH
if [[ $(echo $filename | grep -c "/") != 0 ]]
then
printf "\n +++ source code file: $filename"
printf "\n must not contain (\"/\") "
locat=source; exit
fi
if [[ ! -f $filename ]]
then
printf "\n +++ source code file: $filename"
printf "\n does not exist"
locat=source; exit
else
cp $filename $working_directory/SOURCES_FOR_RUN_$fname
fi
done
cd - > /dev/null
fi
# CHECK IF THE MAIN PROGRAM EXISTS AND IF IT IS CONTAINED IN THE LIST
# OF PROGRAM PARTS TO BE COMPILED (IF NOT, IT IS ADDED TO THIS LIST)
if [[ $restart_run != true ]]
then
if [[ ! -f "$source_path/$mainprog" ]]
then
printf "\n\n +++ main program: $mainprog"
printf "\n does not exist in source directory"
printf "\n \"$source_path\"\n"
locat=source; exit
else
if [[ $(echo $source_list | grep -c $mainprog) = 0 ]]
then
cp $source_path/$mainprog SOURCES_FOR_RUN_$fname
source_list=${mainprog}" $source_list"
fi
fi
fi
# CHECK IF THE MAKEFILE EXISTS AND COPY IT.
# FOR RESTART RUNS IT IS ALREADY IN THE DIRECTORY SOURCES_FOR_RUN...
if [[ "$restart_run" != true ]]
then
[[ "$makefile" = "" ]] && makefile=$source_path/Makefile
if [[ ! -f $makefile ]]
then
printf "\n +++ file \"$makefile\" does not exist"
locat=make; exit
else
cp $makefile SOURCES_FOR_RUN_$fname/Makefile
fi
fi
# ADD FILES FROM THE ADDITIONAL SOURCE DIRECTORY
if [[ $restart_run != true && "$add_source_path" != "" ]]
then
# DOES THE DIRECTORY EXIST AT ALL?
if [[ ! -d $add_source_path ]]
then
printf "\n\n +++ WARNING: additional source code directory"
printf "\n \"$add_source_path\" "
printf "\n does not exist or is not a directory."
printf "\n No source code will be used from this directory!\n"
add_source_path=""
if [[ $silent == false ]]
then
sleep 3
fi
else
cd $add_source_path
found=false
Names=$(ls -1 *.f90 2>&1)
[[ $(echo $Names | grep -c '*.f90') = 0 ]] && AddFilenames="$Names"
Names=$(ls -1 *.F90 2>&1)
[[ $(echo $Names | grep -c '*.F90') = 0 ]] && AddFilenames="$AddFilenames $Names"
Names=$(ls -1 *.F 2>&1)
[[ $(echo $Names | grep -c '*.F') = 0 ]] && AddFilenames="$AddFilenames $Names"
Names=$(ls -1 *.f 2>&1)
[[ $(echo $Names | grep -c '*.f') = 0 ]] && AddFilenames="$AddFilenames $Names"
Names=$(ls -1 *.c 2>&1)
[[ $(echo $Names | grep -c '*.c') = 0 ]] && AddFilenames="$AddFilenames $Names"
cd - > /dev/null
cd SOURCES_FOR_RUN_$fname
# COPY MAKEFILE IF EXISTING
if [[ -f $add_source_path/Makefile ]]
then
printf "\n\n *** user Makefile from directory"
printf "\n \"$add_source_path\" is used \n"
if [[ $silent == false ]]
then
sleep 1
fi
cp $add_source_path/Makefile .
fi
for dateiname in $AddFilenames
do
if [[ -f $dateiname ]]
then
printf "\n +++ source code file \"$dateiname\" found in additional"
printf "\n source code directory \"$add_source_path\" "
printf "\n but was also given with option \"-s\" which means that it should be taken"
printf "\n from directory \"$source_path\"."
locat=source; exit
fi
cp $add_source_path/$dateiname .
source_list="$source_list $dateiname"
# CHECK IF FILE IS CONTAINED IN MAKEFILE
if [[ $(grep -c $dateiname Makefile) = 0 ]]
then
printf "\n\n +++ user file \"$dateiname\" "
printf "\n is not listed in Makefile \n"
locat=source; exit
else
# Remove the default user interface from the list if the file
# contains a user interface
# if [[ $( cat $dateiname | grep -c "END SUBROUTINE user_parin" ) != 0 ]]
# then
# if [[ $dateiname != user_interface.f90 && -f user_interface.f90 ]]
# then
# rm -rf user_interface.f90
# source_list=`echo $source_list | sed -e 's/user_interface.f90//'`
# printf "\n\n *** default \"user_interface.f90\" removed from the files to be translated"
# printf "\n since a user-interface is found in file"
# printf "\n \"$add_source_path/$dateiname\" \n"
# sleep 3
# else
if [[ $found = false ]]
then
found=true
printf "\n\n *** following user file(s) added to the"
printf " files to be translated:\n "
fi
printf "$dateiname "
if [[ $silent == false ]]
then
sleep 0.5
fi
fi
done
[[ $found = true ]] && printf "\n"
cd - > /dev/null
fi
fi
# ADD ALL SUBROUTINES BELONGING TO THE SELECTED SOFTWARE PACKAGES TO
# THE LIST OF FILES TO BE COMPILED
if [[ $restart_run != true && -n $package_list ]]
then
cd $source_path
for package in $package_list
do
[[ $package = "dvrp_graphics+1PE" ]] && package=dvrp_graphics
# DETERMINE ALL FILES BELONGING TO THE PACKAGE.
# ERROR MESSAGES ARE SUPPRESSED, BECAUSE * ALSO RETURNS DIRECTORY
# NAMES
package_source_list=`grep "defined( __$package " * 2>/dev/null | cut -f1 -d:`
# ADD THESE FILES TO THE LIST OF FILES TO BE COMPILED, IF THEY
# ARE NOT ALREADY PART OF IT
for source_list_name in $package_source_list
do
if [[ $(echo $source_list | grep -c $source_list_name) = 0 ]]
then
# USE ONLY FILES WITH VALID EXTENSIONS
ending=`echo $source_list_name | cut -f2 -d.`
if [[ "$ending" = f90 || "$ending" = F90 || "$ending" = f || "$ending" = F || "$ending" = c ]]
then
cp $source_list_name $working_directory/SOURCES_FOR_RUN_$fname
source_list="$source_list $source_list_name"
fi
fi
done
done
cd - > /dev/null
fi
# CHECK IF THE MAKEFILE EXISTS AND COPY IT.
# FOR RESTART RUNS IT IS ALREADY IN THE DIRECTORY SOURCES_FOR_RUN...
# if [[ "$restart_run" != true ]]
# then
# [[ "$makefile" = "" ]] && makefile=$source_path/Makefile
# if [[ ! -f $makefile ]]
# then
# printf "\n +++ file \"$makefile\" does not exist"
# locat=make; exit
# else
# cp $makefile SOURCES_FOR_RUN_$fname/Makefile
# fi
# fi
fi # do_compile=true
# IF PROGRAM PARTS ARE TO BE COMPILED, SOME CHECKS FOLLOW NOW AND THE
# PREPROCESSOR DIRECTIVES ARE SET
if [[ $do_compile = true ]]
then
# ASSEMBLE THE PREPROCESSOR DIRECTIVES FOR SELECTING CODE PARTS.
# DIRECTIVES FOR ACTIVATING MACHINE SPECIFIC CODE
if [[ $(echo $localhost | cut -c1-3) = ibm ]]
then
cpp_options="${cpp_options},-D__ibm=__ibm"
elif [[ $(echo $localhost | cut -c1-3) = nec ]]
then
cpp_options="$cpp_options -D__nec"
elif [[ $(echo $localhost | cut -c1-2) = lc ]]
then
cpp_options="$cpp_options -D__lc"
else
cpp_options="$cpp_options -D__$localhost"
fi
# DIRECTIVES DETERMINED BY OPTION -K (E.G. PARALLEL)
if [[ $(echo $localhost | cut -c1-3) = ibm ]]
then
[[ -n $cond1 ]] && cpp_options="${cpp_options},-D__$cond1=__$cond1"
[[ -n $cond2 ]] && cpp_options="${cpp_options},-D__$cond2=__$cond2"
else
[[ -n $cond1 ]] && cpp_options="$cpp_options -D__$cond1"
[[ -n $cond2 ]] && cpp_options="$cpp_options -D__$cond2"
fi
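# EXAMPLE: WITH "-K parallel" ON A NON-IBM HOST, " -D__parallel" IS APPENDED
# TO cpp_options; ON IBM HOSTS THE FORM ",-D__parallel=__parallel" IS USED
# INSTEAD (ILLUSTRATIVE CONDITION NAME)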
# DIRECTIVES THAT ACTIVATE SOFTWARE PACKAGES (OPTION -p)
if [[ -n $package_list ]]
then
for package in $package_list
do
if [[ $(echo $localhost | cut -c1-3) = ibm ]]
then
if [[ $package != "dvrp_graphics+1PE" ]]
then
cpp_options="${cpp_options},-D__$package=__$package"
else
cpp_options="${cpp_options},-D__dvrp_graphics=__dvrp_graphics"
export use_seperate_pe_for_dvrp_output=true
fi
else
if [[ $package != "dvrp_graphics+1PE" ]]
then
cpp_options="$cpp_options -D__$package"
else
cpp_options="$cpp_options -D__dvrp_graphics"
export use_seperate_pe_for_dvrp_output=true
fi
fi
done
fi
# DIRECTIVES GIVEN WITH OPTION -D
if [[ -n $cpp_opts ]]
then
for popts in $cpp_opts
do
if [[ $(echo $localhost | cut -c1-3) = ibm ]]
then
cpp_options="${cpp_options},-D__$popts=__$popts"
else
cpp_options="$cpp_options -D__$popts"
fi
done
fi
else
# FOR LOCAL RUNS CHECK IF THE EXECUTABLE EXISTS
if [[ $do_remote = false ]]
then
if [[ ! -f $executable ]]
then
printf "\n +++ executable file: $executable"
printf "\n does not exist"
locat=executable; exit
fi
fi
fi
# DETERMINE THE JOB MODE
if [[ "$ENVIRONMENT" = BATCH ]]
then
jobmo=BATCH
else
jobmo=INTERACTIVE
fi
# no interactive runs on lctit
if [[ $host = lctit && $jobmo = INTERACTIVE && $do_batch = false ]]
then
printf "\n +++ no interactive runs allowed on host \"$host\" "
printf "\n please submit batch job using mrun option \"-b\" \n"
locat=normal; exit
fi
# SET HOST SPECIFIC DEFAULT COMPILERS, UNLESS ALREADY SET OTHERWISE
# BY THE USER
if [[ "$compiler_name" = "" ]]
then
printf "\n +++ no compiler specified for \"$host $cond1 $cond2\""
locat=compiler_name; exit
fi
# OVERRIDE THE COMPILER ON RIAM'S NEC
[[ $localhost = necriam ]] && compiler_name=mpif90
# DETERMINE THE NAME OF THE TEMPORARY DIRECTORY
kennung=$RANDOM
if [[ "$tmp_user_catalog" = "" ]]
then
if [[ $localhost = ibmh ]]
then
tmp_user_catalog=$SCRATCH
elif [[ $localhost = nech ]]
then
tmp_user_catalog=$WRKSHR
else
tmp_user_catalog=/tmp
fi
fi
TEMPDIR=$tmp_user_catalog/${usern}.$kennung
# DETERMINE THE DIRECTORY NAME FOR THE TEMPORARY STORAGE OF RESTART
# FILES
if [[ "$tmp_data_catalog" = "" ]]
then
if [[ $localhost = nech ]]
then
tmp_data_catalog=$WRKSHR/mrun_restart_data
else
tmp_data_catalog=/tmp/mrun_restart_data
fi
fi
# FOR LOCAL RUNS, REPLACE ANY $ SIGNS IN ENVIRONMENT VARIABLES
if [[ $do_remote = false && $do_compile = true ]]
then
eval fopts=\"$fopts\"
eval lopts=\"$lopts\"
fi
# DETERMINE THE COMPILE AND LINK OPTIONS
fopts="$fopts $netcdf_inc $dvr_inc"
lopts="$lopts $netcdf_lib $dvr_lib"
ROPTS="$ropts"
# if [[ ( $(echo $host | cut -c1-3) = nec || $(echo $host | cut -c1-3) = ibm || $host = lckyoto || $host = lcsgih || $host = lcsgib || $host = lctit || $host = lcfimm || $host = lcflow || $host = lcxe6 || $host = lcxt5m || $host = lck || $host = lckiaps || $host = lckordi || $host = lcsb || $host ) && -n $numprocs ]]
# then
XOPT="-X $numprocs"
# fi
# CHECK THE CPU TIME (CPUMAX IS NEEDED AS AN ENVIRONMENT VARIABLE BY
# THE MAIN PROGRAM)
done=false
while [[ $done = false ]]
do
cputime=$cpumax
if (( $cputime == 0 ))
then
if [[ $do_batch = true ]]
then
printf "\n +++ cpu-time is undefined"
printf "\n >>> Please type CPU-time in seconds as INTEGER:"
printf "\n >>> "
read cputime 1>/dev/null 2>&1
else
cputime=10000000 # NO CPU LIMIT FOR INTERACTIVE RUNS
fi
else
done=true
fi
cpumax=$cputime
done
(( minuten = cputime / 60 ))
(( sekunden = cputime - minuten * 60 ))
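# EXAMPLE: cputime=3700 SECONDS GIVES minuten=61 AND sekunden=40, I.E. A
# REQUESTED CPU TIME OF 61:40 (ILLUSTRATIVE VALUE ONLY)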
# CHECK THE MEMORY DEMAND
if [[ $do_batch = true ]]
then
done=false
while [[ $done = false ]]
do
if (( memory == 0 ))
then
printf "\n +++ memory demand is undefined"
printf "\n >>> Please type memory in MByte per process as INTEGER:"
printf "\n >>> "
read memory 1>/dev/null 2>&1
else
done=true
fi
done
fi
# CHECK IF A USERNAME HAS BEEN GIVEN FOR REMOTE RUNS
if [[ $do_remote = true && -z $remote_username ]]
then
while [[ -z $remote_username ]]
do
printf "\n +++ username on remote host \"$host\" is undefined"
printf "\n >>> Please type username:"
printf "\n >>> "
read remote_username
done
mc="$mc -u$remote_username"
fi
# CHECK FOR INITIAL COMMANDS AFTER LOGIN
if [[ "$login_init_cmd" != "" ]]
then
export init_cmds="${login_init_cmd};"
fi
# set module load command and export for subjob
if [[ "$modules" != "" ]]
then
if [[ $host = lctit ]]
then
export module_calls=". $modules"
else
export module_calls="module load ${modules};"
fi
fi
# bugfix for wrong netcdf module and for netCDF4 usage in case of mpt
if [[ $host = lcsgib || $host = lcsgih ]]
then
if [[ $(echo $module_calls | grep -c netcdf/3.6.3-intel) != 0 ]]
then
export module_calls="$module_calls export LD_LIBRARY_PATH=/sw/dataformats/netcdf/3.6.3-intel/lib:\$LD_LIBRARY_PATH;"
fi
# if [[ $(echo $module_calls | grep -c mpt) != 0 ]]
# then
# export module_calls="$module_calls export LD_LIBRARY_PATH=/sw/sgi/mpt/2011-02-07/lib:\$LD_LIBRARY_PATH;"
# echo "*** module_calls = $module_calls"
# fi
fi
# SET DEFAULT VALUE FOR MPI MODULE TO BE USED ON SGI-ALTIX
if [[ $host = lcsgib || $host = lcsgih ]]
then
if [[ $(echo $modules | grep -c mpt ) != 0 ]]
then
mpilib=mpt
elif [[ $(echo $modules | grep -c mvapich ) != 0 ]]
then
mpilib=mvapich
elif [[ $(echo $modules | grep -c impi ) != 0 ]]
then
mpilib=impi
fi
fi
###########################################################################
# HEADER OUTPUT
###########################################################################
calltime=$(date)
printf "\n"
# [[ $silent = false ]] && clear
printf "#------------------------------------------------------------------------# \n"
printf "| %-35s%35s | \n" "$version" "$calltime"
printf "| | \n"
spalte1="called on:"; spalte2=$localhost_realname
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
if [[ $local_compile = false ]]
then
if [[ $do_remote = true ]]
then
spalte1="execution on:"; spalte2="$host (username: $remote_username)"
else
spalte1="execution on:"; spalte2="$host ($localhost_realname)"
fi
else
spalte1="compiling test only!"; spalte2=""
fi
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
if [[ -n $numprocs ]]
then
if [[ $run_coupled_model = false ]]
then
spalte1="number of PEs:"; spalte2=$numprocs
else
spalte1="number of PEs:"; spalte2="$numprocs (atmosphere: $numprocs_atmos, ocean: $numprocs_ocean)"
fi
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
if [[ -n $tasks_per_node ]]
then
spalte1="tasks per node:"; spalte2="$tasks_per_node (number of nodes: $nodes)"
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
if [[ $maximum_parallel_io_streams != $numprocs ]]
then
spalte1="max par io streams:"; spalte2="$maximum_parallel_io_streams"
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
if [[ $use_openmp = true ]]
then
spalte1="threads per task:"; spalte2="$threads_per_task"
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
printf "| | \n"
if [[ $do_compile = true ]]
then
if [[ "$mopts" != "" ]]
then
spalte1="make options:"; spalte2=$(echo "$mopts" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$mopts" | cut -c46-)
while [[ "$zeile" != "" ]]
do
spalte1=""
spalte2=$(echo "$zeile" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$zeile" | cut -c46-)
done
fi
spalte1="cpp directives:"; spalte2=$(echo "$cpp_options" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$cpp_options" | cut -c46-)
while [[ "$zeile" != "" ]]
do
spalte1=""
spalte2=$(echo "$zeile" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$zeile" | cut -c46-)
done
spalte1="compiler options:"; spalte2=$(echo "$fopts" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$fopts" | cut -c46-)
while [[ "$zeile" != "" ]]
do
spalte1=""
spalte2=$(echo "$zeile" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$zeile" | cut -c46-)
done
spalte1="linker options:"; spalte2=$(echo "$lopts" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$lopts" | cut -c46-)
while [[ "$zeile" != "" ]]
do
spalte1=""
spalte2=$(echo "$zeile" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$zeile" | cut -c46-)
done
spalte1="modules to be load:"; spalte2=$(echo "$modules" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$modules" | cut -c46-)
while [[ "$zeile" != "" ]]
do
spalte1=""
spalte2=$(echo "$zeile" | cut -c-45)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
zeile=$(echo "$zeile" | cut -c46-)
done
spalte1="main program:"; spalte2=$mainprog
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
else
spalte1=executable:; spalte2=$executable
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
printf "| | \n"
spalte1="base name of files:"; spalte2=$fname
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
if [[ $fname != $afname ]]
then
spalte1="base name of input files:"; spalte2=$afname
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
spalte1="INPUT control list:"; spalte2=$(echo $input_list)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
spalte1="OUTPUT control list:"; spalte2=$(echo $output_list)
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
if [[ "$ocean_file_appendix" = true ]]
then
printf "| %-35s%-35s | \n" "suffix \"_O\" is added to local files" " "
fi
if [[ $do_batch = true || "$LOADLBATCH" = yes ]]
then
spalte1="memory demand / PE":; spalte2="$memory MB"
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
spalte1=CPU-time:; spalte2="$minuten:$sekunden"
printf "| %-25s%-45s | \n" "$spalte1" "$spalte2"
fi
if [[ $do_compile = true ]]
then
printf "| | \n"
printf "| Files to be compiled: | \n"
zeile=$source_list
while [[ "$zeile" != "" ]]
do
linestart=$(echo $zeile | cut -c-70)
printf "| %-70s | \n" "$linestart"
zeile=$(echo "$zeile" | cut -c71-)
done
fi
printf "#------------------------------------------------------------------------#"
# CONDITIONAL OUTPUT OF THE FILE CONNECTIONS
if [[ $do_trace = true ]]
then
(( i = 0 ))
while (( i < iin ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n >>> INPUT-file assignments:\n"
fi
printf "\n ${localin[$i]} : ${absnamein[$i]}"
done
(( i = 0 ))
while (( i < iout ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n >>> OUTPUT-file assignments:\n"
fi
printf "\n ${localout[$i]} : ${pathout[$i]}"
done
(( i = 0 ))
while (( i < iic ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n >>> INPUT-commands:\n"
fi
printf "\n ${in_command[$i]}"
done
(( i = 0 ))
while (( i < ioc ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n >>> OUTPUT-commands:\n"
fi
printf "\n ${out_command[$i]}"
done
fi
# QUERIES WHEN MRUN IS CALLED ON THE LOCAL MACHINE
if [[ $remotecall = false && $silent = false && $jobmo != BATCH ]]
then
antwort=dummy
printf "\n\n"
printf " >>> everything o.k. (y/n) ? "
while read antwort
do
if [[ "$antwort" != y && "$antwort" != Y && "$antwort" != n && "$antwort" != N ]]
then
printf " >>> everything o.k. (y/n) ? "
else
break
fi
done
if [[ $antwort = n || $antwort = N ]]
then
locat=user_abort; (( iec = 0 )); exit
fi
if [[ $do_batch = true ]]
then
printf " >>> batch-job will be created and submitted"
else
if [[ $local_compile = false ]]
then
printf " >>> MRUN will now continue to execute on this machine"
else
printf " >>> a test compilation will now be carried out on this machine"
fi
fi
fi
# PERFORM PARAMETER FILE CHECK (COUPLED RUNS ARE NOT SUPPORTED YET)
# DEFINE VARIABLES FOR FREQUENTLY USED DIRECTORIES
check_depository="${working_directory}/trunk/UTIL"
check_sources="${working_directory}/tmp_check_namelist_files"
skip_check=false
# CHECK IF NAMELIST_FILE_CHECK HAS BEEN COMPILED SUCCESSFULLY
if [[ ! -f $check_depository/check_namelist_files.tar ]]
then
skip_check=true
reason="run on remote host or parameter file check has not been compiled."
fi
if [[ ! -f $PALM_BIN/check_namelist_files.x ]]
then
skip_check=true
reason="parameter file check has not been compiled."
fi
# CHECK FOR PARALLEL RUN; OTHERWISE SKIP CHECK
if [[ "$cond1" != "parallel" && "$cond2" != "parallel" ]]
then
skip_check=true
reason="serial run."
fi
# ONLY PERFORM CHECK IF -z OPTION IS NOT SET, NO RESTART RUN IS CARRIED OUT
# AND IF THE EXECUTION HOST IS THE LOCAL HOST
# ATTENTION: THIS ROUTINE DOES NOT WORK IF THE COMPILER ON THE LOCAL HOST
# DIFFERS FROM THE COMPILER ON THE REMOTE HOST
if [[ $check_namelist_files == false ]]
then
skip_check=true
reason="-z option set."
fi
if [[ $fromhost != $localhost ]]
then
skip_check=true
reason="submitting host is local host."
fi
if [[ $run_coupled_model == true ]]
then
skip_check=true
reason="coupled run."
fi
if [[ $restart_run == true ]]
then
skip_check=true
reason="restart run."
fi
# SKIP CHECK IN CASE OF RESTART RUNS: CHECK WHETHER THE LAST CHARACTER OF THE PARAMETER FILE NAME IS "f"
(( last_char_int = `echo $filename_input | wc -c` - 1 ))
last_char=`echo $filename_input | cut -c $last_char_int`
if [[ "$last_char" == "f" ]]
then
skip_check=true
reason="restart run."
fi
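# EXAMPLE (ASSUMED FILE NAME): FOR A PARAMETER FILE NAMED e.g. example_p3df THE
# LAST CHARACTER IS "f", SO IT IS TREATED AS A RESTART PARAMETER FILE AND THE
# CHECK IS SKIPPED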
if [[ $skip_check == false ]]
then
tmp_check=${working_directory}/tmp_check
# GET TOPOGRAPHY PARAMETER FILE SUFFIX (USUALLY "_P3DF"). THIS FILE IS NOT
# NECESSARILY REQUIRED
line=""
found=false
grep "TOPOGRAPHY_DATA" $config_file > $tmp_check
while read line1
do
line="$line1"
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
topo_suffix=`echo $line | tr -s " " | cut -d" " -s -f5`
found=true
fi
done < $tmp_check
if [[ $found = false ]]
then
printf "\n +++ no TOPOGRAPHY list entry found in the configuration file."
fi
rm -rf ${working_directory}/tmp_check
# CHECK IF A P3DF FILE MUST BE CHECKED
for item in $input_list
do
if [[ "$item" == "restart" ]]
then
check_restart=1
else
check_restart=0
fi
done
# ERROR IF NO PARAMETER FILE WAS FOUND, OTHERWISE PROCEED
if [[ (! -f $filename_input) && ( "$filename_input" != "" ) ]]
then
printf "\n\n +++ ERROR: parameter file ($filename_input) not found."
locat=check_namelist; exit
fi
# CHECK IF THE RESTART PARAMETER FILE EXISTS (IF NECESSARY)
if [[ $check_restart == 1 ]]
then
filenamef="${filename_input}f"
if [[ ! -f $filenamef ]]
then
printf "\n\n +++ WARNING: restart parameter file ($filenamef) is missing."
check_restart=0
answer=dummy
printf "\n\n"
if [[ $silent == false ]]
then
while [[ "$answer" != c && "$answer" != C && "$answer" != a && "$answer" != A ]]
do
printf " >>> continue anyway (c(ontinue)/a(bort)) ? "
read answer
done
if [[ $answer = a || $answer = A ]]
then
printf "\n +++ Aborting...."
locat=normal; exit
fi
fi
else
check_restart=1
fi
fi
# CREATE TEMPORARY SOURCES_FOR_CHECK PATH
mkdir $check_sources
cd $check_sources
# CHECK FOR USER CODE, OTHERWISE USE THE PRECOMPILED CHECK_NAMELIST_FILES.X
if [[ -d $add_source_path ]]
then
printf "\n\n *** copying files from $check_depository"
cp $check_depository/check_namelist_files.tar ./
printf "\n\n *** untar of makefile and source files in $check_sources"
tar -xf check_namelist_files.tar > /dev/null 2>&1
printf "\n\n *** adding user code."
cp $add_source_path/* ./
touch check_namelist_files.f90
# GET COMPILER OPTIONS AND PERFORM MAKE
printf "\n\n *** compiling code if necessary...\n"
# workaround for batch jobs on local machine (lcxe6)
if [[ $do_batch == true && $do_remote == false ]]
then
eval $init_cmds
fi
# GET CHECK OPTIONS
line=""
found=false
grep "$localhost" ${base_directory}/${config_file} | grep "%cpp_options" > $tmp_check
while read line1
do
if [[ $(echo $line1 | cut -d" " -s -f3-) = "$localhost" ]]
then
line="$line1"
fi
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
# REMOVE ANY COLONS FROM THE OPTION STRING AND STRIP ALL -D DEFINES
line="$line "
copts_check=`echo $line | cut -d" " -s -f2 | sed 's/::/%DUM%/g' | sed 's/:/ /g' | sed 's/%DUM%/:/g' | sed 's/-D[^ ]* //g' | sed 's/ -D.*//g'`
found=true
fi
done < $tmp_check
copts_check="$copts_check -D__check -D__parallel"
make -f Makefile_check F90=$compiler_name_ser COPT="$copts_check"
# GET MAKE OUTPUT
if [[ $? != 0 ]]
then
printf "\n +++ error during make."
answer=dummy
printf "\n\n"
if [[ $silent == false ]]
then
while [[ "$answer" != c && "$answer" != C && "$answer" != a && "$answer" != A ]]
do
printf " >>> continue anyway (c(ontinue)/a(bort)) ? "
read answer
done
if [[ $answer = a || $answer = A ]]
then
printf "\n +++ Aborting..."
rm -rf $check_sources
locat=normal; exit
else
skip_check=true
fi
else
skip_check=true
fi
fi
else
cp $PALM_BIN/check_namelist_files.x ./
fi
cp $filename_input ./PARIN
if [[ $check_restart == 1 ]]
then
cp $filenamef ./PARINF
fi
if [[ -f ${pathname}/${fname}${topo_suffix} && $skip_check == false ]]
then
printf "\n *** adding topography data"
cp ${pathname}/${fname}${topo_suffix} ./TOPOGRAPHY_DATA
# IN CASE OF TOPOGRAPHY AND HIGH GRID POINT NUMBERS, THE STACK SIZE
# MUST BE INCREASED. THIS IS DUE TO THE ARRAY nzb_local AND topo_height,
# WHICH REQUIRE SUFFICIENT MEMORY
ulimit -s unlimited
fi
# CREATE ENVPAR FILE, WHICH IS NEEDED BY CHECK_NAMELIST_FILES.X
cat > ENVPAR << %%END%%
&envpar run_identifier = '$fname', host = '$host',
write_binary = '$write_binary', tasks_per_node = $tasks_per_node,
maximum_parallel_io_streams = $maximum_parallel_io_streams,
maximum_cpu_time_allowed = ${cpumax}.,
revision = '$global_revision',
local_dvrserver_running = $local_dvrserver_running /
%%END%%
# SAFETY CHECK: ONLY PROCEED IF THE PARAMETER CHECK PROGRAM WAS PROPERLY COMPILED
if [[ ! -f check_namelist_files.x && $skip_check == false ]]
then
printf "\n +++ WARNING: check_namelist_files.x not found."
answer=dummy
printf "\n\n"
if [[ $silent == false ]]
then
while [[ "$answer" != c && "$answer" != C && "$answer" != a && "$answer" != A ]]
do
printf " >>> continue anyway (c(ontinue)/a(bort)) ? "
read answer
done
if [[ $answer = a || $answer = A ]]
then
printf "\n +++ Aborting..."
rm -rf $check_sources
locat=normal; exit
else
printf "\n *** skipping parameter file check."
fi
fi
elif [[ $skip_check == false ]]
then
# STARTING THE PARAMETER FILE CHECK
printf "\n\n *** starting parameter file check..."
# CHECKING THE P3D FILE
printf "\n\n (1) checking $filename_input"
echo "$numprocs 0 0" > VARIN
errors=`./check_namelist_files.x < VARIN 2>&1`
check_error=false
if [[ "$errors" == "" ]]
then
printf " --> o.k."
else
printf " --> failed."
check_error=true
printf "\n\n $errors"
fi
# CHECKING THE PD3F FILE IF NECESSARY
if [[ $check_restart == 1 && $check_error == false ]]
then
printf "\n\n (2) checking $filenamef"
# FIRST CHECK IF INITIALIZING_ACTIONS="READ_RESTART_DATA" IS SET
# IN &INIPAR LIST
found=false
cat PARINF | while read line
do
line=$(echo $line|sed 's/ //g')
if [[ $line == *"&inipar"* ]]
then
start_search=true
fi
if [[ $start_search == true ]]
then
if [[ $line == *"initializing_actions='read_restart_data'"* ]]
then
found=true
break
fi
fi
if [[ $line == *"/"* ]]
then
start_search=false
fi
done
if [[ $found = false ]]
then
printf "\n\n +++ ERROR: initializing_actions = 'read_restart_data' not found"
printf "\n in &inipar list in $fname$p3df_suffix."
rm -rf $check_sources
locat=check_namelist; exit
fi
# READ max_pr_user FROM FILE
if [[ -f parin_for_check ]]
then
read max_pr_user < parin_for_check
else
max_pr_user=0
fi
echo "$numprocs 1 $max_pr_user" > VARIN
errors=`./check_namelist_files.x < VARIN 2>&1`
if [[ "$errors" == "" ]]
then
printf " --> o.k."
else
printf " --> failed."
check_error=true
printf "\n\n $errors"
fi
fi
# REPORT ERRORS AND CONTINUE/EXIT
if [[ $check_error == true ]]
then
printf "\n +++ errors found in the parameter file!\n"
answer=dummy
printf "\n\n"
while [[ "$answer" != c && "$answer" != C && "$answer" != a && "$answer" != A ]]
do
printf " >>> continue anyway (c(ontinue)/a(bort)) ? "
read answer
done
if [[ $answer = a || $answer = A ]]
then
printf "\n +++ Aborting..."
rm -rf $check_sources
locat=normal; exit
fi
else
printf "\n\n *** parameter file(s) seem(s) to be o.k.\n"
fi
rm -rf $check_sources
fi
else
printf "\n\n +++ skipping parameter file check due to following reason: $reason \n"
fi
# DELETE TEMPORARY DIRECTORY AND FINISH NAMELIST FILE CHECK
rm -rf $check_sources
cd $working_directory
# IF THE RUN IS TO BE CARRIED OUT ON THIS MACHINE, THE CORRESPONDING
# ACTIONS ARE PERFORMED NOW
if [[ $do_batch = false ]]
then
# CREATE THE TEMPORARY DIRECTORY
mkdir -p $TEMPDIR
chmod go+rx $TEMPDIR
tmpcreate=true
# set striping on lustre file system
# if [[ $localhost = lcsgih ]]
# then
# lfs setstripe -s 8192k -c 16 $TEMPDIR
# lfs getstripe $TEMPDIR
# fi
# COPY ALL SOURCE CODE FILES OR THE EXECUTABLE INTO THE
# TEMPORARY DIRECTORY
if [[ $do_compile = true ]]
then
# ON NEC, COMPILATION IS DONE ON HOST CROSS VIA CROSS COMPILING
# CREATE A TEMPORARY DIRECTORY ON THAT MACHINE (HOME MOUNTED VIA NFS)
if [[ $localhost = nech ]]
then
TEMPDIR_COMPILE=$HOME/work/${usern}.$kennung
if mkdir -p $TEMPDIR_COMPILE
then
printf "\n *** \"$TEMPDIR_COMPILE\" "
printf "\n is generated as temporary directory for cross compiling\n"
else
printf "\n +++ creating directory \"$TEMPDIR_COMPILE\" "
printf "\n needed for cross compilation failed"
locat=compile
exit
fi
else
TEMPDIR_COMPILE=$TEMPDIR
fi
# DETERMINE THE PATH NAME OF THE MAKE DEPOSITORY
line=""
grep "%depository_path" $config_file > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
if [[ "$(echo $line | cut -d" " -s -f3)" = "" ]]
then
global_depository_path=`echo $line | cut -d" " -s -f2`
fi
fi
done < tmp_mrun
line=""
grep " $localhost" $config_file | grep "%depository_path" > tmp_mrun
while read line
do
if [[ "$line" != "" && $(echo $line | cut -c1) != "#" ]]
then
if [[ "$(echo $line | cut -d" " -s -f4)" = "$cond1" && "$(echo $line | cut -d" " -s -f5)" = "$cond2" ]]
then
local_depository_path=`echo $line | cut -d" " -s -f2`
fi
fi
done < tmp_mrun
if [[ "$local_depository_path" = "" ]]
then
if [[ "$global_depository_path" != "" ]]
then
local_depository_path=$global_depository_path
else
printf "\n\n +++ no depository path found in configuration file"
printf "\n for local host \"$localhost\" "
printf "\n please set \"\%depository_path\" in configuration file\n"
locat=config_file; exit
fi
fi
eval local_depository_path=$local_depository_path
[[ "$cond1" != "" ]] && local_depository_path=${local_depository_path}_$cond1
[[ "$cond2" != "" ]] && local_depository_path=${local_depository_path}_$cond2
basename=`echo $mainprog | cut -f1 -d"."`
eval make_depository=${local_depository_path}/${basename}_current_version.tar
if [[ ! -f $make_depository ]]
then
printf "\n"
printf "\n *** WARNING: make depository \"$make_depository\" not found"
printf "\n \"make\" will fail, if the Makefile or other source files are missing\n"
else
cp $make_depository $TEMPDIR_COMPILE
cd $TEMPDIR_COMPILE
tar -xf $make_depository > /dev/null 2>&1
cd - > /dev/null
fi
cp SOURCES_FOR_RUN_$fname/* $TEMPDIR_COMPILE
else
cp $executable ${TEMPDIR}/a.out
fi
# CHANGE TO THE TEMPORARY DIRECTORY
cd $TEMPDIR
printf "\n *** changed to temporary directory: $TEMPDIR"
# STORE THE OUTPUT FILE CONNECTIONS IN A TEMPORARY FILE.
# THIS FILE CAN BE READ BY USER PROCEDURES CALLED LATER IN ORDER TO
# DETERMINE THE PERMANENT FILE NAMES BELONGING TO THE LOCAL FILE
# NAMES
(( i = 0 ))
while (( i < iout ))
do
(( i = i + 1 ))
if [[ "${actionout[$i]}" = tr || "${actionout[$i]}" = tra || "${actionout[$i]}" = trpe ]]
then
printf "${localout[$i]} ${actionout[$i]}\n${pathout[$i]}\n${localhost}_${fname}${endout[$i]}\n" >> OUTPUT_FILE_CONNECTIONS
else
printf "${localout[$i]} ${actionout[$i]}\n${pathout[$i]}\n${frelout[$i]}\n" >> OUTPUT_FILE_CONNECTIONS
fi
done
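# EACH OUTPUT FILE THUS CONTRIBUTES A THREE-LINE RECORD TO OUTPUT_FILE_CONNECTIONS:
#    <local file name> <output action>
#    <output path>
#    <permanent file name>
# WHICH USER PROCEDURES CAN PARSE TO MAP LOCAL NAMES TO PERMANENT ONES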
# START COMPILATION ACTIONS, IF REQUIRED
if [[ $do_compile = true ]]
then
# COMPILING WITH MAKE (ON NEC COMPILER IS CALLED ON HOST CROSS)
printf "\n\n\n *** compilation starts \n$striche\n"
printf " *** compilation with make using following options:\n"
printf " make depository: $make_depository"
if [[ "$mopts" != "" ]]
then
printf " make options: $mopts\n"
fi
printf " compilername: $compiler_name\n"
printf " compiler options: $fopts\n"
printf " preprocessor directives: $cpp_options \n"
printf " linker options: $lopts \n"
if [[ "$modules" != "" ]]
then
printf " modules to be load: $modules \n"
fi
printf " source code files: $source_list \n"
if [[ $localhost = nech ]]
then
# init_cmds was ". /SX/opt/etc/initsx.sh;"
ssh $SSH_PORTOPT 136.172.44.192 -l $usern "$init_cmds $module_calls cd \$HOME/work/${usern}.$kennung; sxmake $mopts -f Makefile PROG=a.out F90=$compiler_name COPT=\"$cpp_options\" F90FLAGS=\"$fopts\" LDFLAGS=\"$lopts\" "
cp $TEMPDIR_COMPILE/a.out .
[[ $? != 0 ]] && compile_error=true
rm -rf $TEMPDIR_COMPILE
elif [[ $localhost = ibmh ]]
then
printf " compiler is called via ssh on \"plogin1\" \n"
ssh $SSH_PORTOPT plogin1 -l $usern "$init_cmds export PATH=/sw/ibm/xlf/13.1.0.8/usr/bin:$PATH; $module_calls cd $TEMPDIR; make $mopts -f Makefile PROG=a.out F90=$compiler_name COPT=\"$cpp_options\" F90FLAGS=\"$fopts\" LDFLAGS=\"$lopts\" "
[[ ! -f a.out ]] && compile_error=true
continue   # OTHERWISE THE EXIT STATUS IS 1, EVEN IF A.OUT EXISTS
elif [[ $localhost = lcsgib ]]
then
printf " compiler is called via ssh on \"bicegate0\" \n"
ssh $SSH_PORTOPT bicegate0 -l $usern "$init_cmds $module_calls cd $TEMPDIR; make $mopts -f Makefile PROG=a.out F90=$compiler_name COPT=\"$cpp_options\" F90FLAGS=\"$fopts\" LDFLAGS=\"$lopts\" "
[[ ! -f a.out ]] && compile_error=true
continue   # OTHERWISE THE EXIT STATUS IS 1, EVEN IF A.OUT EXISTS
elif [[ $localhost = lcsgih ]]
then
printf " compiler is called via ssh on \"hicegate0\" \n"
ssh $SSH_PORTOPT hicegate0 -l $usern "$init_cmds $module_calls cd $TEMPDIR; make $mopts -f Makefile PROG=a.out F90=$compiler_name COPT=\"$cpp_options\" F90FLAGS=\"$fopts\" LDFLAGS=\"$lopts\" 2>&1 "
[[ ! -f a.out ]] && compile_error=true
continue   # OTHERWISE THE EXIT STATUS IS 1, EVEN IF A.OUT EXISTS
elif [[ $localhost = lcflow ]]
then
printf " compiler is called via ssh on \"flow\" \n"
ssh $SSH_PORTOPT flow02.hpc.uni-oldenburg.de -l $usern "$init_cmds $module_calls cd $TEMPDIR; make $mopts -f Makefile PROG=a.out F90=$compiler_name COPT=\"$cpp_options\" F90FLAGS=\"$fopts\" LDFLAGS=\"$lopts\" "
[[ ! -f a.out ]] && compile_error=true
continue   # OTHERWISE THE EXIT STATUS IS 1, EVEN IF A.OUT EXISTS
else
[[ "$init_cmds" != "" ]] && eval $init_cmds
[[ "$module_calls" != "" ]] && eval $module_calls
make $mopts -f Makefile PROG=a.out F90=$compiler_name COPT="$cpp_options" F90FLAGS="$fopts" LDFLAGS="$lopts"
fi
if [[ $? != 0 || "$compile_error" = true || "$module_compile_error" = true ]]
then
printf "\n +++ error occured while compiling or linking"
locat=compile
# WORKAROUND: REMOVE IF CONSTRUCT LATER, BUT KEEP THE EXIT!
# if [[ $localhost != lcsgib && $localhost != lcsgih ]]
# then
exit
# else
# locat=normal
# fi
else
printf "$striche\n *** compilation finished \n"
fi
fi
# IF ONLY A TEST COMPILATION WAS REQUESTED, MRUN IS DONE NOW
if [[ $local_compile = true ]]
then
cd $HOME
rm -rf $TEMPDIR
locat=local_compile; exit
fi
# PROVIDE THE INPUT FILES
# LOOP OVER ALL FILES SPECIFIED BY THE USER
(( i = 0 ))
while (( i < iin ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n *** providing INPUT-files:\n$striche"
fi
# SKIP OPTIONAL FILES IF THEY ARE NOT AVAILABLE
if [[ "${transin[$i]}" = unavailable ]]
then
if [[ "${extin[$i]}" = "" || "${extin[$i]}" = " " ]]
then
printf "\n +++ WARNING: input file \"${pathin[$i]}/${afname}${endin[$i]}\" "
printf "\n is not available!"
else
printf "\n +++ WARNING: input file \"${pathin[$i]}/${afname}${endin[$i]}.${extin[$i]}\" "
printf "\n is not available!"
fi
continue
fi
# CHECK WHETHER IT IS A SINGLE FILE OR ONE FILE PER PROCESSOR
files_for_pes=false; datentyp=file
if [[ "${actionin[$i]}" = pe && -n $numprocs ]]
then
files_for_pes=true; datentyp=directory
actionin[$i]=""
elif [[ "${actionin[$i]}" = pe && ! -n $numprocs ]]
then
actionin[$i]=""
elif [[ "${actionin[$i]}" = arpe && -n $numprocs ]]
then
files_for_pes=true; datentyp=directory
actionin[$i]="ar"
elif [[ "${actionin[$i]}" = arpe && ! -n $numprocs ]]
then
actionin[$i]="ar"
elif [[ "${actionin[$i]}" = flpe && -n $numprocs ]]
then
files_for_pes=true; datentyp=directory
actionin[$i]="fl"
elif [[ "${actionin[$i]}" = flpe && ! -n $numprocs ]]
then
actionin[$i]="fl"
fi
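# IN SHORT: THE "pe" SUFFIX OF AN ACTION MEANS ONE FILE PER PROCESSOR (A
# DIRECTORY ON PARALLEL RUNS); IT IS STRIPPED HERE, LEAVING THE BASE ACTION
# "" (COPY), "ar" (ARCHIVE) OR "fl" (FETCH FROM THE TEMPORARY DATA CATALOG)
# FOR THE STEPS BELOW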
if [[ $files_for_pes = true ]]
then
printf "\n >>> INPUT: ${absnamein[$i]}/.... to ${localin[$i]}"
else
printf "\n >>> INPUT: ${absnamein[$i]} to ${localin[$i]}"
fi
# INPUT FILE FOR A RESTART RUN. IT IS CHECKED WHETHER THIS FILE IS
# STILL AVAILABLE IN THE TEMPORARY DATA CATALOG. IF NOT, AN ATTEMPT
# IS MADE TO FETCH IT FROM THE ARCHIVE SERVER AFTERWARDS
if [[ "${actionin[$i]}" = fl ]]
then
printf "\n $datentyp will be fetched from temporary directory \"${tmp_data_catalog}\" !"
if [[ $files_for_pes = false ]]
then
if [[ -f "$tmp_data_catalog/${frelin[$i]}" ]]
then
ln $tmp_data_catalog/${frelin[$i]} ${localin[$i]}
got_tmp[$i]=true
elif [[ -f "$WORK/${frelin[$i]}" && $ignore_archive_error = true ]]
then
printf "\n +++ $datentyp not found in \"$tmp_data_catalog\" !"
printf "\n *** trying to use backup copy in \"$WORK\" "
cp $WORK/${frelin[$i]} ${localin[$i]}
else
printf "\n +++ $datentyp not found in \"$tmp_data_catalog\" "
printf "\n or \"$tmp_data_catalog\" does not exist!"
printf "\n *** trying to get copy from archive"
actionin[$i]=ar
fi
else
if [[ -d "$tmp_data_catalog/${frelin[$i]}" ]]
then
mkdir ${localin[$i]}
cd $tmp_data_catalog/${frelin[$i]}
for file in $(ls *)
do
ln $file $TEMPDIR/${localin[$i]}
done
cd $TEMPDIR
got_tmp[$i]=true
elif [[ -d "$WORK/${frelin[$i]}" && $ignore_archive_error = true ]]
then
printf "\n +++ $datentyp not found in \"$tmp_data_catalog\" !"
printf "\n *** trying to use backup copy in \"$WORK\" "
cp -r $WORK/${frelin[$i]} ${localin[$i]}
else
printf "\n +++ $datentyp not found in \"$tmp_data_catalog\" "
printf "\n or \"$tmp_data_catalog\" does not exist!"
printf "\n *** trying to get copy from archive"
actionin[$i]=ar
fi
fi
fi
# FILE RESIDES ON THE ARCHIVE SERVER
if [[ "${actionin[$i]}" = ar ]]
then
if [[ $files_for_pes = false ]]
then
printf "\n file will be restored from archive-system ($archive_system)!"
else
printf "\n directory will be restored from archive-system ($archive_system)!"
fi
file_restored=false
if [[ $archive_system = asterix ]]
then
do_stagein=true
(( stagein_anz = 0 ))
while [[ $do_stagein = true ]]
do
if [[ $files_for_pes = false ]]
then
stagein -O ${frelin[$i]} > STAGEIN_OUTPUT
else
stagein -t -O ${frelin[$i]} > STAGEIN_OUTPUT
fi
cat STAGEIN_OUTPUT
if [[ $(grep -c "st.msg:i24" STAGEIN_OUTPUT) != 0 ]]
then
file_restored=true
do_stagein=false
else
(( stagein_anz = stagein_anz + 1 ))
if (( stagein_anz == 10 ))
then
printf "\n +++ stagein stoped after 10 tries"
locat=stage
exit
fi
printf "\n +++ restoring from archive failed, trying again:"
sleep 900
fi
done
elif [[ $archive_system = DMF ]]
then
if [[ $files_for_pes = false ]]
then
printf "\n +++ restoring of single files impossible with $archive_system !\n"
locat=DMF
exit
else
find $ARCHIVE/${frelin[$i]} -type m -print | dmget
cp -r $ARCHIVE/${frelin[$i]} $PWD
file_restored=true
fi
elif [[ $archive_system = tivoli ]]
then
if [[ $files_for_pes = false ]]
then
if [[ $localhost = lcsgih ]]
then
ssh $SSH_PORTOPT $usern@hicedata.hlrn.de "cp $PERM/${frelin[$i]} $PWD"
else
ssh $SSH_PORTOPT $usern@bicedata.hlrn.de "cp $PERM/${frelin[$i]} $PWD"
fi
else
(( inode = 0 ))
while (( inode < nodes ))
do
if [[ $localhost = lcsgih ]]
then
ssh $SSH_PORTOPT $usern@hicedata.hlrn.de "cd $PWD; tar xf $PERM/${frelin[$i]}/${frelin[$i]}.node_$inode.tar"
else
ssh $SSH_PORTOPT $usern@bicedata.hlrn.de "cd $PWD; tar xf $PERM/${frelin[$i]}/${frelin[$i]}.node_$inode.tar"
fi
(( inode = inode + 1 ))
done
fi
file_restored=true
elif [[ $archive_system = ut ]]
then
if [[ $files_for_pes = false ]]
then
cp $UT/${frelin[$i]} .
else
(( inode = 0 ))
while (( inode < nodes ))
do
tar xf $UT/${frelin[$i]}/${frelin[$i]}.node_$inode.tar
(( inode = inode + 1 ))
done
fi
file_restored=true
else
printf "\n +++ archive_system=\"$archive_system\" restore impossible!"
locat=rearchive
exit
fi
if [[ $file_restored = true ]]
then
# ALSO PLACE THE FILE IN THE TEMPORARY DATA DIRECTORY SO THAT
# IT IS STILL AVAILABLE FOR FURTHER ACCESSES
[[ ! -d $tmp_data_catalog ]] && mkdir -p $tmp_data_catalog; chmod g+rx $tmp_data_catalog
if [[ $files_for_pes = false ]]
then
ln -f ${frelin[$i]} $tmp_data_catalog/${frelin[$i]}
else
mkdir $tmp_data_catalog/${frelin[$i]}
ln -f ${frelin[$i]}/* $tmp_data_catalog/${frelin[$i]}
fi
got_tmp[$i]=true
# MAKE THE FILE AVAILABLE UNDER ITS LOCAL NAME
mv ${frelin[$i]} ${localin[$i]}
fi
fi
# FILE RESIDES IN THE DIRECTORY SPECIFIED BY THE USER
if [[ "${actionin[$i]}" = "" || "${actionin[$i]}" = "di" || "${actionin[$i]}" = "npe" ]]
then
if [[ "${actionin[$i]}" = "npe" && -n $numprocs ]]
then
# THE FILE IS PROVIDED FOR THE PROCESSORS OF A PARALLEL MACHINE
printf "\n file will be provided for $numprocs processors"
mkdir ${localin[$i]}
ival=$numprocs
(( ii = 0 ))
while (( ii <= ival-1 ))
do
if (( ii < 10 ))
then
cp ${absnamein[$i]} ${localin[$i]}/_000$ii
elif (( ii < 100 ))
then
cp ${absnamein[$i]} ${localin[$i]}/_00$ii
elif (( ii < 1000 ))
then
cp ${absnamein[$i]} ${localin[$i]}/_0$ii
else
cp ${absnamein[$i]} ${localin[$i]}/_$ii
fi
(( ii = ii + 1 ))
done
else
if [[ $files_for_pes = true ]]
then
# THE FILES BELONGING TO THE PROCESSORS OF A PARALLEL MACHINE ARE
# PROVIDED BY COPYING (OR LINKING) THE COMPLETE CATALOG CONTENT
# INTO THE LOCAL PER-PE DIRECTORY
printf "\n providing $numprocs files for the respective processors"
mkdir ${localin[$i]}
if [[ $link_local_input = true ]]
then
printf " using ln -f\n"
cd ${absnamein[$i]}
for file in $(ls *)
do
ln -f $file ${localin[$i]}
done
cd $TEMPDIR
fi
# If "ln -f" fails of if "$link_local_input = false" do a normal "cp -r"
if [[ ! -f "${localin[$i]}/_0000" ]]
then
if [[ $link_local_input = true ]]
then
printf " ln failed for .../_0000, using cp...\n"
fi
cp -r ${absnamein[$i]}/* ${localin[$i]}
fi
else
# PROVISION ON SINGLE-PROCESSOR MACHINES
if [[ $link_local_input = true ]]
then
printf " using ln -f\n"
ln -f ${absnamein[$i]} ${localin[$i]}
fi
# If "ln -f" fails of if "$link_local_input = false" do a normal "cp"
if [[ ! -f "${localin[$i]}" ]]
then
if [[ $link_local_input = true ]]
then
printf " ln failed, using cp...\n"
fi
cp ${absnamein[$i]} ${localin[$i]}
fi
fi
fi
fi
done
if (( i != 0 ))
then
printf "\n$striche\n *** all INPUT-files provided \n"
fi
# EXECUTE INPUT COMMANDS, IF ANY
(( i = 0 ))
while (( i < iic ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n *** execution of INPUT-commands:\n$striche"
fi
printf "\n >>> ${in_command[$i]}"
eval ${in_command[$i]}
if (( i == iic ))
then
printf "\n$striche\n"
fi
done
# CALCULATE THE REMAINING CPU TIME
cpurest=${cpumax}.
# START DVR STREAMING SERVER
if [[ $(echo $package_list | grep -c dvrp_graphics) != 0 ]]
then
if [[ "$dvr_server" != "" ]]
then
printf "\n\n *** preparing the dvr streaming server configuration file"
# Check, if a dvr server is already running
running_dvrserver_id=`echo $(ps -edaf | grep .dvrserver.config | grep -v grep) | cut -d" " -f2`
if [[ "$running_dvrserver_id" != "" ]]
then
printf "\n\n +++ WARNING: A dvr server with id=$running_dvrserver_id is already running!"
printf "\n This server is used instead starting a new one!"
printf "\n If required, script \"process_dvr_output\" has to be run manually."
else
# COPY CONFIGURATION FILE FOR STREAMING SERVER FROM REPOSITORY TO HERE
if [[ -f ${PALM_BIN}/.dvrserver.config ]]
then
cp ${PALM_BIN}/.dvrserver.config .
# Entering the BASEDIR, UID and GID into this file
# (placeholder names <basedir>, <uid> and <gid> are assumed here)
user_id=`id -u`
group_id=`id -g`
# & is needed as separator, because TEMPDIR contains /
sed "s&<basedir>&${TEMPDIR}&g" .dvrserver.config > .dvrserver.1
sed "s/<uid>/$user_id/g" .dvrserver.1 > .dvrserver.2
sed "s/<gid>/$group_id/g" .dvrserver.2 > .dvrserver.3
mv .dvrserver.3 .dvrserver.config
rm .dvrserver.1 .dvrserver.2
# Start the dvr server in the background, get its id and print it on the terminal
$dvr_server .dvrserver.config >> DVR_LOGFILE 2>&1 &
dvrserver_id=`echo $(ps -edaf | grep .dvrserver.config) | cut -d" " -f2`
printf "\n *** streaming server with id=$dvrserver_id is started in background"
local_dvrserver_running=.TRUE.
else
printf "\n +++ missing file \".dvrserver.config\" in directory:"
printf "\n \"$PALM_BIN\" "
locat=dvr
exit
fi
fi
else
printf "\n\n --- INFORMATIVE: no dvr streaming server will be started"
fi
fi
# CREATE A NAMELIST FILE CONTAINING THE VALUES OF ENVIRONMENT
# VARIABLES (TO BE READ BY PALM)
cat > ENVPAR << %%END%%
&envpar run_identifier = '$fname', host = '$localhost',
write_binary = '$write_binary', tasks_per_node = $tasks_per_node,
maximum_parallel_io_streams = $maximum_parallel_io_streams,
maximum_cpu_time_allowed = ${cpumax}.,
revision = '$global_revision',
local_dvrserver_running = $local_dvrserver_running /
%%END%%
# PROGRAM START
printf "\n\n *** execution starts in directory\n \"`pwd`\"\n$striche\n"
PATH=$PATH:$TEMPDIR
# MPI debug option (argument checking, slows down execution due to increased latency)
if [[ "$mpi_debug" = true ]]
then
export MPI_CHECK_ARGS=1
printf "\n +++ MPI_CHECK_ARGS=$MPI_CHECK_ARGS"
fi
if [[ "$totalview" = true ]]
then
printf "\n *** totalview debugger will be used"
tv_opt="-tv"
else
tv_opt=""
fi
if [[ "$cond1" = debug || "$cond2" = debug ]]
then
if [[ "$ENVIRONMENT" = BATCH ]]
then
if [[ $(echo $localhost | cut -c1-5) != lcsgi ]]
then
printf "\n +++ debug is allowed in interactive mode only"
locat=debug
exit
fi
fi
if [[ $localhost = ibmh ]]
then
# SETUP THE IBM MPI ENVIRONMENT
export MP_SHARED_MEMORY=yes
export AIXTHREADS_SCOPE=S
export OMP_NUM_THREADS=$threads_per_task
export AUTHSTATE=files
export XLFRTEOPTS="nlwidth=132:err_recovery=no" # RECORD-LENGTH OF NAMELIST-OUTPUT
# FOLLOWING OPTIONS ARE MANDATORY FOR TOTALVIEW
export MP_ADAPTER_USE=shared
export MP_CPU_USE=multiple
export MP_TIMEOUT=1200
unset MP_TASK_AFFINITY
# SO FAR, TOTALVIEW NEEDS HOSTFILE MECHANISM FOR EXECUTION
#(( ii = 1 ))
#while (( ii <= $numprocs ))
#do
# echo $localhost_realname >> hostfile
# (( ii = ii + 1 ))
#done
#export MP_HOSTFILE=hostfile
if [[ "$LOADLBATCH" = yes ]]
then
totalview poe a.out $ROPTS
else
echo totalview poe -a a.out -procs $numprocs -rmpool 0 -nodes 1 $ROPTS
export TVDSVRLAUNCHCMD=ssh
totalview poe -a a.out -procs $numprocs -rmpool 0 -nodes 1 $ROPTS
fi
elif [[ $(echo $localhost | cut -c1-5) = lcsgi ]]
then
# CURRENTLY NO DEBUGGER ON LCSGI
if [[ $run_coupled_model = true ]]
then
printf "\n +++ no debug in coupled mode available on \"$localhost\" "
locat=debug
exit
else
echo "no_coupling" > runfile_atmos
fi
(( ii = $numprocs / $threads_per_task ))
export OMP_NUM_THREADS=$threads_per_task
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
if [[ $threads_per_task != 1 ]]
then
printf "\n threads per task: $threads_per_task"
fi
printf "\n\n"
if [[ $( echo $mpilib | cut -c1-3 ) = mpt ]]
then
# export MPI_LAUNCH_TIMEOUT=360
if [[ "$totalview" = true ]]
then
printf "\n running totalview debugger"
mpiexec_mpt $tv_opt -n $ii ./a.out $ROPTS < runfile_atmos
else
mpiexec_mpt -np $ii ./a.out $ROPTS < runfile_atmos
fi
elif [[ $( echo $mpilib | cut -c1-3 ) = mva ]]
then
# ulimit -s 300000 # A too large stack size causes problems
# export MV2_NUM_PORTS=2
# export MV2_CPU_MAPPING=0:1:2:3
if [[ "$totalview" = true ]]
then
printf "\n running totalview debugger"
mpiexec $tv_opt ./a.out $ROPTS < runfile_atmos
else
mpiexec ./a.out $ROPTS < runfile_atmos
fi
fi
else
printf "\n +++ no debug available on \"$localhost\" "
printf "\n or not implemented in mrun so far"
locat=debug
exit
fi
# end debug mode
else
# normal execution
if [[ -n $numprocs ]]
then
# RUNNING THE PROGRAM ON PARALLEL MACHINES
if [[ $(echo $host | cut -c1-3) = ibm ]]
then
# SETUP THE IBM MPI ENVIRONMENT
if [[ $host != ibmh && $host != ibmkisti ]]
then
export MP_SHARED_MEMORY=yes
export AIXTHREAD_SCOPE=S
export OMP_NUM_THREADS=$threads_per_task
export XLSMPOPTS="spins=0:yields=0:stack=20000000"
export AUTHSTATE=files
export XLFRTEOPTS="nlwidth=132:err_recovery=no" # RECORD-LENGTH OF NAMELIST-OUTPUT
# export MP_PRINTENV=yes
# TUNING VARIABLES TO IMPROVE COMMUNICATION; CURRENTLY
# (SEP 04, FEDERATION) THEY SHOW HARDLY ANY EFFECT, THOUGH
export MP_WAIT_MODE=poll
[[ $node_usage = not_shared ]] && export MP_SINGLE_THREAD=yes
fi
if [[ $host = ibmkisti ]]
then
export LANG=en_US
export MP_SHARED_MEMORY=yes
if [[ $threads_per_task = 1 ]]
then
export MP_SINGLE_THREAD=yes
export MEMORY_AFFINITY=MCM
else
export OMP_NUM_THREADS=$threads_per_task
fi
fi
if [[ "$LOADLBATCH" = yes ]]
then
printf "\n--- Control: OMP_NUM_THREADS = \"$OMP_NUM_THREADS\" \n"
if [[ "$cond1" = hpmcount || "$cond2" = hpmcount ]]
then
/opt/optibm/HPM_2_4_1/bin/hpmcount a.out $ROPTS
else
if [[ $run_coupled_model = false ]]
then
if [[ "$ocean_file_appendix" = true ]]
then
echo "precursor_ocean" > runfile_atmos
else
echo "precursor_atmos" > runfile_atmos
fi
else
(( iia = $numprocs_atmos / $threads_per_task ))
(( iio = $numprocs_ocean / $threads_per_task ))
printf "\n coupled run ($iia atmosphere, $iio ocean)"
printf "\n using $coupled_mode coupling"
printf "\n\n"
echo "coupled_run $iia $iio" > runfile_atmos
fi
poe ./a.out $ROPTS < runfile_atmos
fi
else
if [[ $localhost = ibmh || $localhost = ibms ]]
then
poe a.out -procs $numprocs -nodes 1 -rmpool 0 $ROPTS
elif [[ $localhost = ibmkisti || $localhost = ibmku || $localhost = ibmy ]]
then
if [[ -f $hostfile ]]
then
cp $hostfile hostfile
else
(( ii = 1 ))
while (( ii <= $numprocs ))
do
echo $localhost_realname >> hostfile
(( ii = ii + 1 ))
done
fi
export MP_HOSTFILE=hostfile
if [[ $run_coupled_model = false ]]
then
if [[ "$ocean_file_appendix" = true ]]
then
echo "precursor_ocean" > runfile_atmos
else
echo "precursor_atmos" > runfile_atmos
fi
else
(( iia = $numprocs_atmos / $threads_per_task ))
(( iio = $numprocs_ocean / $threads_per_task ))
printf "\n coupled run ($iia atmosphere, $iio ocean)"
printf "\n using $coupled_mode coupling"
printf "\n\n"
echo "coupled_run $iia $iio" > runfile_atmos
fi
if [[ $localhost = ibmy ]]
then
./a.out -procs $tasks_per_node $ROPTS < runfile_atmos
else
poe ./a.out -procs $numprocs $ROPTS < runfile_atmos
fi
else
if [[ "$host_file" = "" ]]
then
printf "\n +++ no hostfile given in configuration file"
locat=config_file
exit
else
eval host_file=$host_file
fi
export MP_HOSTFILE=$host_file
poe a.out -procs $numprocs -tasks_per_node $numprocs $ROPTS
fi
fi
elif [[ $host = nech || $host = necriam ]]
then
(( ii = nodes ))
if [[ $ii = 1 ]]
then
export F_ERRCNT=0 # acceptable number of errors before program is stopped
export MPIPROGINF=YES
# export F_TRACE=YES|FMT1|FMT2 # output of ftrace informations to job protocol
echo "*** execution on single node with mpirun"
mpirun -np $numprocs ./a.out $ROPTS
else
(( i = 0 ))
while (( i < ii ))
do
echo "-h $i -p $tasks_per_node -e ./mpi_exec_shell" >> multinode_config
(( i = i + 1 ))
done
echo "#!/bin/sh" > mpi_exec_shell
echo " " >> mpi_exec_shell
echo "set -u" >> mpi_exec_shell
echo "F_ERRCNT=0" >> mpi_exec_shell
echo "MPIPROGINV=YES" >> mpi_exec_shell
echo "OMP_NUM_THREADS=$threads_per_task" >> mpi_exec_shell
echo "cpurest=$cpurest" >> mpi_exec_shell
echo "fname=$fname" >> mpi_exec_shell
echo "localhost=$localhost" >> mpi_exec_shell
echo "return_addres=$return_addres" >> mpi_exec_shell
echo "return_username=$return_username" >> mpi_exec_shell
echo "tasks_per_node=$tasks_per_node" >> mpi_exec_shell
echo "write_binary=$write_binary" >> mpi_exec_shell
echo "use_seperate_pe_for_dvrp_output=$use_seperate_pe_for_dvrp_output" >> mpi_exec_shell
echo " " >> mpi_exec_shell
echo "export F_ERRCNT" >> mpi_exec_shell
echo "export MPIPROGINV" >> mpi_exec_shell
echo "export OMP_NUM_THREADS" >> mpi_exec_shell
echo "export cpurest" >> mpi_exec_shell
echo "export fname" >> mpi_exec_shell
echo "export localhost" >> mpi_exec_shell
echo "export return_addres" >> mpi_exec_shell
echo "export return_username" >> mpi_exec_shell
echo "export tasks_per_node" >> mpi_exec_shell
echo "export write_binary" >> mpi_exec_shell
echo "export use_seperate_pe_for_dvrp_output" >> mpi_exec_shell
echo " " >> mpi_exec_shell
echo "exec ./a.out" >> mpi_exec_shell
chmod u+x mpi_exec_shell
export MPIPROGINF=YES
mpirun -f multinode_config &
wait
fi
elif [[ $(echo $host | cut -c1-2) = lc && $host != lckyoto && $host != lctit ]]
then
# COPY HOSTFILE FROM SOURCE DIRECTORY OR CREATE IT, IF IT
# DOES NOT EXIST
if [[ $host != lcsgih && $host != lcsgib && $host != lckyuh && $host != lckyut ]]
then
if [[ -f $hostfile ]]
then
cp $hostfile hostfile
(( ii = $numprocs / $threads_per_task ))
else
(( ii = 1 ))
while (( ii <= $numprocs / $threads_per_task ))
do
echo $localhost_realname >> hostfile
(( ii = ii + 1 ))
done
fi
eval zeile=\"`head -n $ii hostfile`\"
printf "\n *** running on: $zeile"
fi
(( ii = $numprocs / $threads_per_task ))
export OMP_NUM_THREADS=$threads_per_task
# echo "*** OMP_NUM_THREADS=$OMP_NUM_THREADS"
if [[ $threads_per_task != 1 ]]
then
# increase stack size to unlimited, because large runs
# may abort otherwise
ulimit -Ss unlimited
printf "\n threads per task: $threads_per_task stacksize: unlimited"
fi
if [[ $run_coupled_model = false ]]
then
if [[ "$ocean_file_appendix" = true ]]
then
echo "precursor_ocean" > runfile_atmos
else
echo "precursor_atmos" > runfile_atmos
fi
printf "\n\n"
if [[ $host = lcsgih || $host = lcsgib ]]
then
if [[ $( echo $mpilib | cut -c1-3 ) = mpt ]]
then
# MPI_DSM_DISTRIBUTE not necessary when MPI_DSM_CPULIST is set
# export MPI_DSM_DISTRIBUTE=1
# MPI_DSM_CPULIST: pin MPI processes to cores
if [[ $use_openmp = false ]]
then
if [[ "$sgi_feature" = ice2 ]]
then
export MPI_DSM_CPULIST="0,4,1,5,2,6,3,7:allhosts"
else
export MPI_DSM_CPULIST="0,1,4,5,2,3,6,7:allhosts"
fi
else
unset MPI_DSM_CPULIST
fi
# MPI_IB_RAILS: use both IB rails on ICE2
export MPI_BUFS_PER_HOST=512
export MPI_IB_RAILS=2
# NECESSARY, IF MORE THAN 4096 PEs ARE USED
export MPI_CONNECTIONS_THRESHOLD=8192
# echo "*** MPI_DSM_CPULIST=$MPI_DSM_CPULIST"
export MPI_TYPE_DEPTH=20
# echo "*** MPI_TYPE_DEPTH=$MPI_TYPE_DEPTH"
export MPI_GROUP_MAX=64
# echo "*** MPI_GROUP_MAX=$MPI_GROUP_MAX"
if [[ $use_openmp = true ]]
then
echo " mpiexec -npernode $tasks_per_node ./a.out $ROPTS < runfile_atmos"
mpiexec -npernode $tasks_per_node $tv_opt ./a.out $ROPTS < runfile_atmos
else
mpiexec_mpt -np $ii $tv_opt ./a.out $ROPTS < runfile_atmos
fi
# next is test for openmp usage
# echo "mpiexec -npernode $tasks_per_node ./a.out $ROPTS < runfile_atmos"
# mpiexec -npernode $tasks_per_node ./a.out $ROPTS < runfile_atmos
elif [[ $( echo $mpilib | cut -c1-3 ) = mva ]]
then
export MV2_NUM_PORTS=2
# The default setting of MV2_CPU_MAPPING gives best results
# export MV2_ENABLE_AFFINITY=1
# export MV2_CPU_MAPPING=0,1,4,5,2,3,6,7
# if [[ "$sgi_feature" = ice2 ]]
# then
# export MV2_CPU_MAPPING=0,4,1,5,2,6,3,7
# else
# export MV2_CPU_MAPPING=0,1,4,5,2,3,6,7
# fi
if [[ $use_openmp = true ]]
then
unset MV2_CPU_MAPPING
export MV2_ENABLE_AFFINITY=0
fi
echo "*** MV2_CPU_MAPPING=$MV2_CPU_MAPPING"
echo "*** MV2_ENABLE_AFFINITY=$MV2_ENABLE_AFFINITY"
if [[ $use_openmp = true ]]
then
echo " mpiexec -npernode $tasks_per_node ./a.out $ROPTS < runfile_atmos"
mpiexec -npernode $tasks_per_node $tv_opt ./a.out $ROPTS < runfile_atmos
else
mpiexec -np $ii $tv_opt ./a.out $ROPTS < runfile_atmos
fi
elif [[ "$mpilib" = impi ]]
then
echo "mpirun -np $ii inspxe-cl -r result -collect mi3 -- ./a.out < runfile_atmos"
mpirun -np $ii inspxe-cl -r result -collect mi3 -- ./a.out < runfile_atmos
fi
elif [[ $host = lcxe6 || $host = lcxt5m ]]
then
aprun -n $ii -N $tasks_per_node a.out $ROPTS < runfile_atmos
elif [[ $host = lcflow ]]
then
mpiexec -machinefile $TMPDIR/machines -n $ii -env I_MPI_FABRICS shm:ofa a.out < runfile_atmos $ROPTS
elif [[ $host = lcsb ]]
then
mpirun_rsh -hostfile $PBS_NODEFILE -np `cat $PBS_NODEFILE | wc -l` a.out < runfile_atmos $ROPTS
elif [[ $host = lckiaps ]]
then
mpirun -np $ii -f $PBS_NODEFILE a.out < runfile_atmos $ROPTS
elif [[ $host = lckyu* ]]
then
mpiexec -n $ii --stdin runfile_atmos ./a.out $ROPTS
else
mpiexec -machinefile hostfile -n $ii a.out < runfile_atmos $ROPTS
fi
else
# currently there is no full MPI-2 support on ICE and XT4
(( iia = $numprocs_atmos / $threads_per_task ))
(( iio = $numprocs_ocean / $threads_per_task ))
printf "\n coupled run ($iia atmosphere, $iio ocean)"
printf "\n using $coupled_mode coupling"
printf "\n\n"
if [[ $coupled_mode = "mpi2" ]]
then
echo "atmosphere_to_ocean $iia $iio" > runfile_atmos
echo "ocean_to_atmosphere $iia $iio" > runfile_ocean
if [[ $host = lcsgih || $host = lcsgib ]]
then
if [[ $( echo $mpilib | cut -c1-3 ) = mpt ]]
then
# export MPI_LAUNCH_TIMEOUT=360
mpiexec_mpt -np $iia ./a.out $ROPTS < runfile_atmos &
mpiexec_mpt -np $iio ./a.out $ROPTS < runfile_ocean &
elif [[ $( echo $mpilib | cut -c1-3 ) = mva ]]
then
# ulimit -s 300000 # A too large stack size causes problems
# export MV2_NUM_PORTS=2
# export MV2_CPU_MAPPING=0:1:2:3
mpiexec -n $iia ./a.out $ROPTS < runfile_atmos &
mpiexec -n $iio ./a.out $ROPTS < runfile_ocean &
fi
elif [[ $host = lcxe6 || $host = lcxt5m ]]
then
aprun -n $iia -N $tasks_per_node a.out < runfile_atmos $ROPTS &
aprun -n $iio -N $tasks_per_node a.out < runfile_ocean $ROPTS &
else
# WORKAROUND BECAUSE mpiexec WITH -env option IS NOT AVAILABLE ON SOME SYSTEMS
mpiexec -machinefile hostfile -n $iia a.out $ROPTS < runfile_atmos &
mpiexec -machinefile hostfile -n $iio a.out $ROPTS < runfile_ocean &
# mpiexec -machinefile hostfile -n $iia -env coupling_mode atmosphere_to_ocean a.out $ROPTS &
# mpiexec -machinefile hostfile -n $iio -env coupling_mode ocean_to_atmosphere a.out $ROPTS &
fi
wait
else
echo "coupled_run $iia $iio" > runfile_atmos
if [[ $host = lcsgih || $host = lcsgib ]]
then
if [[ $( echo $mpilib | cut -c1-3 ) = mpt ]]
then
# export MPI_LAUNCH_TIMEOUT=360
mpiexec_mpt -np $ii ./a.out $ROPTS < runfile_atmos
elif [[ $( echo $mpilib | cut -c1-3 ) = mva ]]
then
# ulimit -s 300000 # A too large stack size causes problems
# export MV2_NUM_PORTS=2
# export MV2_CPU_MAPPING=0:1:2:3
mpiexec ./a.out $ROPTS < runfile_atmos
fi
elif [[ $host = lcxe6 || $host = lcxt5m ]]
then
aprun -n $ii -N $tasks_per_node a.out < runfile_atmos $ROPTS
elif [[ $host = lck || $host = lckordi ]]
then
mpiexec -n $ii ./a.out $ROPTS < runfile_atmos &
elif [[ $host = lckyu* ]]
then
mpiexec -n $ii --stdin runfile_atmos ./a.out $ROPTS
fi
wait
fi
fi
elif [[ $host = lckyoto ]]
then
set -xv
export P4_RSHCOMMAND=plesh
echo " P4_RSHCOMMAND = $P4_RSHCOMMAND"
if [[ "$ENVIRONMENT" = BATCH ]]
then
if [[ "$cond2" = fujitsu ]]
then
mpiexec -n $numprocs ./a.out $ROPTS # for fujitsu-compiler
elif [[ "cond2" = pgi ]]
then
mpirun -np $numprocs -machinefile ${QSUB_NODEINF} ./a.out $ROPTS
else
mpirun_rsh -np $numprocs -hostfile ${QSUB_NODEINF} MV2_USE_SRQ=0 ./a.out ${ROPTS} || /bin/true
fi
else
if [[ "$cond2" = "" ]]
then
mpirun_rsh -np $numprocs ./a.out $ROPTS # for intel
else
mpirun -np $numprocs ./a.out $ROPTS
fi
fi
set +xv
elif [[ $host = lctit ]]
then
export OMP_NUM_THREADS=$threads_per_task
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
if [[ "$threads_per_task" != 1 ]]
then
export MV2_ENABLE_AFFINITY=0
fi
echo "----- PBS_NODEFILE content:"
cat $PBS_NODEFILE
echo "-----"
(( ii = $numprocs / $threads_per_task ))
echo "mpirun -np $ii -hostfile $PBS_NODEFILE ./a.out $ROPTS"
mpirun -np $ii -hostfile $PBS_NODEFILE ./a.out $ROPTS
else
mpprun -n $numprocs a.out $ROPTS
fi
[[ $? != 0 ]] && execution_error=true
# PERFORMANCE ANALYSIS WITH APPRENTICE
if [[ "$cond1" = apprentice || "$cond2" = apprentice ]]
then
apprentice
fi
else
a.out $ROPTS
fi
fi
if [[ $? != 0 || $execution_error = true ]]
then
# ABORT IN CASE OF RUNTIME ERRORS
# [[ ! ( "$cond1" = debug || "$cond2" = debug ) ]] && cat aout_output*
printf "\n +++ runtime error occurred"
locat=execution
exit
else
# [[ ! ( "$cond1" = debug || "$cond2" = debug ) ]] && cat aout_output*
printf "\n$striche\n *** execution finished \n"
# Stop the dvr streaming server and process the dvr output in order
# to create dvrs- and html-files containing all streams
if [[ "$dvrserver_id" != "" ]]
then
kill $dvrserver_id
printf "\n *** dvr server with id=$dvrserver_id has been stopped"
# If there is a directory, data have been output by the
# streaming server. Otherwise, the user has chosen dvrp_output=local
if [[ -d DATA_DVR ]]
then
# Add the current dvr configuration file to the dvr output
# directory
cp .dvrserver.config DATA_DVR
# Process the dvr output (option -s for also generating
# sequence mode data)
process_dvr_output -d DATA_DVR -f $fname -s
else
# Process the local output
process_dvr_output -l -d DATA_DVR -f $fname
fi
elif [[ $(echo $package_list | grep -c dvrp_graphics) != 0 ]]
then
# Process dvr output generated in local mode (dvrp_output=local)
process_dvr_output -l -d DATA_DVR -f $fname
fi
fi
# Call of combine_plot_fields in order to merge single files written
# by each PE into one file.
if [[ ! -f ${PALM_BIN}/combine_plot_fields${block}.x ]]
then
printf "\n\n\n +++ WARNING: no combine_plot_fields found for given block \"$cond1 $cond2\""
printf "\n 2d- and/or 3d-data may be incomplete!"
printf "\n Run \"mbuild -u -h $localhost\" to generate utilities for this block.\n"
elif [[ "$combine_plot_fields" == true ]]
then
printf "\n\n\n *** post-processing: now executing \"combine_plot_fields${block}.x\" ..."
combine_plot_fields${block}.x
else
# Temporary solution to skip combine_plot_fields. This is necessary in case of huge amounts of
# data output. To do: extend this branch by creating a batch job for combine_plot_fields.
printf "\n\n\n *** post-processing: skipping combine_plot_fields (-Z option set) ..."
fi
# EXECUTE OUTPUT COMMANDS, IF ANY
(( i = 0 ))
while (( i < ioc ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n *** execution of OUTPUT-commands:\n$striche"
fi
printf "\n >>> ${out_command[$i]}"
eval ${out_command[$i]}
if (( i == ioc ))
then
printf "\n$striche\n"
fi
done
# OPTIONALLY LIST THE CONTENT OF THE CURRENT DIRECTORY
if [[ $do_trace = true ]]
then
printf "\n\n"
ls -al
fi
# COPY OUTPUT FILES TO THEIR DESTINATIONS
(( i = 0 ))
while (( i < iout ))
do
(( i = i + 1 ))
if (( i == 1 ))
then
printf "\n\n *** saving OUTPUT-files:\n$striche"
fi
# CHECK WHETHER IT IS A SINGLE FILE OR ONE FILE PER PROCESSOR
files_for_pes=false; filetyp=file
if [[ "${actionout[$i]}" = pe && -n $numprocs ]]
then
files_for_pes=true; filetyp=directory
actionout[$i]=""
elif [[ "${actionout[$i]}" = pe && ! -n $numprocs ]]
then
actionout[$i]=""
elif [[ "${actionout[$i]}" = arpe && -n $numprocs ]]
then
files_for_pes=true; filetyp=directory
actionout[$i]="ar"
elif [[ "${actionout[$i]}" = arpe && ! -n $numprocs ]]
then
actionout[$i]="ar"
elif [[ "${actionout[$i]}" = flpe && -n $numprocs ]]
then
files_for_pes=true; filetyp=directory
actionout[$i]="fl"
elif [[ "${actionout[$i]}" = flpe && ! -n $numprocs ]]
then
actionout[$i]="fl"
elif [[ "${actionout[$i]}" = trpe && -n $numprocs ]]
then
files_for_pes=true; filetyp=directory
actionout[$i]="tr"
elif [[ "${actionout[$i]}" = trpe && ! -n $numprocs ]]
then
actionout[$i]="tr"
fi
if [[ ! -f ${localout[$i]} && $files_for_pes = false ]]
then
printf "\n +++ temporary OUTPUT-file ${localout[$i]} does not exist\n"
elif [[ ! -d ${localout[$i]} && $files_for_pes = true ]]
then
printf "\n +++ temporary OUTPUT-file ${localout[$i]}/.... does not exist\n"
else
# COPY VIA FTP/SCP (ALWAYS IN BINARY MODE, -m: IF THE TARGET DIRECTORY
# DOES NOT EXIST, AN ATTEMPT IS MADE TO CREATE IT), BUT ONLY FOR
# REMOTE RUNS
if [[ "${actionout[$i]}" = tr ]]
then
if [[ $localhost != $fromhost ]]
then
if [[ $files_for_pes = false ]]
then
cps=""
cst=""
else
cps=-c
cst="/"
fi
if [[ $localhost = nech ]]
then
# TRANSFER IN A SEPARATE JOB
# FIRST, COPY THE FILE INTO THE TEMPORARY DATA DIRECTORY
[[ ! -d $tmp_data_catalog/TRANSFER ]] && mkdir -p $tmp_data_catalog/TRANSFER
file_to_transfer=${fname}_${localout[$i]}_to_transfer_$kennung
if [[ $files_for_pes = false ]]
then
ln -f ${localout[$i]} $tmp_data_catalog/TRANSFER/$file_to_transfer
else
mkdir $tmp_data_catalog/TRANSFER/$file_to_transfer
ln ${localout[$i]}/* $tmp_data_catalog/TRANSFER/$file_to_transfer
fi
echo "set -x" > transfer_${localout[$i]}
echo "cd $tmp_data_catalog/TRANSFER" >> transfer_${localout[$i]}
printf "\n >>> OUTPUT: ${localout[$i]}$cst by SCP in seperate job to"
printf "\n ${pathout[$i]}/${localhost}_${fname}${endout[$i]}$cst"
printf "\n or higher cycle\n"
echo "batch_scp $PORTOPT $cps -b -m -u $return_username $return_addres $file_to_transfer \"${pathout[$i]}\" ${localhost}_${fname}${endout[$i]} ${extout[$i]}" >> transfer_${localout[$i]}
echo "[[ \$? = 0 ]] && rm $file_to_transfer" >> transfer_${localout[$i]}
if [[ $localhost = nech ]]
then
subjob -d -c /pf/b/$usern/job_queue -v -q pp -X 0 -m 1000 -t 900 $PORTOPT transfer_${localout[$i]}
else
if [[ "$LOGNAME" = b323013 ]]
then
subjob -v -q c1 -X 0 -m 1000 -t 900 -c $job_catalog $PORTOPT transfer_${localout[$i]}
else
subjob -d -v -q c1 -X 0 -m 1000 -t 900 -c $job_catalog $PORTOPT transfer_${localout[$i]}
fi
fi
else
# TRANSFER WITHIN THIS JOB
transfer_failed=false
printf "\n >>> OUTPUT: ${localout[$i]}$cst by SCP to"
printf "\n ${pathout[$i]}/${localhost}_${fname}${endout[$i]}$cst\n"
batch_scp $PORTOPT $cps -b -m -u $return_username $return_addres ${localout[$i]} "${pathout[$i]}" ${localhost}_${fname}${endout[$i]} ${extout[$i]}
[[ $? != 0 ]] && transfer_failed=true
# IF THE TRANSFER FAILED, CREATE A BACKUP COPY ON THE
# LOCAL MACHINE
if [[ $transfer_failed = true ]]
then
printf " +++ transfer failed. Trying to save a copy on the local host under:\n"
printf " ${pathout[$i]}/${localhost}_${fname}${endout[$i]}_$kennung\n"
# FIRST CHECK WHETHER THE DIRECTORY EXISTS; IF NOT, CREATE IT
eval local_catalog=${pathout[$i]}
if [[ ! -d $local_catalog ]]
then
printf " *** local directory does not exist. Trying to create:\n"
printf " $local_catalog \n"
mkdir -p $local_catalog
fi
eval cp ${localout[$i]} ${pathout[$i]}/${localhost}_${fname}${endout[$i]}_$kennung
transfer_problems=true
fi
fi
else
# ASSIGNMENT SO THAT FURTHER BELOW ONLY A COPY IS CARRIED OUT
actionout[$i]=""
fi
fi
# APPEND VIA FTP/SCP (ALWAYS IN BINARY MODE, -m: IF THE TARGET DIRECTORY
# DOES NOT EXIST, AN ATTEMPT IS MADE TO CREATE IT), BUT ONLY FOR
# REMOTE RUNS
if [[ "${actionout[$i]}" = tra ]]
then
if [[ $localhost != $fromhost ]]
then
if [[ $localhost = ibmh || $localhost = nech ]]
then
# TRANSFER IN A SEPARATE JOB
# FIRST, COPY THE FILE INTO THE TEMPORARY DATA DIRECTORY
[[ ! -d $tmp_data_catalog/TRANSFER ]] && mkdir -p $tmp_data_catalog/TRANSFER
file_to_transfer=${fname}_${localout[$i]}_to_transfer_$kennung
ln -f ${localout[$i]} $tmp_data_catalog/TRANSFER/$file_to_transfer
echo "set -x" > transfer_${localout[$i]}
echo "cd $tmp_data_catalog/TRANSFER" >> transfer_${localout[$i]}
printf "\n >>> OUTPUT: ${localout[$i]} append by SCP in seperate job to"
printf "\n ${pathout[$i]}/${localhost}_${fname}${endout[$i]}"
printf "\n or higher cycle\n"
echo "batch_scp $PORTOPT -A -b -m -u $return_username $return_addres $file_to_transfer \"${pathout[$i]}\" ${localhost}_${fname}${endout[$i]} ${extout[$i]}" >> transfer_${localout[$i]}
echo "[[ \$? = 0 ]] && rm $file_to_transfer" >> transfer_${localout[$i]}
if [[ $localhost = nech ]]
then
subjob -d -c /pf/b/$usern/job_queue -v -q pp -X 0 -m 1000 -t 900 $PORTOPT transfer_${localout[$i]}
else
if [[ $LOGNAME = b323013 ]]
then
subjob -v -q c1 -X 0 -m 1000 -t 900 -c $job_catalog $PORTOPT transfer_${localout[$i]}
else
subjob -d -v -q c1 -X 0 -m 1000 -t 900 -c $job_catalog $PORTOPT transfer_${localout[$i]}
fi
fi
else
# TRANSFER WITHIN THIS JOB
transfer_failed=false
printf "\n >>> OUTPUT: ${localout[$i]} append by SCP to"
printf "\n ${pathout[$i]}/${localhost}_${fname}${endout[$i]}\n"
batch_scp $PORTOPT -A -b -m -u $return_username $return_addres ${localout[$i]} "${pathout[$i]}" ${localhost}_${fname}${endout[$i]} ${extout[$i]}
[[ $? != 0 ]] && transfer_failed=true
# IF THE TRANSFER FAILED, CREATE A BACKUP COPY ON THE
# LOCAL MACHINE
if [[ $transfer_failed = true ]]
then
printf " +++ transfer failed. Trying to save a copy on the local host under:\n"
printf " ${pathout[$i]}/${localhost}_${fname}${endout[$i]}_$kennung\n"
# FIRST CHECK WHETHER THE DIRECTORY EXISTS; IF NOT, CREATE IT
eval local_catalog=${pathout[$i]}
if [[ ! -d $local_catalog ]]
then
printf " *** local directory does not exist. Trying to create:\n"
printf " $local_catalog \n"
mkdir -p $local_catalog
fi
eval cp ${localout[$i]} ${pathout[$i]}/${localhost}_${fname}${endout[$i]}_$kennung
transfer_problems=true
fi
fi
else
# ASSIGNMENT SO THAT FURTHER BELOW ONLY AN APPEND IS CARRIED OUT
# ON THE LOCAL MACHINE
actionout[$i]=a
fi
fi
# OUTPUT FILE FOR A RESTART RUN. THE FILE IS PLACED IN THE TEMPORARY
# DATA CATALOG VIA A LINK. OPTION -f IS USED IN CASE A FILE OF THE
# SAME NAME SHOULD STILL EXIST THERE. AFTERWARDS ITS ARCHIVING IS
# INITIATED
if [[ "${actionout[$i]}" = fl ]]
then
[[ ! -d $tmp_data_catalog ]] && mkdir -p $tmp_data_catalog
chmod g+rx $tmp_data_catalog
if [[ $files_for_pes = false ]]
then
printf "\n >>> OUTPUT: ${localout[$i]} to"
printf "\n $tmp_data_catalog/${frelout[$i]} (temporary data catalog)\n"
ln -f ${localout[$i]} $tmp_data_catalog/${frelout[$i]}
else
printf "\n >>> OUTPUT: ${localout[$i]}/.... to"
printf "\n $tmp_data_catalog/${frelout[$i]} (temporary data catalog)\n"
mkdir $tmp_data_catalog/${frelout[$i]}
cd ${localout[$i]}
for file in $(ls *)
do
ln -f $file $tmp_data_catalog/${frelout[$i]}
done
cd $TEMPDIR
fi
# AN ARCHIVING JOB IS CREATED AND SUBMITTED
if [[ $store_on_archive_system = true ]]
then
if [[ $archive_system = asterix ]]
then
echo "cd $tmp_data_catalog" >> archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
echo "stageout ${frelout[$i]} > STAGE_OUTPUT${i}_$kennung" >> archive_${frelout[$i]}
else
echo "stageout -t ${frelout[$i]} > STAGE_OUTPUT${i}_$kennung" >> archive_${frelout[$i]}
fi
echo "cat STAGE_OUTPUT${i}_$kennung" >> archive_${frelout[$i]}
echo "if [[ \$(grep -c \"st.msg:150\" STAGE_OUTPUT${i}_$kennung) != 0 ]]" >> archive_${frelout[$i]}
echo "then" >> archive_${frelout[$i]}
echo " do_stageout=false" >> archive_${frelout[$i]}
echo "else" >> archive_${frelout[$i]}
echo " echo \" +++ $filetyp ${frelout[$i]} could not be stored on archive-system\" " >> archive_${frelout[$i]}
echo " cat /dev/null > ~/job_queue/ARCHIVE_ERROR_$fname" >> archive_${frelout[$i]}
echo " cat STAGE_OUTPUT${i}_$kennung > ~/job_queue/archive_${frelout[$i]}_error" >> archive_${frelout[$i]}
echo " echo \" *** $filetyp ${frelout[$i]} will be copied to \$WORK as backup\" " >> archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
echo " cp ${frelout[$i]} \$WORK" >> archive_${frelout[$i]}
else
echo " cp -r ${frelout[$i]} \$WORK/${frelout[$i]}" >> archive_${frelout[$i]}
fi
echo " echo \" *** $filetyp ${frelout[$i]} saved\" " >> archive_${frelout[$i]}
echo "fi" >> archive_${frelout[$i]}
echo "rm STAGE_OUTPUT${i}_$kennung" >> archive_${frelout[$i]}
elif [[ $archive_system = DMF ]]
then
echo "cd $tmp_data_catalog" >> archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
printf "\n +++ archiving of single files impossible with $archive_system !\n"
locat=DMF
exit
else
# FOR THE COMPUTERS IN JUELICH. THERE, SINGLE FILES COULD ALSO
# BE ARCHIVED (TO BE CORRECTED LATER)
echo "rm -rf \$ARCHIVE/${frelout[$i]}" >> archive_${frelout[$i]}
echo "cp -r ${frelout[$i]} \$ARCHIVE" >> archive_${frelout[$i]}
fi
elif [[ $archive_system = tivoli ]]
then
echo "cd $tmp_data_catalog" >> archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
# DELETE A FILE OF THE SAME NAME POSSIBLY STILL EXISTING IN THE ARCHIVE
echo "set -x" >> archive_${frelout[$i]}
echo "rm -rf \$PERM/${frelout[$i]}" >> archive_${frelout[$i]}
echo "cp ${frelout[$i]} \$PERM/${frelout[$i]}" >> archive_${frelout[$i]}
else
echo "set -x" >> archive_${frelout[$i]}
echo "rm -rf \$PERM/${frelout[$i]}/*" >> archive_${frelout[$i]}
echo "[[ ! -d \$PERM/${frelout[$i]} ]] && mkdir $PERM/${frelout[$i]}" >> archive_${frelout[$i]}
cd $tmp_data_catalog
all_files=`ls -1 ${frelout[$i]}/*`
cd - > /dev/null
(( inode = 0 ))
(( tp1 = tasks_per_node + 1 ))
while (( inode < nodes ))
do
# echo "*** all_files = \"$all_files\" "
files=`echo $all_files | cut -d" " -f1-$tasks_per_node`
all_files=`echo $all_files | cut -d" " -f$tp1-`
# echo "*** tasks_per_node = \"$tasks_per_node\" "
# echo "*** files = \"$files\" "
# echo "*** all_files = \"$all_files\" "
echo "tar cvf \$PERM/${frelout[$i]}/${frelout[$i]}.node_$inode.tar $files" >> archive_${frelout[$i]}
### echo "tar cvf ${frelout[$i]}.node_$inode.tar $files" >> archive_${frelout[$i]}
### echo "cp ${frelout[$i]}.node_$inode.tar \$PERM/${frelout[$i]}/" >> archive_${frelout[$i]}
###echo "rm ${frelout[$i]}.node_$inode.tar" >> archive_${frelout[$i]}
# echo "dsmc incremental \$PERM/${frelout[$i]}/${frelout[$i]}.node_$inode.tar" >> archive_${frelout[$i]}
# echo "dsmmigrate \$PERM/${frelout[$i]}/${frelout[$i]}.node_$inode.tar" >> archive_${frelout[$i]}
(( inode = inode + 1 ))
done
# echo "rm -rf \$PERM/${frelout[$i]}.tar" >> archive_${frelout[$i]}
# echo "tar cvf \$PERM/${frelout[$i]}.tar ${frelout[$i]}" >> archive_${frelout[$i]}
fi
elif [[ $archive_system = ut ]]
then
echo "cd $tmp_data_catalog" >> archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
# DELETE A FILE OF THE SAME NAME POSSIBLY STILL EXISTING IN THE ARCHIVE
echo "set -x" >> archive_${frelout[$i]}
echo "rm -rf \$UT/${frelout[$i]}" >> archive_${frelout[$i]}
echo "cp ${frelout[$i]} \$UT/${frelout[$i]}" >> archive_${frelout[$i]}
else
echo "set -x" >> archive_${frelout[$i]}
echo "rm -rf \$UT/${frelout[$i]}/*" >> archive_${frelout[$i]}
echo "[[ ! -d \$UT/${frelout[$i]} ]] && mkdir $UT/${frelout[$i]}" >> archive_${frelout[$i]}
cd $tmp_data_catalog
all_files=`ls -1 ${frelout[$i]}/*`
cd - > /dev/null
(( inode = 0 ))
(( tp1 = tasks_per_node + 1 ))
while (( inode < nodes ))
do
files=`echo $all_files | cut -d" " -f1-$tasks_per_node`
all_files=`echo $all_files | cut -d" " -f$tp1-`
echo "tar cvf \$UT/${frelout[$i]}/${frelout[$i]}.node_$inode.tar $files" >> archive_${frelout[$i]}
(( inode = inode + 1 ))
done
fi
elif [[ $archive_system = none ]]
then
printf " +++ archiving on $localhost not available!\n"
fi
if [[ $archive_system != none ]]
then
if [[ $localhost = lcsgih || $localhost = lcsgib ]]
then
# subjob -d -v -q cdata -X 0 -m 1000 -t 43200 -c $job_catalog $PORTOPT archive_${frelout[$i]}
subjob -v -q permq -X 1 -T 1 -m 1000 -t 172800 -c $job_catalog $PORTOPT archive_${frelout[$i]}
elif [[ $localhost = nech ]]
then
subjob -d -c /pf/b/$usern/job_queue -v -q pp -X 0 -m 1000 -t 7200 $PORTOPT archive_${frelout[$i]}
fi
printf " Archiving of $tmp_data_catalog/${frelout[$i]} initiated (batch job submitted)\n"
fi
else
printf " +++ caution: option -A is switched off. No archiving on $archive_system!\n"
fi
# create an empty file in the user directory so that the highest
# cycle number existing on the archive system is known
if [[ $files_for_pes = false ]]
then
cat /dev/null > ${pathout[$i]}
else
mkdir -p ${pathout[$i]}
fi
fi
# copy from the local machine to the archive system
# in addition, an empty file has to be created so that the highest
# cycle number existing on the archive system is known
# if any one of (possibly several) archiving actions fails, a flag
# is set and remains set even if subsequent archiving actions
# complete successfully
if [[ "${actionout[$i]}" = ar ]]
then
if [[ $files_for_pes = false ]]
then
printf "\n >>> OUTPUT: ${localout[$i]} to"
printf "\n ${pathout[$i]}"
printf "\n File will be copied to archive-system ($archive_system) !\n"
else
printf "\n >>> OUTPUT: ${localout[$i]}/_.... to"
printf "\n ${pathout[$i]}"
printf "\n Directory will be copied to archive-system ($archive_system) !\n"
fi
mv ${localout[$i]} ${frelout[$i]}
file_saved=false
if [[ $archive_system = asterix ]]
then
do_stageout=true
(( stageout_anz = 0 ))
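# retry the stageout up to 10 times, waiting 15 minutes between attempts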
while [[ $do_stageout = true ]]
do
if [[ $files_for_pes = false ]]
then
stageout ${frelout[$i]} > STAGE_OUTPUT
else
stageout -t ${frelout[$i]} > STAGE_OUTPUT
fi
cat STAGE_OUTPUT
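# within this script, message id st.msg:150 in the stageout protocol is
# taken as indication of a successful transfer; otherwise a retry is started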
if [[ $(grep -c "st.msg:150" STAGE_OUTPUT) != 0 ]]
then
file_saved=true
do_stageout=false
else
if [[ $files_for_pes = false ]]
then
printf "\n +++ file ${frelout[$i]} could not be saved on archive-system"
else
printf "\n +++ directory ${frelout[$i]} could not be saved on archive-system"
fi
(( stageout_anz = stageout_anz + 1 ))
if (( stageout_anz == 10 ))
then
printf "\n +++ stoped after 10 unsuccessful tries!"
archive_save=false
do_stageout=false
else
printf "\n *** new try to store on archive after 15 min:"
sleep 900
fi
fi
done
elif [[ $archive_system = DMF ]]
then
if [[ $files_for_pes = false ]]
then
printf "\n +++ archiving of single files impossible on $archive_system!\n"
locat=DMF
exit
else
rm -rf $ARCHIVE/${frelout[$i]}
cp -r ${frelout[$i]} $ARCHIVE
fi
file_saved=true
elif [[ $archive_system = tivoli ]]
then
# archiving is only possible via a batch job
# in the meantime the file has to be placed in the temporary
# data directory
[[ ! -d $tmp_data_catalog ]] && mkdir -p $tmp_data_catalog
chmod g+rx $tmp_data_catalog
if [[ $files_for_pes = false ]]
then
ln -f ${frelout[$i]} $tmp_data_catalog/${frelout[$i]}
else
mkdir $tmp_data_catalog/${frelout[$i]}
ln -f ${frelout[$i]}/* $tmp_data_catalog/${frelout[$i]}
fi
# generate and submit the batch job; the file has to be removed
# from the temporary data directory again
echo "cd $tmp_data_catalog" > archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
# remove any file of the same name that may still exist in the archive
echo "rm -rf \$PERM/${frelout[$i]}" >> archive_${frelout[$i]}
echo "cp ${frelout[$i]} \$PERM/${frelout[$i]}" >> archive_${frelout[$i]}
echo "rm -rf ${frelout[$i]}" >> archive_${frelout[$i]}
else
echo "rm -rf \$PERM/${frelout[$i]}.tar" >> archive_${frelout[$i]}
echo "tar cvf \$PERM/${frelout[$i]}.tar ${frelout[$i]}" >> archive_${frelout[$i]}
echo "rm -rf ${frelout[$i]}" >> archive_${frelout[$i]}
fi
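# for illustration, in the single-file case the generated job file
# archive_<file> now essentially contains:
#    cd <tmp_data_catalog>
#    rm -rf $PERM/<file>
#    cp <file> $PERM/<file>
#    rm -rf <file>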
subjob -v -d -q cdata -X 0 -m 1000 -t 43200 -c $job_catalog $PORTOPT archive_${frelout[$i]}
printf " Archiving of $tmp_data_catalog/${frelout[$i]} initiated (batch job submitted)\n"
file_saved=true
elif [[ $archive_system = ut ]]
then
# archiving is only possible via a batch job
# in the meantime the file has to be placed in the temporary
# data directory
[[ ! -d $tmp_data_catalog ]] && mkdir -p $tmp_data_catalog
chmod g+rx $tmp_data_catalog
if [[ $files_for_pes = false ]]
then
ln -f ${frelout[$i]} $tmp_data_catalog/${frelout[$i]}
else
mkdir $tmp_data_catalog/${frelout[$i]}
ln -f ${frelout[$i]}/* $tmp_data_catalog/${frelout[$i]}
fi
# generate and submit the batch job; the file has to be removed
# from the temporary data directory again
echo "cd $tmp_data_catalog" > archive_${frelout[$i]}
if [[ $files_for_pes = false ]]
then
# remove any file of the same name that may still exist in the archive
echo "rm -rf \$UT/${frelout[$i]}" >> archive_${frelout[$i]}
echo "cp ${frelout[$i]} \$UT/${frelout[$i]}" >> archive_${frelout[$i]}
echo "rm -rf ${frelout[$i]}" >> archive_${frelout[$i]}
else
echo "rm -rf \$UT/${frelout[$i]}.tar" >> archive_${frelout[$i]}
echo "tar cvf \$UT/${frelout[$i]}.tar ${frelout[$i]}" >> archive_${frelout[$i]}
echo "rm -rf ${frelout[$i]}" >> archive_${frelout[$i]}
fi
subjob -v -c /pf/b/$usern/job_queue -d -q pp -X 0 -m 1000 -t 7200 $PORTOPT archive_${frelout[$i]}
printf " Archiving of $tmp_data_catalog/${frelout[$i]} initiated (batch job submitted)\n"
file_saved=true
else
printf "\n +++ archive_system=\"$archive_system\" archiving impossible!"
archive_save=false
fi
if [[ $file_saved = true ]]
then
if [[ $files_for_pes = false ]]
then
cat /dev/null > ${pathout[$i]}
else
mkdir -p ${pathout[$i]}
fi
fi
fi
# append on the local machine
if [[ "${actionout[$i]}" = "a" ]]
then
if [[ "${extout[$i]}" != " " && "${extout[$i]}" != "" ]]
then
printf "\n >>> OUTPUT: ${localout[$i]} append to ${pathout[$i]}.${extout[$i]}\n"
cat ${localout[$i]} >> ${pathout[$i]}.${extout[$i]}
else
printf "\n >>> OUTPUT: ${localout[$i]} append to ${pathout[$i]}\n"
cat ${localout[$i]} >> ${pathout[$i]}
fi
fi
# copy on the local machine
# a copy is required because move does not work across filesystem
# boundaries
if [[ "${actionout[$i]}" = "" && $files_for_pes = false ]]
then
# copy on a single-processor machine
if [[ "${extout[$i]}" != " " && "${extout[$i]}" != "" ]]
then
printf "\n >>> OUTPUT: ${localout[$i]} to ${pathout[$i]}.${extout[$i]}\n"
if [[ $link_local_output = true ]]
then
printf " using ln -f\n"
ln -f ${localout[$i]} ${pathout[$i]}.${extout[$i]}
fi
# If "ln -f" fails of if "$link_local_output = false" do a normal "cp"
if [[ ! -f "${pathout[$i]}.${extout[$i]}" ]]
then
if [[ $link_local_output = true ]]
then
printf " ln failed, using cp...\n"
fi
cp ${localout[$i]} ${pathout[$i]}.${extout[$i]}
fi
else
printf "\n >>> OUTPUT: ${localout[$i]} to ${pathout[$i]}\n"
if [[ $link_local_output = true ]]
then
printf " using ln -f\n"
ln -f ${localout[$i]} ${pathout[$i]}
fi
# If "ln -f" fails of if "$link_local_output = false" do a normal "cp"
if [[ ! -f "${pathout[$i]}" ]]
then
if [[ $link_local_output = true ]]
then
printf " ln failed, using cp...\n"
fi
cp ${localout[$i]} ${pathout[$i]}
fi
fi
elif [[ "${actionout[$i]}" = "" && $files_for_pes = true ]]
then
# the files belonging to the individual processors of a parallel
# machine are first collected in one directory, which is then
# copied as a whole
printf "\n >>> OUTPUT: ${localout[$i]}/_.... to ${pathout[$i]}\n"
if [[ $link_local_output = true ]]
then
printf " using ln -f\n"
mkdir ${pathout[$i]}
cd ${localout[$i]}
for file in $(ls *)
do
ln -f $file ${pathout[$i]}
done
cd $TEMPDIR
fi
# If "ln -f" fails of if "$link_local_output = false" do a normal "cp -r"
if [[ ! -f "${pathout[$i]}/_0000" ]]
then
if [[ $link_local_output = true ]]
then
printf " ln failed for .../_0000, using cp...\n"
fi
cp -r ${localout[$i]} ${pathout[$i]}
fi
fi
fi
done
if (( i != 0 ))
then
if [[ $transfer_problems = true ]]
then
printf "\n$striche\n *** OUTPUT-files saved"
printf "\n +++ WARNING: some data transfers failed! \n"
else
printf "\n$striche\n *** all OUTPUT-files saved \n"
fi
fi
# start a follow-up job if necessary
# the file CONTINUE_RUN has to be created by the user program
if [[ -f CONTINUE_RUN ]]
then
if [[ $archive_save = true ]]
then
# first add options for a continuation run, for silent processing
# (without interactive queries) and for batch mode (only relevant
# if the batch job is to run on the local machine) to the mrun
# call, unless they are already present
[[ $(echo $mc | grep -c "\-C") = 0 ]] && mc="$mc -C"
[[ $(echo $mc | grep -c "\-v") = 0 ]] && mc="$mc -v"
[[ $(echo $mc | grep -c "\-b") = 0 ]] && mc="$mc -b"
if [[ $(echo $mc | grep -c "#") != 0 ]]
then
mc=`echo $mc | sed 's/#/f/g'`
fi
# start the job
printf "\n\n *** initiating restart-run on \"$return_addres\" using command:\n"
echo " $mc"
printf "\n$striche\n"
if [[ $localhost != $fromhost ]]
then
if [[ $localhost = lcsgih || $localhost = lcsgib || $localhost = nech || $localhost = ibmh || $localhost = ibmkisti || $localhost = ibmku || $localhost = ibms || $localhost = lcflow || $localhost = lckyu* ]]
then
echo "*** ssh will be used to initiate restart-runs!"
echo " return_addres=\"$return_addres\" "
echo " return_username=\"$return_username\" "
if [[ $(echo $return_addres | grep -c "130.75.105") = 1 ]]
then
ssh $SSH_PORTOPT $return_addres -l $return_username "PATH=\$PATH:$LOCAL_MRUN_PATH;. /muksoft/packages/intel/bin/compilervars.sh intel64;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc "
else
if [[ $localhost = ibmkisti ]]
then
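# on ibmkisti the firewall is only open on the interactive node (gaiad),
# therefore the restart has to be initiated via an additional ssh hop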
ssh $SSH_PORTOPT $usern@gaiad "ssh $SSH_PORTOPT $return_addres -l $return_username \"PATH=\\\$PATH:$LOCAL_MRUN_PATH;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc\" "
else
ssh $SSH_PORTOPT $return_addres -l $return_username "PATH=\$PATH:$LOCAL_MRUN_PATH;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc "
fi
fi
else
printf "\n +++ no restart mechanism available for host \"$localhost\" "
locat=restart; exit
fi
# wait so that the restart job can enter the queue before the
# current job ends
if [[ $queue = special1q ]]
then
sleep 120
else
sleep 30
fi
else
# for runs on the local machine, mrun can be called directly,
# except on lcfimm
cd $LOCAL_PWD
if [[ $localhost = lcfimm ]]
then
ssh $SSH_PORTOPT $return_addres -l $return_username "PATH=\$PATH:$LOCAL_MRUN_PATH;export PALM_BIN=$LOCAL_MRUN_PATH;cd $LOCAL_PWD; $mc "
elif [[ $localhost = lckyuh ]]
then
printf "\n +++ WARNING: no restart mechanism available for host \"$localhost\" "
printf "\n please restart job manually using command:\n"
printf "\n \"$mc\" "
else
eval $mc       # quotes (') in the command string have to be evaluated
fi
cd - > /dev/null
fi
if [[ $localhost = lckyuh ]]
then
printf "\n$striche\n *** restart-run to be initiated manually\n"
else
printf "\n$striche\n *** restart-run initiated \n"
fi
# if necessary, remove input files that were fetched from the
# temporary data directory
(( i = 0 ))
while (( i < iin ))
do
(( i = i + 1 ))
if [[ "${got_tmp[$i]}" = true && $keep_data_from_previous_run = false ]]
then
rm -r $tmp_data_catalog/${frelin[$i]}
fi
done
else
printf "\n +++ no restart-run possible, since errors occured"
printf "\n during the archive process"
fi
fi
# send e-mail notification about the completed run, if requested
if [[ "$email_notification" != "none" ]]
then
if [[ $localhost != $fromhost ]]
then
if [[ -f CONTINUE_RUN ]]
then
echo "PALM restart run necessary" > email_text
echo "description header of actual run:" >> email_text
cat CONTINUE_RUN >> email_text
echo "mrun-command to restart:" >> email_text
echo "$mc" >> email_text
else
echo "PALM run with base filename \"$fname\" on host \"$localhost\" finished" > email_text
fi
mail $email_notification < email_text
printf "\n *** email notification sent to \"$email_notification\" "
fi
fi
# all work is done; the temporary directory can be deleted
cd $HOME
[[ $delete_temporary_catalog = true ]] && rm -rf $TEMPDIR
else
# if the run is to be carried out on a remote machine, the
# corresponding actions are performed now
# assemble the mrun command for the remote machine
mrun_com="$mrun_script_name -a $afname -c $config_file -d $fname -h $host -H $fromhost -m $memory -t $cpumax -q $queue -R $return_addres -U $return_username -u $remote_username"
[[ "$cpp_opts" != "" ]] && mrun_com=${mrun_com}" -D \"$cpp_opts\""
[[ "$global_revision" != "" ]] && mrun_com=${mrun_com}" -G \"$global_revision\""
[[ $group_number != none ]] && mrun_com=${mrun_com}" -g $group_number"
[[ $do_compile = true ]] && mrun_com=${mrun_com}" -s \"$source_list\""
[[ "$input_list" != "" ]] && mrun_com=${mrun_com}" -i \"$input_list\""
[[ $ignore_archive_error = true ]] && mrun_com=${mrun_com}" -I"
[[ $keep_data_from_previous_run = true ]] && mrun_com=${mrun_com}" -k"
[[ "$additional_conditions" != "" ]] && mrun_com=${mrun_com}" -K \"$additional_conditions\""
# [[ "$makefile" != "$source_path/Makefile" ]] && mrun_com=${mrun_com}" -M \"$makefile\""
[[ "$output_list" != "" ]] && mrun_com=${mrun_com}" -o \"$output_list\""
[[ "$read_from_config" = false ]] && mrun_com=${mrun_com}" -S"
[[ $do_trace = true ]] && mrun_com=${mrun_com}" -x"
[[ "$numprocs" != "" ]] && mrun_com=${mrun_com}" -X $numprocs"
if [[ $use_openmp = true ]]
then
mrun_com=${mrun_com}" -O $threads_per_task"
fi
[[ "$tasks_per_node" != "" ]] && mrun_com=${mrun_com}" -T $tasks_per_node"
[[ $store_on_archive_system = true ]] && mrun_com=${mrun_com}" -A"
[[ $package_list != "" ]] && mrun_com=${mrun_com}" -p \"$package_list\""
[[ $return_password != "" ]] && mrun_com=${mrun_com}" -P $return_password"
[[ $delete_temporary_catalog = false ]] && mrun_com=${mrun_com}" -B"
[[ $node_usage != default && "$(echo $node_usage | cut -c1-3)" != "sla" && $node_usage != novice ]] && mrun_com=${mrun_com}" -n $node_usage"
[[ "$ocean_file_appendix" = true ]] && mrun_com=${mrun_com}" -y"
[[ $run_coupled_model = true ]] && mrun_com=${mrun_com}" -Y \"$coupled_dist\""
[[ "$check_namelist_files" = false ]] && mrun_com=${mrun_com}" -z"
[[ "$combine_plot_fields" = false ]] && mrun_com=${mrun_com}" -Z"
[[ "$max_par_io_str" != "" ]] && mrun_com=${mrun_com}" -w $max_par_io_str"
if [[ $do_remote = true ]]
then
printf "\n>>>> MRUN-command on execution host:\n>>>> $mrun_com \n"
fi
# assemble the job script in file jobfile
jobfile=jobfile.$RANDOM
# create the temporary directory and change into it
echo "mkdir $TEMPDIR" >> $jobfile
echo "cd $TEMPDIR" >> $jobfile
# activate error tracing, if requested
if [[ $do_trace = true ]]
then
echo "set -x" >> $jobfile
else
echo "set +vx" >> $jobfile
fi
# initialize the environment and load modules
if [[ "$init_cmds" != "" ]]
then
echo "$init_cmds" >> $jobfile
fi
if [[ "$module_calls" != "" ]]
then
echo "$module_calls" >> $jobfile
fi
# provide source code, mrun script and configuration file for
# the job
if [[ $( echo $host | cut -c1-5 ) = lcsgi || $host = ibmkisti ]]
then
# copy the configuration file and the mrun script into the
# source directory
if [[ $restart_run != true ]]
then
cp $config_file $working_directory/SOURCES_FOR_RUN_$fname
cp ${PALM_BIN}/$mrun_script_name $working_directory/SOURCES_FOR_RUN_$fname
fi
# transfer the source directory from the local machine via scp
# (for jobs on the local machine, cp is sufficient)
echo "set -x" >> $jobfile
if [[ $host = $localhost ]]
then
echo "cp -r $working_directory/SOURCES_FOR_RUN_$fname ." >> $jobfile
else
if [[ $host != ibmkisti ]]
then
echo "scp $PORTOPT -r $return_username@$return_addres:$working_directory/SOURCES_FOR_RUN_$fname ." >> $jobfile
else
# on KISTI's IBM, the firewall is only opened on the interactive node
echo "localdir=\`pwd\`" >> $jobfile
echo "ssh $SSH_PORTOPT $remote_username@gaiad \"cd \$localdir; scp $PORTOPT -r $return_username@$return_addres:$working_directory/SOURCES_FOR_RUN_$fname .\" " >> $jobfile
fi
fi
echo "export SOURCES_COMPLETE=true" >> $jobfile
# move source code, mrun script and configuration file into the
# current working directory
echo "mv SOURCES_FOR_RUN_$fname/$config_file . " >> $jobfile
echo "mv SOURCES_FOR_RUN_$fname/$mrun_script_name . " >> $jobfile
echo "execute_mrun=true" >> $jobfile
echo " " >> $jobfile
else
# store the source code (only if compilation is required) and,
# if applicable, the Makefile
if [[ $do_compile = true ]]
then
source_catalog=SOURCES_FOR_RUN_$fname
# create a subdirectory for source code and Makefile
# mrun is informed of this directory via an environment variable
# (this overrides the setting in the configuration file)
echo "mkdir SOURCES_FOR_RUN_$fname" >> $jobfile
echo "export SOURCES_COMPLETE=true" >> $jobfile
echo "cd SOURCES_FOR_RUN_$fname" >> $jobfile
for filename in $source_list
do
# the quoted here-document delimiter prevents substitutions
echo "cat > $filename << \"%END%\"" >> $jobfile
cat $source_catalog/$filename >> $jobfile
echo " " >> $jobfile
echo "%END%" >> $jobfile
echo " " >> $jobfile
done
# the quoted here-document delimiter prevents substitutions
echo "cat > Makefile << \"%END%\"" >> $jobfile
cat $source_catalog/Makefile >> $jobfile
echo " " >> $jobfile
echo "%END%" >> $jobfile
echo " " >> $jobfile
echo "cd - > /dev/null" >> $jobfile
fi
# store the configuration file
# the quoted here-document delimiter prevents substitutions
# Lines with #$ coming from the svn keyword substitution are
# removed from the config file in order to avoid problems
# with the SGE batch system
echo "cat > $config_file << \"%END%\"" >> $jobfile
if [[ $host = lckyuh ]]
then
# no cross compiler on compute node
sed 's/frtpx/frt/g' $config_file >> $jobfile
else
sed 's/#$.*//g' $config_file >> $jobfile
fi
echo "%END%" >> $jobfile
echo " " >> $jobfile
# store the current mrun version
# the quoted here-document delimiter prevents substitutions
echo "cat > $mrun_script_name <<\"%END%\"" >> $jobfile
if [[ $host = lckyuh ]]
then
sed 's/\/bin\/ksh/\/bin\/bash/g' ${PALM_BIN}/$mrun_script_name >> $jobfile
else
cat ${PALM_BIN}/$mrun_script_name >> $jobfile
fi
echo "%END%" >> $jobfile
echo "chmod u+x $mrun_script_name" >> $jobfile
echo "execute_mrun=true" >> $jobfile
echo " " >> $jobfile
fi
# fetch any required input files via ftp or pass them along with
# the job, and store them in the user directory on the remote
# machine; if that directory does not exist, an attempt is made
# to create the last subdirectory of the path name
if [[ $do_remote = true ]]
then
(( i = 0 ))
while (( i < iin ))
do
(( i = i + 1 ))
echo "[[ ! -d ${pathin[$i]} ]] && mkdir -p ${pathin[$i]}" >> $jobfile
if [[ "${transin[$i]}" = job ]]
then
echo "cat > ${remotepathin[$i]} <<\"%END%\"" >> $jobfile
eval cat ${pathin[$i]}/${frelin[$i]} >> $jobfile
echo " " >> $jobfile
echo "%END%" >> $jobfile
else
echo "batch_scp $PORTOPT -b -o -g -s -u $return_username $return_addres ${remotepathin[$i]} \"${pathin[$i]}\" ${frelin[$i]}" >> $jobfile
fi
# check whether the file could be created
echo "if [[ \$? = 1 ]]" >> $jobfile
echo "then" >> $jobfile
echo " echo \" \" " >> $jobfile
echo " echo \"+++ file ${remotepathin[$i]} could not be created\" " >> $jobfile
echo " echo \" please check, if directory exists on $host!\" " >> $jobfile
echo " echo \"+++ MRUN will not be continued\" " >> $jobfile
echo " execute_mrun=false" >> $jobfile
echo "fi" >> $jobfile
done
fi
# pass the working directory on the local machine to possible
# continuation runs via an environment variable
echo "LOCAL_PWD=$working_directory" >> $jobfile
echo "export LOCAL_PWD" >> $jobfile
# likewise pass the local mrun directory
echo "LOCAL_MRUN_PATH=$PALM_BIN" >> $jobfile
echo "export LOCAL_MRUN_PATH" >> $jobfile
# workaround for RIAM NEC jobs because of profile problems
if [[ $localhost_realname = "gate" || $localhost = lctit ]]
then
echo "export PALM_BIN=$PALM_BIN" >> $jobfile
elif [[ $host = lcflow || $localhost = lcflow ]]
then
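# write the literal string $HOME instead of the expanded local home
# directory so that the remote shell substitutes its own home directory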
echo "export PALM_BIN=$PALM_BIN" | sed -e 's:'$HOME':$HOME:' >> $jobfile
echo "export PATH=\$PATH:\$PALM_BIN" >> $jobfile
fi
# call mrun on the target machine (queue is a workaround for ibmkisti)
echo "set -x" >> $jobfile
echo "queue=$queue" >> $jobfile
echo "[[ \$execute_mrun = true ]] && ./$mrun_com" >> $jobfile
echo 'ls -al; echo `pwd`' >> $jobfile
echo "cd \$HOME" >> $jobfile
echo "rm -rf $TEMPDIR" >> $jobfile
# start the job via subjob
if [[ $silent = false ]]
then
printf "\n "
else
printf "\n\n"
fi
subjob $job_on_file -h $host -u $remote_username -g $group_number -q $queue -m $memory -N $node_usage -t $cpumax $XOPT $TOPT $OOPT -n $fname -v -c $job_catalog -e $email_notification $PORTOPT $jobfile
rm -rf $jobfile
fi     # end of remote part
# end of procedure