#$Id$
#column 1                column 2
#name of variable        value of variable (~ must not be used)
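# This configuration file is read by PALM's palmrun/palmbuild scripts; values in
# double curly braces ({{...}}) are replaced by palmrun when the batch job is created.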
#----------------------------------------------------------------------------
%base_directory          $HOME/palm/current_version
%base_data               $HOME/palm/current_version/JOBS
%source_path             $HOME/palm/current_version/trunk/SOURCE
%user_source_path        $base_data/$run_identifier/USER_CODE
%fast_io_catalog         /scratch/usr/<replace_by_your_HLRN_username>
%restart_data_path       $fast_io_catalog
%output_data_path        $base_data
%local_jobcatalog        $base_data/$run_identifier/LOG_FILES
%remote_jobcatalog       $base_data/$run_identifier/LOG_FILES

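# Connection and batch-system settings for job submission to the remote host (HLRN)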
%local_ip                <replace_by_the_IP_of_your_local_computer>
%local_username          <replace_by_your_local_username>
%remote_ip               130.73.234.1
%remote_loginnode        blogin1
%remote_username         <replace_by_your_HLRN_username>
%ssh_key                 id_rsa_hlrn
%defaultqueue            standard96:test
%project_account         <replace_by_your_default_HLRN_project_account>
%submit_command          /cm/shared/apps/slurm/current/bin/sbatch

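# Compiler, preprocessor, library, and execution settings (Intel Fortran / Intel MPI, parallel netCDF, FFTW)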
%compiler_name           mpiifort
%compiler_name_ser       ifort
%cpp_options             -cpp -DMPI_REAL=MPI_DOUBLE_PRECISION -DMPI_2REAL=MPI_2DOUBLE_PRECISION -D__lc -D__parallel -D__netcdf -D__netcdf4 -D__netcdf4_parallel -D__intel_compiler -D__fftw
%make_options            -j 4
%compiler_options        -fpe0 -O3 -fp-model source -ftz -xCORE-AVX512 -no-prec-div -no-prec-sqrt -ip -convert little_endian -I /sw/dataformats/netcdf/intel.18/4.7.3/skl/include/ -I /sw/numerics/fftw3/impi/intel/3.3.8/skl/include/
%linker_options          -Wl,-rpath=\\$LD_RUN_PATH \\`nf-config --flibs\\` -L /sw/numerics/fftw3/impi/intel/3.3.8/skl/lib -lfftw3
%module_commands         module load intel/18.0.6 impi/2018.5 netcdf-parallel/impi/intel fftw3/impi/intel
%execute_command         srun --propagate=STACK --kill-on-bad-exit -n {{mpi_tasks}} -N {{nodes}} --ntasks-per-node={{tasks_per_node}} palm
%execute_command_for_combine   srun --propagate=STACK -n 1 --ntasks-per-node=1 combine_plot_fields.x
%memory                  2300

# BATCH-directives to be used for batch jobs. If $-characters are required, hide them with 3 backslashes
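# (e.g. a directive that needs a literal $HOME would write it as \\\$HOME)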
BD:#!/bin/bash
#BD:#SBATCH --dependency=afterany:{{previous_job}}
BD:#SBATCH -A {{project_account}}
BD:#SBATCH --job-name={{run_id}}
BD:#SBATCH --time={{cpu_hours}}:{{cpu_minutes}}:{{cpu_seconds}}
BD:#SBATCH --ntasks={{mpi_tasks}}
BD:#SBATCH --nodes={{nodes}}
BD:#SBATCH --ntasks-per-node={{tasks_per_node}}
BD:#SBATCH --partition={{queue}}
BD:#SBATCH --output={{job_protocol_file}}
BD:#SBATCH --error={{job_protocol_file}}
#BD:#SBATCH --mail-type=ALL
#BD:#SBATCH --mail-user=<replace_by_your_email_address>

# BATCH-directives for the batch job used to send the job protocol file back from the remote to the local host
BDT:#!/bin/bash
BDT:#SBATCH -A {{project_account}}
BDT:#SBATCH --job-name=job_transfer
BDT:#SBATCH --time=00:30:00
BDT:#SBATCH --ntasks=1
BDT:#SBATCH --nodes=1
BDT:#SBATCH --ntasks-per-node=1
BDT:#SBATCH --partition={{queue}}
BDT:#SBATCH --output={{job_transfer_protocol_file}}
BDT:#SBATCH --error={{job_transfer_protocol_file}}

#----------------------------------------------------------------------------
# INPUT-commands, executed before running PALM - lines must start with "IC:"
#----------------------------------------------------------------------------
# stack size and Intel MPI runtime settings
IC:ulimit -s unlimited         # requires --propagate=STACK in the srun command to propagate the limit to all nodes
IC:export I_MPI_PMI_LIBRARY=libpmi.so
IC:export I_MPI_FABRICS=shm:ofi
IC:export I_MPI_OFI_PROVIDER=psm2
IC:export I_MPI_ADJUST_ALLTOALL=3
IC:export I_MPI_HYDRA_BRANCH_COUNT=128
IC:module list

#----------------------------------------------------------------------------
# ERROR-commands - executed when the program terminates abnormally
#----------------------------------------------------------------------------
EC:[[ \$locat = execution ]] && cat RUN_CONTROL
EC:[[ \$locat = execution ]] && cat PARTICLE_INFOS/*

#----------------------------------------------------------------------------
# OUTPUT-commands - executed when the program terminates normally
#----------------------------------------------------------------------------
# Combine 1D- and 3D-profile output (these files are not usable for plotting)
#OC:[[ -f LIST_PROFIL_1D ]] && cat LIST_PROFIL_1D >> LIST_PROFILE
#OC:[[ -f LIST_PROFIL ]] && cat LIST_PROFIL >> LIST_PROFILE

# Combine all particle information files
#OC:[[ -f PARTICLE_INFOS/_0000 ]] && cat PARTICLE_INFOS/* >> PARTICLE_INFO