#$Id$
#column 1                     column 2
#name of variable             value of variable (~ must not be used)
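# note: a value set here can be reused further down by prefixing its name with $
# (see $base_directory inside %user_source_path below)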
#----------------------------------------------------------------------------
%base_data                    ~/palm/current_version/JOBS
%base_directory               $HOME/palm/current_version
%source_path                  $HOME/palm/current_version/trunk/SOURCE
%user_source_path             $base_directory/JOBS/$run_identifier/USER_CODE
%fast_io_catalog              /scratch/usr/<replace_by_your_HLRN_username>
%local_jobcatalog             /home/<replace_by_your_local_username>/job_logfiles
%remote_jobcatalog            /home/<replace_by_your_HLRN_username>/job_logfiles
#
%local_ip                     <replace by IP of your local computer>
%local_username               <replace_by_your_local_username>
%remote_ip                    134.76.43.141
%remote_loginnode             glogin1
%remote_username              <replace_by_your_HLRN_username>
%ssh_key                      id_rsa_hlrn
%defaultqueue                 medium:test
%submit_command               /cm/shared/apps/slurm/current/bin/sbatch

%compiler_name                mpiifort
%compiler_name_ser            ifort
%cpp_options                  -cpp -DMPI_REAL=MPI_DOUBLE_PRECISION -DMPI_2REAL=MPI_2DOUBLE_PRECISION -D__lc -D__parallel -D__netcdf -D__netcdf4 -D__netcdf4_parallel -D__intel_compiler -D__fftw
%make_options                 -j 4
%compiler_options             -fpe0 -O3 -fp-model source -ftz -xCORE-AVX512 -no-prec-div -no-prec-sqrt -ip -convert little_endian -I /home/bekklaus/opt/NetCDF_parallel_intel18/include -I /home/bekklaus/opt/fftw3_intel18/include
%linker_options               -L/home/bekklaus/opt/NetCDF_parallel_intel18/lib -lnetcdff -Wl,-rpath=/home/bekklaus/opt/NetCDF_parallel_intel18/lib -L/home/bekklaus/opt/fftw3_intel18/lib -lfftw3
%module_commands              module load slurm intel/compiler/64/2018/18.0.3
%execute_command              srun --propagate=STACK --kill-on-bad-exit -n {{mpi_tasks}} -N {{nodes}} --ntasks-per-node={{tasks_per_node}} palm
%execute_command_for_combine  srun --propagate=STACK -n 1 --ntasks-per-node=1 combine_plot_fields.x
%memory                       2300
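# note: the {{...}} placeholders in the commands above and in the batch directives
# below are replaced by palmrun when it creates the actual job file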

# BATCH-directives to be used for batch jobs. If $-characters are required, hide them with 3 backslashes
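# example of the escaping rule above (illustration only, not part of this setup):
# writing \\\$HOME in a BD: line puts the literal string $HOME into the generated job file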
BD:#!/bin/bash
BD:#SBATCH --job-name={{job_id}}
BD:#SBATCH --time={{cpu_hours}}:{{cpu_minutes}}:{{cpu_seconds}}
BD:#SBATCH --ntasks={{mpi_tasks}}
BD:#SBATCH --nodes={{nodes}}
BD:#SBATCH --ntasks-per-node={{tasks_per_node}}
BD:#SBATCH --partition={{queue}}
BD:#SBATCH --output={{job_protocol_file}}
BD:#SBATCH --error={{job_protocol_file}}

#
# BATCH-directives for batch jobs used to send the job protocol file back from the remote to the local host
BDT:#!/bin/bash
BDT:#SBATCH --job-name=job_transfer
BDT:#SBATCH --time=00:30:00
BDT:#SBATCH --ntasks=1
BDT:#SBATCH --nodes=1
BDT:#SBATCH --ntasks-per-node=1
BDT:#SBATCH --partition={{queue}}
BDT:#SBATCH --output={{job_transfer_protocol_file}}
BDT:#SBATCH --error={{job_transfer_protocol_file}}

#----------------------------------------------------------------------------
# INPUT-commands, executed before running PALM - lines must start with "IC:"
#----------------------------------------------------------------------------
# my settings
IC:ulimit -s unlimited        # requires --propagate=STACK in the srun command so the limit reaches all nodes
IC:export I_MPI_PMI_LIBRARY=libpmi.so
IC:export I_MPI_FABRICS=shm:ofi
IC:export I_MPI_OFI_PROVIDER=psm2
IC:module list
#IC:ulimit -c 0


# MPI-environment settings for Mistral as proposed by DKRZ
#IC:export LD_LIBRARY_PATH=/sw/rhel6-x64/netcdf/netcdf_fortran-4.4.3-parallel-openmpi2-intel14/lib:$LD_LIBRARY_PATH
#IC:export OMPI_MCA_pml=cm
#IC:export OMPI_MCA_mtl=mxm
#IC:export MXM_RDMA_PORTS=mlx5_0:1
#IC:export MXM_LOG_LEVEL=ERROR
#IC:export MXM_HANDLE_ERRORS=bt
#IC:export UCX_HANDLE_ERRORS=bt

#----------------------------------------------------------------------------
# ERROR-commands - executed when the program terminates abnormally
#----------------------------------------------------------------------------
EC:[[ \$locat = execution ]] && cat RUN_CONTROL
EC:[[ \$locat = execution ]] && cat PARTICLE_INFOS/*

#----------------------------------------------------------------------------
# OUTPUT-commands - executed when the program terminates normally
#----------------------------------------------------------------------------

# Combine 1D- and 3D-profile output (these files are not usable for plotting)
OC:[[ -f LIST_PROFIL_1D ]] && cat LIST_PROFIL_1D >> LIST_PROFILE
OC:[[ -f LIST_PROFIL ]] && cat LIST_PROFIL >> LIST_PROFILE

# Combine all particle information files
OC:[[ -f PARTICLE_INFOS/_0000 ]] && cat PARTICLE_INFOS/* >> PARTICLE_INFO