====== ORCA ======
#!/bin/bash --login
###################################################################################################
# WARNING: Please adapt all relevant parameters so that they fit the requirements of your job(s). #
###################################################################################################
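# NOTE (added hint, not part of the original template): a job script like this one is
# submitted to the scheduler with sbatch, e.g. "sbatch my_orca_job.sh" (the file name
# is only an example).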
# #SBATCH -o %x.%j.%N.out # for debugging purposes: redirect SLURM's stdout (please see man 1 sbatch for explanation of replacement symbols)
# #SBATCH -e %x.%j.%N.err # for debugging purposes: redirect SLURM's stderr (please see man 1 sbatch for explanation of replacement symbols)
#SBATCH -J ORCAJOB # job name (please change)
#SBATCH -n 1 # total number of tasks/cores for your job
#SBATCH --hint=nomultithread # IMPORTANT: hyper-threading is enabled on the compute nodes; switch it off so that each task is assigned a whole physical core
# #SBATCH --ntasks-per-node=1 # number of tasks per node
#SBATCH -N 1 # number of nodes
# #SBATCH -p smp # partition the job gets scheduled to: standard (default), smp, gpu (uncomment if you want your job to run on hosts in the SMP partition)
#SBATCH --time=00:15:00 # job run (wall clock) time in HH:MM:SS
#SBATCH --mem=4GB # amount of resident main memory PER NODE(!)
# #SBATCH --mem-per-cpu=1GB # amount of resident main memory PER CORE(!) (set only, if needed)
# #SBATCH --mail-type=END # if you want to receive notifications about job status (cf. man 1 sbatch)
# #SBATCH --mail-user=username
# loading ORCA module
module use /home/units/Fak_II/chemie/modules
module add chemistry/orca/4.1.2
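# Optional sanity check (added hint, not part of the original template): verify that the
# module was loaded and that the orca executable is found on your PATH, e.g. by running
#   module list
#   which orca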
# running ORCA in serial mode
# orca [name_of_input_file]
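# Hedged example (the input file name "job.inp" and the output redirection are
# assumptions for illustration, not prescribed by this template):
# orca job.inp > job.out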
# running ORCA in parallel mode
srun --hint=nomultithread -n ${SLURM_NTASKS} -E orca [name_of_input_file]
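# Hedged hint (added, cluster-specific details may differ): the number of parallel
# processes requested inside the ORCA input file (e.g. "%pal nprocs 8 end" or the
# "! PAL8" shortcut) should match the number of SLURM tasks requested with -n above.
# Concrete sketch for 8 cores, with a hypothetical input file "job.inp":
#   #SBATCH -n 8
#   srun --hint=nomultithread -n ${SLURM_NTASKS} -E orca job.inp > job.out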
\\
date of revision: 07-24-2019 © kraus