#!/bin/bash --login
###################################################################################################
# WARNING: Please adapt all relevant parameters so that they fit the requirements of your job(s). #
# Questions and Remarks welcome to Sebastian Kraus                                                #
###################################################################################################
# NOTE: every #SBATCH directive below MUST start at the beginning of its own line, before the
# first executable command, or sbatch will silently ignore it.
#
# Replacement symbols usable in -o/-e filenames:
# %x: job name; %j: job id; %N: node; %t: task id; %a: array id (and others)

# #SBATCH -o %x.%j.%N.out       # for debugging purposes: redirect SLURM's stdout (please see man 1 sbatch for explanation of replacement symbols)
# #SBATCH -e %x.%j.%N.err       # for debugging purposes: redirect SLURM's stderr (please see man 1 sbatch for explanation of replacement symbols)
# #SBATCH -D $PWD               # if needed change to current working directory (NOTE(review): sbatch does not expand $PWD in directives -- use a literal path here)
#SBATCH -J jobname              # job name
#SBATCH -n 1                    # total number of tasks/cores for your job
#SBATCH --hint=nomultithread    # IMPORTANT: hyper-threading is activated; switch off and attribute whole core to task
# #SBATCH --ntasks-per-node=1   # number of tasks per node
#SBATCH -N 1                    # number of nodes
# #SBATCH -p smp                # partition the job gets scheduled: standard (default), smp, gpu (uncomment, if you want your job to run on hosts in SMP partition)
#SBATCH --time=00:15:00         # job run (wall clock) time in HH:MM:SS
#SBATCH --mem=4GB               # amount of resident main memory PER NODE(!)
# #SBATCH --mem-per-cpu=1GB     # amount of resident main memory PER CORE(!) (set only, if needed)
# #SBATCH --gres=gpu:tesla:1    # GPU resources (only with gpu partition!)
# #SBATCH --mail-type=END       # if your want to receive notifications about job status (cf. man 1 sbatch)
# #SBATCH --mail-user=username

# and now your job definition ;-)

# Load required environment modules, e.g.: module add python/3.11
module add [your_modules]

# Replace with the actual command(s) your job should run.
[your_commands]