NAMD
- namdjob
#!/bin/bash --login
###################################################################################################
# WARNING: Please adapt all relevant parameters so that they fit the requirements of your job(s). #
###################################################################################################
#
# Optional directives — uncomment and adapt as needed:
# #SBATCH -o %x.%j.%N.out        # for debugging purposes: redirect SLURM's stdout (please see man 1 sbatch for explanation of replacement symbols)
# #SBATCH -e %x.%j.%N.err        # for debugging purposes: redirect SLURM's stderr (please see man 1 sbatch for explanation of replacement symbols)
# #SBATCH --mail-type=END        # if you want to receive notifications about job status (cf. man 1 sbatch)
# #SBATCH --mail-user=username
#SBATCH -J NAMDJOB               # job name/description
#SBATCH -c 4                     # number of CPU cores per task (--cpus-per-task; cf. man 1 sbatch)
#SBATCH --hint=nomultithread     # IMPORTANT: hyper-threading is activated; switch off and attribute whole core to task
#SBATCH -N 1                     # number of nodes
#SBATCH -p gpu                   # partition the job gets scheduled to: standard (default), smp, gpu (uncomment, if you want your job to run on hosts in SMP partition)
#SBATCH --gres=gpu:1             # demand GPU resources (please do not use more than 1 CUDA accelerator per NAMD job; NAMD does not benefit from several accelerators and you will waste precious computing resources!)
#SBATCH --time=00:15:00          # job run (wall clock) time in HH:MM:SS
#SBATCH --mem=4GB                # amount of resident main memory PER NODE(!)

# loading NAMD module
module use /home/units/Fak_II/chemie/modules
module add biomodeling/namd/2.13

# Launching NAMD (please do not employ option switches +devices and +p !)
# namd2 [name_of_input_file]
date of revision: 02-26-2020 © kraus