Applications/CP2K
Application Details
- Description : CP2K is a quantum chemistry and solid-state physics program package that performs atomistic simulations of solid-state, liquid, molecular and biological systems, with electronic structure methods including density functional theory (using a mixed Gaussian and plane-wave approach), Hartree-Fock, and post-Hartree-Fock methods such as MP2 and RPA
- Versions : 3.0
- Module names : cp2k/3.0
- License: Freely available under the GPL license
Modules Available
- module add cp2k/3.0
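To run CP2K interactively or in your own scripts, load the module first. A minimal sketch using the standard environment-modules commands that also appear in the batch scripts below:

# start from a clean environment and load CP2K 3.0
module purge
module add cp2k/3.0

# confirm the loaded modules
module list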
Usage Examples
Batch Submission
Compute Queue
#!/bin/bash
#SBATCH -J cp2k-cpu
#SBATCH -N 120
#SBATCH --ntasks-per-node 14
#SBATCH -o %N.%j.%a.out
#SBATCH -e %N.%j.%a.err
#SBATCH -p compute
#SBATCH --exclusive

echo $SLURM_JOB_NODELIST

module purge
module add intel/mkl/64/11.3.2
module add intel/mpi/64/5.1.3.181
module add intel/compiler/64/2016
module add cp2k/3.0

export I_MPI_FABRICS=shm:tmi
export I_MPI_FALLBACK=no

module list
mpirun --version

# calculating the number of processes
NP=$(( $SLURM_JOB_NUM_NODES * $SLURM_NTASKS_PER_NODE ))
echo $NP "processes"

CP2K=/home/user/cp2k/cp2k/exe/Linux-x86-64-intel-host/cp2k.psmp
export OMP_NUM_THREADS=2

mpirun -genvall -np $NP env PSM_TRACEMASK=0x101 $CP2K H2O-64-RI-MP2-TZ.inp > H2O-64-RI-MP2-TZ-omp2.out

With 120 nodes and 14 tasks per node, the script launches NP = 120 x 14 = 1680 MPI ranks. The cp2k.psmp executable is the hybrid MPI/OpenMP build of CP2K, so each rank also runs two OpenMP threads (OMP_NUM_THREADS=2).
[username@login01 ~]$ sbatch cp2k-test.job
Submitted batch job 189522
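After submission, progress can be checked with standard Slurm commands. A short sketch, assuming the job ID reported above; the output and error file names follow the %N.%j.%a pattern set in the script (%N node name, %j job ID, %a array index):

# show the job's state while it is queued or running
squeue -j 189522

# after completion, list the output and error files for this job
ls *.189522.*.out *.189522.*.err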
GPU Queue
#!/bin/bash
#SBATCH -J cp2k-gpu
#SBATCH -N 1
#SBATCH --ntasks-per-node 24
#SBATCH -o %N.%j.%a.out
#SBATCH -e %N.%j.%a.err
#SBATCH -p gpu
#SBATCH --gres=gpu:1
#SBATCH --exclusive

module purge
module add cp2k/3.0
module add cuda/7.5.18
module add intel/mkl/64/11.3.2
module add intel/mpi/64/5.1.3.181

module list
nvidia-smi -a
mpirun --version

# calculating the number of processes
NP=$(( $SLURM_JOB_NUM_NODES * $SLURM_NTASKS_PER_NODE ))
echo $NP "processes"

CP2K=/trinity/clustervision/CentOS/7/apps/cp2k/build-v1intel-cp2k-20151010-120408/cp2k/exe/Linux-x86-64-cuda/cp2k.sopt

$CP2K H2O-64.inp > H2O-64.out

The cp2k.sopt executable is the serial build of CP2K with CUDA support, so the input runs as a single process that offloads work to the allocated GPU; NP is computed here but not used in the final command.
[username@login01 ~]$ sbatch cp2k-test-gpu.job
Submitted batch job 189523
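Before submitting, it can be useful to confirm that a GPU is actually visible on the gpu partition. A minimal sketch using standard Slurm options and the same partition and gres names as the script above (whether interactive srun jobs are permitted depends on the cluster's configuration):

# request one GPU in the gpu partition and run nvidia-smi on the allocated node
srun -p gpu --gres=gpu:1 nvidia-smi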