Difference between revisions of "Applications/Ansys"
From HPC
MSummerbell (talk | contribs) (→Ansys Fluent) |
MSummerbell (talk | contribs) (→Job Submission Script) |
||
| Line 7: | Line 7: | ||
===Job Submission Script=== | ===Job Submission Script=== | ||
| + | <pre style="background-color: #C8C8C8; color: black; border: 2px solid blue; font-family: monospace, sans-serif;"> | ||
| + | #!/bin/bash | ||
| + | #SBATCH -J ANSYS_FLUENT # sensible name for the job | ||
| + | #SBATCH -N 1 | ||
| + | #SBATCH -n 28 | ||
| + | #SBATCH -o %N.%j.%a.out | ||
| + | #SBATCH -e %N.%j.%a.err | ||
| + | #SBATCH -p compute | ||
| + | #SBATCH --exclusive | ||
| + | |||
| + | # load the relevant module files | ||
| + | module purge | ||
| + | module load intel/mpi/64/5.1.3.181 | ||
| + | module load ansys/v172 | ||
| + | |||
| + | export FLUENT_GUI=off | ||
| + | export I_MPI_ROOT=/trinity/clustervision/CentOS/7/apps/intel/impi/5.1.3.181 | ||
| + | export I_MPI_DEBUG=5 | ||
| + | export I_MPI_FABRICS=shm:tmi | ||
| + | export I_MPI_FALLBACK=no | ||
| + | |||
| + | if [ -z "$SLURM_NPROCS" ]; then | ||
| + | N=$(( $(echo $SLURM_TASKS_PER_NODE | sed -r 's/([0-9]+)\(x([0-9]+)\)/\1 * \2/') )) | ||
| + | else | ||
| + | N=$SLURM_NPROCS | ||
| + | fi | ||
| + | echo $SLURM_JOB_NODELIST | ||
| + | echo $SLURM_NPROCS | ||
| + | echo -e "N: $N\n"; | ||
| + | |||
| + | # run fluent in batch on the allocated node(s) | ||
| + | srun hostname -s > hostfile | ||
| + | FLUENT_ARCH=lnamd64 export FLUENT_ARCH | ||
| + | export LD_LIBRARY_PATH=/usr/lib64/psm2-compat:$LD_LIBRARY_PATH | ||
| + | |||
| + | fluent -ssh 3ddp -g -t$N -mpi=intel -pib.infinipath -cnf=hostfile -i my_fluent_file | ||
| + | </pre> | ||
Revision as of 15:15, 27 January 2017
Application Details
- Versions: V17.0, V17.2
- Module names: ansys/v170, ansys/v172
- License:
Ansys Fluent
Job Submission Script
#!/bin/bash
# SLURM job-submission script: run ANSYS Fluent v17.2 in batch (no GUI)
# over Intel MPI on one exclusive 28-core compute node.
#SBATCH -J ANSYS_FLUENT      # sensible name for the job
#SBATCH -N 1                 # one node
#SBATCH -n 28                # 28 MPI tasks (a full node)
#SBATCH -o %N.%j.%a.out
#SBATCH -e %N.%j.%a.err
#SBATCH -p compute
#SBATCH --exclusive

# load the relevant module files
module purge
module load intel/mpi/64/5.1.3.181
module load ansys/v172

# Headless Fluent; point Intel MPI at the cluster install and pin the
# shm:tmi fabric with no fallback to slower transports.
export FLUENT_GUI=off
export I_MPI_ROOT=/trinity/clustervision/CentOS/7/apps/intel/impi/5.1.3.181
export I_MPI_DEBUG=5
export I_MPI_FABRICS=shm:tmi
export I_MPI_FALLBACK=no

# Total task count N: SLURM_NPROCS when -n was given; otherwise derive it
# from SLURM_TASKS_PER_NODE (e.g. "28(x2)" -> 28 * 2).
if [ -z "$SLURM_NPROCS" ]; then
  N=$(( $(echo "$SLURM_TASKS_PER_NODE" | sed -r 's/([0-9]+)\(x([0-9]+)\)/\1 * \2/') ))
else
  N=$SLURM_NPROCS
fi
echo "$SLURM_JOB_NODELIST"
echo "$SLURM_NPROCS"
printf 'N: %s\n\n' "$N"   # printf instead of non-portable 'echo -e'

# run fluent in batch on the allocated node(s)
srun hostname -s > hostfile
export FLUENT_ARCH=lnamd64
export LD_LIBRARY_PATH=/usr/lib64/psm2-compat:$LD_LIBRARY_PATH

fluent -ssh 3ddp -g -t"$N" -mpi=intel -pib.infinipath -cnf=hostfile -i my_fluent_file