Programming Details
C++ is a general-purpose, imperative programming language. It is derived from the C language and shares many of its constructs, while also supporting object-oriented programming paradigms.
There is direct support for OpenMP and OpenMPI.
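OpenMP usage is illustrated in the programming examples below. As a complementary sketch for the MPI side (assuming an OpenMPI module that provides the usual mpicxx wrapper; file and module names are illustrative, not taken from this page), a minimal MPI program looks like this:

#include <mpi.h>
#include <iostream>

int main(int argc, char *argv[])
{
    // Start the MPI runtime
    MPI_Init(&argc, &argv);

    int rank = 0, nprocs = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // rank of this process
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs); // total number of processes

    std::cout << "Process " << rank << " of " << nprocs << std::endl;

    // Shut the MPI runtime down
    MPI_Finalize();
    return 0;
}

Such a program would typically be compiled with mpicxx and launched with mpirun or srun; the exact module to load depends on the cluster's OpenMPI installation.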
Programming example
#include <omp.h>
#include <iostream>

int main()
{
    int thread_number;

    // Each thread keeps its own private copy of thread_number
    #pragma omp parallel private(thread_number)
    {
        // The 100 iterations are divided statically between the threads
        #pragma omp for schedule(static) nowait
        for (int i = 0; i < 100; i++)
        {
            thread_number = omp_get_thread_num();
            std::cout << "Thread " << thread_number << " says " << i << std::endl;
        }
    }
    return 0;
}
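By default the parallel region above uses as many threads as the OpenMP runtime chooses (typically one per available core). The thread count can be overridden at run time through the standard OMP_NUM_THREADS environment variable, for example (the binary name test2 matches the compilation step below):

[username@login01 ~]$ export OMP_NUM_THREADS=4
[username@login01 ~]$ ./test2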
A second example, shown below, multiplies two square matrices in parallel and times the computation with std::chrono. The matrix size is taken from the first command-line argument.

#include <cstdlib>
#include <iostream>
#include <chrono>
#include <omp.h>

using namespace std;

int main(int argc, char *argv[])
{
    if (argc < 2) {
        cerr << "Usage: " << argv[0] << " <matrix size>" << endl;
        return 1;
    }

    const int size = atoi(argv[1]);
    const int maxVal = 255;

    // Allocate the two input matrices and the result matrix
    double** m1  = new double*[size];
    double** m2  = new double*[size];
    double** res = new double*[size];
    for (int i = 0; i < size; i++) {
        m1[i]  = new double[size];
        m2[i]  = new double[size];
        res[i] = new double[size];
    }

    // Use one OpenMP thread per available processor
    int procs = omp_get_num_procs();
    omp_set_num_threads(procs);

    // Fill the input matrices with pseudo-random values
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            m1[row][col] = rand() % maxVal;
            m2[row][col] = rand() % maxVal;
        }
    }

    auto start = std::chrono::high_resolution_clock::now();

    #pragma omp parallel shared(m1, m2, res)
    {
        // Rows are handed out dynamically; variables declared inside the
        // loops are automatically private to each thread
        #pragma omp for schedule(dynamic) nowait
        for (int row = 0; row < size; row++) {
            for (int col = 0; col < size; col++) {
                double temp = 0;
                for (int ele = 0; ele < size; ele++) {
                    temp += m1[row][ele] * m2[ele][col];
                }
                res[row][col] = temp;
            }
        }
    }

    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double, std::micro> time = end - start;

    // Report the elapsed time in seconds
    cout << (time.count() / 1000 / 1000) << endl;

    // Free the matrices
    for (int i = 0; i < size; i++) {
        delete[] m1[i];
        delete[] m2[i];
        delete[] res[i];
    }
    delete[] m1;
    delete[] m2;
    delete[] res;

    return 0;
}
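As an illustrative alternative (not part of the original page), the same multiplication can be written with std::vector, which avoids the manual new/delete bookkeeping; the fixed size of 512 is arbitrary:

#include <omp.h>
#include <iostream>
#include <vector>

int main()
{
    const int size = 512;  // arbitrary fixed size for brevity
    std::vector<std::vector<double>> m1(size, std::vector<double>(size, 1.0));
    std::vector<std::vector<double>> m2(size, std::vector<double>(size, 2.0));
    std::vector<std::vector<double>> res(size, std::vector<double>(size, 0.0));

    // Each (row, col) element is written by exactly one thread, so there is no data race
    #pragma omp parallel for schedule(dynamic)
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            double temp = 0.0;
            for (int ele = 0; ele < size; ele++) {
                temp += m1[row][ele] * m2[ele][col];
            }
            res[row][col] = temp;
        }
    }

    std::cout << "res[0][0] = " << res[0][0] << std::endl;
    return 0;
}

Nested vectors are convenient but not contiguous in memory; for performance-critical kernels a single flat std::vector of size*size elements, indexed as row*size + col, is usually preferred.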
Compilation
The program is compiled as follows; the optional Intel compiler is also available as an alternative:
[username@login01 ~]$ module add gcc/8.2.0
[username@login01 ~]$ g++ -o test2 -fopenmp test2.c (openMP support)
[username@login01 ~]$ ./test2
Thread 22 says 32
Thread 11 says 39
.....
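If the Intel compiler is used instead, the equivalent steps might look like the following sketch (assuming the intel/2018 module provides icpc and that -qopenmp enables OpenMP in that release):

[username@login01 ~]$ module add intel/2018
[username@login01 ~]$ icpc -o test2 -qopenmp test2.c
[username@login01 ~]$ ./test2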
Modules Available
The following modules are available for C++:
- module add gcc/8.2.0 (GNU compiler)
- module add intel/2018 (Intel compiler)
Note: OpenMP support is built into the compilers listed above and does not require any additional module to be loaded.
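Other installed compiler versions can be listed before loading one, for example (output varies between clusters):

[username@login01 ~]$ module avail gcc
[username@login01 ~]$ module avail intel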
Usage Examples
Batch example
#!/bin/bash
#SBATCH -J openmpi-single-node
#SBATCH -N 1
#SBATCH --ntasks-per-node 28
#SBATCH -o %N.%j.%a.out
#SBATCH -e %N.%j.%a.err
#SBATCH -p compute
#SBATCH --exclusive
#SBATCH --mail-user=<your email address here>

echo $SLURM_JOB_NODELIST

module purge
module add gcc/8.2.0

export I_MPI_DEBUG=5
export I_MPI_FABRICS=shm:tmi
export I_MPI_FALLBACK=no

/home/user/CODE_SAMPLES/OPENMP/demo
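For a pure OpenMP binary such as the examples above, the thread count can also be tied to the Slurm allocation. The following variant is a sketch only (the job name, cpus-per-task value and binary path are illustrative):

#!/bin/bash
#SBATCH -J openmp-single-node
#SBATCH -N 1
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-task=28
#SBATCH -p compute
#SBATCH --exclusive

module purge
module add gcc/8.2.0

# One OpenMP thread per allocated core
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

/home/user/CODE_SAMPLES/OPENMP/demo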
[username@login01 ~]$ sbatch demo.job
Submitted batch job 234552
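Once submitted, the job can be monitored with the usual Slurm commands, for example (sacct requires job accounting to be enabled on the cluster):

[username@login01 ~]$ squeue -u username
[username@login01 ~]$ sacct -j 234552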