Multithreading, OpenMP, C/C++

I saw the piece of code below in a forum, but when I try to compile it I get some errors. I want to parallelize the region from #pragma scop up to #pragma endscop.
/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static
void kernel_fdtd_2d(int tmax,
                    int nx,
                    int ny,
                    DATA_TYPE POLYBENCH_2D(ex,NX,NY,nx,ny),
                    DATA_TYPE POLYBENCH_2D(ey,NX,NY,nx,ny),
                    DATA_TYPE POLYBENCH_2D(hz,NX,NY,nx,ny),
                    DATA_TYPE POLYBENCH_1D(_fict_,TMAX,tmax))
{
    int t, i, j;
#pragma scop
    #pragma omp parallel private (t,i,j)
    {
        #pragma omp master
        {
            for (t = 0; t < _PB_TMAX; t++)
            {
                #pragma omp for
                for (j = 0; j < _PB_NY; j++)
                    ey[0][j] = _fict_[t];
                #pragma omp barrier
                #pragma omp for collapse(2) schedule(static)
                for (i = 1; i < _PB_NX; i++)
                    for (j = 0; j < _PB_NY; j++)
                        ey[i][j] = ey[i][j] - 0.5*(hz[i][j]-hz[i-1][j]);
                #pragma omp barrier
                #pragma omp for collapse(2) schedule(static)
                for (i = 0; i < _PB_NX; i++)
                    for (j = 1; j < _PB_NY; j++)
                        ex[i][j] = ex[i][j] - 0.5*(hz[i][j]-hz[i][j-1]);
                #pragma omp barrier
                #pragma omp for collapse(2) schedule(static)
                for (i = 0; i < _PB_NX - 1; i++)
                    for (j = 0; j < _PB_NY - 1; j++)
                        hz[i][j] = hz[i][j] - 0.7* (ex[i][j+1] - ex[i][j] + ey[i+1][j] - ey[i][j]);
                #pragma omp barrier
            }
        }
    }
#pragma endscop
}

int main(int argc, char** argv)
{
    /* Retrieve problem size. */
    int tmax = TMAX;
    int nx = NX;
    int ny = NY;

    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(ex,DATA_TYPE,NX,NY,nx,ny);
    POLYBENCH_2D_ARRAY_DECL(ey,DATA_TYPE,NX,NY,nx,ny);
    POLYBENCH_2D_ARRAY_DECL(hz,DATA_TYPE,NX,NY,nx,ny);
    POLYBENCH_1D_ARRAY_DECL(_fict_,DATA_TYPE,TMAX,tmax);

    /* Initialize array(s). */
    init_array (tmax, nx, ny,
                POLYBENCH_ARRAY(ex),
                POLYBENCH_ARRAY(ey),
                POLYBENCH_ARRAY(hz),
                POLYBENCH_ARRAY(_fict_));

    /* Start timer. */
    polybench_start_instruments;

    /* Run kernel. */
    kernel_fdtd_2d (tmax, nx, ny,
                    POLYBENCH_ARRAY(ex),
                    POLYBENCH_ARRAY(ey),
                    POLYBENCH_ARRAY(hz),
                    POLYBENCH_ARRAY(_fict_));

    /* Stop and print timer. */
    polybench_stop_instruments;
    polybench_print_instruments;

    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(nx, ny, POLYBENCH_ARRAY(ex),
                                      POLYBENCH_ARRAY(ey),
                                      POLYBENCH_ARRAY(hz)));

    /* Be clean. */
    POLYBENCH_FREE_ARRAY(ex);
    POLYBENCH_FREE_ARRAY(ey);
    POLYBENCH_FREE_ARRAY(hz);
    POLYBENCH_FREE_ARRAY(_fict_);

    return 0;
}
The errors are like:
stencils/fdtd-2d/fdtd-2dp.c:80:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for
^
stencils/fdtd-2d/fdtd-2dp.c:83:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:84:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:88:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:89:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:93:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:94:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:98:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
Any help on how I can compile this would be appreciated.

Honestly, this is pretty poor OpenMP code. It does not consider data usage throughout the algorithm. What you probably want is:
int t, i, j;
#pragma omp parallel private (t,i,j)
{
    for (t = 0; t < _PB_TMAX; t++)
    {
        #pragma omp for nowait
        for (j = 0; j < _PB_NY; j++)
            ey[0][j] = _fict_[t];

        #pragma omp for collapse(2) nowait schedule(static)
        for (i = 1; i < _PB_NX; i++)
            for (j = 0; j < _PB_NY; j++)
                ey[i][j] = ey[i][j] - 0.5*(hz[i][j]-hz[i-1][j]);

        #pragma omp for collapse(2) schedule(static)
        for (i = 0; i < _PB_NX; i++)
            for (j = 1; j < _PB_NY; j++)
                ex[i][j] = ex[i][j] - 0.5*(hz[i][j]-hz[i][j-1]);
        // #pragma omp barrier <- Implicit if nowait not specified

        #pragma omp for collapse(2) schedule(static)
        for (i = 0; i < _PB_NX - 1; i++)
            for (j = 0; j < _PB_NY - 1; j++)
                hz[i][j] = hz[i][j] - 0.7*(ex[i][j+1] - ex[i][j] + ey[i+1][j] - ey[i][j]);
        // #pragma omp barrier <- Implicit if nowait not specified
    }
}
The explicit barriers should be removed: an implicit barrier already exists at the end of each omp for construct unless nowait is specified.
Furthermore, I believe the first two barriers can be dropped entirely, because there is no dependence between the first three loops: if a thread finishes its portion of one loop and immediately starts a portion of the next, there is no chance of a race condition. You can add the nowait clause to override the implicit barrier at the end of an omp for construct.
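As a generic, self-contained illustration of the implicit-barrier and nowait rules just described (the function and array names are placeholders, not part of the original code, and a and b are assumed to be independent arrays):

void scale_two_arrays(int n, double *a, double *b)
{
    #pragma omp parallel
    {
        #pragma omp for nowait   // a[] and b[] are independent, so no need to wait here
        for (int i = 0; i < n; i++)
            a[i] *= 2.0;

        #pragma omp for          // implicit barrier at the end of this loop
        for (int i = 0; i < n; i++)
            b[i] *= 2.0;
    }   // another implicit barrier at the end of the parallel region
}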
Finally, if _PB_NX and _PB_NY are large-ish, then you are very unlikely to gain any benefit by collapsing the nested loops. I would imagine that removing the collapse(2) could slightly improve the performance of the overall function.
Hope this helps.

Remove the #pragma omp master directive from your code; that will fix the compilation errors. You probably don't want to run that block only in the master thread anyway, because then you would not get any performance benefit from using OpenMP.
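To make the restriction concrete, here is a minimal sketch (the function and array names are illustrative, not from the question): a work-sharing for must bind directly to the enclosing parallel region, so the surrounding master block has to go.

void zero_array(int n, double *a)
{
    #pragma omp parallel
    {
        /* Not allowed: an 'omp for' may not be closely nested inside
           'omp master'; this is exactly the error the compiler reports:

           #pragma omp master
           {
               #pragma omp for
               for (int i = 0; i < n; i++) a[i] = 0.0;
           }
        */

        /* Allowed: the 'for' binds to the enclosing parallel region,
           so the whole team shares the iterations. */
        #pragma omp for
        for (int i = 0; i < n; i++)
            a[i] = 0.0;
    }
}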

Related

OpenMP - "#pragma omp critical" importance

So I started using OpenMP (multithreading) to speed up my matrix multiplication, and I noticed something strange: when I turn off OpenMP support (in Visual Studio 2019), my nested for loop completes 2x faster. So I removed "#pragma omp critical" to test whether it slows the process down significantly, and the process ran 4x faster than before (with OpenMP support on).
Here's my question: is "#pragma omp critical" important in a nested loop? Can't I just skip it?
#pragma omp parallel for collapse(3)
for (int i = 0; i < this->I; i++)
{
    for (int j = 0; j < A.J; j++)
    {
        m.matrix[i][j] = 0;
        for (int k = 0; k < A.I; k++)
        {
            #pragma omp critical
            m.matrix[i][j] += this->matrix[i][k] * A.matrix[k][j];
        }
    }
}
Here's my question: is "#pragma omp critical" important in a nested loop? Can't I just skip it?
If the matrices m, this, and A are all different objects, you do not need any critical region. Instead, you need to ensure that each thread writes to a different position of the matrix m, as follows:
#pragma omp parallel for collapse(2)
for (int i = 0; i < this->I; i++)
{
    for (int j = 0; j < A.J; j++)
    {
        m.matrix[i][j] = 0;
        for (int k = 0; k < A.I; k++)
        {
            m.matrix[i][j] += this->matrix[i][k] * A.matrix[k][j];
        }
    }
}
The collapse clause assigns each (i, j) pair to a single thread, so no two threads ever write to the same position of the matrix m (i.e., there is no race condition).
With collapse(3), #pragma omp critical is necessary here: the k loop is also divided among the threads, so multiple threads can update the same m.matrix[i][j] value, and the critical section is what prevents that race. It hurts performance because only one thread at a time can execute the protected assignment statement.
This would likely be better without the collapse(3) (and then you can remove the #pragma omp critical): accumulate the sum in a temporary local variable, then store it in m.matrix[i][j] after the k loop finishes.
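A sketch of that approach, written as a standalone function with plain std::vector matrices instead of the poster's matrix class (names, types, and dimensions are illustrative):

#include <vector>

// Multiply a (I x K) by b (K x J) into c (I x J). Each (i, j) entry is
// computed by exactly one thread, accumulated in a local variable, and
// written once, so no critical section is required.
void matmul(const std::vector<std::vector<double>> &a,
            const std::vector<std::vector<double>> &b,
            std::vector<std::vector<double>> &c)
{
    const int I = static_cast<int>(a.size());
    const int K = static_cast<int>(b.size());
    const int J = static_cast<int>(b[0].size());
    #pragma omp parallel for
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++) {
            double sum = 0.0;          // thread-local accumulator
            for (int k = 0; k < K; k++)
                sum += a[i][k] * b[k][j];
            c[i][j] = sum;             // single unsynchronized write
        }
    }
}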

openMP: call parallel function from parallel region

I'm trying to make my serial program parallel with OpenMP. Below is the code, where I have a big parallel region with a number of internal "#pragma omp for" sections. In the serial version I have a function fftw_shift() that contains for loops as well.
The question is how to rewrite fftw_shift() properly so that the threads that already exist in the outer parallel region split the for loops inside it, without spawning nested threads.
I'm not sure my implementation works correctly. I could inline the whole function into the parallel region, but I'm trying to work out how to handle it in the situation described.
int fftw_shift(fftw_complex *pulse, fftw_complex *shift_buf, int array_size)
{
    int j = 0; //counter
    if ((pulse != nullptr) || (shift_buf != nullptr)) {
        if (omp_in_parallel()) {
            //shift the array
            #pragma omp for private(j) //shedule(dynamic)
            for (j = 0; j < array_size / 2; j++) {
                //left to right
                shift_buf[(array_size / 2) + j][REAL] = pulse[j][REAL]; //real
                shift_buf[(array_size / 2) + j][IMAG] = pulse[j][IMAG]; //imaginary
                //right to left
                shift_buf[j][REAL] = pulse[(array_size / 2) + j][REAL]; //real
                shift_buf[j][IMAG] = pulse[(array_size / 2) + j][IMAG]; //imaginary
            }
            //rewrite the array
            #pragma omp for private(j) //shedule(dynamic)
            for (j = 0; j < array_size; j++) {
                pulse[j][REAL] = shift_buf[j][REAL]; //real
                pulse[j][IMAG] = shift_buf[j][IMAG]; //imaginary
            }
            return 0;
        }
    }
....
#pragma omp parallel firstprivate(x, phase) if(array_size >= OMP_THREASHOLD)
{
    // First half-step
    #pragma omp for schedule(dynamic)
    for (x = 0; x < array_size; x++) {
        ..
    }
    // Forward FTW
    fftw_shift(pulse_x, shift_buf, array_size);
    #pragma omp master
    {
        fftw_execute(dft);
    }
    #pragma omp barrier
    fftw_shift(pulse_kx, shift_buf, array_size);
    ...
}
If you call fftw_shift from a parallel region, but not from within a work-sharing construct (i.e. not from inside a parallel for), then you can use omp for inside it just as if the loop appeared directly in the parallel region. This is called an orphaned directive.
However, your loops just copy data, so they are limited by memory bandwidth; don't expect a perfect speedup, and how much you gain depends on your system.
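For reference, here is a self-contained sketch of an orphaned omp for (the function and variable names are made up for illustration, not taken from the question):

#include <vector>

// The 'omp for' here is an orphaned work-sharing directive: it binds to
// whichever parallel region is active when the function is called, so the
// existing team splits the iterations without spawning new threads.
static void copy_shifted(const std::vector<double> &src, std::vector<double> &dst)
{
    const int half = static_cast<int>(src.size()) / 2;
    #pragma omp for
    for (int j = 0; j < half; j++) {
        dst[j]        = src[half + j];
        dst[half + j] = src[j];
    }
    // implicit barrier at the end of the loop (no 'nowait' given)
}

void caller(const std::vector<double> &src, std::vector<double> &dst)
{
    #pragma omp parallel          // the team is created once, here
    {
        copy_shifted(src, dst);   // the same team shares this loop
        #pragma omp master
        {
            /* serial work, e.g. a call that must run on one thread */
        }
        #pragma omp barrier
    }
}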

Nested loop OpenMP parallelizing, private or shared index?

Imagine you have a nested for loop in a parallel region, something like:
#pragma omp parallel
{
for (int i = 0, ...) {
for (int j = 0, ...) { }}}
or
#pragma omp parallel
{
for (int i = 0, ...) {
for (int j = i, ...) { }}}
If we use #pragma omp for, the i index automatically becomes private. But... do we need to make the j index private or shared? What is the effect?
#pragma omp parallel
{
#pragma omp for shared(j)
for (int i = 0, ...) {
for (int j = 0, ...) { }}}
or
#pragma omp parallel
{
#pragma omp for private(j)
for (int i = 0, ...) {
for (int j = 0, ...) { }}}
Thanks in advance!
Everything declared inside the parallel region is automatically private. That is (presumably) the behavior you want: each iteration of i should loop through all j, and the j loops are all independent (thus private, not shared). However, you are actually missing the important parallel part: if you don't write
#pragma omp parallel for
but only
#pragma omp for
then you won't get anything happening in parallel (unless you first created a parallel region with #pragma omp parallel in an enclosing scope)!
private(j) has no effect, as j is already private by default (it is scoped within the i for loop, so when a new thread is created, j is specific to that thread):
#pragma omp parallel for private(j)
for (int i = 0, ...) {
for (int j = 0, ...) { }}
Using shared(j) should have no effect either, as stated above, since the scope of j is local to each iteration of i. If you widen the scope of j so that it really is shared (for example, by declaring it outside the parallel region), you will encounter a race condition.
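A short sketch of the two cases (the loop bounds are arbitrary placeholders):

void nested_loop_examples()
{
    // Case 1: j declared inside the loop body -- automatically private,
    // no clause needed.
    #pragma omp parallel for
    for (int i = 0; i < 100; i++) {
        for (int j = 0; j < 100; j++) {
            /* each thread has its own i and j */
        }
    }

    // Case 2: j declared outside the parallel region -- it would be shared
    // by default, so it must be privatized to avoid a race on the counter.
    int j;
    #pragma omp parallel for private(j)
    for (int i = 0; i < 100; i++) {
        for (j = 0; j < 100; j++) {
            /* j is a separate copy per thread */
        }
    }
}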

pragma omp parallel for vs. pragma omp parallel

In C++ with OpenMP, is there any difference between
#pragma omp parallel for
for(int i=0; i<N; i++) {
...
}
and
#pragma omp parallel
for(int i=0; i<N; i++) {
...
}
?
Thanks!
#pragma omp parallel
for(int i=0; i<N; i++) {
...
}
This code creates a parallel region, and each individual thread executes the entire loop. In other words, every thread runs all N iterations, instead of the team splitting up the loop and completing the iterations just once.
You can do:
#pragma omp parallel
{
    #pragma omp for
    for( int i=0; i < N; ++i )
    {
    }

    #pragma omp for
    for( int i=0; i < N; ++i )
    {
    }
}
This will create one parallel region (i.e. one fork/join, which is expensive, so you don't want to do it for every loop) and run multiple loops in parallel within that region. Just make sure that, if you are already inside a parallel region, you use #pragma omp for rather than #pragma omp parallel for, as the latter creates a nested parallel region inside each existing thread instead of sharing the work among the team you already have.
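A small runnable sketch (not from the answer) that makes the difference visible by counting how many times each loop body runs in total:

#include <cstdio>
#include <omp.h>

int main()
{
    const int N = 8;
    int body_runs_parallel = 0, body_runs_parallel_for = 0;

    // Every thread executes the whole loop: the body runs N * num_threads times.
    #pragma omp parallel
    for (int i = 0; i < N; i++) {
        #pragma omp atomic
        body_runs_parallel++;
    }

    // The team shares the iterations: the body runs exactly N times.
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        #pragma omp atomic
        body_runs_parallel_for++;
    }

    std::printf("parallel: %d, parallel for: %d (threads: %d)\n",
                body_runs_parallel, body_runs_parallel_for, omp_get_max_threads());
    return 0;
}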

What happens in OpenMP when there's a pragma for inside a pragma for?

At the start of #pragma omp parallel a team of threads is created; then, when we reach #pragma omp for, the workload is distributed among them. What happens if this for loop has another for loop inside it, and I place a #pragma omp for before the inner one as well? Does each thread create new threads? If not, which threads are assigned this task? What exactly happens in this situation?
By default, no threads are spawned for the inner loop. It is done sequentially using the thread that reaches it.
This is because nesting is disabled by default. However, if you enable nesting via omp_set_nested(), then a new set of threads will be spawned.
However, if you aren't careful, this will result in p^2 threads (each of the original p threads spawns another p threads), which is why nesting is disabled by default.
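A sketch of what enabling nesting looks like (omp_set_nested is deprecated since OpenMP 5.0 in favour of omp_set_max_active_levels; both calls are shown purely for illustration):

#include <cstdio>
#include <omp.h>

int main()
{
    omp_set_nested(1);             // legacy way to enable nested parallelism
    omp_set_max_active_levels(2);  // modern equivalent: allow two active levels

    #pragma omp parallel num_threads(2)        // outer team of 2
    {
        #pragma omp parallel num_threads(2)    // each outer thread forks 2 more
        {
            // With nesting enabled this prints 4 lines (2 x 2 threads);
            // with nesting disabled the inner team has size 1, so only 2.
            #pragma omp critical
            std::printf("outer %d, inner %d\n",
                        omp_get_ancestor_thread_num(1), omp_get_thread_num());
        }
    }
    return 0;
}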
In a situation like the following:
#pragma omp parallel
{
    #pragma omp for
    for(int ii = 0; ii < n; ii++) {
        /* ... */
        #pragma omp for
        for(int jj = 0; jj < m; jj++) {
            /* ... */
        }
    }
}
what happens is that you trigger undefined behavior, as you violate the OpenMP standard. More precisely, you violate the restrictions that appear in section 2.5 (worksharing constructs):
The following restrictions apply to worksharing constructs:
Each worksharing region must be encountered by all threads in a team or by none at all.
The sequence of worksharing regions and barrier regions encountered must be the same for every thread in a team.
This is clearly shown in the examples A.39.1c and A.40.1c:
Example A.39.1c: The following example of loop construct nesting is conforming because the inner and outer loop regions bind to different parallel regions:
void work(int i, int j) {}

void good_nesting(int n)
{
    int i, j;
    #pragma omp parallel default(shared)
    {
        #pragma omp for
        for (i=0; i<n; i++) {
            #pragma omp parallel shared(i, n)
            {
                #pragma omp for
                for (j=0; j < n; j++)
                    work(i, j);
            }
        }
    }
}
Example A.40.1c: The following example is non-conforming because the inner and outer loop regions are closely nested:
void work(int i, int j) {}

void wrong1(int n)
{
    #pragma omp parallel default(shared)
    {
        int i, j;
        #pragma omp for
        for (i=0; i<n; i++) {
            /* incorrect nesting of loop regions */
            #pragma omp for
            for (j=0; j<n; j++)
                work(i, j);
        }
    }
}
Notice that this is different from:
#pragma omp parallel for
for(int ii = 0; ii < n; ii++) {
    /* ... */
    #pragma omp parallel for
    for(int jj = 0; jj < m; jj++) {
        /* ... */
    }
}
in which you try to spawn a nested parallel region. Only in that case does the discussion in Mysticial's answer hold.