This is my first time posting, so I apologise in advance for my novice mistakes. Please also excuse the fact that not all variable names are in English. My problem is the following: I've written this code using OpenMP both in Visual Studio 2010 and in Eclipse for C/C++ with the Cygwin GCC compiler toolchain. In Visual Studio I get a speed-up, but in Eclipse I get a slowdown of twice the serial version. Can someone please explain what I have done wrong? In short, I'm just simulating the speed-up of copying from an array of 3D vectors into a double array in order to send it over MPI.
#include <omp.h>
#include <time.h>
#include <stdio.h>
#include <vector>
const int NUMAR_FORME=10;
const int NUMAR_SECUNDE_SIMULATE=60; //number of buffers
const int dimensiuni_forme[10]={100,200,300,400,500,600,700,800,900,10000}; //size of each buffer
//-------- the buffers, currently only worker_buffer and buff are used
std::vector<std::vector<std::vector<double> > > worker_buffer;
std::vector<std::vector<double> > send_buffer,corect;
double **buff;
double **worker_buffer1;
long i,j,k,l;
int flag=0;
int numarator=0; //number of tests run
clock_t start;

int main()
{
start=clock();
worker_buffer.resize(1);
buff = new double* [2];
int de_scris=0; //this tells me which buffer to store into; now I alternate buff[0], buff[1], buff[0], buff[1]
worker_buffer[0].resize(NUMAR_SECUNDE_SIMULATE);
for(i=0;i<NUMAR_SECUNDE_SIMULATE;i++)
worker_buffer[0][i].resize(dimensiuni_forme[9]);
while(numarator<60)
{
if(numarator!=0)
delete [] buff[de_scris];
if(numarator!=0)
de_scris=(de_scris+1)%2;
long limita;
limita=NUMAR_SECUNDE_SIMULATE*dimensiuni_forme[9]*3; //3-comes from the fact that I will have a 3D vector structure
buff[de_scris]= new double [limita];
for(i=0;i<NUMAR_SECUNDE_SIMULATE;i++)
{ for(j=0;j<dimensiuni_forme[9];j++)
{
worker_buffer[0][i][j]=(i*dimensiuni_forme[9]+j)*3;
buff[de_scris][(i*dimensiuni_forme[9]+j)*3]=worker_buffer[0][i][j];
buff[de_scris][(i*dimensiuni_forme[9]+j)*3+1]=worker_buffer[0][i][j]+0.5;
buff[de_scris][(i*dimensiuni_forme[9]+j)*3+2]=worker_buffer[0][i][j]+0.75;
}
}
numarator++;
}
start=clock()-start;
printf("TICKS TOTAL %ld \n",start);
bool ad=true;
long nr;
for(i=0;i<NUMAR_SECUNDE_SIMULATE*dimensiuni_forme[9]*3;i++)
{
if(i%3==0)
nr=i;
if(i%3==0 && buff[de_scris][i]!=i)
ad=false;
else
if(i%3==1 &&buff[de_scris][i]!=(nr+0.5))
ad=false;
else
if(i%3==2 && buff[de_scris][i]!=(nr+0.75))
ad=false;
}
if(ad==false)
printf("not correct \n");
start=clock();
numarator=0;
//parallel version
while(numarator<60)
{
if(numarator!=0)
delete [] buff[de_scris];
long index, limita,id;
omp_set_num_threads(2);
if(numarator!=0)
de_scris=(de_scris+1)%2;
limita=NUMAR_SECUNDE_SIMULATE*dimensiuni_forme[9]*3; //3-
buff[de_scris]= new double [limita];
#pragma omp parallel shared(worker_buffer,limita,buff) private(index,id)
{
printf("intram cu %d threaduri \n", omp_get_num_threads());
id=omp_get_thread_num();
//index=id;
for(index=id;(index*3)<limita;index+=omp_get_num_threads())
{
buff[de_scris][index*3]=worker_buffer[0][index/dimensiuni_forme[9]][index%dimensiuni_forme[9]]; //this is where send_buff[index].x will go
buff[de_scris][index*3+1]=buff[de_scris][index*3]+0.5;
buff[de_scris][index*3+2]=buff[de_scris][index*3]+0.75;
}
// index+=omp_get_num_threads();
}//end parallel zone
numarator++;
}
start=clock()-start;
printf("TICKS TOTAL %ld \n",start);
ad=true;
//testing for correctness
for(i=0;i<NUMAR_SECUNDE_SIMULATE*dimensiuni_forme[9]*3;i++)
{
if(i%3==0)
nr=i;
if(i%3==0 && buff[de_scris][i]!=i)
ad=false;
else
if(i%3==1 &&buff[de_scris][i]!=(nr+0.5))
ad=false;
else
if(i%3==2 && buff[de_scris][i]!=(nr+0.75))
ad=false;
}
if(ad==false)
printf("not correct \n");
return 0;
}
Judging by how you organized this for loop:
for(index=id;(index*3)<limita;index+=omp_get_num_threads())
{
buff[de_scris][index*3]=worker_buffer[0][index/dimensiuni_forme[9]][index%dimensiuni_forme[9]]; //this is where send_buff[index].x will go
buff[de_scris][index*3+1]=buff[de_scris][index*3]+0.5;
buff[de_scris][index*3+2]=buff[de_scris][index*3]+0.75;
}
and assuming that you have 4 threads, your threads will get interleaved index values:
thread 0: 0, 4, 8, 12,...
thread 1: 1, 5, 9, 13,...
thread 2: 2, 6, 10, 14,...
thread 3: 3, 7, 11, 15,...
which may be causing false sharing (cache-line ping-pong): values written by different threads can land on the same cache line, and the resulting coherence traffic slows down your execution.
Try to use a simple for loop with static partitioning instead, in order to get contiguous partitions:
#pragma omp parallel for
for(index = 0; index < limita / 3;index++)
{
buff[de_scris][index*3]=worker_buffer[0][index/dimensiuni_forme[9]][index%dimensiuni_forme[9]]; //this is where send_buff[index].x will go
buff[de_scris][index*3+1]=buff[de_scris][index*3]+0.5;
buff[de_scris][index*3+2]=buff[de_scris][index*3]+0.75;
}
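With the default schedule(static), each thread gets one contiguous block of iterations, so neighbouring writes come from the same thread. Just for illustration (a sketch, not a drop-in replacement), that partitioning is roughly equivalent to this manual blocking, reusing the names from your question:
#pragma omp parallel
{
    long nth   = omp_get_num_threads();
    long id    = omp_get_thread_num();
    long n     = limita / 3;                  // number of 3-double records
    long chunk = (n + nth - 1) / nth;         // ceiling division
    long begin = id * chunk;
    long end   = (begin + chunk < n) ? begin + chunk : n;
    for (long index = begin; index < end; ++index)
    {
        // threads only touch neighbouring cache lines at chunk boundaries
        buff[de_scris][index*3]   = worker_buffer[0][index/dimensiuni_forme[9]][index%dimensiuni_forme[9]];
        buff[de_scris][index*3+1] = buff[de_scris][index*3] + 0.5;
        buff[de_scris][index*3+2] = buff[de_scris][index*3] + 0.75;
    }
}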
I am trying to find the data race in my code but I just can't seem to grasp why it happens. The data in the threads is used read-only and the only variable that is written to is protected by a critical region.
I tried using the Intel Inspector, but I am compiling with g++ 9.3.0 and apparently even the 2021 version can't deal with its OpenMP implementation. The release notes do not explicitly list it as an exception, as they did for older versions, but there is a warning about false positives because it is not supported. It also always shows a data race for the pragma statements, which isn't helpful at all.
My current suspects are either Eigen or the fact that I use a reference to a std::vector. I compile Eigen itself with EIGEN_DONT_PARALLELIZE so it doesn't interfere with nested parallelism, although I don't think I use anything that would trigger it anyway.
Edit:
Not sure if it is really a "data race" (or wrong memory access?), but the example produces non-deterministic output, in the sense that the result differs for the same input. If this happens, the loop in main breaks. With more than one thread this happens early (after 5-12 iterations usually). If I run it with one thread only, or compile without OpenMP, I have to end the example program manually.
Minimal (not) working example below.
#include <Eigen/Dense>
#include <vector>
#include <iostream>
#include <cmath>   // std::abs
#include <limits>  // std::numeric_limits
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_set_num_threads(number)
#endif
typedef Eigen::Matrix<double, 9, 1> Vector9d;
typedef std::vector<Vector9d, Eigen::aligned_allocator<Vector9d>> Vector9dList;
Vector9d derivPath(const Vector9dList& pathPositions, int index){
int n = pathPositions.size()-1;
if(index >= 0 && index < n+1){
// path is one point, no derivative possible
if(n == 0){
return Vector9d::Zero();
}
else if(index == n){
return Vector9d::Zero();
}
// path is a line, derivative is in the direction of start to end
else {
return n * (pathPositions[index+1] - pathPositions[index]);
}
}
else{
return Vector9d::Zero();
}
}
// ********************************
// data race occurs here somewhere
double errorFunc(const Vector9dList& pathPositions){
int n = pathPositions.size()-1;
double err = 0.0;
#pragma omp parallel default(none) shared(pathPositions, err, n)
{
double err_private = 0;
#pragma omp for schedule(static)
for(int i = 0; i < n+1; ++i){
Vector9d derivX_i = derivPath(pathPositions, i);
// when I replace this with pathPositions[i][0] the loop in the main doesn't break
// (or at least I always had to manually end the program)
// but it does break if I use derivX_i[0];
double err_i = derivX_i.norm();
err_private = err_private + err_i;
}
#pragma omp critical
{
err += err_private;
}
}
err = err / static_cast<double>(n);
return err;
}
// ***************************************
int main(int argc, char **argv){
// setup data
int n = 100;
Vector9dList pathPositions;
pathPositions.reserve(n+1);
double a = 5.0;
double b = 1.0;
double c = 1.0;
Eigen::Vector3d f, u;
f << 0, 0, -1;//-p;
u << 0, 1, 0;
for(int i = 0; i<n+1; ++i){
double t = static_cast<double>(i)/static_cast<double>(n);
Eigen::Vector3d p;
double x = 2*t*a - a;
double z = -b/(a*a) * x*x + b + c;
p << x, 0, z;
Vector9d cam;
cam << p, f, u;
pathPositions.push_back(cam);
}
omp_set_num_threads(8);
//reference value
double pe = errorFunc(pathPositions);
int i = 0;
do{
double pe_i = errorFunc(pathPositions);
// there is a data race
if(std::abs(pe-pe_i) > std::numeric_limits<double>::epsilon()){
std::cout << "Difference detected at iteration " << i << " diff:" << std::abs(pe-pe_i);
break;
}
i++;
}
while(true);
}
Output for running the example multiple times
Difference detected at iteration 13 diff:1.77636e-15
Difference detected at iteration 1 diff:1.77636e-15
Difference detected at iteration 0 diff:1.77636e-15
Difference detected at iteration 0 diff:1.77636e-15
Difference detected at iteration 0 diff:1.77636e-15
Difference detected at iteration 7 diff:1.77636e-15
Difference detected at iteration 8 diff:1.77636e-15
Difference detected at iteration 6 diff:1.77636e-15
As you can see, the difference is minor but it is there, and it doesn't always happen in the same iteration, which makes it non-deterministic. There is no output when I run it single-threaded; I usually end the program after letting it run for a couple of minutes. Therefore, it must have something to do with the parallelization.
I know I could use a reduction in this case, but in the original code in my project I have to compute other things in the parallel region as well, and I wanted to keep the minimal example as close to the original structure as possible.
I use OpenMP in other parts of my program too, where I am not sure if I have a data race as well, but the structure is similar (except that I use #pragma omp parallel for and the collapse statement). I have some variable or vector I write to, but it's always either in a critical region or each thread only writes to its own subset of the vector. Data that is used by multiple threads is always read-only. The read-only data is always a std::vector, a reference to a std::vector, or a numerical data type like int or double. The vectors always contain an Eigen type or double.
There are no race conditions. You are observing a natural consequence of the non-associativity of finite-precision floating-point arithmetic. (A + B) + C is not always the same as A + (B + C) when A, B, and C are finite-precision floating-point numbers, due to rounding errors. 1.77636E-15 x 100 (the absolute error when commenting out err = err / static_cast<double>(n);) in binary is:
0 | 01010101 | 00000000000000000001100
S exponent mantissa
As you can see, the error is in the least significant bits of the mantissa, hinting at it being the result of accumulation of rounding errors.
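A tiny standalone illustration of the non-associativity (the values are chosen purely for demonstration):
#include <cstdio>

int main()
{
    double a = 1e16, b = -1e16, c = 1.0;
    // (a + b) + c == 1, but a + (b + c) == 0, because b + c rounds back to -1e16
    std::printf("%g vs %g\n", (a + b) + c, a + (b + c));
    return 0;
}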
The problem occurs here:
#pragma omp parallel default(none) shared(pathPositions, err, n)
{
...
#pragma omp critical
{
err += err_private;
}
}
The final value of err depends on the order in which the different threads arrive at the critical section and their contributions get added, which is why sometimes you see discrepancy right away and sometimes it takes a couple of iterations.
To demonstrate that it is not an OpenMP problem per se, simply modify the function to read:
double errorFunc(const Vector9dList& pathPositions){
int n = pathPositions.size()-1;
double err = 0.0;
std::vector<double> errs(n+1);
#pragma omp parallel default(none) shared(pathPositions, errs, n)
{
#pragma omp for schedule(static)
for(int i = 0; i < n+1; ++i){
Vector9d derivX_i = derivPath(pathPositions, i);
errs[i] = derivX_i.norm();
}
}
for (int i = 0; i < n+1; ++i)
err += errs[i];
err = err / static_cast<double>(n);
return err;
}
This removes the dependency on how the sub-sums are computed and added together and the return value will always be the same no matter the number of OpenMP threads.
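For completeness, the concise way would be an OpenMP reduction clause, sketched below; note, however, that the OpenMP specification leaves the order in which the partial sums are combined unspecified, so it can still show the same last-bit variation between runs:
double errorFunc(const Vector9dList& pathPositions){
    int n = pathPositions.size()-1;
    double err = 0.0;
    // each thread accumulates privately; OpenMP combines the partial sums at the end
    #pragma omp parallel for schedule(static) reduction(+:err)
    for(int i = 0; i < n+1; ++i){
        err += derivPath(pathPositions, i).norm();
    }
    return err / static_cast<double>(n);
}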
Another version only fixes the order in which the per-thread err_private values are reduced into err:
double errorFunc(const Vector9dList& pathPositions){
int n = pathPositions.size()-1;
double err = 0.0;
std::vector<double> errs(omp_get_max_threads());
int nthreads;
#pragma omp parallel default(none) shared(pathPositions, errs, n, nthreads)
{
#pragma omp master
nthreads = omp_get_num_threads();
double err_private = 0;
#pragma omp for schedule(static)
for(int i = 0; i < n+1; ++i){
Vector9d derivX_i = derivPath(pathPositions, i);
double err_i = derivX_i.norm();
err_private = err_private + err_i;
}
errs[omp_get_thread_num()] = err_private;
}
for (int i = 0; i < nthreads; i++)
err += errs[i];
err = err / static_cast<double>(n);
return err;
}
Again, this code produces the same result each and every time as long as the number of threads is kept constant. The value may differ slightly (in the LSBs) with a different number of threads.
You can't easily get around such discrepancies; you can only learn to live with them and take precautions to minimise their influence on the rest of the computation. In fact, you are really lucky to stumble upon this in 2021, a year in the post-x87 era, when virtually all commodity FPUs use 64-bit IEEE 754 operands, and not in the 1990s, when x87 FPUs used 80-bit operands and the result of a repeated accumulation would depend on whether you kept the value in an FPU register the whole time or periodically stored it to and then loaded it back from memory, which rounds the 80-bit representation to a 64-bit one.
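One common precaution (not part of the discussion above, just an illustration) is compensated (Kahan) summation, which keeps the accumulated rounding error of a long sum down to a few ulps and therefore shrinks, though does not eliminate, order-dependent differences:
#include <vector>

// Kahan summation: c carries the low-order bits lost by each addition.
double kahan_sum(const std::vector<double>& xs){
    double sum = 0.0, c = 0.0;
    for (double x : xs){
        double y = x - c;
        double t = sum + y;
        c = (t - sum) - y;
        sum = t;
    }
    return sum;
}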
In the meantime, Goldberg's "What Every Computer Scientist Should Know About Floating-Point Arithmetic" is mandatory reading for anyone dealing with math on digital computers.
P.S. Although it is 2021 and we've been living for 21 years in the post-x87 era (started when Pentium 4 introduced the SSE2 instruction set back in 2000), if your CPU is an x86 one, you can still partake in the x87 madness. Just compile your code with -mfpmath=387 :)
I decided to post this after hours of trying out solutions to similar problems with no success. I'm writing a C++ MPI+OpenMP code where one MPI node (server) sends double arrays to other nodes. The server spawns threads in order to send to many clients simultaneously. The serial version (with MPI alone) works very well, and so does the single-threaded version. The multi-threaded (OpenMP) version keeps throwing a segmentation fault after a random number of iterations. The line printf("%d: cur_idx:%d, opt_k.k:%d, idx:%d, N:%d \n", tid, cur_idx,opt_k.k,idx,N) prints out the values at each iteration. The unpredictable part is the number of iterations (in one incident, the code ran successfully, only to throw a seg fault when I tried running it again immediately afterwards). It always completes with num_threads=1, however. getData returns a vector of structs, with the struct defined as (int, int, double*).
Here's the code
double *tStatistics=new double[8], tmp_time; // wall clock time
double SY, Sto;
int a_tasks=0, file_p=0;
vector<myDataType *> d = getData();
int idx=0; opt_k.k=1; opt_k.proc_files=0; opt_k.p=this->node_sz;
opt_k.proc_files=0; SY=0; Sto=0;
std::fill(header,header+SZ_HEADER,-1);
omp_set_num_threads(5);// for now
// parallel region
#pragma omp parallel default(none) shared(d,idx,SY,Sto) private(a_tasks)
{
double *myHeader=new double[SZ_HEADER];
std::fill(myHeader,myHeader+SZ_HEADER,0);
int tid = omp_get_thread_num(), cur_idx, cur_k; int N;
//#pragma omp atomic
N=d.size();
while (idx<N) {
// Assign tasks and fetch results where available
cur_idx=N;
#pragma omp critical(update__idx)
{
if (idx<N) {
cur_idx=idx; cur_k=opt_k.k; idx+=cur_k;
}
}
if (cur_idx<N) {
printf("%d: cur_idx:%d, opt_k.k:%d, idx:%d, N:%d \n", tid, cur_idx,opt_k.k,idx,N);
MPI_Recv(myHeader,SZ_HEADER,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
if(this->Stat->MPI_TAG == TAG_HEADER){ // serve tasks
while (cur_k && cur_idx<N) {
myHeader[1]=d[cur_idx]->nRows; myHeader[2]=d[cur_idx]->nCols; myHeader[3]=cur_idx; myHeader[9]=--cur_k;
MPI_Send(myHeader,SZ_HEADER,MPI_DOUBLE,(int)myHeader[4],TAG_DATA,MY_COMM_GRP);
MPI_Send(d[cur_idx]->data,d[cur_idx]->nRows*d[cur_idx]->nCols,MPI_DOUBLE,(int)myHeader[4],TAG_DATA,MY_COMM_GRP);
delete[] d[cur_idx]->data; ++cur_idx;
}
}else if(this->Stat->MPI_TAG == TAG_RESULT){ // collect results
printf("%d - 4\n", tid);
}
} //end if(loopmain)
} // end while(loopmain)
} // end parallel section
message("terminate slaves");
for(int i=1;i<node_sz;++i){ // terminate
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
MPI_Send(header,SZ_HEADER,MPI_DOUBLE,(int)header[4],TAG_TERMINATE,MY_COMM_GRP);
}
return 0;
The other matching function is
void CMpifun::slave2()
{
double *Data; vector<myDataType> dataQ; vector<hist_type> resQ;
char out_opt='b'; // irrelevant
myDataType *out_im = new myDataType; hist_type *out_hist; CLdp ldp;
int file_cnt=0; double tmp_t; //local variables
while (true) { // main while loop
header[4]=myRank; MPI_Send(header,SZ_HEADER,MPI_DOUBLE,MASTER,TAG_HEADER,MY_COMM_GRP);
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MASTER,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
if(this->Stat->MPI_TAG == TAG_TERMINATE) {
break;
}
//receive data
while(true) {
Data=new double[(int)(header[1]*header[2])];
MPI_Recv(Data,(int)(header[1]*header[2]),MPI_DOUBLE,MASTER,TAG_DATA,MY_COMM_GRP,this->Stat);
myDataType d; d.data=Data; d.nRows=(int)header[1]; d.nCols=(int)header[2];
//dataQ.push_back(d);
delete[] Data;
file_cnt++;
if ((int)header[9]) {
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MASTER,TAG_DATA,MY_COMM_GRP,this->Stat);
} else break;
}
} // end main while loop
message("terminating");
I've tried all the recommendations addressing similar problems. Here are my environment settings
export OMP_WAIT_POLICY="active"
export OMP_NUM_THREADS=4
export OMP_DYNAMIC=true # "true","false"
export OMP_STACKSIZE=200M #
export KMP_STACKSIZE=$OMP_STACKSIZE
ulimit -s unlimited
Many thanks to all that have chipped in. I'm becoming increasingly convinced that this has to do with memory allocation somehow, but I also don't understand why. I now have the following code:
double CMpifun::sendData2()
{
double *tStatistics=new double[8], tmp_time; // wall clock time
double SY, Sto; int a_tasks=0, file_p=0;
vector<myDataType *> d = getData();
int idx=0; opt_k.k=1; opt_k.proc_files=0; opt_k.p=this->node_sz;
opt_k.proc_files=0; SY=0; Sto=0;
std::fill(header,header+SZ_HEADER,-1);
omp_set_num_threads(224);// for now
// parallel region
#pragma omp parallel default(none) shared(idx,SY,Sto,d) private(a_tasks)
{
double *myHeader=new double[SZ_HEADER];
std::fill(myHeader,myHeader+SZ_HEADER,0);
int tid = omp_get_thread_num(), cur_idx, cur_k; int N;
//#pragma omp critical(update__idx)
{
N=d.size();
}
while (idx<N) {
// Assign tasks and fetch results where available
cur_idx=N;
#pragma omp critical(update__idx)
{
if (idx<N) {
cur_idx=idx; cur_k=opt_k.k; idx+=cur_k;
}
}
if (cur_idx<N) {
//printf("%d: cur_idx:%d, opt_k.k:%d, idx:%d, N:%d \n", tid, cur_idx,opt_k.k,idx,N);
printf("%d: cur_idx:%d, N:%d \n", tid, cur_idx,N);
//#pragma omp critical(update__idx)
{
MPI_Recv(myHeader,SZ_HEADER,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
}
if(this->Stat->MPI_TAG == TAG_HEADER){ // serve tasks
while (cur_k && cur_idx<N) {
//#pragma omp critical(update__idx)
{
myHeader[1]=d[cur_idx]->nRows; myHeader[2]=d[cur_idx]->nCols; myHeader[3]=cur_idx;
myHeader[9]=--cur_k;
MPI_Send(myHeader,SZ_HEADER,MPI_DOUBLE,(int)myHeader[4],TAG_DATA,MY_COMM_GRP);
MPI_Send(d[cur_idx]->data,d[cur_idx]->nRows*d[cur_idx]->nCols,MPI_DOUBLE,(int)myHeader[4],TAG_DATA,MY_COMM_GRP);
delete[] d[cur_idx]->data;
}
++cur_idx;
}
}else if(this->Stat->MPI_TAG == TAG_RESULT){ // collect results
printf("%d - 4\n", tid);
}
} //end if(loopmain)
} // end while(loopmain)
} // end parallel section
message("terminate slaves");
for(int i=1;i<node_sz;++i){ // terminate
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
MPI_Send(header,SZ_HEADER,MPI_DOUBLE,(int)header[4],TAG_TERMINATE,MY_COMM_GRP);
}
return 0;
}
And its pair:
void CMpifun::slave2()
{
double *Data; vector<myDataType> dataQ; vector<hist_type> resQ;
char out_opt='b'; // irrelevant
myDataType *out_im = new myDataType; hist_type *out_hist; CLdp ldp;
int file_cnt=0; double tmp_t; //local variables
while (true) { // main while loop
header[4]=myRank; MPI_Send(header,SZ_HEADER,MPI_DOUBLE,MASTER,TAG_HEADER,MY_COMM_GRP);
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MASTER,MPI_ANY_TAG,MY_COMM_GRP,this->Stat);
if(this->Stat->MPI_TAG == TAG_TERMINATE) {
break;
}
//receive data
while(true) {
Data=new double[(int)(header[1]*header[2])];
MPI_Recv(Data,(int)(header[1]*header[2]),MPI_DOUBLE,MASTER,TAG_DATA,MY_COMM_GRP,this->Stat);
myDataType *d=new myDataType; d->data=Data; d->nRows=(int)header[1]; d->nCols=(int)header[2];
dataQ.push_back(*d);
delete[] Data;
file_cnt++;
if ((int)header[9]) {
MPI_Recv(header,SZ_HEADER,MPI_DOUBLE,MASTER,TAG_DATA,MY_COMM_GRP,this->Stat);
} else break;
}
// Error section: Uncommenting next line causes seg fault
/*while (dataQ.size()) { // process data
out_hist = new hist_type();
myDataType d = dataQ.back(); dataQ.pop_back(); // critical section
ldp.process(d.data, d.nRows,d.nCols,out_opt,out_im, out_hist);
resQ.push_back(*out_hist); out_hist=0;
delete[] d.data; delete[] out_im->data;
}*/
//time_arr[1] /= file_cnt; time_arr[2] /= file_cnt;
//header[6]=time_arr[0]; header[7]=time_arr[1]; header[8]=time_arr[2];
//header[4]=myRank; header[9]=resQ.size();
} // end main while loop
The update is that if I uncomment the while loop in the slave2() function, the run doesn't complete. What I don't understand is that this function (slave2) has no OpenMP/threading whatsoever, yet it seems to have an effect. Furthermore, it doesn't share any variables with the threaded function. If I comment out the troublesome section then the code runs, irrespective of the number of threads I set (4, 18, 300). My OpenMP environment variables remain as before. The output of ulimit -a is as follows:
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 30473
max locked memory (kbytes, -l) 64
max memory size (kbytes, -m) unlimited
open files (-n) 1024
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 37355
cpu time (seconds, -t) unlimited
max user processes (-u) 30473
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
My constructor also calls MPI_Init_thread. To address @Tim's issue, the reason I used dynamic memory (with new) is to avoid bloating the stack memory, following a recommendation from a solution to a similar problem. Your assistance is appreciated.
The biggest problem I see is the many race conditions your code exhibits. The erratic behaviour you are seeing is no doubt caused by them. Remember that any time you access a shared variable in OpenMP (either declared via the shared clause or through global scope), you are accessing memory that can be read or written by any other thread in the team with no guarantees about ordering. For example,
N = d.size();
is a race condition because std::vector is not thread-safe. Because you are using OpenMP inside a class, any member variables are also considered "global" and thus not thread-safe by default.
As #tim18 noted, because you are calling MPI routines from within OpenMP parallel regions, you should initialize the MPI runtime to be thread-safe using the MPI_Init_thread function.
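A minimal sketch of what that looks like (argc/argv as passed to main; MPI_THREAD_MULTIPLE is needed because several threads call MPI_Send/MPI_Recv concurrently):
int provided = 0;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);   // instead of MPI_Init
if (provided < MPI_THREAD_MULTIPLE) {
    // the MPI library does not offer full thread support; bail out
    MPI_Abort(MPI_COMM_WORLD, 1);
}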
As an aside, your C++ needs some work. You should never use new or delete in user-level code. Use RAII to manage object lifetimes and wrap large data structures in thin objects that manage the lifetime for you. For example, this line
delete[] d[cur_idx]->data;
tells me that there are demons lurking in your code, waiting to be unleashed upon the unsuspecting user (which could be you!). Incidentally, this is also a race condition. Many demons!
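For illustration only (the actual definition of myDataType is not shown in the question), letting a std::vector own the buffer removes both the manual delete[] and the risk of freeing the same block twice:
#include <vector>

struct myDataType {
    int nRows = 0;
    int nCols = 0;
    std::vector<double> data;   // owns its memory; freed automatically when the struct dies
};

// Sending stays essentially the same, just pass the address of the first element
// (dest is a placeholder for the receiving rank):
// MPI_Send(d[cur_idx]->data.data(), d[cur_idx]->nRows * d[cur_idx]->nCols,
//          MPI_DOUBLE, dest, TAG_DATA, MY_COMM_GRP);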
I have a large device array inputValues of int64_t type. Every 32 elements of this array are sorted in an ascending order. I have an unsorted search array removeValues.
My intention is to look for all the elements in removeValues inside inputValues and mark them as -1. What is the most efficient method to achieve this? I am using a compute capability 3.5 CUDA device, if that helps.
I am not looking for a higher level solution, i.e. I do not want to use thrust or cub, but I want to write this using cuda kernels.
My initial approach was to load every 32 values into shared memory in a thread block. Every thread also loads a single value from removeValues and does an independent binary search on the shared-memory array. If found, the value is set accordingly using an if condition.
Wouldn't this approach involve a lot of bank conflicts and branch divergence? Do you think that branch divergence can be addressed by using ternary operators while implementing the binary search? Even if that is solved, how can bank conflict be eliminated? Since the size of sorted arrays is 32, would it be possible to implement a binary search using shuffle instructions? Would that help?
EDIT : I have added an example to show what I intend to achieve.
Let's say that inputValues is a vector where every 32 elements are sorted:
[2, 4, 6, ... , 64], [95, 97, ... , 157], [1, 3, ... , 63], [...]
The typical size for this array can range from 32*2 to 32*32. The values could range from 0 to INT64_MAX.
An example of removeValues would be:
[7, 75, 95, 106]
The typical size for this array could range from 1 to 1024.
After the operation removeValues would be:
[-1, 75, -1, 106]
The values in inputValues remain unchanged.
I would concur with the answer (now deleted) and comment by #harrism. Since I put some effort into the non-thrust approach, I'll present my findings.
I tried to naively implement a binary search at the warp-level using __shfl(), and then repeat that binary search across the data set, passing the data set through each 32-element group.
It's embarrassing, but my code is around 20x slower than thrust (in fact it may be worse than that if you do careful timing with nvprof).
I made the data sizes a little larger than what was proposed in the question, because the data sizes in the question are so small that the timing is in the dust.
Here's a fully worked example of 2 approaches:
What is approximately outlined in the question, i.e. create a binary search using warp shuffle that can search up to 32 elements against a 32-element ordered array. Repeat this process for as many 32-element ordered arrays as there are, passing the entire data set through each ordered array (hopefully you can start to see some of the inefficiency now.)
Use thrust, essentially the same as what is outlined by #harrism, i.e. sort the grouped data set, and then run a vectorized thrust::binary_search on that.
Here's the example:
$ cat t1030.cu
#include <stdio.h>
#include <assert.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
typedef long mytype;
const int gsize = 32;
const int nGRP = 512;
const int dsize = nGRP*gsize;//gsize*nGRP;
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
unsigned long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
template <typename T>
__device__ T my_shfl32(T val, unsigned lane){
return __shfl(val, lane);
}
template <typename T>
__device__ T my_shfl64(T val, unsigned lane){
T retval = val;
int2 t1 = *(reinterpret_cast<int2 *>(&retval));
t1.x = __shfl(t1.x, lane);
t1.y = __shfl(t1.y, lane);
retval = *(reinterpret_cast<T *>(&t1));
return retval;
}
template <typename T>
__device__ bool bsearch_shfl(T grp_val, T my_val){
int src_lane = gsize>>1;
bool return_val = false;
T test_val;
int shift = gsize>>2;
for (int i = 0; i <= gsize>>3; i++){
if (sizeof(T)==4){
test_val = my_shfl32(grp_val, src_lane);}
else if (sizeof(T)==8){
test_val = my_shfl64(grp_val, src_lane);}
else assert(0);
if (test_val == my_val) return_val = true;
src_lane += (((test_val<my_val)*2)-1)*shift;
shift>>=1;
assert ((src_lane < gsize)&&(src_lane > 0));}
if (sizeof(T)==4){
test_val = my_shfl32(grp_val, 0);}
else if (sizeof(T)==8){
test_val = my_shfl64(grp_val, 0);}
else assert(0);
if (test_val == my_val) return_val = true;
return return_val;
}
template <typename T>
__global__ void bsearch_grp(const T * __restrict__ search_grps, T *data){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
int tid = threadIdx.x;
if (idx < gsize*nGRP){
T grp_val = search_grps[idx];
while (tid < dsize){
T my_val = data[tid];
if (bsearch_shfl(grp_val, my_val)) data[tid] = -1;
tid += blockDim.x;}
}
}
int main(){
// data setup
assert(gsize == 32); //mandatory (warp size)
assert((dsize % 32)==0); //needed to preserve shfl capability
thrust::host_vector<mytype> grps(gsize*nGRP);
thrust::host_vector<mytype> data(dsize);
thrust::host_vector<mytype> result(dsize);
for (int i = 0; i < gsize*nGRP; i++) grps[i] = i;
for (int i = 0; i < dsize; i++) data[i] = i;
// method 1: individual shfl-based binary searches on each group
mytype *d_grps, *d_data;
cudaMalloc(&d_grps, gsize*nGRP*sizeof(mytype));
cudaMalloc(&d_data, dsize*sizeof(mytype));
cudaMemcpy(d_grps, &(grps[0]), gsize*nGRP*sizeof(mytype), cudaMemcpyHostToDevice);
cudaMemcpy(d_data, &(data[0]), dsize*sizeof(mytype), cudaMemcpyHostToDevice);
unsigned long long my_time = dtime_usec(0);
bsearch_grp<<<nGRP, gsize>>>(d_grps, d_data);
cudaDeviceSynchronize();
my_time = dtime_usec(my_time);
cudaMemcpy(&(result[0]), d_data, dsize*sizeof(mytype), cudaMemcpyDeviceToHost);
for (int i = 0; i < dsize; i++) if (result[i] != -1) {printf("method 1 mismatch at %d, was %d, should be -1\n", i, (int)(result[i])); return 1;}
printf("method 1 time: %fs\n", my_time/(float)USECPSEC);
// method 2: thrust sort, followed by thrust binary search
thrust::device_vector<mytype> t_grps = grps;
thrust::device_vector<mytype> t_data = data;
thrust::device_vector<bool> t_rslt(t_data.size());
my_time = dtime_usec(0);
thrust::sort(t_grps.begin(), t_grps.end());
thrust::binary_search(t_grps.begin(), t_grps.end(), t_data.begin(), t_data.end(), t_rslt.begin());
cudaDeviceSynchronize();
my_time = dtime_usec(my_time);
thrust::host_vector<bool> rslt = t_rslt;
for (int i = 0; i < dsize; i++) if (rslt[i] != true) {printf("method 2 mismatch at %d, was %d, should be 1\n", i, (int)(rslt[i])); return 1;}
printf("method 2 time: %fs\n", my_time/(float)USECPSEC);
// method 3: multiple thrust merges, followed by thrust binary search
return 0;
}
$ nvcc -O3 -arch=sm_35 t1030.cu -o t1030
$ ./t1030
method 1 time: 0.009075s
method 2 time: 0.000516s
$
I was running this on linux, CUDA 7.5, GT640 GPU. Obviously the performance will be different on different GPUs, but I'd be surprised if any GPU significantly closed the gap.
In short, you'd be well advised to use a well-tuned library like thrust or cub. If you don't like the monolithic nature of thrust, you could try cub. I don't know if cub has a binary search, but a single binary search against the whole sorted data set is not a difficult thing to write, and it's the smaller part of the time involved (for method 2 -- identifiable using nvprof or additional timing code).
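For reference, a plain device-side binary search over the fully sorted data set might look roughly like this (an untuned sketch, one thread per element of removeValues, using the same long element type as above):
// Search one key in an ascending array of length n.
__device__ bool bsearch_full(const long *sorted, int n, long key){
    int lo = 0, hi = n - 1;
    while (lo <= hi){
        int mid = lo + (hi - lo) / 2;
        if (sorted[mid] == key) return true;
        if (sorted[mid] <  key) lo = mid + 1;
        else                    hi = mid - 1;
    }
    return false;
}

// Mark every removeValues entry that is present in the sorted data as -1.
__global__ void mark_found(const long * __restrict__ sorted, int n, long *removeValues, int m){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < m && bsearch_full(sorted, n, removeValues[i]))
        removeValues[i] = -1;
}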
Since your 32-element grouped ranges are already sorted, I also pondered the idea of using multiple thrust::merge operations rather than a single sort. I'm not sure which would be faster, but since the thrust method is already so much faster than the 32-element shuffle search method, I think thrust (or cub) is the obvious choice.
I wrote the following code, which must search all possible combinations of two digits in a string whose length is specified:
#include <iostream>
#include <Windows.h>
int main ()
{
using namespace std;
cout<<"Enter length of array"<<endl;
int size;
cin>>size;
int * ps=new int [size];
for (int i=0; i<size; i++)
ps[i]=3;
int k=4;
SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
while (k>=0)
{
for (int bi=0; bi<size; bi++)
std::cout<<ps[bi];
std::cout<<std::endl;
int i=size-1;
if (ps[i]==3)
{
ps[i]=4;
continue;
}
if (ps[i]==4)
{
while (ps[i]==4)
{
ps[i]=3;
--i;
}
ps[i]=4;
if (i<k)
k--;
}
}
}
When the program was executing on Windows 7, I saw that the CPU load was only 10-15%. In order to make my code work faster, I decided to change the priority of my program to High. But when I did, there was no increase in speed and the CPU load stayed the same. Why doesn't the CPU load change? Is the statement SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS); incorrect? Or can this code not work any faster?
If your CPU is not working at its full capacity, it means that your application is not capable of using it, because of causes like I/O, sleeps, memory or other device throughput limitations.
Most probably, however, it means that your CPU has 2+ cores and your application is single-threaded. In this case you have to go through the process of parallelizing your application, which is often neither simple nor fast.
In the case of the code you posted, the most time-consuming operation is actually (most probably) printing the results. Remove the cout code and see for yourself how fast the code will run.
Increasing the priority of your program won't help much.
What you need to do is remove the cout from your calculations. Store your computations and output them afterwards.
As others have noted, it might also be that you are on a multi-core machine. In any case, removing any output from your computation loop is always a good first step towards using 100% of your machine's computation power for the computation itself, instead of wasting cycles on output.
std::vector<int> results;
results.reserve(1000); // this should ideally match the number of results you expect
while (k>=0)
{
for (int bi=0; bi<size; bi++){
results.push_back(ps[bi]);
}
int i=size-1;
if (ps[i]==3)
{
ps[i]=4;
continue;
}
if (ps[i]==4)
{
while (ps[i]==4)
{
ps[i]=3;
--i;
}
ps[i]=4;
if (i<k)
k--;
}
}
// now you can output your data
for(auto&& res : results){
cout << res << "\n"; // \n to not force flush
}
cout << endl; // now force flush
What's probably happening is that you're on a multi-core/multi-threaded machine and you're running on only one thread; the rest of the CPU power is just sitting idle. So you'll want to multi-thread your code. Look at Boost.Thread.
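For illustration, this is the general shape of splitting independent work across threads with the standard library (Boost.Thread looks almost identical); note that the enumeration in the question is sequential, so it would first have to be reformulated into independent ranges, which is the hard part:
#include <thread>
#include <vector>

// hypothetical worker: processes the half-open range [begin, end)
void work(long begin, long end)
{
    for (long i = begin; i < end; ++i) {
        // ... do the computation for item i ...
    }
}

int main()
{
    const long total = 1 << 24;                         // total amount of work (example value)
    unsigned nthreads = std::thread::hardware_concurrency();
    if (nthreads == 0) nthreads = 2;
    std::vector<std::thread> pool;
    for (unsigned t = 0; t < nthreads; ++t)
        pool.emplace_back(work, t * total / nthreads, (t + 1) * total / nthreads);
    for (auto& th : pool) th.join();
}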
I have a program that starts up, and within about 5 minutes the virtual size of the process is about 13 gigs. It runs on Linux, uses Boost, the GNU C++ library and various other third-party libraries.
After 5 minutes the size stays at 13 gigs and the RSS size stays steady at around 5 gigs.
I can't just run it in a debugger because at startup about 30 threads are started, each of which starts running its own code, that does various allocations. So stepping through and checking virtual memory at different parts of code at each breakpoint is not feasible.
I thought of changing program to start each thread one at a time to make it easier to track allocation of memory, but before doing this are there any good tools?
Valgrind is fairly slow, maybe tcmalloc could provide the info?
I would use valgrind (perhaps run it an entire night) or else use Boehm GC.
Alternatively, use the proc(5) filesystem to understand (e.g. through /proc/$pid/statm & /proc/$pid/maps) when a lot of memory gets allocated.
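For example (a sketch), the process can sample its own size from /proc/self/statm, whose first two fields are the total program size and the resident set size, both in pages:
#include <fstream>
#include <iostream>
#include <unistd.h>

void print_memory_usage()
{
    std::ifstream statm("/proc/self/statm");
    long size_pages = 0, rss_pages = 0;
    statm >> size_pages >> rss_pages;                 // total size and RSS, in pages
    const long page_kb = sysconf(_SC_PAGESIZE) / 1024;
    std::cout << "virt: " << size_pages * page_kb << " kB, "
              << "rss: "  << rss_pages  * page_kb << " kB\n";
}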
The most important thing is to find memory leaks. If the memory doesn't grow after startup, it is less of an issue.
Perhaps adding instance counters to each class might help (use atomic integers or mutexes to serialize them).
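For instance (the class name here is purely illustrative), a per-class atomic counter makes it easy to log which class accounts for the growth:
#include <atomic>

struct BigThing {
    static std::atomic<long> live;       // number of instances currently alive
    BigThing()               { ++live; }
    BigThing(const BigThing&){ ++live; } // count copies too
    ~BigThing()              { --live; }
};
std::atomic<long> BigThing::live{0};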
If the program's source code is big (e.g. a million of source lines) so that spending several days/weeks is worth the effort, perhaps customizing the GCC compiler (e.g. with MELT) might be relevant.
a std::set minibenchmark
You mentioned a big std::set based upon millions of rows.
#include <set>
#include <string>
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <unistd.h>
#include <time.h>
class MyElem
{
int _n;
char _s[16-sizeof(_n)];
public:
MyElem(int k) : _n(k)
{
snprintf (_s, sizeof(_s), "%d", k);
};
~MyElem()
{
_n=0;
memset(_s, 0, sizeof(_s));
};
int n() const
{
return _n;
};
std::string str() const
{
return std::string(_s);
};
bool less(const MyElem&x) const
{
return _n < x._n;
};
};
bool operator < (const MyElem& l, const MyElem& r)
{
return l.less(r);
}
typedef std::set<MyElem> MySet;
void bench (int cnt, MySet& set)
{
for (long i=0; i<(long)cnt*1024; i++)
set.insert(MyElem(i));
time_t now = 0;
time (&now);
set.insert (((now) & 0xfffffff) * 100);
}
int main (int argc, char** argv)
{
MySet s;
clock_t cstart, cend;
int c = argc>1?atoi(argv[1]):256;
if (c<16) c=16;
printf ("c=%d Kiter\n", c);
cstart = clock();
bench (c, s);
cend = clock();
int x = getpid();
char cmdbuf[64];
snprintf(cmdbuf, sizeof(cmdbuf), "pmap %d", x);
printf ("running %s\n", cmdbuf);
fflush (NULL);
system(cmdbuf);
putchar('\n');
printf ("at end c=%d Kiter clockdiff=%.2f millisec = %.f µs/Kiter\n",
c, (cend-cstart)*1.0e-3, (double)(cend-cstart)/c);
if (s.find(x) != s.end())
printf("set has %d\n", x);
else
printf("set don't contain %d\n", x);
return 0;
}
Notice the 16-byte sizeof(MyElem). On Debian/Sid/AMD64 with GCC 4.8.1 (Intel i3770K processor, 16 GB RAM), compiling that bench with g++ -Wall -O1 tset.cc -o ./tset-01:
With 32768 thousand iterations, i.e. 32M elements:
total 2109592K
(last line above given by pmap)
at end c=32768 Kiter clockdiff=16470.00 millisec = 503 µs/Kiter
Then the time as reported by my zsh:
./tset-01 32768 16.77s user 0.54s system 99% cpu 17.343 total
This is about 2.1 GB, so perhaps 64.3 bytes per element including set overhead (since sizeof(MyElem)==16, the set seems to have a non-negligible cost of perhaps 6 words per element).