I'm trying to convert this to AVX2:
// parallel arrays
int16_t* Nums = ...
int16_t* Capacities = ...
int** Data = ...
int* freePointer = ...
for (int i = 0; i < n; i++)
{
    if (Nums[i] == 0)
        Capacities[i] = 0;
    else
    {
        Data[i] = freePointer;
        freePointer += Capacities[i];
    }
}
But didn't get too far:
for (int i = 0; i < n; i += 4) // 4 as Data is 64 bits
{
    const __m256i nums = _mm256_loadu_si256((__m256i*)&Nums[i]);
    const __m256i bZeroes = _mm256_cmpeq_epi16(nums, ZEROES256);
    const __m256i capacities = _mm256_loadu_si256((__m256i*)&Capacities[i]);
    const __m256i zeroedCapacities = _mm256_andnot_si256(bZeroes, capacities);
    _mm256_storeu_si256((__m256i*)&Capacities[i], zeroedCapacities);
}
Stuck at the else branch, not sure how to add (prefix sum?...) Capacities into freePointer and assign the "serial" results to Data in the same 256-bit SIMD register.
My terminology is probably off, I hope the code gets across what I'm trying to accomplish.
lane0: freePointer
lane1: freePointer + Capacities[i + 0]
lane2: freePointer + Capacities[i + 0] + Capacities[i + 1]
lane3: freePointer + Capacities[i + 0] + Capacities[i + 1] + Capacities[i + 2]
Basically this is what I want to do in as few instructions as possible, if at all possible. Target is AVX2.
You can find a lot of details here: https://stackoverflow.com/a/69452433/5021064
Here you can plug in any type instead of T and U and see the resulting asm for x86 and ARM.
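For the else branch itself, one way to look at it: within each block of 4 elements, every Data pointer is the current freePointer plus an exclusive prefix sum of the capacities (scaled to bytes), exactly the lane0..lane3 layout above, and freePointer then advances by the block total. Below is a minimal, untested AVX2 sketch of that per-block step. The function and helper names are mine; it assumes 64-bit pointers, n being a multiple of 4, and that Capacities[i..i+3] have already been zeroed where Nums[] is zero (by a masking pass like the one above). Note one behavioural difference from the scalar loop: it stores Data[i] unconditionally, writing the unchanged freePointer for zeroed entries.
#include <immintrin.h>
#include <stdint.h>

// [0, v0, v1, v2]: shift 64-bit lanes up by one, inserting zero in lane 0.
static inline __m256i shiftLanesUp1(__m256i v)
{
    __m256i p = _mm256_permute4x64_epi64(v, _MM_SHUFFLE(2, 1, 0, 0));
    return _mm256_blend_epi32(p, _mm256_setzero_si256(), 0x03);
}

// [0, 0, v0, v1]: shift 64-bit lanes up by two, inserting zeros in lanes 0-1.
static inline __m256i shiftLanesUp2(__m256i v)
{
    __m256i p = _mm256_permute4x64_epi64(v, _MM_SHUFFLE(1, 0, 0, 0));
    return _mm256_blend_epi32(p, _mm256_setzero_si256(), 0x0F);
}

// Assign Data[i..i+3] from freePointer and the (already masked) capacities.
static inline void assignBlockOf4(const int16_t* Capacities, int** Data,
                                  int*& freePointer, int i)
{
    // Widen 4 capacities to 64 bits and scale by sizeof(int) for byte math.
    __m128i cap16 = _mm_loadl_epi64((const __m128i*)&Capacities[i]);
    __m256i bytes = _mm256_slli_epi64(_mm256_cvtepi16_epi64(cap16), 2);

    // Inclusive prefix sum across the 4 lanes (two Hillis-Steele steps).
    __m256i incl = _mm256_add_epi64(bytes, shiftLanesUp1(bytes));
    incl         = _mm256_add_epi64(incl,  shiftLanesUp2(incl));

    // Exclusive scan = inclusive scan shifted up one lane (zero in lane 0).
    __m256i excl = shiftLanesUp1(incl);

    // Add the current freePointer to every lane and store 4 pointers.
    __m256i base = _mm256_set1_epi64x((long long)(intptr_t)freePointer);
    _mm256_storeu_si256((__m256i*)&Data[i], _mm256_add_epi64(base, excl));

    // Advance freePointer by the block total (lane 3 of the inclusive scan).
    freePointer = (int*)((char*)freePointer + _mm256_extract_epi64(incl, 3));
}

// usage: for (int i = 0; i < n; i += 4) assignBlockOf4(Capacities, Data, freePointer, i);
The serial dependency across blocks remains: each block needs the freePointer produced by the previous one, so the scan only removes the serial work inside a block of four pointers.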
I have this code snippet I came across, and I'm trying to use OpenMP to make it run faster than the original version. However, it takes about the same amount of time as the original; the multithreading doesn't seem to be making it any faster, and the timings are still the same. Why is that, and what can I do to make it run faster?:
void sobel(unsigned char *data_out,
           unsigned char *data_in, unsigned height,
           unsigned width)
{
    /* Sobel matrices for convolution */
    int sobelv[3][3] = { {-1, -2, -1}, {0, 0, 0}, {1, 2, 1} };
    int sobelh[3][3] = { {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1} };
    unsigned int size, i, j;
    int lay;
    size = height * width;
#ifdef OPENMP
#pragma omp parallel for collapse(64) shared(data_in, data_out, sobelv, sobelh, size) private(i, j, lay)
#endif
    for (lay = 0; lay < 3; lay++) {
        for (i = 1; i < height - 1; ++i) {
            for (j = 1; j < width - 1; j++) {
                int sumh, sumv;
                int k = -1, l = -1;
                sumh = 0;
                sumv = 0;
                /* Convolution part */
                for (k = -1; k < 2; k++)
                    for (l = -1; l < 2; l++) {
                        sumh = sumh + sobelh[k + 1][l + 1] * (int) data_in[lay * size + (i + k) * width + (j + l)];
                        sumv = sumv + sobelv[k + 1][l + 1] * (int) data_in[lay * size + (i + k) * width + (j + l)];
                    }
                int temp = abs(sumh / 8) + abs(sumv / 8);
                data_out[lay * size + i * width + j] = (temp > 255 ? 255 : temp);
            }
        }
    }
}
The main function simply calls this function like this:
sobel(data_out, data_in, header.height, header.width);
any help would be appreciated!! :)
The best optimization you can apply is to vectorize the code. Compilers can often auto-vectorize code when it is sufficiently simple, but this one is too complex for most compilers (including GCC and Clang) to vectorize.
Manual vectorization is cumbersome, error-prone and often makes the code (more) dependent on a specific architecture (e.g. x86-64). However, you can help the compiler generate vectorized code for you. To do that, it is better to:
avoid mixing signed/unsigned types and types of different sizes;
use the smallest possible types fitting your needs;
avoid loops and conditions in the vectorized loop;
access data contiguously;
avoid integer multiplication/division with small types (on x86-64 and/or with some compilers);
prefer using local short-scoped variables when this is possible;
enable advanced optimizations like -O3 for GCC/Clang, possibly coupled with -mavx2 if your target platform supports the AVX-2 instruction set, or with -march=native if your target platform is the one where the program is built;
be careful about aliasing (possibly using temporary arrays, strict aliasing rules, memcpy calls, restrict compiler extensions, etc.) [thanks to #Laci].
You can check the generated assembly code to see if the code is vectorized or not.
Moreover, using collapse(2) should be enough here to get a good speed-up. collapse(3) can introduce some unwanted overhead because the innermost loop would also be split amongst threads. collapse(64) is not correct (the collapse argument cannot be bigger than the number of nested loops).
Here is the resulting untested code:
#include <cmath>
#include <cstdlib> /* integer abs */

void sobel(unsigned char *data_out,
           unsigned char *data_in, int height,
           int width)
{
    const int size = height * width;
#ifdef OPENMP
#pragma omp parallel for collapse(2) shared(data_in,data_out,size)
#endif
    for (int lay = 0; lay < 3; lay++)
    {
        for (int i = 1; i < height - 1; ++i)
        {
            for (int j = 1; j < width - 1; j++)
            {
                short a11 = data_in[lay * size + (i-1) * width + (j-1)];
                short a12 = data_in[lay * size + (i-1) * width + j];
                short a13 = data_in[lay * size + (i-1) * width + (j+1)];
                short a21 = data_in[lay * size + i * width + (j-1)];
                short a23 = data_in[lay * size + i * width + (j+1)];
                short a31 = data_in[lay * size + (i+1) * width + (j-1)];
                short a32 = data_in[lay * size + (i+1) * width + j];
                short a33 = data_in[lay * size + (i+1) * width + (j+1)];
                short sumh = a13 - a11 + (a23 - a21) + (a23 - a21) + a33 - a31;
                short sumv = a31 + a32 + a32 + a33 - (a11 + a12 + a12 + a13);
                short temp = (abs(sumh) >> 3) + (abs(sumv) >> 3);
                data_out[lay * size + i * width + j] = (temp > 255 ? 255 : temp);
            }
        }
    }
}
I expect the code to be several times faster (especially in sequential) -- typically about 10 times faster with AVX-2, since the processor can work on 16 values at once (despite a bit more work related to SIMD instructions).
Another possible optimization is called register blocking. The idea is to change the loop so that you work on small fixed-size tiles (e.g. 2x2 or 4x2 SIMD values). This should reduce the number of L1-cache loads and the number of char-to-short/short-to-char conversions to perform. However, it is hard to get the compiler to apply this optimization correctly on such code. It is probably better to use SIMD intrinsics and do the register blocking yourself if performance is critical (see the sketch below).
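To illustrate the idea, here is a minimal, untested scalar sketch of a 2x1 row tile (two output rows per iteration) built on the short-based version above; it would replace the i/j loops inside the lay loop, the OpenMP pragma and the handling of an odd number of rows are omitted, and the row-pointer names are mine. The point is only that the two output rows share the loads of the two middle input rows, roughly halving the loads and char-to-short conversions per output pixel.
/* Sketch only: replaces the i/j loops inside the lay loop of the version above. */
for (int i = 1; i + 1 < height - 1; i += 2)
{
    const unsigned char *r0 = data_in + lay * size + (i - 1) * width;
    const unsigned char *r1 = r0 + width;   /* shared by both output rows */
    const unsigned char *r2 = r1 + width;   /* shared by both output rows */
    const unsigned char *r3 = r2 + width;
    for (int j = 1; j < width - 1; j++)
    {
        /* Output row i uses r0, r1, r2. */
        short sumh0 = (short)(r0[j+1] - r0[j-1]) + 2 * (short)(r1[j+1] - r1[j-1])
                    + (short)(r2[j+1] - r2[j-1]);
        short sumv0 = (short)(r2[j-1] + 2 * r2[j] + r2[j+1])
                    - (short)(r0[j-1] + 2 * r0[j] + r0[j+1]);
        /* Output row i+1 uses r1, r2, r3: r1 and r2 are reused. */
        short sumh1 = (short)(r1[j+1] - r1[j-1]) + 2 * (short)(r2[j+1] - r2[j-1])
                    + (short)(r3[j+1] - r3[j-1]);
        short sumv1 = (short)(r3[j-1] + 2 * r3[j] + r3[j+1])
                    - (short)(r1[j-1] + 2 * r1[j] + r1[j+1]);
        short t0 = (abs(sumh0) >> 3) + (abs(sumv0) >> 3);
        short t1 = (abs(sumh1) >> 3) + (abs(sumv1) >> 3);
        data_out[lay * size + i * width + j]       = (t0 > 255 ? 255 : t0);
        data_out[lay * size + (i + 1) * width + j] = (t1 > 255 ? 255 : t1);
    }
}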
The code snippet below is running slower than expected. The authors of this paper http://www.cvlibs.net/publications/Geiger2010ACCV.pdf compute support_points of a 900x700 image in 118 ms. I have implemented their algorithm below in Halide.
In my algorithm, the nested for loops over length and width iterate over xi and yi, which are points in output_x and output_y (defined previously but not shown below). Over each iteration of the nested for loops, a vector top_k is computed and pushed_back into support_points.
Computing this pipeline even for left_buffer.width() == 20 and left_buffer.height() == 20 takes 500 ms. Thus this implementation is several orders of magnitude slower:
...
int k = 4; // # of support points
vector<pair<Expr, Expr>> support_points(k * left_buffer.width() * left_buffer.height());
// Calculate support pixel for each
Func support("support");
support(x, y) = Tuple(i32(0), i32(0), f32(0));
for (int yi = 0; yi < left_buffer.height(); yi++) {
for (int xi = 0; xi < left_buffer.width() - 2; xi++) {
bool left = xi < left_buffer.width() / 4;
bool center = (xi >= left_buffer.width() / 4 && xi < left_buffer.width() * 3 / 4);
bool right = xi >= left_buffer.width() * 3 / 4;
vector <pair<Expr, Expr>> scan_range;
pair <Expr, Expr> scan_height(0, (Expr) left_buffer.height());
pair <Expr, Expr> scan_width;
int which_pred = 0;
if (left) {
scan_width = make_pair((Expr) 0, (Expr) left_buffer.width() / 2);
which_pred = 0;
}
else if (center) {
scan_width = make_pair((Expr) xi - left_buffer.width() / 4, (Expr) left_buffer.width() / 2);
which_pred = 1;
}
else if (right) {
scan_width = make_pair((Expr) left_buffer.width() / 2, (Expr) left_buffer.width() / 2);
which_pred = 2;
}
else {
cout<<"Error"<<endl;
}
scan_range = {scan_width, scan_height};
// cout<<"xi "<<xi<<endl;
// cout<<"yi "<<yi<<endl;
// cout<<"scan_width= "<<scan_width.first<<" "<<scan_width.second<<endl;
// cout<<"scan_height= "<<scan_height.first<<" "<<scan_height.second<<endl;
RDom scanner(scan_range);
Expr predicate[3] = {scanner.x != xi && scanner.y != yi, scanner.x != 0 && scanner.y != 0, scanner.x != xi && scanner.y != yi};
scanner.where(predicate[which_pred]);
std::vector<Expr> top_k(k * 3);
for (int i = 0; i < k; i++) { // say we want top 4 support points.
top_k[3*i] = 10000.0f;
top_k[3*i+1] = 0;
top_k[3*i+2] = 0;
}
Func argmin("argmin");
argmin() = Tuple(top_k);
Expr next_val = abs(output_x(xi, yi) - output_x(scanner.x, scanner.y)) + abs(output_y(xi, yi) - output_y(scanner.x, scanner.y));
Expr next_x = scanner.x;
Expr next_y = scanner.y;
top_k = Tuple(argmin()).as_vector();
// Insert a single element into a sorted list without actually branching
top_k.push_back(next_val);
top_k.push_back(next_x);
top_k.push_back(next_y);
for (int i = k; i > 0; i--) {
Expr prev_val = top_k[(i-1)*3];
Expr prev_x = top_k[(i-1)*3 + 1];
Expr prev_y = top_k[(i-1)*3 + 2];
Expr should_swap = top_k[i*3] < prev_val;
top_k[(i-1)*3] = select(should_swap, top_k[i*3], prev_val);
top_k[(i-1)*3 + 1] = select(should_swap, top_k[i*3 + 1], prev_x);
top_k[(i-1)*3 + 2] = select(should_swap, top_k[i*3 + 2], prev_y);
top_k[i*3] = select(should_swap, prev_val, top_k[i*3]);
top_k[i*3 + 1] = select(should_swap, prev_x, top_k[i*3 + 1]);
top_k[i*3 + 2] = select(should_swap, prev_y, top_k[i*3 + 2]);
}
// Discard the k+1th element
top_k.pop_back(); top_k.pop_back(); top_k.pop_back();
bool cond = xi == 10 && yi == 10;
cout << xi << " "<< yi << " " << cond << endl;
Expr e = argmin()[0];
e = print_when(cond, e, "<- argmin() val");
argmin() = Tuple(top_k);
argmin.compute_root();
// argmin.trace_stores();
argmin.compile_to_lowered_stmt("argmin.html", {}, HTML);
Realization real = argmin.realize();
for (int i = 0; i < k; i++) {
pair<Expr, Expr> c(top_k[3*i+1], top_k[3*i+2]);
support_points.push_back(c);
}
}
}
double t2 = current_time();
cout<<(t2-t1)/100<<" ms"<<endl;
cout<<"executed"<<endl;
}
How can I improve efficiency?
It looks like you may be getting a bit confused between the stages of your program. With Halide, your C++ code that works with Exprs, Funcs, etc. is not actually evaluating anything; it is constructing a Halide program, which you can then compile and run. That means that the C++ for loops, std::vectors, etc. that you're using are all happening at program construction time (essentially compile time) of the Halide program. You might think of it like C++ templates, which evaluate at compile time, vs. the C++ code they construct, which evaluates at the run time of your program: the C++ code you're writing here is equivalent to template code with respect to the Halide program that you are building.
This gets a bit more confusing with the ability to JIT-compile and evaluate a Halide program inside of the same C++ program that builds it (realize).
As it is, I suspect the above program doesn't actually compute the results you expect it to. After the double for loop, what are you hoping to do with support_points? What you have built there is a big array of expressions (pieces of code), not concrete values. And you are JIT-compiling and running a new piece of Halide code each time around those loops (i.e., for every pixel).
I think you may have an easier time understanding what you are building if you stick to ahead-of-time compilation (compile_to_file or generators) for now. That makes the two stages—Halide code generation time, and the runtime of that code inside a separate program—very distinct.
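As a small, self-contained illustration of those two stages (not the algorithm from the question), here is a minimal JIT sketch; it assumes a recent Halide where realize takes a list of sizes:
#include "Halide.h"
using namespace Halide;

int main() {
    Var x("x"), y("y");
    Func blur("blur");
    // Stage 1: this C++ code only builds the Halide pipeline; nothing runs yet.
    blur(x, y) = (x + y) / 2;
    // Stage 2: realize() JIT-compiles the pipeline once, then executes it.
    Buffer<int> out = blur.realize({8, 8});
    return 0;
}
In the code from the question, everything analogous to stage 1 (building argmin, the RDom, the selects) happens inside the per-pixel C++ loops, and realize() compiles and runs a fresh pipeline every time, which is where the time goes.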
I am trying to do an element-wise multiply for my own op in TensorFlow + Eigen. This is a simple version of what I am currently using:
// eg) temp_res_shape = [3, 8], temp_b_shape = [1, 8]
// allocate Tensorflow tensors
Tensor temp_res;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<complex64>::v(),
temp_res_shape, &temp_res));
Tensor temp_a;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<complex64>::v(),
temp_res_shape, &temp_a));
Tensor temp_b;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<complex64>::v(),
temp_b_shape, &temp_b));
// These actually come from different places but the type/shape is right
// ie) I want to do this within Eigen::TensorMap if possible
auto mult_res = Tensor(temp_res).flat_inner_dims<complex64, 2>();
auto a_in = Tensor(temp_a).flat_inner_dims<complex64, 2>();
auto b_in = Tensor(temp_b).flat_inner_dims<complex64, 2>();
// convert to an array
auto a_data = a_in.data();
auto b_data = b_in.data();
auto res = mult_res.data();
for ( int i = 0; i < 3; i++ ) {
    for ( int j = 0; j < 8; j++ )
        res[ i*8 + j ] = a_data[ i*8 + j ] * b_data[j];
}
This is obviously the wrong way to do it but I couldn't get anything else working. I feel like it should be something of the form:
mult_res.device( device ) = a_in * b_in;
But that does the matrix multiply. I couldn't figure out how to convert b_in to a diagonal matrix to multiply that way either :/
I feel like this should be trivial but I can't work it out (my cpp is not great). Thanks in advance!
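For what it's worth, here is a minimal, untested sketch of the broadcast-then-multiply form, assuming the shapes from the comment above (a_in is [3, 8], b_in is [1, 8]) and the same device variable as in the snippet; with Eigen tensor expressions, operator* is already element-wise, so the missing piece is only broadcasting b_in along the first dimension:
// b_in has shape [1, 8]; repeat it 3 times along dimension 0 so it matches
// a_in's shape [3, 8], then multiply element-wise on the op's device.
Eigen::array<Eigen::Index, 2> bcast = {3, 1};
mult_res.device(device) = a_in * b_in.broadcast(bcast);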
I just started learning CUDA and I'm having an issue converting some code to use shared memory, and another version to use constant memory, for comparison purposes.
__global__ void CUDA(int *device_array_Image1, int *device_array_Image2, int *device_array_Image3,
                     int *device_array_kernel,
                     int *device_array_Result1, int *device_array_Result2, int *device_array_Result3)
{
    int i = blockIdx.x;
    int j = threadIdx.x;
    int ArraySum1 = 0; // set sum = 0 initially
    int ArraySum2 = 0;
    int ArraySum3 = 0;
    for (int N = -1; N <= 1; N++)
    {
        for (int M = -1; M <= 1; M++)
        {
            ArraySum1 = ArraySum1 + (device_array_Image1[(i + N) * Image_Size + (j + M)] * device_array_kernel[(N + 1) * 3 + (M + 1)]);
            ArraySum2 = ArraySum2 + (device_array_Image2[(i + N) * Image_Size + (j + M)] * device_array_kernel[(N + 1) * 3 + (M + 1)]);
            ArraySum3 = ArraySum3 + (device_array_Image3[(i + N) * Image_Size + (j + M)] * device_array_kernel[(N + 1) * 3 + (M + 1)]);
        }
    }
    device_array_Result1[i * Image_Size + j] = ArraySum1;
    device_array_Result2[i * Image_Size + j] = ArraySum2;
    device_array_Result3[i * Image_Size + j] = ArraySum3;
}
This is what I have done so far but I'm having an issue understanding the shared and constant memory so if anyone could help with the code or point me in the right direction I'd be really grateful.
Thanks for any help.
a) Shared memory: this memory is visible only to the threads within a block. It is useful when the block accesses the same data more than once, so it will not help for something like squaring a number, but it does help for matrix multiplication.
b) Constant memory: data is stored in device global memory and is read through each multiprocessor's constant cache. There is 64 KB of constant memory, and each multiprocessor has an 8 KB constant cache. Data is broadcast to all threads in a warp, so if all the threads in a warp request the same value, that value is delivered in a single cycle.
The links below helped me understand constant and shared memory:
1) http://cuda-programming.blogspot.in/2013/01/what-is-constant-memory-in-cuda.html
2) http://cuda-programming.blogspot.in/2013/01/shared-memory-and-synchronization-in.html
3) https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
Please refer to these links.
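To make the constant-memory part concrete for the kernel in the question, here is a minimal, untested sketch; it assumes the same Image_Size constant and launch configuration as the original code, shows only one image/result pair, and omits error checking (the names are mine):
// 3x3 convolution coefficients in constant memory: read-only in the kernel,
// cached, and broadcast when all threads of a warp read the same element
// (which is the case here, since the index does not depend on threadIdx).
__constant__ int const_kernel[9];

// Host side, once before the kernel launch:
//   cudaMemcpyToSymbol(const_kernel, host_kernel, 9 * sizeof(int));

__global__ void ConvolveConst(const int *image, int *result)
{
    int i = blockIdx.x;
    int j = threadIdx.x;
    int sum = 0;
    for (int N = -1; N <= 1; N++)
        for (int M = -1; M <= 1; M++)
            sum += image[(i + N) * Image_Size + (j + M)] *
                   const_kernel[(N + 1) * 3 + (M + 1)];
    result[i * Image_Size + j] = sum;
}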
I have an unsigned short dst[16][16] matrix and a larger unsigned char src[m][n] matrix.
Now I have to access the src matrix and add a 16x16 submatrix of it to dst, using SSE2 or SSE3.
In an older implementation, I was sure that my summed values were never greater than 256, so I could do this:
for (int row = 0; row < 16; ++row)
{
    __m128i subMat = _mm_lddqu_si128(reinterpret_cast<const __m128i*>(src));
    dst[row] = _mm_add_epi8(dst[row], subMat);
    src += W; // Step to the next row I need to add
}
where W is an offset to reach the desired rows. This code works, but now my values in src are larger and the sums can exceed 255, so I need to store them as ushort.
I've tried the following, but it doesn't work.
for (int row = 0; row < 16; ++row)
{
    __m128i subMat = _mm_lddqu_si128(reinterpret_cast<const __m128i*>(src));
    dst[row] = _mm_add_epi16(dst[row], subMat);
    src += W; // Step to the next row I need to add
}
How can I solve this problem?
EDIT
Thank you Paul, but I think your offsets are wrong. I've tried your solution and it seems that the submatrix's rows are added to the wrong rows of dst. I hope the right solution is this:
for (int row = 0; row < 32; row += 2)
{
    __m128i subMat = _mm_lddqu_si128(reinterpret_cast<const __m128i*>(src));
    __m128i subMatLo = _mm_unpacklo_epi8(subMat, _mm_set1_epi8(0));
    __m128i subMatHi = _mm_unpackhi_epi8(subMat, _mm_set1_epi8(0));
    dst[row] = _mm_add_epi16(dst[row], subMatLo);
    dst[row + 1] = _mm_add_epi16(dst[row + 1], subMatHi);
    src += W;
}
You need to unpack your vector of 16 x 8 bit values into two vectors of 8 x 16 bit values and then add both these vectors to your destination:
for (int row = 0; row < 16; ++row)
{
    __m128i subMat = _mm_lddqu_si128(reinterpret_cast<const __m128i*>(src));
    __m128i subMatLo = _mm_unpacklo_epi8(subMat, _mm_set1_epi8(0));
    __m128i subMatHi = _mm_unpackhi_epi8(subMat, _mm_set1_epi8(0));
    dst[row] = _mm_add_epi16(dst[row], subMatLo);
    dst[row + 1] = _mm_add_epi16(dst[row + 1], subMatHi);
    src += W;
}