Why is my SSE code slower than native C++ code?

First of all, I am new to SSE. I decided to accelerate my code, but it seems that it runs slower than my native code.
This is an example that calculates the sum of squares. On my Intel i7-6700HQ it takes 0.43 s for the native code and 0.52 s for the SSE version. So where is the bottleneck?
#include <chrono>      // high_resolution_clock, duration, duration_cast
#include <iostream>
#include <xmmintrin.h> // SSE intrinsics (__m128, _mm_load_ps, ...)

using namespace std::chrono;

inline float squared_sum(const float x, const float y)
{
    return x * x + y * y;
}

#define USE_SIMD

void calculations()
{
    high_resolution_clock::time_point t1, t2;
    int result_v = 0;
    t1 = high_resolution_clock::now();
    alignas(16) float data_x[4];
    alignas(16) float data_y[4];
    alignas(16) float result[4];
    __m128 v_x, v_y, v_res;
    for (int y = 0; y < 5120; y++)
    {
        data_y[0] = y;
        data_y[1] = y + 1;
        data_y[2] = y + 2;
        data_y[3] = y + 3;
        for (int x = 0; x < 5120; x++)
        {
            data_x[0] = x;
            data_x[1] = x + 1;
            data_x[2] = x + 2;
            data_x[3] = x + 3;
#ifdef USE_SIMD
            v_x = _mm_load_ps(data_x);
            v_y = _mm_load_ps(data_y);
            v_x = _mm_mul_ps(v_x, v_x);
            v_y = _mm_mul_ps(v_y, v_y);
            v_res = _mm_add_ps(v_x, v_y);
            _mm_store_ps(result, v_res);
#else
            result[0] = squared_sum(data_x[0], data_y[0]);
            result[1] = squared_sum(data_x[1], data_y[1]);
            result[2] = squared_sum(data_x[2], data_y[2]);
            result[3] = squared_sum(data_x[3], data_y[3]);
#endif
            result_v += (int)(result[0] + result[1] + result[2] + result[3]);
        }
    }
    t2 = high_resolution_clock::now();
    duration<double> time_span1 = duration_cast<duration<double>>(t2 - t1);
    std::cout << "Exec time:\t" << time_span1.count() << " s\n";
}
UPDATE: fixed code according to comments.
I am using Visual Studio 2017. Compiled for x64.
Optimization: Maximum Optimization (Favor Speed) (/O2);
Inline Function Expansion: Any Suitable (/Ob2);
Favor Size or Speed: Favor fast code (/Ot);
Omit Frame Pointers: Yes (/Oy)
Conclusion
Compilers already generate well-optimized code, so nowadays it is hard to beat them by hand. The one thing you can still do to accelerate the code further is parallelization.
Thanks for the answers. They are mainly the same, so I accepted Søren V. Poulsen's answer because it was the first one.
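For example, one way I could parallelize it is to split the outer loop of calculations() across threads with std::thread; the thread count, the strided chunking, and the simplified scalar kernel below are just illustrative, not code from any of the answers:
#include <algorithm>
#include <numeric>
#include <thread>
#include <vector>

// Illustrative only: each thread handles a strided subset of the y values and
// accumulates into its own slot, so no synchronization is needed in the hot loop.
int parallel_calculations()
{
    const unsigned n_threads = std::max(1u, std::thread::hardware_concurrency());
    std::vector<long long> partial(n_threads, 0);
    std::vector<std::thread> workers;
    for (unsigned t = 0; t < n_threads; ++t)
    {
        workers.emplace_back([t, n_threads, &partial] {
            long long local = 0;
            for (int y = t; y < 5120; y += n_threads)
                for (int x = 0; x < 5120; ++x)
                    local += (int)(float(x) * x + float(y) * y); // simplified squared-sum kernel
            partial[t] = local;
        });
    }
    for (auto &w : workers)
        w.join();
    return (int)std::accumulate(partial.begin(), partial.end(), 0LL); // truncated like the original int accumulator
}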

Modern compilers are incredible machines and will already use SIMD instructions where possible (given the correct compilation flags).
One general strategy for determining what the compiler is doing is to look at the disassembly of your code. If you don't want to do it on your own machine, you can use an online service like Godbolt: https://gcc.godbolt.org/z/T6GooQ.
One tip is to avoid std::atomic for storing intermediate results like you are doing here. Atomic values are used to ensure synchronization between threads, and this can come at a very high computational cost, relatively speaking.
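For example (a minimal sketch; I'm assuming the accumulator in your earlier revision was a std::atomic<int>, and the function name and parameters here are made up for illustration): accumulate into a plain local and touch the atomic only once at the end.
#include <atomic>

std::atomic<int> shared_result{0};

void accumulate_chunk(const float *results, int count)
{
    int local = 0;                      // plain int: no per-iteration synchronization cost
    for (int i = 0; i < count; ++i)
        local += (int)results[i];       // the hot loop only touches the local
    shared_result.fetch_add(local, std::memory_order_relaxed); // one atomic update at the end
}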

Looking through the assembly the compiler generates for your code without the SIMD intrinsics (the scalar branch):
calculations():
pxor xmm2, xmm2
xor edx, edx
movdqa xmm0, XMMWORD PTR .LC0[rip]
movdqa xmm11, XMMWORD PTR .LC1[rip]
movdqa xmm9, XMMWORD PTR .LC2[rip]
movdqa xmm8, XMMWORD PTR .LC3[rip]
movdqa xmm7, XMMWORD PTR .LC4[rip]
.L4:
movdqa xmm5, xmm0
movdqa xmm4, xmm0
cvtdq2ps xmm6, xmm0
movdqa xmm10, xmm0
paddd xmm0, xmm7
cvtdq2ps xmm3, xmm0
paddd xmm5, xmm9
paddd xmm4, xmm8
cvtdq2ps xmm5, xmm5
cvtdq2ps xmm4, xmm4
mulps xmm6, xmm6
mov eax, 5120
paddd xmm10, xmm11
mulps xmm5, xmm5
mulps xmm4, xmm4
mulps xmm3, xmm3
pxor xmm12, xmm12
.L2:
movdqa xmm1, xmm12
cvtdq2ps xmm14, xmm12
mulps xmm14, xmm14
movdqa xmm13, xmm12
paddd xmm12, xmm7
cvtdq2ps xmm12, xmm12
paddd xmm1, xmm9
cvtdq2ps xmm0, xmm1
mulps xmm0, xmm0
paddd xmm13, xmm8
cvtdq2ps xmm13, xmm13
sub eax, 1
mulps xmm13, xmm13
addps xmm14, xmm6
mulps xmm12, xmm12
addps xmm0, xmm5
addps xmm13, xmm4
addps xmm12, xmm3
addps xmm0, xmm14
addps xmm0, xmm13
addps xmm0, xmm12
movdqa xmm12, xmm1
cvttps2dq xmm0, xmm0
paddd xmm2, xmm0
jne .L2
add edx, 1
movdqa xmm0, xmm10
cmp edx, 1280
jne .L4
movdqa xmm0, xmm2
psrldq xmm0, 8
paddd xmm2, xmm0
movdqa xmm0, xmm2
psrldq xmm0, 4
paddd xmm2, xmm0
movd eax, xmm2
ret
main:
xor eax, eax
ret
_GLOBAL__sub_I_calculations():
sub rsp, 8
mov edi, OFFSET FLAT:_ZStL8__ioinit
call std::ios_base::Init::Init() [complete object constructor]
mov edx, OFFSET FLAT:__dso_handle
mov esi, OFFSET FLAT:_ZStL8__ioinit
mov edi, OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
add rsp, 8
jmp __cxa_atexit
.LC0:
.long 0
.long 1
.long 2
.long 3
.LC1:
.long 4
.long 4
.long 4
.long 4
.LC2:
.long 1
.long 1
.long 1
.long 1
.LC3:
.long 2
.long 2
.long 2
.long 2
.LC4:
.long 3
.long 3
.long 3
.long 3
Your SIMD code generates:
calculations():
pxor xmm5, xmm5
xor eax, eax
mov r8d, 1
movabs rdi, -4294967296
cvtsi2ss xmm5, eax
.L4:
mov r9d, r8d
mov esi, 1
movd edx, xmm5
pxor xmm5, xmm5
pxor xmm4, xmm4
mov ecx, edx
mov rdx, QWORD PTR [rsp-24]
cvtsi2ss xmm5, r8d
add r8d, 1
cvtsi2ss xmm4, r8d
and rdx, rdi
or rdx, rcx
pxor xmm2, xmm2
mov edx, edx
movd ecx, xmm5
sal rcx, 32
or rdx, rcx
mov QWORD PTR [rsp-24], rdx
movd edx, xmm4
pxor xmm4, xmm4
mov ecx, edx
mov rdx, QWORD PTR [rsp-16]
and rdx, rdi
or rdx, rcx
lea ecx, [r9+2]
mov edx, edx
cvtsi2ss xmm4, ecx
movd ecx, xmm4
sal rcx, 32
or rdx, rcx
mov QWORD PTR [rsp-16], rdx
movaps xmm4, XMMWORD PTR [rsp-24]
mulps xmm4, xmm4
.L2:
movd edx, xmm2
mov r10d, esi
pxor xmm2, xmm2
pxor xmm7, xmm7
mov ecx, edx
mov rdx, QWORD PTR [rsp-40]
cvtsi2ss xmm2, esi
add esi, 1
and rdx, rdi
cvtsi2ss xmm7, esi
or rdx, rcx
mov ecx, edx
movd r11d, xmm2
movd edx, xmm7
sal r11, 32
or rcx, r11
pxor xmm7, xmm7
mov QWORD PTR [rsp-40], rcx
mov ecx, edx
mov rdx, QWORD PTR [rsp-32]
and rdx, rdi
or rdx, rcx
lea ecx, [r10+2]
mov edx, edx
cvtsi2ss xmm7, ecx
movd ecx, xmm7
sal rcx, 32
or rdx, rcx
mov QWORD PTR [rsp-32], rdx
movaps xmm0, XMMWORD PTR [rsp-40]
mulps xmm0, xmm0
addps xmm0, xmm4
movaps xmm3, xmm0
movaps xmm1, xmm0
shufps xmm3, xmm0, 85
addss xmm1, xmm3
movaps xmm3, xmm0
unpckhps xmm3, xmm0
shufps xmm0, xmm0, 255
addss xmm1, xmm3
addss xmm0, xmm1
cvttss2si edx, xmm0
add eax, edx
cmp r10d, 5120
jne .L2
cmp r9d, 5120
jne .L4
rep ret
main:
xor eax, eax
ret
_GLOBAL__sub_I_calculations():
sub rsp, 8
mov edi, OFFSET FLAT:_ZStL8__ioinit
call std::ios_base::Init::Init() [complete object constructor]
mov edx, OFFSET FLAT:__dso_handle
mov esi, OFFSET FLAT:_ZStL8__ioinit
mov edi, OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
add rsp, 8
jmp __cxa_atexit
Note that the compiler's version uses cvtdq2ps, paddd, mulps, addps, and cvttps2dq, all of which are packed SIMD instructions. By combining them effectively, the compiler generates fast code.
In contrast, your code generates a lot of add, and, cvtsi2ss, lea, mov, movd, or, pxor, and sal, which are mostly scalar and data-shuffling instructions rather than packed SIMD math.
I suspect the compiler does a better job of dealing with data type conversion and data rearrangement than you do, and that this allows it to arrange its math more effectively.
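To illustrate (a sketch only, assuming SSE2 and a restructured loop rather than your exact code): the per-iteration float stores and reloads can be avoided by keeping the lane values in integer vector registers and converting them with one packed _mm_cvtepi32_ps, much like the compiler's paddd/cvtdq2ps pattern above. Whether this actually beats the auto-vectorized scalar version would still need measuring.
#include <emmintrin.h> // SSE2: _mm_cvtepi32_ps, _mm_add_epi32, ...

int squared_sums_in_registers()
{
    const __m128i one = _mm_set1_epi32(1);
    int result_v = 0;
    __m128i y_lanes = _mm_setr_epi32(0, 1, 2, 3);      // {y, y+1, y+2, y+3}
    for (int y = 0; y < 5120; ++y)
    {
        const __m128 v_y = _mm_cvtepi32_ps(y_lanes);   // one packed int->float conversion
        const __m128 y2  = _mm_mul_ps(v_y, v_y);
        __m128i x_lanes = _mm_setr_epi32(0, 1, 2, 3);  // {x, x+1, x+2, x+3}
        for (int x = 0; x < 5120; ++x)
        {
            const __m128 v_x = _mm_cvtepi32_ps(x_lanes);
            const __m128 res = _mm_add_ps(_mm_mul_ps(v_x, v_x), y2);
            alignas(16) float r[4];
            _mm_store_ps(r, res);                      // horizontal sum kept scalar for brevity
            result_v += (int)(r[0] + r[1] + r[2] + r[3]);
            x_lanes = _mm_add_epi32(x_lanes, one);     // advance all four lanes at once (paddd)
        }
        y_lanes = _mm_add_epi32(y_lanes, one);
    }
    return result_v;
}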

Related

Loop unroll issue with Visual Studio compiler

I have a simple setup where I noticed that the VS compiler does not seem smart enough to unroll the loop, while other compilers like clang or gcc do. Am I missing some optimization flag for VS?
#include <cstddef>

struct A
{
    double data[4];
    double *begin() { return data; }
    double *end() { return data + 4; }
    double const *begin() const { return data; }
    double const *end() const { return data + 4; }
};

double sum_index(A const &a) {
    double ret = 0;
    for (std::size_t i = 0; i < 4; ++i)
    {
        ret += a.data[i];
    }
    return ret;
}

double sum_iter(A const &a) {
    double ret = 0;
    for (auto const &v : a)
    {
        ret += v;
    }
    return ret;
}
I used https://godbolt.org/ compiler explorer to generate assembler code.
gcc 11.2 with -O3:
sum_index(A const&):
pxor xmm0, xmm0
addsd xmm0, QWORD PTR [rdi]
addsd xmm0, QWORD PTR [rdi+8]
addsd xmm0, QWORD PTR [rdi+16]
addsd xmm0, QWORD PTR [rdi+24]
ret
sum_iter(A const&):
movsd xmm1, QWORD PTR [rdi]
addsd xmm1, QWORD PTR .LC0[rip]
movsd xmm0, QWORD PTR [rdi+8]
addsd xmm1, xmm0
movupd xmm0, XMMWORD PTR [rdi+16]
addsd xmm1, xmm0
unpckhpd xmm0, xmm0
addsd xmm0, xmm1
ret
.LC0:
.long 0
.long 0
clang 13.0.1 with -O3:
sum_index(A const&): # #sum_index(A const&)
xorpd xmm0, xmm0
addsd xmm0, qword ptr [rdi]
addsd xmm0, qword ptr [rdi + 8]
addsd xmm0, qword ptr [rdi + 16]
addsd xmm0, qword ptr [rdi + 24]
ret
sum_iter(A const&): # #sum_iter(A const&)
xorpd xmm0, xmm0
addsd xmm0, qword ptr [rdi]
addsd xmm0, qword ptr [rdi + 8]
addsd xmm0, qword ptr [rdi + 16]
addsd xmm0, qword ptr [rdi + 24]
ret
MSVC 19.30 with /O2 (there is no /O3?):
this$ = 8
double const * A::begin(void)const PROC ; A::begin, COMDAT
mov rax, rcx
ret 0
double const * A::begin(void)const ENDP ; A::begin
this$ = 8
double const * A::end(void)const PROC ; A::end, COMDAT
lea rax, QWORD PTR [rcx+32]
ret 0
double const * A::end(void)const ENDP ; A::end
a$ = 8
double sum_index(A const &) PROC ; sum_index, COMDAT
movsd xmm0, QWORD PTR [rcx]
xorps xmm1, xmm1
addsd xmm0, xmm1
addsd xmm0, QWORD PTR [rcx+8]
addsd xmm0, QWORD PTR [rcx+16]
addsd xmm0, QWORD PTR [rcx+24]
ret 0
double sum_index(A const &) ENDP ; sum_index
a$ = 8
double sum_iter(A const &) PROC ; sum_iter, COMDAT
lea rax, QWORD PTR [rcx+32]
xorps xmm0, xmm0
cmp rcx, rax
je SHORT $LN12#sum_iter
npad 4
$LL8#sum_iter:
addsd xmm0, QWORD PTR [rcx]
add rcx, 8
cmp rcx, rax
jne SHORT $LL8#sum_iter
$LN12#sum_iter:
ret 0
double sum_iter(A const &) ENDP ; sum_iter
Obviously there is a problem with unrolling the loop in MSVC. Is there some additional optimization flag I have to set?
Thanks for the help!
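P.S. A possible workaround, assuming rewriting the loop is acceptable, is to index the elements directly (as sum_index already does) or to spell out the additions by hand; a sketch, reusing the struct A above:
// Hand-unrolled variant with the same summation order as the original loop.
double sum_unrolled(A const &a)
{
    double ret = 0;
    ret += a.data[0];
    ret += a.data[1];
    ret += a.data[2];
    ret += a.data[3];
    return ret;
}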

Loop unrolling and SSE -- clang vs gcc

Disclaimer: full code can be found here.
16 byte alignment
Given a fairly simple type to support proper SSE alignment
struct alignas(16) simd_pack
{
    std::int32_t data[4];
};
and a function that adds two arrays together
void add_packed(simd_pack* lhs_and_result, simd_pack* rhs, std::size_t size)
{
    for (std::size_t i = 0; i < size; i++)
        for (std::size_t j = 0; j < 4; j++)
            lhs_and_result[i].data[j] += rhs[i].data[j];
}
we compile the code with clang and gcc using -O3.
Clang produces the following assembly:
add_packed(simd_pack*, simd_pack*, unsigned long): # #add_packed(simd_pack*, simd_pack*, unsigned long)
test rdx, rdx
je .LBB0_3
mov eax, 12
.LBB0_2: # =>This Inner Loop Header: Depth=1
mov ecx, dword ptr [rsi + rax - 12]
add dword ptr [rdi + rax - 12], ecx
mov ecx, dword ptr [rsi + rax - 8]
add dword ptr [rdi + rax - 8], ecx
mov ecx, dword ptr [rsi + rax - 4]
add dword ptr [rdi + rax - 4], ecx
mov ecx, dword ptr [rsi + rax]
add dword ptr [rdi + rax], ecx
add rax, 16
add rdx, -1
jne .LBB0_2
.LBB0_3:
ret
I'm not very literate in assembly but to me it looks like clang is simply unrolling the inner for loop. If we take a look at gcc we get:
add_packed(simd_pack*, simd_pack*, unsigned long):
test rdx, rdx
je .L1
sal rdx, 4
xor eax, eax
.L3:
movdqa xmm0, XMMWORD PTR [rdi+rax]
paddd xmm0, XMMWORD PTR [rsi+rax]
movaps XMMWORD PTR [rdi+rax], xmm0
add rax, 16
cmp rax, rdx
jne .L3
.L1:
ret
which is what I expect.
64 byte alignment
The difference gets even bigger (obviously) if we go to 64 byte alignment (which usually is a cache line if I'm not mistaken)
struct alignas(64) cache_line
{
    std::int32_t data[16];
};

void add_cache_line(cache_line* lhs_and_result, cache_line* rhs, std::size_t size)
{
    for (std::size_t i = 0; i < size; i++)
        for (std::size_t j = 0; j < 16; j++)
            lhs_and_result[i].data[j] += rhs[i].data[j];
}
Clang keeps simply unrolling:
add_cache_line(cache_line*, cache_line*, unsigned long): # #add_cache_line(cache_line*, cache_line*, unsigned long)
test rdx, rdx
je .LBB1_3
mov eax, 60
.LBB1_2: # =>This Inner Loop Header: Depth=1
mov ecx, dword ptr [rsi + rax - 60]
add dword ptr [rdi + rax - 60], ecx
mov ecx, dword ptr [rsi + rax - 56]
add dword ptr [rdi + rax - 56], ecx
mov ecx, dword ptr [rsi + rax - 52]
add dword ptr [rdi + rax - 52], ecx
mov ecx, dword ptr [rsi + rax - 48]
add dword ptr [rdi + rax - 48], ecx
mov ecx, dword ptr [rsi + rax - 44]
add dword ptr [rdi + rax - 44], ecx
mov ecx, dword ptr [rsi + rax - 40]
add dword ptr [rdi + rax - 40], ecx
mov ecx, dword ptr [rsi + rax - 36]
add dword ptr [rdi + rax - 36], ecx
mov ecx, dword ptr [rsi + rax - 32]
add dword ptr [rdi + rax - 32], ecx
mov ecx, dword ptr [rsi + rax - 28]
add dword ptr [rdi + rax - 28], ecx
mov ecx, dword ptr [rsi + rax - 24]
add dword ptr [rdi + rax - 24], ecx
mov ecx, dword ptr [rsi + rax - 20]
add dword ptr [rdi + rax - 20], ecx
mov ecx, dword ptr [rsi + rax - 16]
add dword ptr [rdi + rax - 16], ecx
mov ecx, dword ptr [rsi + rax - 12]
add dword ptr [rdi + rax - 12], ecx
mov ecx, dword ptr [rsi + rax - 8]
add dword ptr [rdi + rax - 8], ecx
mov ecx, dword ptr [rsi + rax - 4]
add dword ptr [rdi + rax - 4], ecx
mov ecx, dword ptr [rsi + rax]
add dword ptr [rdi + rax], ecx
add rax, 64
add rdx, -1
jne .LBB1_2
.LBB1_3:
ret
while gcc uses SSE and also unrolls that:
add_cache_line(cache_line*, cache_line*, unsigned long):
mov rcx, rdx
test rdx, rdx
je .L9
sal rcx, 6
mov rax, rdi
mov rdx, rsi
add rcx, rdi
.L11:
movdqa xmm2, XMMWORD PTR [rdx+16]
movdqa xmm3, XMMWORD PTR [rax]
add rax, 64
add rdx, 64
movdqa xmm1, XMMWORD PTR [rdx-32]
movdqa xmm0, XMMWORD PTR [rdx-16]
paddd xmm3, XMMWORD PTR [rdx-64]
paddd xmm2, XMMWORD PTR [rax-48]
paddd xmm1, XMMWORD PTR [rax-32]
paddd xmm0, XMMWORD PTR [rax-16]
movaps XMMWORD PTR [rax-64], xmm3
movaps XMMWORD PTR [rax-48], xmm2
movaps XMMWORD PTR [rax-32], xmm1
movaps XMMWORD PTR [rax-16], xmm0
cmp rax, rcx
jne .L11
.L9:
ret
No alignment
It gets interesting if we use plain 32-bit integer arrays with no alignment at all. We use the exact same compiler flags.
void add_unaligned(std::int32_t* lhs_and_result, std::int32_t* rhs, std::size_t size)
{
    for (std::size_t i = 0; i < size; i++)
        lhs_and_result[i] += rhs[i];
}
Clang
Clang's assembly exploded a fair bit, adding some branches:
add_unaligned(int*, int*, unsigned long): # #add_unaligned(int*, int*, unsigned long)
test rdx, rdx
je .LBB2_16
cmp rdx, 7
jbe .LBB2_2
lea rax, [rsi + 4*rdx]
cmp rax, rdi
jbe .LBB2_9
lea rax, [rdi + 4*rdx]
cmp rax, rsi
jbe .LBB2_9
.LBB2_2:
xor r10d, r10d
.LBB2_3:
mov r8, r10
not r8
add r8, rdx
mov rcx, rdx
and rcx, 3
je .LBB2_5
.LBB2_4: # =>This Inner Loop Header: Depth=1
mov eax, dword ptr [rsi + 4*r10]
add dword ptr [rdi + 4*r10], eax
add r10, 1
add rcx, -1
jne .LBB2_4
.LBB2_5:
cmp r8, 3
jb .LBB2_16
.LBB2_6: # =>This Inner Loop Header: Depth=1
mov eax, dword ptr [rsi + 4*r10]
add dword ptr [rdi + 4*r10], eax
mov eax, dword ptr [rsi + 4*r10 + 4]
add dword ptr [rdi + 4*r10 + 4], eax
mov eax, dword ptr [rsi + 4*r10 + 8]
add dword ptr [rdi + 4*r10 + 8], eax
mov eax, dword ptr [rsi + 4*r10 + 12]
add dword ptr [rdi + 4*r10 + 12], eax
add r10, 4
cmp rdx, r10
jne .LBB2_6
jmp .LBB2_16
.LBB2_9:
mov r10, rdx
and r10, -8
lea rax, [r10 - 8]
mov r9, rax
shr r9, 3
add r9, 1
mov r8d, r9d
and r8d, 1
test rax, rax
je .LBB2_10
sub r9, r8
xor ecx, ecx
.LBB2_12: # =>This Inner Loop Header: Depth=1
movdqu xmm0, xmmword ptr [rsi + 4*rcx]
movdqu xmm1, xmmword ptr [rsi + 4*rcx + 16]
movdqu xmm2, xmmword ptr [rdi + 4*rcx]
paddd xmm2, xmm0
movdqu xmm0, xmmword ptr [rdi + 4*rcx + 16]
paddd xmm0, xmm1
movdqu xmm1, xmmword ptr [rdi + 4*rcx + 32]
movdqu xmm3, xmmword ptr [rdi + 4*rcx + 48]
movdqu xmmword ptr [rdi + 4*rcx], xmm2
movdqu xmmword ptr [rdi + 4*rcx + 16], xmm0
movdqu xmm0, xmmword ptr [rsi + 4*rcx + 32]
paddd xmm0, xmm1
movdqu xmm1, xmmword ptr [rsi + 4*rcx + 48]
paddd xmm1, xmm3
movdqu xmmword ptr [rdi + 4*rcx + 32], xmm0
movdqu xmmword ptr [rdi + 4*rcx + 48], xmm1
add rcx, 16
add r9, -2
jne .LBB2_12
test r8, r8
je .LBB2_15
.LBB2_14:
movdqu xmm0, xmmword ptr [rsi + 4*rcx]
movdqu xmm1, xmmword ptr [rsi + 4*rcx + 16]
movdqu xmm2, xmmword ptr [rdi + 4*rcx]
paddd xmm2, xmm0
movdqu xmm0, xmmword ptr [rdi + 4*rcx + 16]
paddd xmm0, xmm1
movdqu xmmword ptr [rdi + 4*rcx], xmm2
movdqu xmmword ptr [rdi + 4*rcx + 16], xmm0
.LBB2_15:
cmp r10, rdx
jne .LBB2_3
.LBB2_16:
ret
.LBB2_10:
xor ecx, ecx
test r8, r8
jne .LBB2_14
jmp .LBB2_15
What is happening at .LBB2_4 and .LBB2_6? It looks like it's unrolling a loop again, but I'm not sure what happens there (mainly because of the registers used).
In .LBB2_12 it even unrolls the SSE part. I think it's only unrolled two-fold, though, because it needs two SIMD registers to load each operand now that the data is unaligned. .LBB2_14 contains the SSE part without the unrolling.
How is the control flow here? I'm assuming it should be:
keep using the unrolled SSE part until the remaining data is too small to fill all the registers (xmm0..3)
switch to the single stage SSE part and do it once if we have enough data remaining to fill xmm0 (4 integers in our case)
process the remaining data (3 operations at max, otherwise it would be SSE suitable again)
The order of the labels and the jump instructions is confusing; is that (approximately) what happens here?
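In rough C++ with SSE2 intrinsics, the flow I'm assuming would look something like this (just my mental model written out, not a decompilation of clang's output):
#include <cstddef>
#include <cstdint>
#include <emmintrin.h>

// Assumed flow: a 4x-unrolled unaligned SSE loop, then single SSE steps,
// then a scalar tail of at most 3 elements.
void add_unaligned_assumed(std::int32_t* lhs_and_result, std::int32_t* rhs, std::size_t size)
{
    std::size_t i = 0;
    // 1) unrolled SSE part: four unaligned 4-int vectors (16 ints) per pass
    for (; size - i >= 16; i += 16)
    {
        for (std::size_t k = 0; k < 16; k += 4)
        {
            __m128i a = _mm_loadu_si128((const __m128i*)(lhs_and_result + i + k));
            __m128i b = _mm_loadu_si128((const __m128i*)(rhs + i + k));
            _mm_storeu_si128((__m128i*)(lhs_and_result + i + k), _mm_add_epi32(a, b));
        }
    }
    // 2) single-stage SSE part while at least one full vector remains
    for (; size - i >= 4; i += 4)
    {
        __m128i a = _mm_loadu_si128((const __m128i*)(lhs_and_result + i));
        __m128i b = _mm_loadu_si128((const __m128i*)(rhs + i));
        _mm_storeu_si128((__m128i*)(lhs_and_result + i), _mm_add_epi32(a, b));
    }
    // 3) scalar remainder: at most 3 elements left
    for (; i < size; ++i)
        lhs_and_result[i] += rhs[i];
}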
GCC
Gcc's assembly is a bit easier to read:
add_unaligned(int*, int*, unsigned long):
test rdx, rdx
je .L16
lea rcx, [rsi+4]
mov rax, rdi
sub rax, rcx
cmp rax, 8
jbe .L22
lea rax, [rdx-1]
cmp rax, 2
jbe .L22
mov rcx, rdx
xor eax, eax
shr rcx, 2
sal rcx, 4
.L19:
movdqu xmm0, XMMWORD PTR [rdi+rax]
movdqu xmm1, XMMWORD PTR [rsi+rax]
paddd xmm0, xmm1
movups XMMWORD PTR [rdi+rax], xmm0
add rax, 16
cmp rax, rcx
jne .L19
mov rax, rdx
and rax, -4
test dl, 3
je .L16
mov ecx, DWORD PTR [rsi+rax*4]
add DWORD PTR [rdi+rax*4], ecx
lea rcx, [rax+1]
cmp rdx, rcx
jbe .L16
add rax, 2
mov r8d, DWORD PTR [rsi+rcx*4]
add DWORD PTR [rdi+rcx*4], r8d
cmp rdx, rax
jbe .L16
mov edx, DWORD PTR [rsi+rax*4]
add DWORD PTR [rdi+rax*4], edx
ret
.L22:
xor eax, eax
.L18:
mov ecx, DWORD PTR [rsi+rax*4]
add DWORD PTR [rdi+rax*4], ecx
add rax, 1
cmp rdx, rax
jne .L18
.L16:
ret
I assume the control flow is similar to clang's:
keep using the single stage SSE part until the remaining data is too small to fill xmm0 and xmm1
process the remaining data (3 operations at max, otherwise it would be SSE suitable again)
It looks like exactly this is happening in .L19, but what is .L18 doing then?
Summary
Here is the full code, including assembly. My questions are:
Why is clang unrolling the functions that use aligned data instead of using SSE or a combination of both (like gcc)?
What are .LBB2_4 and .LBB2_6 in clang's assembly doing?
Are my assumptions about the control flow of the function with the unaligned data correct?
What is .L18 in gcc's assembly doing?

Tail Call Optimization (TCO) in Clang on O2

I was playing with Tail Call Optimization (TCO) and various optimization levels in clang on godbolt. I have the very simple function (lifted from here):
int factorial(int x, int y)
{
    if (x == 0)
        return y;
    else
        return factorial(x - 1, y * x);
}
If I set -O1, all is good and I understand the generated assembly. However, if I set -O2, a lot more happens and I don't understand what is happening. Could someone explain?
Generated assembly at -O2:
.LCPI0_0:
.long 0 # 0x0
.long 4294967295 # 0xffffffff
.long 4294967294 # 0xfffffffe
.long 4294967293 # 0xfffffffd
.LCPI0_1:
.zero 4
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.LCPI0_2:
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.LCPI0_3:
.long 4294967292 # 0xfffffffc
.long 4294967292 # 0xfffffffc
.long 4294967292 # 0xfffffffc
.long 4294967292 # 0xfffffffc
.LCPI0_4:
.long 4294967288 # 0xfffffff8
.long 4294967288 # 0xfffffff8
.long 4294967288 # 0xfffffff8
.long 4294967288 # 0xfffffff8
.LCPI0_5:
.long 4294967284 # 0xfffffff4
.long 4294967284 # 0xfffffff4
.long 4294967284 # 0xfffffff4
.long 4294967284 # 0xfffffff4
.LCPI0_6:
.long 4294967280 # 0xfffffff0
.long 4294967280 # 0xfffffff0
.long 4294967280 # 0xfffffff0
.long 4294967280 # 0xfffffff0
.LCPI0_7:
.long 4294967276 # 0xffffffec
.long 4294967276 # 0xffffffec
.long 4294967276 # 0xffffffec
.long 4294967276 # 0xffffffec
.LCPI0_8:
.long 4294967272 # 0xffffffe8
.long 4294967272 # 0xffffffe8
.long 4294967272 # 0xffffffe8
.long 4294967272 # 0xffffffe8
.LCPI0_9:
.long 4294967268 # 0xffffffe4
.long 4294967268 # 0xffffffe4
.long 4294967268 # 0xffffffe4
.long 4294967268 # 0xffffffe4
.LCPI0_10:
.long 4294967264 # 0xffffffe0
.long 4294967264 # 0xffffffe0
.long 4294967264 # 0xffffffe0
.long 4294967264 # 0xffffffe0
factorial(int, int): # #factorial(int, int)
mov eax, esi
test edi, edi
je .LBB0_12
cmp edi, 8
jb .LBB0_11
mov ecx, edi
and ecx, -8
movd xmm0, edi
pshufd xmm6, xmm0, 0 # xmm6 = xmm0[0,0,0,0]
paddd xmm6, xmmword ptr [rip + .LCPI0_0]
movd xmm0, eax
movaps xmm1, xmmword ptr [rip + .LCPI0_1] # xmm1 = <u,1,1,1>
movss xmm1, xmm0 # xmm1 = xmm0[0],xmm1[1,2,3]
lea esi, [rcx - 8]
mov edx, esi
shr edx, 3
add edx, 1
mov eax, edx
and eax, 3
cmp esi, 24
jae .LBB0_4
movdqa xmm2, xmmword ptr [rip + .LCPI0_2] # xmm2 = [1,1,1,1]
test eax, eax
jne .LBB0_7
jmp .LBB0_9
.LBB0_4:
mov esi, 1
sub esi, edx
lea edx, [rax + rsi]
add edx, -1
movdqa xmm2, xmmword ptr [rip + .LCPI0_2] # xmm2 = [1,1,1,1]
movdqa xmm9, xmmword ptr [rip + .LCPI0_4] # xmm9 = [4294967288,4294967288,4294967288,4294967288]
movdqa xmm10, xmmword ptr [rip + .LCPI0_5] # xmm10 = [4294967284,4294967284,4294967284,4294967284]
movdqa xmm11, xmmword ptr [rip + .LCPI0_6] # xmm11 = [4294967280,4294967280,4294967280,4294967280]
movdqa xmm12, xmmword ptr [rip + .LCPI0_7] # xmm12 = [4294967276,4294967276,4294967276,4294967276]
movdqa xmm13, xmmword ptr [rip + .LCPI0_8] # xmm13 = [4294967272,4294967272,4294967272,4294967272]
movdqa xmm14, xmmword ptr [rip + .LCPI0_9] # xmm14 = [4294967268,4294967268,4294967268,4294967268]
movdqa xmm15, xmmword ptr [rip + .LCPI0_10] # xmm15 = [4294967264,4294967264,4294967264,4294967264]
.LBB0_5: # =>This Inner Loop Header: Depth=1
movdqa xmm0, xmm6
paddd xmm0, xmmword ptr [rip + .LCPI0_3]
pshufd xmm7, xmm6, 245 # xmm7 = xmm6[1,1,3,3]
pshufd xmm3, xmm1, 245 # xmm3 = xmm1[1,1,3,3]
pmuludq xmm3, xmm7
pmuludq xmm1, xmm6
pshufd xmm7, xmm2, 245 # xmm7 = xmm2[1,1,3,3]
pshufd xmm4, xmm0, 245 # xmm4 = xmm0[1,1,3,3]
pmuludq xmm4, xmm7
pmuludq xmm0, xmm2
movdqa xmm2, xmm6
paddd xmm2, xmm9
movdqa xmm7, xmm6
paddd xmm7, xmm10
pmuludq xmm1, xmm2
pshufd xmm2, xmm2, 245 # xmm2 = xmm2[1,1,3,3]
pmuludq xmm2, xmm3
pmuludq xmm0, xmm7
pshufd xmm3, xmm7, 245 # xmm3 = xmm7[1,1,3,3]
pmuludq xmm3, xmm4
movdqa xmm4, xmm6
paddd xmm4, xmm11
movdqa xmm7, xmm6
paddd xmm7, xmm12
pshufd xmm5, xmm4, 245 # xmm5 = xmm4[1,1,3,3]
pmuludq xmm5, xmm2
pmuludq xmm4, xmm1
pshufd xmm8, xmm7, 245 # xmm8 = xmm7[1,1,3,3]
pmuludq xmm8, xmm3
pmuludq xmm7, xmm0
movdqa xmm0, xmm6
paddd xmm0, xmm13
movdqa xmm3, xmm6
paddd xmm3, xmm14
pmuludq xmm4, xmm0
pshufd xmm1, xmm4, 232 # xmm1 = xmm4[0,2,2,3]
pshufd xmm0, xmm0, 245 # xmm0 = xmm0[1,1,3,3]
pmuludq xmm0, xmm5
pshufd xmm0, xmm0, 232 # xmm0 = xmm0[0,2,2,3]
punpckldq xmm1, xmm0 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
pmuludq xmm7, xmm3
pshufd xmm2, xmm7, 232 # xmm2 = xmm7[0,2,2,3]
pshufd xmm0, xmm3, 245 # xmm0 = xmm3[1,1,3,3]
pmuludq xmm0, xmm8
pshufd xmm0, xmm0, 232 # xmm0 = xmm0[0,2,2,3]
punpckldq xmm2, xmm0 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
paddd xmm6, xmm15
add edx, 4
jne .LBB0_5
test eax, eax
je .LBB0_9
.LBB0_7:
neg eax
movdqa xmm3, xmmword ptr [rip + .LCPI0_3] # xmm3 = [4294967292,4294967292,4294967292,4294967292]
movdqa xmm4, xmmword ptr [rip + .LCPI0_4] # xmm4 = [4294967288,4294967288,4294967288,4294967288]
.LBB0_8: # =>This Inner Loop Header: Depth=1
movdqa xmm0, xmm6
paddd xmm0, xmm3
pshufd xmm5, xmm1, 245 # xmm5 = xmm1[1,1,3,3]
pmuludq xmm1, xmm6
pshufd xmm1, xmm1, 232 # xmm1 = xmm1[0,2,2,3]
pshufd xmm7, xmm6, 245 # xmm7 = xmm6[1,1,3,3]
pmuludq xmm7, xmm5
pshufd xmm5, xmm7, 232 # xmm5 = xmm7[0,2,2,3]
punpckldq xmm1, xmm5 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
pshufd xmm5, xmm2, 245 # xmm5 = xmm2[1,1,3,3]
pmuludq xmm2, xmm0
pshufd xmm2, xmm2, 232 # xmm2 = xmm2[0,2,2,3]
pshufd xmm0, xmm0, 245 # xmm0 = xmm0[1,1,3,3]
pmuludq xmm0, xmm5
pshufd xmm0, xmm0, 232 # xmm0 = xmm0[0,2,2,3]
punpckldq xmm2, xmm0 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
paddd xmm6, xmm4
inc eax
jne .LBB0_8
.LBB0_9:
pshufd xmm0, xmm2, 245 # xmm0 = xmm2[1,1,3,3]
pmuludq xmm2, xmm1
pshufd xmm2, xmm2, 232 # xmm2 = xmm2[0,2,2,3]
pshufd xmm1, xmm1, 245 # xmm1 = xmm1[1,1,3,3]
pmuludq xmm1, xmm0
pshufd xmm0, xmm1, 232 # xmm0 = xmm1[0,2,2,3]
punpckldq xmm2, xmm0 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
pshufd xmm0, xmm2, 78 # xmm0 = xmm2[2,3,0,1]
pmuludq xmm0, xmm2
pshufd xmm0, xmm0, 232 # xmm0 = xmm0[0,2,2,3]
pshufd xmm2, xmm1, 10 # xmm2 = xmm1[2,2,0,0]
pmuludq xmm2, xmm1
pshufd xmm1, xmm2, 232 # xmm1 = xmm2[0,2,2,3]
punpckldq xmm0, xmm1 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
pshufd xmm1, xmm0, 229 # xmm1 = xmm0[1,1,2,3]
pmuludq xmm1, xmm0
movd eax, xmm1
cmp ecx, edi
je .LBB0_12
sub edi, ecx
.LBB0_11: # =>This Inner Loop Header: Depth=1
imul eax, edi
add edi, -1
jne .LBB0_11
.LBB0_12:
ret
Generated assembly at -O1:
factorial(int, int): # #factorial(int, int)
mov eax, esi
test edi, edi
je .LBB0_2
.LBB0_1: # =>This Inner Loop Header: Depth=1
imul eax, edi
add edi, -1
jne .LBB0_1
.LBB0_2:
ret

Different assembly when rangifying a simple algorithm

When I was preparing supplementary info for this question, I noticed that “rangified” implementations of a very simple algorithm resulted in important differences (to my eyes) in the resulting assembly, compared with “legacy” implementations.
I expanded the tests a bit, with the following results (GCC 9.1 -O3):
Case 1. Simple for loop (https://godbolt.org/z/rAVaT2)
#include <vector>

void foo(std::vector<double> &u, std::vector<double> const &v)
{
    for (std::size_t i = 0u; i < u.size(); ++i)
        u[i] += v[i];
}
mov rdx, QWORD PTR [rdi]
mov rdi, QWORD PTR [rdi+8]
sub rdi, rdx
sar rdi, 3
je .L1
mov rcx, QWORD PTR [rsi]
lea rax, [rcx+15]
sub rax, rdx
cmp rax, 30
jbe .L7
lea rax, [rdi-1]
cmp rax, 1
jbe .L7
mov rsi, rdi
xor eax, eax
shr rsi
sal rsi, 4
.L4:
movupd xmm0, XMMWORD PTR [rcx+rax]
movupd xmm1, XMMWORD PTR [rdx+rax]
addpd xmm0, xmm1
movups XMMWORD PTR [rdx+rax], xmm0
add rax, 16
cmp rsi, rax
jne .L4
mov rsi, rdi
and rsi, -2
and edi, 1
je .L1
lea rax, [rdx+rsi*8]
movsd xmm0, QWORD PTR [rax]
addsd xmm0, QWORD PTR [rcx+rsi*8]
movsd QWORD PTR [rax], xmm0
ret
.L7:
xor eax, eax
.L3:
movsd xmm0, QWORD PTR [rdx+rax*8]
addsd xmm0, QWORD PTR [rcx+rax*8]
movsd QWORD PTR [rdx+rax*8], xmm0
add rax, 1
cmp rdi, rax
jne .L3
.L1:
ret
Case 2. std::transform (https://godbolt.org/z/2iZaqo)
#include <algorithm>
#include <vector>

void foo(std::vector<double> &u, std::vector<double> const &v)
{
    std::transform(std::begin(u), std::end(u),
                   std::begin(v),
                   std::begin(u),
                   std::plus());
}
mov rdx, QWORD PTR [rdi]
mov rax, QWORD PTR [rdi+8]
mov rsi, QWORD PTR [rsi]
cmp rax, rdx
je .L1
sub rax, 8
lea rcx, [rsi+15]
sub rax, rdx
sub rcx, rdx
shr rax, 3
cmp rcx, 30
jbe .L7
movabs rcx, 2305843009213693950
test rax, rcx
je .L7
lea rcx, [rax+1]
xor eax, eax
mov rdi, rcx
shr rdi
sal rdi, 4
.L4:
movupd xmm0, XMMWORD PTR [rdx+rax]
movupd xmm1, XMMWORD PTR [rsi+rax]
addpd xmm0, xmm1
movups XMMWORD PTR [rdx+rax], xmm0
add rax, 16
cmp rax, rdi
jne .L4
mov rdi, rcx
and rdi, -2
lea rax, [0+rdi*8]
add rdx, rax
add rsi, rax
cmp rcx, rdi
je .L1
movsd xmm0, QWORD PTR [rdx]
addsd xmm0, QWORD PTR [rsi]
movsd QWORD PTR [rdx], xmm0
ret
.L7:
xor ecx, ecx
.L3:
movsd xmm0, QWORD PTR [rdx+rcx*8]
addsd xmm0, QWORD PTR [rsi+rcx*8]
mov rdi, rcx
movsd QWORD PTR [rdx+rcx*8], xmm0
add rcx, 1
cmp rax, rdi
jne .L3
.L1:
ret
Case 3. Range-v3 view::zip (https://godbolt.org/z/0BEkfT)
#define RANGES_ASSERT(...) ((void)0)
#include <algorithm>
#include <range/v3/view/zip.hpp>
#include <vector>

void foo(std::vector<double> &u, std::vector<double> const &v)
{
    auto w = ranges::view::zip(u, v);
    std::for_each(std::begin(w), std::end(w),
                  [](auto &&x) { std::get<0u>(x) += std::get<1u>(x); });
}
mov rdx, QWORD PTR [rsi]
mov rsi, QWORD PTR [rsi+8]
mov rax, QWORD PTR [rdi]
mov rcx, QWORD PTR [rdi+8]
cmp rdx, rsi
je .L1
cmp rax, rcx
je .L1
.L3:
movsd xmm0, QWORD PTR [rax]
addsd xmm0, QWORD PTR [rdx]
add rax, 8
add rdx, 8
movsd QWORD PTR [rax-8], xmm0
cmp rax, rcx
je .L1
cmp rdx, rsi
jne .L3
.L1:
ret
Case 4. cmcstl2 ranges::transform (https://godbolt.org/z/MjYO1G)
#include <experimental/ranges/algorithm>
#include <vector>

namespace std
{
    namespace ranges = experimental::ranges;
}

void foo(std::vector<double> &u, std::vector<double> const &v)
{
    std::ranges::transform(std::ranges::begin(u), std::ranges::end(u),
                           std::ranges::begin(v), std::ranges::end(v),
                           std::ranges::begin(u),
                           std::plus());
}
mov r8, QWORD PTR [rsi+8]
mov rdx, QWORD PTR [rsi]
mov rax, QWORD PTR [rdi]
mov rcx, QWORD PTR [rdi+8]
cmp rdx, r8
je .L1
cmp rcx, rax
jne .L3
jmp .L1
.L16:
cmp rdx, r8
je .L1
.L3:
movsd xmm0, QWORD PTR [rax]
addsd xmm0, QWORD PTR [rdx]
add rax, 8
add rdx, 8
movsd QWORD PTR [rax-8], xmm0
cmp rax, rcx
jne .L16
.L1:
ret
I can’t read assembly, but I seem to understand that the assemblies of Case 1 and Case 2 are almost equivalent and involve packed sums, whilst the assembly of the ranges versions (Cases 3 and 4) is much terser, but not vectorized.
I would really love to understand what those differences mean. Does my interpretation of the assembly make any sense? What are the additional instructions in the non-ranges versions? Why are there those differences?

SSE2 - 16-byte aligned dynamic allocation of memory

EDIT:
This is a followup to SSE2 Compiler Error
This is the real bug I experienced before and have reproduced below by changing the _mm_malloc statement as Michael Burr suggested:
Unhandled exception at 0x00415116 in SO.exe: 0xC0000005: Access violation reading
location 0xffffffff.
At line label: movdqa xmm0, xmmword ptr [t1+eax]
I'm trying to dynamically allocate t1 and t2 and according to this tutorial, I've used _mm_malloc:
#include <emmintrin.h>

int main(int argc, char* argv[])
{
    int *t1, *t2;
    const int n = 100000;
    t1 = (int*)_mm_malloc(n*sizeof(int),16);
    t2 = (int*)_mm_malloc(n*sizeof(int),16);
    __m128i mul1, mul2;
    for (int j = 0; j < n; j++)
    {
        t1[j] = j;
        t2[j] = (j+1);
    } // set temporary variables to random values
    _asm
    {
            mov eax, 0
    label:  movdqa xmm0, xmmword ptr [t1+eax]
            movdqa xmm1, xmmword ptr [t2+eax]
            pmuludq xmm0, xmm1
            movdqa mul1, xmm0
            movdqa xmm0, xmmword ptr [t1+eax]
            pshufd xmm0, xmm0, 05fh
            pshufd xmm1, xmm1, 05fh
            pmuludq xmm0, xmm1
            movdqa mul2, xmm0
            add eax, 16
            cmp eax, 100000
            jnge label
    }
    _mm_free(t1);
    _mm_free(t2);
    return 0;
}
I think the 2nd problem is that you're reading at an offset from the pointer variable (not an offset from what the pointer points to).
Change:
label: movdqa xmm0, xmmword ptr [t1+eax]
To something like:
mov ebx, [t1]
label: movdqa xmm0, xmmword ptr [ebx+eax]
And similarly for your accesses through the t2 pointer.
This might be even better (though I haven't had an opportunity to test it, so it might not even work):
_asm
{
        mov eax, [t1]
        mov ebx, [t2]
        lea ecx, [eax + (100000*4)]
label:  movdqa xmm0, xmmword ptr [eax]
        movdqa xmm1, xmmword ptr [ebx]
        pmuludq xmm0, xmm1
        movdqa mul1, xmm0
        movdqa xmm0, xmmword ptr [eax]
        pshufd xmm0, xmm0, 05fh
        pshufd xmm1, xmm1, 05fh
        pmuludq xmm0, xmm1
        movdqa mul2, xmm0
        add eax, 16
        add ebx, 16
        cmp eax, ecx
        jnge label
}
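For what it's worth, a sketch using intrinsics instead of inline asm sidesteps the pointer-addressing pitfall entirely, because the compiler dereferences t1 and t2 for you. The function name is made up, and it only mirrors the pmuludq/pshufd sequence above rather than doing anything new:
#include <emmintrin.h>

void multiply_arrays(int *t1, int *t2, int n)
{
    __m128i mul1 = _mm_setzero_si128();
    __m128i mul2 = _mm_setzero_si128();
    for (int j = 0; j + 4 <= n; j += 4)
    {
        __m128i a = _mm_load_si128((__m128i *)(t1 + j)); // 16-byte aligned loads
        __m128i b = _mm_load_si128((__m128i *)(t2 + j));
        mul1 = _mm_mul_epu32(a, b);                      // pmuludq: products of lanes 0 and 2
        __m128i a_odd = _mm_shuffle_epi32(a, 0x5f);      // pshufd ..., 05fh
        __m128i b_odd = _mm_shuffle_epi32(b, 0x5f);
        mul2 = _mm_mul_epu32(a_odd, b_odd);              // products of the remaining lanes
    }
    (void)mul1;
    (void)mul2; // results are kept only in mul1/mul2, as in the original
}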
You're not allocating enough memory:
t1 = (int*)_mm_malloc(n * sizeof( int),16);
t2 = (int*)_mm_malloc(n * sizeof( int),16);
Perhaps:
t1 = (int*)_mm_malloc(n*sizeof(int),16);