I've been trying to implement a shift-by-vector operation with SSE2 intrinsics, but from experimentation and the Intel Intrinsics Guide, the shift intrinsics appear to use only the least-significant part of the count vector.
To reword my question, given a vector {v1, v2, ..., vn} and a set of shifts {s1, s2, ..., sn}, how do I calculate a result {r1, r2, ..., rn} such that:
r1 = v1 << s1
r2 = v2 << s2
...
rn = vn << sn
since it appears that _mm_sll_epi* performs this:
r1 = v1 << s1
r2 = v2 << s1
...
rn = vn << s1
Thanks in advance.
EDIT:
Here's the code I have:
#include <iostream>
#include <cstdint>
#include <mmintrin.h>
#include <emmintrin.h>
namespace SIMD {
using namespace std;
class SSE2 {
public:
// flipped operands due to function arguments
SSE2(uint64_t a, uint64_t b, uint64_t c, uint64_t d) { low = _mm_set_epi64x(b, a); high = _mm_set_epi64x(d, c); }
uint64_t& operator[](int idx)
{
switch (idx) {
case 0:
_mm_storel_epi64((__m128i*)result, low);
return result[0];
case 1:
_mm_store_si128((__m128i*)result, low);
return result[1];
case 2:
_mm_storel_epi64((__m128i*)result, high);
return result[0];
case 3:
_mm_store_si128((__m128i*)result, high);
return result[1];
}
/* Invalid index: should not happen */
return result[0];
}
SSE2& operator<<=(const SSE2& rhs)
{
low = _mm_sll_epi64(low, rhs.getlow());
high = _mm_sll_epi64(high, rhs.gethigh());
return *this;
}
void print()
{
uint64_t a[2];
_mm_store_si128((__m128i*)a, low);
cout << hex;
cout << a[0] << ' ' << a[1] << ' ';
_mm_store_si128((__m128i*)a, high);
cout << a[0] << ' ' << a[1] << ' ';
cout << dec;
}
__m128i getlow() const
{
return low;
}
__m128i gethigh() const
{
return high;
}
private:
__m128i low, high;
uint64_t result[2];
};
}
int main()
{
cout << "operator<<= test: vector << vector: ";
{
auto x = SIMD::SSE2(7, 8, 15, 10);
auto y = SIMD::SSE2(4, 5, 6, 7);
x.print();
y.print();
x <<= y;
if (x[0] != 112 || x[1] != 256 || x[2] != 960 || x[3] != 1280) {
std::cout << "FAILED: ";
x.print();
std::cout << std::endl;
} else {
std::cout << "PASSED" << std::endl;
}
}
return 0;
}
What should happen is results of {7 << 4 = 112, 8 << 5 = 256, 15 << 6 = 960, 10 << 7 = 1280}. The results I actually get seem to be {7 << 4 = 112, 8 << 4 = 128, 15 << 6 = 960, 10 << 6 = 640}, which isn't what I want.
Hope this helps, Jens.
If AVX2 is available, and your elements are 32 or 64 bits, your operation takes one variable-shift instruction: vpsrlvq (__m128i _mm_srlv_epi64(__m128i a, __m128i count)).
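For illustration, a minimal sketch (hedged; requires AVX2 and <immintrin.h>, and the wrapper names are made up). vpsllvq / _mm_sllv_epi64 is the left-shift counterpart matching the << in the question:
#include <immintrin.h>

// Per-element variable shifts in one instruction each (AVX2).
__m128i shift_right_by_vector(__m128i a, __m128i count) { return _mm_srlv_epi64(a, count); } // vpsrlvq
__m128i shift_left_by_vector (__m128i a, __m128i count) { return _mm_sllv_epi64(a, count); } // vpsllvq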
For 32-bit elements with SSE4.1, see Shifting 4 integers right by different values SIMD. Depending on latency vs. throughput requirements, you can do separate shifts and then blend, or use a multiply (by a specially-constructed vector of powers of 2) to get variable-count left shifts and then do a same-count-for-all-elements right shift.
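As a rough sketch of the separate-shifts-and-blend option for four 32-bit elements (hedged: sllv_epi32_sse41 is a made-up name, SSE4.1 is only needed for _mm_blend_epi16, and this does not show the multiply-by-powers-of-2 trick):
#include <emmintrin.h>
#include <smmintrin.h> // SSE4.1: _mm_blend_epi16

static inline __m128i sllv_epi32_sse41(__m128i v, __m128i counts)
{
    const __m128i lowmask = _mm_set_epi32(0, 0, 0, -1);
    // _mm_sll_epi32 takes its count from the low 64 bits of the count vector,
    // so move each lane's count down there and zero everything else.
    __m128i c0 = _mm_and_si128(counts, lowmask);
    __m128i c1 = _mm_and_si128(_mm_srli_si128(counts, 4), lowmask);
    __m128i c2 = _mm_and_si128(_mm_srli_si128(counts, 8), lowmask);
    __m128i c3 = _mm_srli_si128(counts, 12);              // upper bits already zero
    __m128i r0 = _mm_sll_epi32(v, c0);
    __m128i r1 = _mm_sll_epi32(v, c1);
    __m128i r2 = _mm_sll_epi32(v, c2);
    __m128i r3 = _mm_sll_epi32(v, c3);
    // Pick the one correct 32-bit lane out of each result (pblendw masks are per 16-bit word).
    __m128i r01 = _mm_blend_epi16(r0, r1, 0x0C);          // lane 1 from r1
    __m128i r23 = _mm_blend_epi16(r2, r3, 0xC0);          // lane 3 from r3
    return _mm_blend_epi16(r01, r23, 0xF0);               // lanes 2 and 3 from r23
}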
For your case, 64-bit elements with runtime-variable shift counts:
There are only two elements per SSE vector, so we just need two shifts and then combine the results (which we can do with a pblendw, or with a floating-point movsd (which may cause extra bypass-delay latency on some CPUs), or with two shuffles, or with two ANDs and an OR).
__m128i SSE2_emulated_srlv_epi64(__m128i a, __m128i count)
{
__m128i shift_low = _mm_srl_epi64(a, count); // high 64 is garbage
__m128i count_high = _mm_unpackhi_epi64(count,count); // broadcast the high element
__m128i shift_high = _mm_srl_epi64(a, count_high); // low 64 is garbage
// SSE4.1:
// return _mm_blend_epi16(shift_low, shift_high, 0xF0); // low 64 from shift_low, high 64 from shift_high
#if 1 // use movsd to blend
__m128d blended = _mm_move_sd( _mm_castsi128_pd(shift_high), _mm_castsi128_pd(shift_low) ); // use movsd as a blend. Faster than multiple instructions on most CPUs, but probably bad on Nehalem.
return _mm_castpd_si128(blended);
#else // SSE2 without using FP instructions:
// if we're going to do it this way, we could have shuffled the input before shifting. Probably not helpful though.
shift_high = _mm_unpackhi_epi64(shift_high, shift_high); // broadcast the high64
return _mm_unpacklo_epi64(shift_low, shift_high); // combine: low element from shift_low, high element from shift_high
#endif
}
Other shuffles like pshufd or psrldq would work, but punpckhqdq gets the job done without needing an immediate byte, so it's one byte shorter. SSSE3 palignr could get the high element from one register and the low element from another register into one vector, but they'd be reversed (so we'd need a pshufd to swap high and low halves). shufpd would work to blend, but has no advantage over movsd.
See Agner Fog's microarch guide for the details of the potential bypass-delay latency from using an FP instruction between two integer instructions. It's probably fine on Intel SnB-family CPUs, because other FP shuffles are. (And yes, movsd xmm1, xmm0 runs on the shuffle unit in port5. Use movaps or movapd for reg-reg moves even of scalars if you don't need the merging behaviour).
This compiles (on Godbolt with gcc5.3 -O3) to
movdqa xmm2, xmm0 # tmp97, a
psrlq xmm2, xmm1 # tmp97, count
punpckhqdq xmm1, xmm1 # tmp99, count
psrlq xmm0, xmm1 # tmp100, tmp99
movsd xmm0, xmm2 # tmp102, tmp97
ret
Related
I have the following AVX and naive (scalar) code:
__forceinline double dotProduct_2(const double* u, const double* v)
{
_mm256_zeroupper();
__m256d xy = _mm256_mul_pd(_mm256_load_pd(u), _mm256_load_pd(v));
__m256d temp = _mm256_hadd_pd(xy, xy);
__m128d dotproduct = _mm_add_pd(_mm256_extractf128_pd(temp, 0), _mm256_extractf128_pd(temp, 1));
return dotproduct.m128d_f64[0];
}
__forceinline double dotProduct_1(const D3& a, const D3& b)
{
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
}
And respective test scripts:
std::cout << res_1 << " " << res_2 << " " << res_3 << '\n';
{
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
for (int i = 0; i < (1 << 30); ++i)
{
zx_1 += dotProduct_1(aVx[i % 10000], aVx[(i + 1) % 10000]);
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::cout << "NAIVE : " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << '\n';
}
{
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
for (int i = 0; i < (1 << 30); ++i)
{
zx_2 += dotProduct_2(&aVx[i % 10000][0], &aVx[(i + 1) % 10000][0]);
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::cout << "AVX : " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << '\n';
}
std::cout << math::min2(zx_1, zx_2) << " " << zx_1 << " " << zx_2;
Well, all of the data is aligned to 32 bytes (D3 with __declspec... and the aVx array with _mm_malloc()..).
And, as far as I can see, the naive variant is equal to or faster than the AVX variant. I can't understand it: is this normal behaviour? I thought AVX was supposed to be 'super fast'... If not, how can I optimize it? I compile with MSVC 2015 (x64), with /arch:AVX. My hardware is an Intel i7-4750HQ (Haswell).
Simple profiling with basic loops isn't a great idea - it usually just means you are memory bandwidth limited, so the tests end up coming out at about the same speed (memory is typically slower than the CPU, and that's basically all you are testing here).
As others have said, your code example isn't great, because you are constantly going across the lanes (which I assume is just to find the fastest dot product, and not specifically because a sum of all the dot products is the desired result?). To be honest, if you really need a fast dot product (for AoS data as presented here), I think I would prefer to replace the VHADDPD with a VADDPD + VPERMILPD (trading an additional instruction for twice the throughput, and a lower latency):
double dotProduct_3(const double* u, const double* v)
{
__m256d dp = _mm256_mul_pd(_mm256_load_pd(u), _mm256_load_pd(v));
__m128d a = _mm256_extractf128_pd(dp, 0);
__m128d b = _mm256_extractf128_pd(dp, 1);
__m128d c = _mm_add_pd(a, b);
__m128d yy = _mm_unpackhi_pd(c, c);
__m128d dotproduct = _mm_add_pd(c, yy);
return _mm_cvtsd_f64(dotproduct);
}
asm:
dotProduct_3(double const*, double const*):
vmovapd ymm0,YMMWORD PTR [rsi]
vmulpd ymm0,ymm0,YMMWORD PTR [rdi]
vextractf128 xmm1,ymm0,0x1
vaddpd xmm0,xmm1,xmm0
vpermilpd xmm1,xmm0,0x3
vaddpd xmm0,xmm1,xmm0
vzeroupper
ret
Generally speaking, if you are using horizontal adds, you're doing it wrong! Whilst a 256bit register may seem ideal for a Vector4d, it's not actually a particularly great representation (especially if you consider that AVX512 is now available!). A very similar question to this came up recently: For C++ Vector3 utility class implementations, is array faster than struct and class?
If you want performance, then structure-of-arrays is the best way to go.
struct HybridVec4SOA
{
__m256d x;
__m256d y;
__m256d z;
__m256d w;
};
__m256d dot(const HybridVec4SOA& a, const HybridVec4SOA& b)
{
return _mm256_fmadd_pd(a.w, b.w,
_mm256_fmadd_pd(a.z, b.z,
_mm256_fmadd_pd(a.y, b.y,
_mm256_mul_pd(a.x, b.x))));
}
asm:
dot(HybridVec4SOA const&, HybridVec4SOA const&):
vmovapd ymm1,YMMWORD PTR [rdi+0x20]
vmovapd ymm2,YMMWORD PTR [rdi+0x40]
vmovapd ymm3,YMMWORD PTR [rdi+0x60]
vmovapd ymm0,YMMWORD PTR [rsi]
vmulpd ymm0,ymm0,YMMWORD PTR [rdi]
vfmadd231pd ymm0,ymm1,YMMWORD PTR [rsi+0x20]
vfmadd231pd ymm0,ymm2,YMMWORD PTR [rsi+0x40]
vfmadd231pd ymm0,ymm3,YMMWORD PTR [rsi+0x60]
ret
If you compare the latencies (and more importantly the throughput) of load/mul/fmadd against hadd and extract, and then consider that the SOA version computes 4 dot products at a time (instead of 1), you'll start to understand why it's the way to go.
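As a hedged usage sketch (it reuses the HybridVec4SOA and dot() defined above; the array names ax/ay/az/aw, bx/by/bz/bw and out are illustrative and assumed 32-byte aligned), one call produces four dot products at once with no horizontal operations:
#include <immintrin.h>
#include <cstddef>

void dot4(const double* ax, const double* ay, const double* az, const double* aw,
          const double* bx, const double* by, const double* bz, const double* bw,
          double* out, std::size_t i)
{
    HybridVec4SOA a { _mm256_load_pd(ax + i), _mm256_load_pd(ay + i),
                      _mm256_load_pd(az + i), _mm256_load_pd(aw + i) };
    HybridVec4SOA b { _mm256_load_pd(bx + i), _mm256_load_pd(by + i),
                      _mm256_load_pd(bz + i), _mm256_load_pd(bw + i) };
    _mm256_store_pd(out + i, dot(a, b)); // four results, no hadd/extract needed
}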
You add too much overhead with the vzeroupper and hadd instructions. A good way to write it is to do all the multiplies in a loop and aggregate the result just once at the end. Imagine you unroll the original loop 4 times and use 4 accumulators:
for(i=0; i < (1<<30); i+=4) {
s0 += a[i+0] * b[i+0];
s1 += a[i+1] * b[i+1];
s2 += a[i+2] * b[i+2];
s3 += a[i+3] * b[i+3];
}
return s0+s1+s2+s3;
Now just replace the unrolled loop with SIMD mul and add (or even an FMA intrinsic, if available):
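A minimal AVX/FMA sketch of that pattern (hedged: dot_fma is an illustrative name; it assumes 32-byte aligned double arrays, n a multiple of 4, and an FMA-capable CPU; a tuned version would use several accumulators to hide FMA latency):
#include <immintrin.h>
#include <cstddef>

double dot_fma(const double* a, const double* b, std::size_t n)
{
    __m256d acc = _mm256_setzero_pd();
    for (std::size_t i = 0; i < n; i += 4)
        acc = _mm256_fmadd_pd(_mm256_load_pd(a + i), _mm256_load_pd(b + i), acc);
    // One horizontal reduction, only at the very end.
    __m128d lo = _mm256_castpd256_pd128(acc);
    __m128d hi = _mm256_extractf128_pd(acc, 1);
    __m128d s  = _mm_add_pd(lo, hi);
    s = _mm_add_sd(s, _mm_unpackhi_pd(s, s));
    return _mm_cvtsd_f64(s);
}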
Given a number A, I want to find the Ath Fibonacci number that is a multiple of 3, or whose decimal representation contains at least one digit 3.
Example:
Fibonacci: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, ...
Input: 1, Output: 3;
3 is the first Fibonacci number that is a multiple of 3 or has a 3 in it.
Input: 3, Output: 21;
21 is the third Fibonacci number that is a multiple of 3 or has a 3 in it.
Edit: Variable type changed to unsigned long long int and the Fibonacci generator adjusted. Thanks #rcgldr and #Jarod42 for the help!
My code:
#include<bits/stdc++.h>
using namespace std;
int tem(unsigned long long int i){
while(i != 0){
if(i%10 == 3){
return true;
}
i = i/10;
}
return false;
}
int main(){
int a, count = 0;
unsigned long long int f1 = 1, f2 = 1;
while(scanf("%d", &a) != EOF){
for(unsigned long long int i = 2; i > 0; i++){
i = f1 + f2;
f1 = f2;
f2 = i;
if((i%3 == 0) || tem(i)){
count++;
if(count == a){
cout << i << endl;
break;
}
}
}
}
}
When A > 20, it starts to slow down. That makes sense, because the numbers grow exponentially. My code is not very efficient, but I haven't found better logic to use.
I looked into these links, but didn't reach a conclusion:
1 - Recursive Fibonacci
2 - Fibonacci Optimization
Any ideas? Thanks for the help!
You can speed up the Fibonacci part using this sequence
uint64_t f0 = 0; // fib( 0)
uint64_t f1 = 1; // fib(-1)
int n = ... ; // generate fib(n)
for(int i = 0; i < n; i++){
std::swap(f0,f1);
f0 += f1;
}
Note that Fib(93) is the largest Fibonacci number that fits in a 64-bit unsigned integer, and it also has a 3 in it. Fib(92) is the largest one that is a multiple of 3.
I used this example code to find all of the values (a ranges from 0 to 62); it seems to run fairly fast, so I'm not sure what the issue is. Is optimization enabled?
#include <iostream>
#include <iomanip>
typedef unsigned long long uint64_t;
int tem(uint64_t i){
while(i != 0){
if(i%10 == 3)
return true;
i = i/10;
}
return false;
}
int main(){
int a = 0, n;
uint64_t f0 = 1, f1 = -1; // fib(-1), fib(-2); the -1 wraps, but unsigned arithmetic still gives the right sums
for(n = 0; n <= 93; n++){
std::swap(f0, f1); // f0 = next fib
f0 += f1;
if((n&3) == 0 || tem(f0)){ // fib(n) is a multiple of 3 exactly when n is a multiple of 4
std::cout << std::setw( 2) << a << " "
<< std::setw( 2) << n << " "
<< std::setw(20) << f0 << std::endl;
a++;
}
}
return 0;
}
Depending on the compiler, i%10 and i/10 may use a multiply by "magic number" and shift to replace divide by a constant. Code generated by Visual Studio 2015 for tem(), which is fairly "clever":
tem proc ;rcx = input
test rcx,rcx ; return false if rcx == 0
je SHORT tem1
mov r8, 0cccccccccccccccdh ;magic number for divide by 10
tem0: mov rax, r8 ;rax = magic number
mul rcx ;rdx = quotient << 3
shr rdx, 3 ;rdx = quotient
lea rax, QWORD PTR [rdx+rdx*4] ;rax = quotient*5
add rax, rax ;rax = quotient*10
sub rcx, rax ;rcx -= quotient * 10 == rcx % 10
cmp rcx, 3 ;br if rcx % 10 == 3
je SHORT tem2
mov rcx, rdx ;rcx = quotient (rcx /= 10)
test rdx, rdx ;loop if quotient != 0
jne SHORT tem0
tem1: xor eax, eax ;return false
ret 0
tem2: mov eax, 1 ;return true
ret 0
tem endp
Just pointing out some obvious coding errors
for(unsigned long long int i = 2; i > 0; i++)
is redundant.
for(;;){
unsigned long long i = f1+f2;
should suffice. Secondly
return 0;
is meaningless because it exits the outer while loop (and the whole program) when you only want to leave the inner for loop. A break would be better.
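Putting those two fixes together, the generator loop could look like this (a hedged sketch, reusing f1, f2, count, a and tem() from the question's code):
for (;;) {
    unsigned long long i = f1 + f2;
    f1 = f2;
    f2 = i;
    if (i % 3 == 0 || tem(i)) {
        if (++count == a) {
            cout << i << endl;
            break; // leave only the generator loop; keep reading the next A
        }
    }
}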
There's a clever way to do Fibonacci.
http://stsievert.com/blog/2015/01/31/the-mysterious-eigenvalue/
Code's in python and is just for the nth number, but I think you get the idea.
from math import sqrt
def fib(n):
lambda1 = (1 + sqrt(5))/2
lambda2 = (1 - sqrt(5))/2
return (lambda1**n - lambda2**n) / sqrt(5)
def fib_approx(n):
# for practical range, percent error < 10^-6
return 1.618034**n / sqrt(5)
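For reference, a C++ version of the same closed-form idea (a hedged sketch: double precision represents Fibonacci numbers exactly only up to about Fib(78), and pow adds rounding error, so it cannot replace the exact 64-bit loop over the full range up to Fib(93)):
#include <cmath>

double fib_closed_form(int n)
{
    const double sqrt5   = std::sqrt(5.0);
    const double lambda1 = (1.0 + sqrt5) / 2.0;
    const double lambda2 = (1.0 - sqrt5) / 2.0;
    return (std::pow(lambda1, n) - std::pow(lambda2, n)) / sqrt5;
}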
Actual refined question:
Why does this not print 0?
#include "stdafx.h"
#include <iostream>
#include <string>
int _tmain(int argc, _TCHAR* argv[])
{
unsigned char barray[] = {1,2,3,4,5,6,7,8,9};
unsigned long weirdValue = barray[3] << 32;
std::cout << weirdValue; // prints 4
std::string bla;
std::getline(std::cin, bla);
return 0;
}
The disassembly of the shift operation:
10: unsigned long weirdValue = barray[3] << 32;
00411424 movzx eax,byte ptr [ebp-1Dh]
00411428 shl eax,20h
0041142B mov dword ptr [ebp-2Ch],eax
Original question:
I found the following snippet in some old code we maintain. It converts a byte array to multiple float values and adds the floats to a list. Why does it work for byte arrays greater than 4?
unsigned long ulValue = 0;
for (USHORT usIndex = 0; usIndex < m_oData.usNumberOfBytes; usIndex++)
{
if (usIndex > 0 && (usIndex % 4) == 0)
{
float* pfValue = (float*)&ulValue;
oValues.push_back(*pfValue);
ulValue = 0;
}
ulValue += (m_oData.pabyDataBytes[usIndex] << (8*usIndex)); // Why does this work for usIndex > 3??
}
I would understand that this works if << were a rotate operator, not a shift operator. Or if it were
ulValue += (m_oData.pabyDataBytes[usIndex] << (8*(usIndex%4)))
But the code like i found it just confuses me.
The code is compiled using VS 2005.
If I try the original snippet in the immediate window, it doesn't work, though.
I know how to do this properly; I just want to know why the code, and especially the shift operation, works as it is.
Edit: The disassembly for the shift operation is:
13D61D0A shl ecx,3 // multiply uIndex by 8
13D61D0D shl eax,cl // shift to left, does nothing for multiples of 32
13D61D0F add eax,dword ptr [ulValue]
13D61D15 mov dword ptr [ulValue],eax
So the disassembly is fine.
The shift count is masked to 5 bits, which limits the range to 0-31.
A shift of 32 is therefore the same as a shift of zero.
http://x86.renejeschke.de/html/file_module_x86_id_285.html
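A small illustration of that equivalence (hedged sketch; writing the & 31 explicitly both mirrors what the 32-bit SHL instruction does and keeps the expression well-defined in C++, where shifting a 32-bit value by 32 is undefined behaviour):
#include <cstdint>
#include <iostream>

int main()
{
    uint32_t value = 4;
    unsigned count = 32;
    // The hardware masks the count to 5 bits, so a shift by 32 behaves like a shift by 0.
    std::cout << (value << (count & 31)) << '\n'; // prints 4, matching the snippet above
}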
Well my problem is as follows:
I'm trying to translate an x86 assembly source code to c++ source code.
Explanation as to what registers are (skip this if you already know what they are and how they work).
As you may or may not know, assembly language makes use of "general purpose registers".
In x86 assembly these registers can be thought of as 4-byte variables (like an int in C++); their names are eax, ebx, ecx and edx.
Now, each of these registers is broken down into ax, bx, cx and dx, which are the least-significant 2 bytes of the corresponding register.
ax, bx, cx and dx are in turn broken down into ah, bh, ch and dh (the most-significant byte of the 16-bit register) and al, bl, cl and dl (the least-significant byte).
So, for example:
If I set eax:
EAX = 0xAB12CDEF
that would automatically change ax, al and ah
AX would become 0xCDEF
AH would become 0xCD
AL would become 0xEF
My question is: how do I make that possible in C++?
int eax, ax, ah, al;
eax = 0xAB12CDEF;
How can I make ax, ah and al change at the same time?
Or is it possible to make them pointers to different portions of eax? If so, how?
Thanks!
P.S. Also, how could I make another variable be a char?
How could I make a new variable char chAL point to al, which in turn points to eax, so that when I make a change to chAL, the change automatically reverberates to eax, ah and al?
If your goal is to emulate X86 assembly code, then indeed you need to support the behaviour of X86 registers.
Here's a simple implementation using a union:
#include <iostream>
#include <cstdint>
using namespace std;
union reg_t {
uint64_t rx;
uint32_t ex;
uint16_t x;
struct {
uint8_t l;
uint8_t h;
};
};
int main(){
reg_t a;
a.rx = 0xdeadbeefcafebabe;
cout << "rax = " << hex << a.rx << endl;
cout << "eax = " << hex << a.ex << endl;
cout << "ax = " << hex << a.x << endl;
cout << "al = " << hex << (uint16_t)a.l << endl;
cout << "ah = " << hex << (uint16_t)a.h << endl;
cout << "ax & 0xFF = " << hex << (a.x & 0xFF) << endl;
cout << "(ah << 8) + al = " << hex << (a.h << 8) + a.l << endl;
}
output:
rax = deadbeefcafebabe
eax = cafebabe
ax = babe
al = be
ah = ba
ax & 0xFF = be
(ah << 8) + al = babe
You'll get the correct result on the right platform (little-endian). You'll have to swap
bytes, and/or add padding for other platforms.
That's the basic, down-to-earth solution, which will certainly work on many x86 platforms (at least x86/Linux/g++ works fine), but reading a union member other than the one last written (type punning) is technically undefined behaviour in C++.
Here's another approach using a byte array to store register content:
class x86register {
uint8_t bytes[8];
public:
x86register &operator =(const uint64_t &v){
for (int i = 0; i < 8; i++)
bytes[i] = (v >> (i * 8)) & 0xff;
return *this;
}
x86register &operator =(const uint32_t &v){
for (int i = 0; i < 4; i++)
bytes[i] = (v >> (i * 8)) & 0xff;
return *this;
}
x86register &operator =(const uint16_t &v){
for (int i = 0; i < 2; i++)
bytes[i] = (v >> (i * 8)) & 0xff;
return *this;
}
x86register &operator =(const uint8_t &v){
bytes[0] = v;
return *this;
}
operator uint64_t(){
uint64_t res = 0;
for (int i = 7; i >= 0; i--)
res = (res << 8) + bytes[i];
return res;
}
operator uint32_t(){
uint32_t res = 0;
for (int i = 3; i >= 0; i--)
res = (res << 8) + bytes[i];
return res;
}
operator uint16_t(){
uint16_t res = 0;
for (int i = 1; i >= 0; i--)
res = (res << 8) + bytes[i];
return res;
}
operator uint8_t(){
return bytes[0];
}
};
This simple class should work regardless of the endianness of the running platform. Also, you probably want to add a few other accessors/mutators to handle the high byte (AH, BH, etc.) of the word registers, as sketched below.
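For example, such a pair for the high byte might look like this (a hedged sketch: get_h/set_h are made-up names, to be added inside x86register, whose byte array stores the least-significant byte first):
uint8_t get_h() const    { return bytes[1]; } // AH when the object models RAX/EAX/AX
void    set_h(uint8_t v) { bytes[1] = v; }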
You can extract parts of eax using bitwise operations, like this:
#include <cstdio>
int main()
{
int eax, ax, ah, al;
eax = 0xAB12CDEF;
ax = eax & 0x0000FFFF;
ah = (eax & 0x0000FF00) >> 8;
al = eax & 0x000000FF;
printf("ax = eax & 0x0000FFFF = 0x%X\n", ax);
printf("ah = (eax & 0x0000FF00) >> 8 = 0x%X\n", ah);
printf("al = eax & 0x000000FF = 0x%X\n", al);
}
Output
ax = eax & 0x0000FFFF = 0xCDEF
ah = (eax & 0x0000FF00) >> 8 = 0xCD
al = eax & 0x000000FF = 0xEF
You could also define macro like that:
#define AX(dw) ((dw) & 0x0000FFFF)
#define AH(dw) (((dw) & 0x0000FF00) >> 8)
#define AL(dw) ((dw) & 0x000000FF)
#include <iostream>
int main()
{
int eax = 0xAB12CDEF;
std::cout << "ax = " << std::hex << AX(eax) << std::endl; // prints ax = cdef
}
If you want it to work as simply as you've put the example ints, you can get away with it through reinterpret casts, though this violates pointer aliasing rules, so the behavior is undefined.
std::uint32_t eax = 0xAB12CDEF;
std::uint16_t& ax = reinterpret_cast<std::uint16_t*>(&eax)[1];
std::uint8_t& ah = reinterpret_cast<std::uint8_t&>(ax);
std::uint8_t& al = (&ah)[1];
The second line casts the address of eax to a std::uint16_t*, by applying [1] to that, you get the second half of the 32 bits.
The third line is just a cast to uint8_t, which works because ah will be the same as the front of ax.
Indexing into the address of ah by 1 gives the following byte, which is al.
What you're trying to do seems pretty unsafe and strange, though. To get the most similar behavior in the sanest way, you could just use a custom type. The results of the version below will be consistent from machine to machine, whereas the version above won't be, because of differing endianness.
class Reg {
private:
std::uint32_t data_;
public:
Reg(std::uint32_t in) : data_{in} { }
std::uint32_t ex() const {
return data_;
}
std::uint16_t x() const {
return static_cast<std::uint16_t>(data_ & 0xFFFF);
}
std::uint8_t h() const {
return static_cast<std::uint8_t>((data_ & 0xFF00) >> 8);
}
std::uint8_t l() const {
return static_cast<std::uint8_t>(data_ & 0xFF);
}
};
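A possible usage of this Reg type (hedged sketch; the printed values assume the 0xAB12CDEF example from the question):
#include <iostream>

int main()
{
    Reg r(0xAB12CDEF);
    std::cout << std::hex
              << r.ex() << ' '   // ab12cdef
              << r.x()  << ' '   // cdef
              << +r.h() << ' '   // cd  (unary + prints the uint8_t as a number)
              << +r.l() << '\n'; // ef
}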
Profiling suggests that this function here is a real bottleneck for my application:
static inline int countEqualChars(const char* string1, const char* string2, int size) {
int r = 0;
for (int j = 0; j < size; ++j) {
if (string1[j] == string2[j]) {
++r;
}
}
return r;
}
Even with -O3 and -march=native, G++ 4.7.2 does not vectorize this function (I checked the assembler output). Now, I'm not an expert with SSE and friends, but I think that comparing more than one character at once should be faster. Any ideas on how to speed things up? Target architecture is x86-64.
Of course it can.
pcmpeqb compares two vectors of 16 bytes and produces a vector with zeros where they differed, and -1 where they match. Use this to compare 16 bytes at a time, adding the result to an accumulator vector (make sure to accumulate the results of at most 255 vector compares to avoid overflow). When you're done, there are 16 results in the accumulator. Sum them and negate to get the number of equal elements.
If the lengths are very short, it will be hard to get a significant speedup from this approach. If the lengths are long, then it will be worth pursuing.
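A compact sketch of that idea (hedged: countEqualChars_sse2 is an illustrative name; it assumes size is a multiple of 16 and at most 255 * 16, so the per-byte counters cannot overflow; the answer below handles the general case):
#include <emmintrin.h>

static int countEqualChars_sse2(const char* s1, const char* s2, int size)
{
    __m128i acc = _mm_setzero_si128();
    for (int j = 0; j < size; j += 16) {
        __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(s1 + j));
        __m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(s2 + j));
        // pcmpeqb gives -1 (0xFF) where the bytes match; subtracting it adds 1 per match.
        acc = _mm_sub_epi8(acc, _mm_cmpeq_epi8(a, b));
    }
    // Horizontally sum the 16 byte counters: psadbw against zero gives two 64-bit partial sums.
    __m128i sums = _mm_sad_epu8(acc, _mm_setzero_si128());
    return _mm_cvtsi128_si32(_mm_add_epi64(sums, _mm_srli_si128(sums, 8)));
}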
Compiler flags for vectorization:
-ftree-vectorize
-ftree-vectorize -march=<your_architecture> (use all instruction-set extensions available on your computer, not just baseline like SSE2 for x86-64; use -march=native to optimize for the machine the compiler is running on). -march=<foo> also sets -mtune=<foo>, which is also a good thing.
Using SSEx intrinsics:
Pad and align the buffer to 16 bytes (according to the vector size you're actually going to use)
Create an accumulator countU8 with _mm_set1_epi8(0)
For all n/16 input (sub) vectors, do:
Load 16 chars from both strings with _mm_load_si128 or _mm_loadu_si128 (for unaligned loads)
_mm_cmpeq_epi8
compare the octets in parallel. Each match yields 0xFF (-1), 0x00 otherwise.
Subtract the above result vector from countU8 using _mm_sub_epi8 (minus -1 -> +1)
After at most 255 iterations, the 16 8-bit counters must be extracted into a larger integer type to prevent overflow. See unpack and horizontal add in this nice answer for how to do that: https://stackoverflow.com/a/10930706/1175253
Code:
#include <iostream>
#include <vector>
#include <cassert>
#include <cstdint>
#include <climits>
#include <cstring>
#include <emmintrin.h>
#ifdef __SSE2__
#if !defined(UINTPTR_MAX) || !defined(UINT64_MAX) || !defined(UINT32_MAX)
# error "Limit macros are not defined"
#endif
#if UINTPTR_MAX == UINT64_MAX
#define PTR_64
#elif UINTPTR_MAX == UINT32_MAX
#define PTR_32
#else
# error "Current UINTPTR_MAX is not supported"
#endif
template<typename T>
void print_vector(std::ostream& out,const __m128i& vec)
{
static_assert(sizeof(vec) % sizeof(T) == 0,"Invalid element size");
std::cout << '{';
const T* const end = reinterpret_cast<const T*>(&vec)-1;
const T* const upper = end+(sizeof(vec)/sizeof(T));
for(const T* elem = upper;
elem != end;
--elem
)
{
if(elem != upper)
std::cout << ',';
std::cout << +(*elem);
}
std::cout << '}' << std::endl;
}
#define PRINT_VECTOR(_TYPE,_VEC) do{ std::cout << #_VEC << " : "; print_vector<_TYPE>(std::cout,_VEC); } while(0)
///#note SSE2 required (macro: __SSE2__)
///#warning Not tested!
size_t counteq_epi8(const __m128i* a_in,const __m128i* b_in,size_t count)
{
assert(a_in != nullptr && (uintptr_t(a_in) % 16) == 0);
assert(b_in != nullptr && (uintptr_t(b_in) % 16) == 0);
//assert(count > 0);
/*
//maybe not so good with all that branching and additional loop variables
__m128i accumulatorU8 = _mm_set1_epi8(0);
__m128i sum2xU64 = _mm_set1_epi8(0);
for(size_t i = 0;i < count;++i)
{
//this operation could also be unrolled, where multiple result registers would be accumulated
accumulatorU8 = _mm_sub_epi8(accumulatorU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
if(i % 255 == 0)
{
//before overflow of uint8, the counter will be extracted
__m128i sum2xU16 = _mm_sad_epu8(accumulatorU8,_mm_set1_epi8(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
//reset accumulatorU8
accumulatorU8 = _mm_set1_epi8(0);
}
}
//blindly accumulate remaining values
__m128i sum2xU16 = _mm_sad_epu8(accumulatorU8,_mm_set1_epi8(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
//do a horizontal addition of the two counter values
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#if defined PTR_64
return _mm_cvtsi128_si64(sum2xU64);
#elif defined PTR_32
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
*/
__m128i sum2xU64 = _mm_set1_epi32(0);
while(count--)
{
__m128i matches = _mm_sub_epi8(_mm_set1_epi32(0),_mm_cmpeq_epi8(*a_in++,*b_in++));
__m128i sum2xU16 = _mm_sad_epu8(matches,_mm_set1_epi32(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
#ifndef NDEBUG
PRINT_VECTOR(uint16_t,sum2xU64);
#endif
}
//do a horizontal addition of the two counter values
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#ifndef NDEBUG
std::cout << "----------------------------------------" << std::endl;
PRINT_VECTOR(uint16_t,sum2xU64);
#endif
#if !defined(UINTPTR_MAX) || !defined(UINT64_MAX) || !defined(UINT32_MAX)
# error "Limit macros are not defined"
#endif
#if defined PTR_64
return _mm_cvtsi128_si64(sum2xU64);
#elif defined PTR_32
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
}
#endif
int main(int argc, char* argv[])
{
std::vector<__m128i> a(64); // * 16 bytes
std::vector<__m128i> b(a.size());
const size_t nBytes = a.size() * sizeof(std::vector<__m128i>::value_type);
char* const a_out = reinterpret_cast<char*>(a.data());
char* const b_out = reinterpret_cast<char*>(b.data());
memset(a_out,0,nBytes);
memset(b_out,0,nBytes);
a_out[1023] = 1;
b_out[1023] = 1;
size_t equalBytes = counteq_epi8(a.data(),b.data(),a.size());
std::cout << "equalBytes = " << equalBytes << std::endl;
return 0;
}
The fastest SSE implementation I got for large and small arrays:
size_t counteq_epi8(const __m128i* a_in,const __m128i* b_in,size_t count)
{
assert((count > 0 ? a_in != nullptr : true) && (uintptr_t(a_in) % sizeof(__m128i)) == 0);
assert((count > 0 ? b_in != nullptr : true) && (uintptr_t(b_in) % sizeof(__m128i)) == 0);
//assert(count > 0);
const size_t maxInnerLoops = 255;
const size_t nNestedLoops = count / maxInnerLoops;
const size_t nRemainderLoops = count % maxInnerLoops;
const __m128i zero = _mm_setzero_si128();
__m128i sum16xU8 = zero;
__m128i sum2xU64 = zero;
for(size_t i = 0;i < nNestedLoops;++i)
{
for(size_t j = 0;j < maxInnerLoops;++j)
{
sum16xU8 = _mm_sub_epi8(sum16xU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
}
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_sad_epu8(sum16xU8,zero));
sum16xU8 = zero;
}
for(size_t j = 0;j < nRemainderLoops;++j)
{
sum16xU8 = _mm_sub_epi8(sum16xU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
}
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_sad_epu8(sum16xU8,zero));
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#if UINTPTR_MAX == UINT64_MAX
return _mm_cvtsi128_si64(sum2xU64);
#elif UINTPTR_MAX == UINT32_MAX
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
}
Auto-vectorization in current gcc is a matter of helping the compiler understand that the code is easy to vectorize. In your case, it will understand the vectorization request if you remove the conditional and rewrite the code in a more imperative way:
static inline int count(const char* string1, const char* string2, int size) {
int r = 0;
bool b;
for (int j = 0; j < size; ++j) {
b = (string1[j] == string2[j]);
r += b;
}
return r;
}
In this case:
movdqa 16(%rsp), %xmm1
movl $.LC2, %esi
pxor %xmm2, %xmm2
movzbl 416(%rsp), %edx
movdqa .LC1(%rip), %xmm3
pcmpeqb 224(%rsp), %xmm1
cmpb %dl, 208(%rsp)
movzbl 417(%rsp), %eax
movl $1, %edi
pand %xmm3, %xmm1
movdqa %xmm1, %xmm5
sete %dl
movdqa %xmm1, %xmm4
movzbl %dl, %edx
punpcklbw %xmm2, %xmm5
punpckhbw %xmm2, %xmm4
pxor %xmm1, %xmm1
movdqa %xmm5, %xmm6
movdqa %xmm5, %xmm0
movdqa %xmm4, %xmm5
punpcklwd %xmm1, %xmm6
(etc.)