Efficient float to int without overflow - c++

using int_type = int;
int_type min = std::numeric_limits<int_type>::min();
int_type max = std::numeric_limits<int_type>::max();
int_type convert(float f) {
if(f < static_cast<float>(min)) return min; // overflow
else if(f > static_cast<float>(max)) return max; // overflow
else return static_cast<int_type>(f);
}
Is there a more efficient way to convert float f to int_type, while clamping it to the minimal and maximal values of the integer type?
For example, without casting min and max to float for the comparisons.

Almost always, trusting the compiler is the best thing to do.
This code:
template<class Integral>
__attribute__((noinline))
int convert(float f)
{
using int_type = Integral;
constexpr int_type min = std::numeric_limits<int_type>::min();
constexpr int_type max = std::numeric_limits<int_type>::max();
constexpr float fmin = static_cast<float>(min);
constexpr float fmax = static_cast<float>(max);
if(f < fmin) return min; // overflow
if(f > fmax) return max; // overflow
return static_cast<int_type>(f);
}
compiled with -O2 and -fomit-frame-pointer, yields:
__Z7convertIiEif: ## #_Z7convertIiEif
.cfi_startproc
movl $-2147483648, %eax ## imm = 0xFFFFFFFF80000000
movss LCPI1_0(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
ja LBB1_3
movl $2147483647, %eax ## imm = 0x7FFFFFFF
ucomiss LCPI1_1(%rip), %xmm0
ja LBB1_3
cvttss2si %xmm0, %eax
LBB1_3:
retq
I'm not sure it could be any more efficient.
Note LCPI1_x, defined here:
.section __TEXT,__literal4,4byte_literals
.align 2
LCPI1_0:
.long 3472883712 ## float -2.14748365E+9
LCPI1_1:
.long 1325400064 ## float 2.14748365E+9
How about clamping using fmin(), fmax()... [thanks to njuffa for the question]
The code does become more efficient, because the conditional jumps are removed. However, it starts to behave incorrectly at the clamping limits.
Consider:
template<class Integral>
__attribute__((noinline))
int convert2(float f)
{
using int_type = Integral;
constexpr int_type min = std::numeric_limits<int_type>::min();
constexpr int_type max = std::numeric_limits<int_type>::max();
constexpr float fmin = static_cast<float>(min);
constexpr float fmax = static_cast<float>(max);
f = std::min(f, fmax);
f = std::max(f, fmin);
return static_cast<int_type>(f);
}
call with
auto i = convert2<int>(float(std::numeric_limits<int>::max()));
results in:
-2147483648
Clearly we need to reduce the limits by epsilon because of a float's inability to accurately represent the full range of an int, so...
template<class Integral>
__attribute__((noinline))
int convert2(float f)
{
using int_type = Integral;
constexpr int_type min = std::numeric_limits<int_type>::min();
constexpr int_type max = std::numeric_limits<int_type>::max();
constexpr float fmin = static_cast<float>(min) - (std::numeric_limits<float>::epsilon() * static_cast<float>(min));
constexpr float fmax = static_cast<float>(max) - (std::numeric_limits<float>::epsilon() * static_cast<float>(max));
f = std::min(f, fmax);
f = std::max(f, fmin);
return static_cast<int_type>(f);
}
Should be better...
except that now the same function call yields:
2147483392
Incidentally, working on this actually led me to a bug in the original code. Because of the same rounding error issue, the > and < operators need to be replaced with >= and <=.
like so:
template<class Integral>
__attribute__((noinline))
int convert(float f)
{
using int_type = Integral;
constexpr int_type min = std::numeric_limits<int_type>::min();
constexpr int_type max = std::numeric_limits<int_type>::max();
constexpr float fmin = static_cast<float>(min);
constexpr float fmax = static_cast<float>(max);
if(f <= fmin) return min; // overflow
if(f >= fmax) return max; // overflow
return static_cast<int_type>(f);
}
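As a quick sanity check of the corrected version (a sketch; it assumes the convert template above is in scope):
#include <cassert>
#include <limits>
int main() {
    // float(INT_MAX) rounds up to 2^31, which the >= test now catches
    assert(convert<int>(float(std::numeric_limits<int>::max())) == std::numeric_limits<int>::max());
    // float(INT_MIN) is exactly -2^31, caught by the <= test
    assert(convert<int>(float(std::numeric_limits<int>::min())) == std::numeric_limits<int>::min());
    assert(convert<int>(42.7f) == 42); // in-range values simply truncate
}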

For 32-bit integers, you can let the CPU do some of the clamping work for you.
The cvtss2si instruction will actually return 0x80000000 in the case of an out-of-range floating-point number. This lets you eliminate one test most of the time:
#include <xmmintrin.h> // SSE: _mm_load_ss, _mm_cvtss_si32
int convert(float value)
{
    int result = _mm_cvtss_si32(_mm_load_ss(&value));
    // 0x80000000 (INT_MIN) is the "integer indefinite" value produced on overflow
    if (result == 0x80000000 && value > 0.0f)
        result = 0x7fffffff;
    return result;
}
If you have lots of them to convert, then _mm_cvtps_epi32 lets you process four at once (with the same behaviour on overflow). That should be much faster than processing them one at a time, but you'd need to structure the code differently to make use of it.
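For illustration, here is a minimal sketch of the batched variant (my own, not from the original answer); count is assumed to be a multiple of 4, and the overflow fix-up is kept scalar for clarity:
#include <emmintrin.h>
#include <climits>
void convert4(const float* in, int* out, int count)
{
    for (int i = 0; i < count; i += 4) {
        // four conversions at once; out-of-range lanes come back as 0x80000000
        _mm_storeu_si128((__m128i*)&out[i], _mm_cvtps_epi32(_mm_loadu_ps(&in[i])));
        for (int j = 0; j < 4; ++j)
            if (out[i + j] == INT_MIN && in[i + j] > 0.0f) // patch positive overflow
                out[i + j] = INT_MAX;
    }
}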

If you want to truncate, you can take advantage of AVX2 and AVX-512 instructions:
#include <float.h>
#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>
void p256_hex_u32(__m256i in) {
    alignas(32) uint32_t v[8];
    _mm256_store_si256((__m256i*)v, in);
    printf("v8_u32: %u %u %u %u %u %u %u %u\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}
int main() {
    __m256 a = {5.423423f, -4.243423f, 423.4234234f, FLT_MAX, 79.4234876f, 19.7f, 8.5454f, 7675675.6f};
    __m256i b = _mm256_cvttps_epi32(a);
    p256_hex_u32(b); // the FLT_MAX lane comes out as 2147483648 (0x80000000: out of range)
}
Compile with:
g++ -std=c++17 -mavx2 a.cpp && ./a.out
and for AVX-512 (my CPU does not support it, so I will not provide a working test; feel free to edit):
_mm512_maskz_cvtt_roundpd_epi64(k, value, _MM_FROUND_NO_EXC);

Related

Absolute hysteresis calculation in C++

I want to implement a template function which detects whether the difference of ValueA and ValueB is bigger than a given hysteresis.
e.g.
ValueA=5, ValueB=7, Hysteresis=1 -> true
ValueA=5, ValueB=7, Hysteresis=3 -> false
ValueA=-5, ValueB=1, Hysteresis=7 -> false
So I implemented this function:
template<typename T>
bool MyClass::IsHysteresisExceeded(T ValueA, T ValueB, T Hysteresis) {
T ValueMax = std::max(ValueA, ValueB);
T ValueMin = std::min(ValueA, ValueB);
return (ValueMax - ValueMin) > Hysteresis;
}
But with the following parameters this function returns false, when I expected true as the result.
IsHysteresisExceeded<int>(-2147483648, 2147483647, 10)
I know that an integer overflow occurs while subtracting, but I have not found an elegant solution yet.
I have the following solution for integers:
template<typename T>
bool IsHysteresisExceeded(T ValueA, T ValueB, T Hysteresis) {
T ValueMax = std::max(ValueA, ValueB);
T ValueMin = std::min(ValueA, ValueB);
assert(Hysteresis >= 0);
T underflowRange = std::numeric_limits<T>::min() + Hysteresis;
bool underflow = underflowRange > ValueMax;
return !underflow && (ValueMax - Hysteresis > ValueMin);
}
The trick is to detect the underflow. If it happens, you may be sure that ValueMin is in the range [std::numeric_limits<T>::min(), ValueMax] and that
(ValueMax - Hysteresis) < std::numeric_limits<T>::min() <= ValueMin
I posted the code on godbolt.org
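For example, a quick check of the cases above (a sketch, assuming the IsHysteresisExceeded definition just shown):
#include <cassert>
#include <limits>
int main() {
    assert( IsHysteresisExceeded<int>(std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), 10));
    assert( IsHysteresisExceeded<int>(5, 7, 1));
    assert(!IsHysteresisExceeded<int>(5, 7, 3));
}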
Edit:
My previous answer used a very popular approach and was also wrong. I proposed to detect the underflow like:
T lowBound = ValueMax - Hysteresis;
bool underflow = lowBound > ValueMax;
Although it produces the expected results on the architectures I know, it is undefined behavior (signed overflow).
One way to detect possible overflow is to use some indicator of "how far" a value is from the limits. I use a simple division, which normalizes values into the range [-1,1].
Then I subtract both "positions" to get the range between them, and compare it with the valid range, that is, 1:
#include <limits>
#include <math.h>
#include <iostream>
template<typename T>
bool IsHysteresisExceeded(T ValueA, T ValueB, T Hysteresis) {
long double posA = (long double) ValueA / std::numeric_limits<T>::max();
long double posB = (long double) ValueB / std::numeric_limits<T>::max();
if (std::fabs(posA - posB) > 1)
return true; //ValueMax - ValueMin would overflow
T ValueMax = std::max(ValueA, ValueB);
T ValueMin = std::min(ValueA, ValueB);
return (ValueMax - ValueMin) > Hysteresis;
}
int main()
{
std::cout << (IsHysteresisExceeded<int>(-2147483648, 2147483647, 10) ? "Exceeded" : "In range") << std::endl;
}
I was hoping that this version would compile down efficiently, but alas, the C++ compiler on my machine is unable to merge the two branches. Posting anyway because it uses only +, -, < and a default constructor for 0.
#include <algorithm>
#include <tuple>
template <typename T> bool IsHysteresisExceeded(T a, T b, T h) {
std::tie(a, b) = std::minmax(a, b);
return a < T{} ? h + a < b : h < b - a;
}
bool test(int a, int b, int h) { return IsHysteresisExceeded(a, b, h); }

Compile-time table generation for "ICSIlog"

The following C code is used to generate a lookup table at runtime to help implement the "ICSI" log algorithm (referenced from https://github.com/mgbellemare/SkipCTS/blob/master/src/icsilog.cpp):
/*
This method fills a given array of floats with the information necessary to compute the icsi_log. This method has to be called before any call to icsi_log.
Parameters:
n is the number of bits used from the mantissa (0<=n<=23). Higher n means higher accuracy but slower execution. We found that a good value for n is 14.
lookup_table requires a float* pointing to a continuous (preallocated) memory array of 2^n*sizeof(float) bytes.
Return values: void
*/
void fill_icsi_log_table(const int n, float *lookup_table)
{
float numlog;
int incr,i,p;
int *const exp_ptr = ((int*)&numlog);
int x = *exp_ptr; /*x is the float treated as an integer*/
x = 0x3F800000; /*set the exponent to 0 so numlog=1.0*/
*exp_ptr = x;
incr = 1 << (23-n); /*amount to increase the mantissa*/
p = 1 << n;
for(i=0;i<p;i++)
{
lookup_table[i] = (float) log2(numlog); /*save the log of the value*/
x += incr;
*exp_ptr = x; /*update the float value*/
}
}
/* ICSIlog V 2.0 */
void fill_icsi_log_table2(const unsigned precision, float* const pTable)
{
/* step along table elements and x-axis positions
(start with extra half increment, so the steps intersect at their midpoints.) */
float oneToTwo = 1.0f + (1.0f / (float)( 1 <<(precision + 1) ));
int i;
for(i = 0; i < (1 << precision); ++i )
{
// make y-axis value for table element
pTable[i] = logf(oneToTwo) / 0.69314718055995f;
oneToTwo += 1.0f / (float)( 1 << precision );
}
}
Is there a way that either of these functions could be adapted to generate a lookup table at compile-time using templates and C++11-amenable single-line return constexpr functions similar to the following structure?
/** Range generation,
* from http://stackoverflow.com/questions/13313980/populate-an-array-using-constexpr-at-compile-time **/
template<unsigned... Is> struct seq{};
template<unsigned N, unsigned... Is>
struct gen_seq : gen_seq<N-1, N-1, Is...>{};
template<unsigned... Is>
struct gen_seq<0, Is...> : seq<Is...>{};
/** A table consisting of indexes and values,
* which will all be computed at compile-time **/
template<unsigned N>
struct Table
{
unsigned indexes[N];
double values[N];
static constexpr unsigned length = N;
};
template< typename LambdaType, unsigned... Is>
constexpr Table< sizeof...(Is) > TableGenerator(seq<Is...>, LambdaType evalFunc)
{
return {{ Is... }, { evalFunc(Is)... }};
}
template<unsigned N, typename LambdaType>
constexpr Table<N> TableGenerator( LambdaType evalFunc )
{
return TableGenerator(gen_seq<N>(), evalFunc);
}
/** Function that computes a value for each index **/
constexpr double myFunc( unsigned idx )
{
return sin(0.2 * idx) + cos(0.5*idx);
}
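For what it's worth, this machinery does produce the table at compile time whenever the evaluation function is genuinely constexpr-evaluable; e.g., this sketch (my own names) works, and the sin/cos calls above are exactly the sticking point:
constexpr double square(unsigned idx) { return double(idx) * idx; }
// forced into a constant expression; fails to compile if square() isn't constexpr-evaluable
constexpr Table<8> squares = TableGenerator<8>(&square);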
Working from this example as a starting point and the "v2.0" variant of the table generation code:
/* ICSIlog V 2.0 */
void fill_icsi_log_table2(const unsigned precision, float* const pTable)
{
/* step along table elements and x-axis positions
(start with extra half increment, so the steps intersect at their midpoints.) */
float oneToTwo = 1.0f + (1.0f / (float)( 1 <<(precision + 1) ));
int i;
for(i = 0; i < (1 << precision); ++i )
{
// make y-axis value for table element
pTable[i] = logf(oneToTwo) / 0.69314718055995f;
oneToTwo += 1.0f / (float)( 1 << precision );
}
}
This recursive template structure:
#include <math.h>
#define PRECISION (4)
constexpr float table_log(float oneToTwo)
{
return logf(oneToTwo) / 0.69314718055995f;
}
template<size_t c, size_t precision, float* const* pTable>
struct ForLoop {
template<template <size_t, size_t, float* const*> class Func>
static void iterate(float oneToTwo) {
ForLoop<c - 1, precision, pTable>::template
iterate<Func>(Func<c - 1, precision, pTable>()(oneToTwo));
}
};
template<size_t precision, float* const* pTable>
struct ForLoop<0, precision, pTable> {
template<template <size_t, size_t, float* const*> class Func>
static void iterate(float oneToTwo) {
Func<0, precision, pTable>()(oneToTwo);
}
};
template <size_t index, size_t precision, float* const *pTable>
struct LogTabe {
float operator()(float oneToTwo) {
float a = table_log(oneToTwo);
(*pTable)[(1 << precision) - index] = a;
return oneToTwo + 1.0f / (float)(1 << precision);
}
};
static float *const table = new float[(1 << PRECISION) + 1]; // the loop below writes (1 << PRECISION) + 1 entries
extern float *const table;
int main() {
ForLoop<(1 << PRECISION) + 1, PRECISION, &table>::iterate<LogTabe>(1.0f + (1.0f / (float)( 1 << (PRECISION + 1))));
}
Compiled with gcc x86-64 8.1, -std=c++11 -O1, this generates an output table consistent with the original code, and the asm output:
mov rax, QWORD PTR table[rip]
mov DWORD PTR [rax], 0x3d35d69b
mov DWORD PTR [rax+4], 0x3e0462c4
mov DWORD PTR [rax+8], 0x3e567af2
mov DWORD PTR [rax+12], 0x3e92203d
mov DWORD PTR [rax+16], 0x3eb7110e
mov DWORD PTR [rax+20], 0x3eda3f60
mov DWORD PTR [rax+24], 0x3efbd42b
mov DWORD PTR [rax+28], 0x3f0df989
mov DWORD PTR [rax+32], 0x3f1d5da0
mov DWORD PTR [rax+36], 0x3f2c2411
mov DWORD PTR [rax+40], 0x3f3a58fe
mov DWORD PTR [rax+44], 0x3f480731
mov DWORD PTR [rax+48], 0x3f553848
mov DWORD PTR [rax+52], 0x3f61f4e6
mov DWORD PTR [rax+56], 0x3f6e44cd
mov DWORD PTR [rax+60], 0x3f7a2f04
mov DWORD PTR [rax+64], 0x3f88759c
mov eax, 0
ret
_GLOBAL__sub_I_main:
sub rsp, 8
mov edi, 64
call operator new[](unsigned long)
mov QWORD PTR table[rip], rax
add rsp, 8
ret
Showing that the table values have been successfully pre-computed at compile time. However, recent versions of Clang refuse to compile the code, on the objection given by max66 in the comments that the "cmath" and "math.h" library functions are not strictly constexpr (but since it's being evaluated at compile time anyway, a Taylor-series expansion to arbitrary precision, itself implemented as a constexpr function, would likely work fine as a substitute).
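For instance, a minimal sketch of such a substitute (my own, illustrative only): ln(x) = 2*atanh((x-1)/(x+1)) expanded as a series, written single-return to stay C++11-friendly:
// z = (x-1)/(x+1); ln(x) = 2*(z + z^3/3 + z^5/5 + ...), converges fine for x in [1,2)
constexpr float log_series(float z, float zpow, int k, int last) {
    return k > last ? 0.0f : zpow / (float)k + log_series(z, zpow * z * z, k + 2, last);
}
constexpr float constexpr_ln(float x) {
    return 2.0f * log_series((x - 1.0f) / (x + 1.0f), (x - 1.0f) / (x + 1.0f), 1, 39);
}
// drop-in for table_log's logf-based body: log2(x) = ln(x) / ln(2)
constexpr float constexpr_log2(float x) {
    return constexpr_ln(x) / 0.69314718055995f;
}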

SIMD Program slow runtime

I'm starting with SIMD programming, but I don't know what to do at this moment. I'm trying to reduce the runtime, but it's going the other way.
This is my basic code:
https://codepaste.net/a8ut89
void blurr2(double * u, double * r) {
int i;
double dos[2] = { 2.0, 2.0 };
for (i = 0; i < SIZE - 1; i++) {
r[i] = u[i] + u[i + 1];
}
}
blurr2: 0.43s
int contarNegativos(double * u) {
int i;
int contador = 0;
for (i = 0; i < SIZE; i++) {
if (u[i] < 0) {
contador++;
}
}
return contador;
}
negativeCount: 1.38s
void ord(double * v, double * u, double * r) {
int i;
for (i = 0; i < SIZE; i += 2) {
r[i] = *(__int64*)&(v[i]) | *(__int64*)&(u[i]);
}
}
ord: 0.33
And this is my SIMD code:
https://codepaste.net/fbg1g5
void blurr2(double * u, double * r) {
__m128d rp2;
__m128d rdos;
__m128d rr;
int i;
int sizeAux = SIZE % 2 == 1 ? SIZE : SIZE - 1;
double dos[2] = { 2.0, 2.0 };
rdos = *(__m128d*)dos;
for (i = 0; i < sizeAux; i += 2) {
rp2 = *(__m128d*)&u[i + 1];
rr = _mm_add_pd(*(__m128d*)&u[i], rp2);
*((__m128d*)&r[i]) = _mm_div_pd(rr, rdos);
}
}
blurr2: 0.42s
int contarNegativos(double * u) {
__m128d rcero;
__m128d rr;
int i;
double cero[2] = { 0.0, 0.0 };
int contador = 0;
rcero = *(__m128d*)cero;
for (i = 0; i < SIZE; i += 2) {
rr = _mm_cmplt_pd(*(__m128d*)&u[i], rcero);
if (((__int64 *)&rr)[0]) {
contador++;
};
if (((__int64 *)&rr)[1]) {
contador++;
};
}
return contador;
}
negativeCount: 1.42s
void ord(double * v, double * u, double * r) {
__m128d rr;
int i;
for (i = 0; i < SIZE; i += 2) {
*((__m128d*)&r[i]) = _mm_or_pd(*(__m128d*)&v[i], *(__m128d*)&u[i]);
}
}
ord: 0.35s
Can you explain what I'm doing wrong? I'm a bit lost...
Use _mm_loadu_pd instead of pointer-casting and dereferencing a __m128d. Your code is guaranteed to segfault on gcc/clang where __m128d is assumed to be aligned.
blurr2: multiply by 0.5 instead of dividing by 2. It will be much faster. (I commented the same thing on a question with the exact same code in the last day or two, was that also you?)
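Putting those two fixes together, blurr2 might look like this (a sketch; the loop bound and scalar tail are mine, and the original's SIZE handling differs):
#include <emmintrin.h>
void blurr2_fixed(const double* u, double* r, int size) {
    const __m128d half = _mm_set1_pd(0.5);
    int i = 0;
    for (; i + 2 < size; i += 2) {           // i + 2 < size keeps u[i+2] in bounds
        __m128d a = _mm_loadu_pd(&u[i]);     // u[i],   u[i+1]
        __m128d b = _mm_loadu_pd(&u[i + 1]); // u[i+1], u[i+2]
        _mm_storeu_pd(&r[i], _mm_mul_pd(_mm_add_pd(a, b), half));
    }
    for (; i < size - 1; ++i)                // scalar tail
        r[i] = (u[i] + u[i + 1]) * 0.5;
}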
negativeCount: _mm_castpd_si128 the compare result to integer, and accumulate it with _mm_sub_epi64. (The bit pattern is all-zero or all-one, i.e. 2's complement 0 / -1).
#include <immintrin.h>
#include <stdint.h>
static const size_t SIZE = 1024;
uint64_t countNegative(double * u) {
__m128i counts = _mm_setzero_si128();
for (size_t i = 0; i < SIZE; i += 2) {
__m128d cmp = _mm_cmplt_pd(_mm_loadu_pd(&u[i]), _mm_setzero_pd());
counts = _mm_sub_epi64(counts, _mm_castpd_si128(cmp));
}
//return counts[0] + counts[1]; // GNU C only, and less efficient
// horizontal sum
__m128i hi64 = _mm_shuffle_epi32(counts, _MM_SHUFFLE(1, 0, 3, 2));
counts = _mm_add_epi64(counts, hi64);
uint64_t scalarcount = _mm_cvtsi128_si64(counts);
return scalarcount;
}
To learn more about efficient vector horizontal sums, see Fastest way to do horizontal float vector sum on x86. But the first rule is to do it outside the loop.
(source + asm on the Godbolt compiler explorer)
From MSVC (which I'm guessing you're using, or you'd get segfaults from *(__m128d*)foo), the inner loop is:
$LL4@countNegat:
movups xmm0, XMMWORD PTR [rcx]
lea rcx, QWORD PTR [rcx+16]
cmpltpd xmm0, xmm2
psubq xmm1, xmm0
sub rax, 1
jne SHORT $LL4@countNegat
It could maybe go faster with unrolling (and maybe two vector accumulators), but this is fairly good and might go close to 1.25 clocks per 16 bytes on Sandybridge/Haswell. (Bottleneck on 5 fused-domain uops).
Your version was actually unpacking to integer inside the inner loop! And if you were using MSVC -Ox, it was actually branching instead of using a branchless compare + conditional add. I'm surprised it wasn't slower than the scalar version.
Also, (int64_t *)&rr violates strict aliasing. char* can alias anything, but it's not safe to cast other pointers onto SIMD vectors and expect it to work. If it does, you got lucky. Compilers usually generate similar code for the cast or for intrinsics, and usually no worse for proper intrinsics.
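If you do need to pull a lane out in scalar code, a byte-wise copy avoids the aliasing problem (a sketch):
#include <emmintrin.h>
#include <cstdint>
#include <cstring>
int64_t low_lane(__m128d v) {
    int64_t out;
    std::memcpy(&out, &v, sizeof(out)); // byte copy: no strict-aliasing UB
    return out;
}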
Do you know that your ord function with SIMD is not 1:1 with the ord function without SIMD instructions?
In the ord function without SIMD, the result of the OR operation is calculated only for the even indexes:
r[0] = v[0] | u[0],
r[2] = v[2] | u[2],
r[4] = v[4] | u[4]
What about the odd indexes? If the OR operations were calculated for all indexes, it would take more time than now.
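For reference, a scalar ord that does match the SIMD one lane-for-lane would OR the bit patterns of every element, e.g. this sketch (using memcpy to sidestep the aliasing issue mentioned above):
#include <cstdint>
#include <cstring>
void ord_scalar(const double* v, const double* u, double* r, int size) {
    for (int i = 0; i < size; ++i) {
        uint64_t a, b;
        std::memcpy(&a, &v[i], sizeof(a));
        std::memcpy(&b, &u[i], sizeof(b));
        const uint64_t c = a | b;          // bitwise OR of the bit patterns
        std::memcpy(&r[i], &c, sizeof(c)); // store the pattern back as a double
    }
}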

Any way faster than pow() to compute an integer power of 10 in C++?

I know a power of 2 can be implemented using the << operator.
What about a power of 10? Like 10^5? Is there any way faster than pow(10,5) in C++? It is a pretty straightforward computation by hand, but it seems not so easy for computers due to the binary representation of the numbers... Let us assume I am only interested in integer powers, 10^n, where n is an integer.
Something like this:
int quick_pow10(int n)
{
static int pow10[10] = {
1, 10, 100, 1000, 10000,
100000, 1000000, 10000000, 100000000, 1000000000
};
return pow10[n];
}
Obviously, can do the same thing for long long.
This should be several times faster than any competing method. However, it is quite limited if you have lots of bases (although the number of values goes down quite dramatically with larger bases), so if there isn't a huge number of combinations, it's still doable.
As a comparison:
#include <iostream>
#include <cstdlib>
#include <cmath>
static int quick_pow10(int n)
{
static int pow10[10] = {
1, 10, 100, 1000, 10000,
100000, 1000000, 10000000, 100000000, 1000000000
};
return pow10[n];
}
static int integer_pow(int x, int n)
{
int r = 1;
while (n--)
r *= x;
return r;
}
static int opt_int_pow(int n)
{
int r = 1;
const int x = 10;
while (n)
{
if (n & 1)
{
r *= x;
n--;
}
else
{
r *= x * x;
n -= 2;
}
}
return r;
}
int main(int argc, char **argv)
{
long long sum = 0;
int n = strtol(argv[1], 0, 0);
const long outer_loops = 1000000000;
if (argv[2][0] == 'a')
{
for(long i = 0; i < outer_loops / n; i++)
{
for(int j = 1; j < n+1; j++)
{
sum += quick_pow10(n);
}
}
}
if (argv[2][0] == 'b')
{
for(long i = 0; i < outer_loops / n; i++)
{
for(int j = 1; j < n+1; j++)
{
sum += integer_pow(10,n);
}
}
}
if (argv[2][0] == 'c')
{
for(long i = 0; i < outer_loops / n; i++)
{
for(int j = 1; j < n+1; j++)
{
sum += opt_int_pow(n);
}
}
}
std::cout << "sum=" << sum << std::endl;
return 0;
}
Compiled with g++ 4.6.3, using -Wall -O2 -std=c++0x, gives the following results:
$ g++ -Wall -O2 -std=c++0x pow.cpp
$ time ./a.out 8 a
sum=100000000000000000
real 0m0.124s
user 0m0.119s
sys 0m0.004s
$ time ./a.out 8 b
sum=100000000000000000
real 0m7.502s
user 0m7.482s
sys 0m0.003s
$ time ./a.out 8 c
sum=100000000000000000
real 0m6.098s
user 0m6.077s
sys 0m0.002s
(I did have an option for using pow as well, but it took 1m22.56s when I first tried it, so I removed it when I decided to have optimised loop variant)
There are certainly ways to compute integral powers of 10 faster than using std::pow()! The first realization is that pow(x, n) can be implemented in O(log n) time. The next realization is that x * 10 is the same as (x << 3) + (x << 1). Of course, the compiler knows the latter, i.e., when you are multiplying an integer by the integer constant 10, the compiler will do whatever is fastest to multiply by 10. Based on these two rules it is easy to create fast computations, even if x is a big integer type.
In case you are interested in games like this:
A generic O(log n) version of power is discussed in Elements of Programming.
Lots of interesting "tricks" with integers are discussed in Hacker's Delight.
A solution for any base using template meta-programming:
template<int E, int N>
struct pow {
enum { value = E * pow<E, N - 1>::value };
};
template <int E>
struct pow<E, 0> {
enum { value = 1 };
};
Then it can be used to generate a lookup-table that can be used at runtime:
template<int E>
long long quick_pow(unsigned int n) {
static long long lookupTable[] = {
pow<E, 0>::value, pow<E, 1>::value, pow<E, 2>::value,
pow<E, 3>::value, pow<E, 4>::value, pow<E, 5>::value,
pow<E, 6>::value, pow<E, 7>::value, pow<E, 8>::value,
pow<E, 9>::value
};
return lookupTable[n];
}
This must be used with correct compiler flags in order to detect the possible overflows.
Usage example :
for(unsigned int n = 0; n < 10; ++n) {
std::cout << quick_pow<10>(n) << std::endl;
}
An integer power function (which doesn't involve floating-point conversions and computations) may very well be faster than pow():
int integer_pow(int x, int n)
{
int r = 1;
while (n--)
r *= x;
return r;
}
Edit: benchmarked - the naive integer exponentiation method seems to outperform the floating-point one by a factor of about 2.5:
h2co3-macbook:~ h2co3$ cat quirk.c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <string.h>
#include <math.h>
int integer_pow(int x, int n)
{
int r = 1;
while (n--)
r *= x;
return r;
}
int main(int argc, char *argv[])
{
int x = 0;
for (int i = 0; i < 100000000; i++) {
x += powerfunc(i, 5);
}
printf("x = %d\n", x);
return 0;
}
h2co3-macbook:~ h2co3$ clang -Wall -o quirk quirk.c -Dpowerfunc=integer_pow
h2co3-macbook:~ h2co3$ time ./quirk
x = -1945812992
real 0m1.169s
user 0m1.164s
sys 0m0.003s
h2co3-macbook:~ h2co3$ clang -Wall -o quirk quirk.c -Dpowerfunc=pow
h2co3-macbook:~ h2co3$ time ./quirk
x = -2147483648
real 0m2.898s
user 0m2.891s
sys 0m0.004s
h2co3-macbook:~ h2co3$
No multiplication and no table version:
// N x 10^n using only shifts and adds
int Npow10(int N, int n){
    N <<= n;                 // N *= 2^n
    while(n--) N += N << 2;  // N += 4*N each pass, i.e. N *= 5, n times in total
    return N;                // N * 2^n * 5^n == N * 10^n
}
Here is a stab at it:
// specialize if you have a bignum integer like type you want to work with:
template<typename T> struct is_integer_like:std::is_integral<T> {};
template<typename T> struct make_unsigned_like:std::make_unsigned<T> {};
template<typename T, typename U>
T powT( T base, U exponent ) {
static_assert( is_integer_like<U>::value, "exponent must be integer-like" );
static_assert( std::is_same< U, typename make_unsigned_like<U>::type >::value, "exponent must be unsigned" );
T retval = 1;
T& multiplicand = base;
if (exponent) {
while (true) {
// branch prediction will be awful here, you may have to micro-optimize:
retval *= (exponent&1)?multiplicand:1;
// or /2, whatever -- `>>1` is probably faster, esp for bignums:
exponent = exponent>>1;
if (!exponent)
break;
multiplicand *= multiplicand;
}
}
return retval;
}
What is going on above is a few things.
First, so that BigNum support is cheap, it is templatized. Out of the box, it supports any base type that supports *= own_type and either can be implicitly converted to int, or that int can be implicitly converted to (if both are true, problems will occur), and you need to specialize some templates to indicate that the exponent type involved is both unsigned and integer-like.
In this case, integer-like and unsigned means that it supports &1 returning bool and >>1 returning something it can be constructed from and eventually (after repeated >>1s) reaches a point where evaluating it in a bool context returns false. I used traits classes to express the restriction, because naive use by a value like -1 would compile and (on some platforms) loop forever, while (on others) would not.
Execution time for this algorithm, assuming multiplication is O(1), is O(lg(exponent)), where lg(exponent) is the number of times it takes to <<1 the exponent before it evaluates as false in a boolean context. For traditional integer types, this would be the binary log of the exponents value: so no more than 32.
I also eliminated all branches within the loop (or, made it obvious to existing compilers that no branch is needed, more precisely), with just the control branch (which is true uniformly until it is false once). Possibly eliminating even that branch might be worth it for high bases and low exponents...
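A usage sketch (the exponent must be an unsigned type to pass the static_asserts):
int main() {
    auto x = powT(10, 5u);   // 100000
    auto y = powT(2LL, 62u); // wider base types work too
    (void)x; (void)y;
}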
Now, with constexpr, you can do like so:
constexpr int pow10(int n) {
int result = 1;
for (int i = 1; i<=n; ++i)
result *= 10;
return result;
}
int main () {
int i = pow10(5);
}
i will be calculated at compile time. ASM generated for x86-64 gcc 9.2:
main:
push rbp
mov rbp, rsp
mov DWORD PTR [rbp-4], 100000
mov eax, 0
pop rbp
ret
You can use the lookup table, which will be by far the fastest approach.
You can also consider using this:
template <typename T>
T expt(T p, unsigned q)
{
T r(1);
while (q != 0) {
if (q % 2 == 1) { // q is odd
r *= p;
q--;
}
p *= p;
q /= 2;
}
return r;
}
This function will calculate x ^ y much faster than pow for integer values:
int pot(int x, int y){
int solution = 1;
while(y){
if(y&1)
solution*= x;
x *= x;
y >>= 1;
}
return solution;
}
A generic table builder based on constexpr functions. The floating-point part requires C++20 and gcc, but the non-floating-point part works with C++17. If you change the "auto" type param to "long" you can use C++14. Not properly tested.
#include <cstdio>
#include <cassert>
#include <cmath>
// Precomputes x^N
// Inspired by https://stackoverflow.com/a/34465458
template<auto x, unsigned char N, typename AccumulatorType>
struct PowTable {
constexpr PowTable() : mTable() {
AccumulatorType p{ 1 };
for (unsigned char i = 0; i < N; ++i) {
p *= x;
mTable[i] = p;
}
}
AccumulatorType operator[](unsigned char n) const {
assert(n < N);
return mTable[n];
}
AccumulatorType mTable[N];
};
long pow10(unsigned char n) {
static constexpr PowTable<10l, 10, long> powTable;
return powTable[n-1];
}
double powe(unsigned char n) {
static constexpr PowTable<2.71828182845904523536, 10, double> powTable;
return powTable[n-1];
}
int main() {
printf("10^3=%ld\n", pow10(3));
printf("e^2=%f", powe(2));
assert(pow10(3) == 1000);
assert(powe(2) - 7.389056 < 0.001);
}
Based on Mats Petersson's approach, but with compile-time generation of the cache.
#include <iostream>
#include <limits>
#include <array>
#include <cstdlib> // strtol
// digits
template <typename T>
constexpr T digits(T number) {
return number == 0 ? 0
: 1 + digits<T>(number / 10);
}
// pow
// https://stackoverflow.com/questions/24656212/why-does-gcc-complain-error-type-intt-of-template-argument-0-depends-on-a
// unfortunately we can't write `template <typename T, T N>` because of the partial specialization `PowerOfTen<T, 1>`
template <typename T, uintmax_t N>
struct PowerOfTen {
enum { value = 10 * PowerOfTen<T, N - 1>::value };
};
template <typename T>
struct PowerOfTen<T, 1> {
enum { value = 1 };
};
// sequence
template<typename T, T...>
struct pow10_sequence { };
template<typename T, T From, T N, T... Is>
struct make_pow10_sequence_from
: make_pow10_sequence_from<T, From, N - 1, N - 1, Is...> {
//
};
template<typename T, T From, T... Is>
struct make_pow10_sequence_from<T, From, From, Is...>
: pow10_sequence<T, Is...> {
//
};
// base10list
template <typename T, T N, T... Is>
constexpr std::array<T, N> base10list(pow10_sequence<T, Is...>) {
return {{ PowerOfTen<T, Is>::value... }};
}
template <typename T, T N>
constexpr std::array<T, N> base10list() {
return base10list<T, N>(make_pow10_sequence_from<T, 1, N+1>());
}
template <typename T>
constexpr std::array<T, digits(std::numeric_limits<T>::max())> base10list() {
return base10list<T, digits(std::numeric_limits<T>::max())>();
};
// main pow function
template <typename T>
static T template_quick_pow10(T n) {
static auto values = base10list<T>();
return values[n];
}
// client code
int main(int argc, char **argv) {
long long sum = 0;
int n = strtol(argv[1], 0, 0);
const long outer_loops = 1000000000;
if (argv[2][0] == 't') {
for(long i = 0; i < outer_loops / n; i++) {
for(int j = 1; j < n+1; j++) {
sum += template_quick_pow10(n);
}
}
}
std::cout << "sum=" << sum << std::endl;
return 0;
}
The code does not contain quick_pow10, integer_pow, or opt_int_pow, for better readability, but the tests were done with them present in the code.
Compiled with gcc version 4.6.3 (Ubuntu/Linaro 4.6.3-1ubuntu5), using -Wall -O2 -std=c++0x, gives the following results:
$ g++ -Wall -O2 -std=c++0x main.cpp
$ time ./a.out 8 a
sum=100000000000000000
real 0m0.438s
user 0m0.432s
sys 0m0.008s
$ time ./a.out 8 b
sum=100000000000000000
real 0m8.783s
user 0m8.777s
sys 0m0.004s
$ time ./a.out 8 c
sum=100000000000000000
real 0m6.708s
user 0m6.700s
sys 0m0.004s
$ time ./a.out 8 t
sum=100000000000000000
real 0m0.439s
user 0m0.436s
sys 0m0.000s
If you want to calculate, e.g., 10^5, then you can:
int main() {
cout << (int)1e5 << endl; // will print 100000
cout << (int)1e3 << endl; // will print 1000
return 0;
}
result *= 10 can also be written as result = (result << 3) + (result << 1)
constexpr int pow10(int n) {
int result = 1;
for (int i = 0; i < n; i++) {
result = (result << 3) + (result << 1);
}
return result;
}

Can counting byte matches between two strings be optimized using SIMD?

Profiling suggests that this function here is a real bottle neck for my application:
static inline int countEqualChars(const char* string1, const char* string2, int size) {
int r = 0;
for (int j = 0; j < size; ++j) {
if (string1[j] == string2[j]) {
++r;
}
}
return r;
}
Even with -O3 and -march=native, G++ 4.7.2 does not vectorize this function (I checked the assembler output). Now, I'm not an expert with SSE and friends, but I think that comparing more than one character at once should be faster. Any ideas on how to speed things up? Target architecture is x86-64.
Of course it can.
pcmpeqb compares two vectors of 16 bytes and produces a vector with zeros where they differed, and -1 where they match. Use this to compare 16 bytes at a time, adding the result to an accumulator vector (make sure to accumulate the results of at most 255 vector compares to avoid overflow). When you're done, there are 16 results in the accumulator. Sum them and negate to get the number of equal elements.
If the lengths are very short, it will be hard to get a significant speedup from this approach. If the lengths are long, then it will be worth pursuing.
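A minimal sketch of that scheme (SSE2; size is assumed to be a multiple of 16 and at most 255*16 so the byte counters can't overflow; subtracting the -1 compare results accumulates positive counts, which avoids the final negation):
#include <emmintrin.h>
int countEqualChars_sse2(const char* s1, const char* s2, int size) {
    __m128i acc = _mm_setzero_si128();
    for (int j = 0; j < size; j += 16) {
        __m128i a = _mm_loadu_si128((const __m128i*)(s1 + j));
        __m128i b = _mm_loadu_si128((const __m128i*)(s2 + j));
        acc = _mm_sub_epi8(acc, _mm_cmpeq_epi8(a, b)); // -(-1) = +1 per matching byte
    }
    // horizontal sum of the 16 byte counters via psadbw against zero
    __m128i sums = _mm_sad_epu8(acc, _mm_setzero_si128());
    return _mm_cvtsi128_si32(sums) + _mm_cvtsi128_si32(_mm_srli_si128(sums, 8));
}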
Compiler flags for vectorization:
-ftree-vectorize
-ftree-vectorize -march=<your_architecture> (use all instruction-set extensions available on your computer, not just a baseline like SSE2 for x86-64; use -march=native to optimize for the machine the compiler is running on). -march=<foo> also sets -mtune=<foo>, which is also a good thing.
Using SSEx intrinsics:
Pad and align the buffer to 16 bytes (according to the vector size you're actually going to use)
Create an accumulator countU8 with _mm_set1_epi8(0)
For all n/16 input (sub) vectors, do:
Load 16 chars from both strings with _mm_load_si128 or _mm_loadu_si128 (for unaligned loads)
_mm_cmpeq_epi8 compares the octets in parallel. Each match yields 0xFF (-1), 0x00 otherwise.
Subtract the above result vector from countU8 using _mm_sub_epi8 (minus -1 -> +1)
After at most 255 iterations, the 16 8-bit counters must be extracted into a larger integer type to prevent overflows. See unpack and horizontal add in this nice answer for how to do that: https://stackoverflow.com/a/10930706/1175253
Code:
#include <iostream>
#include <vector>
#include <cassert>
#include <cstdint>
#include <climits>
#include <cstring>
#include <emmintrin.h>
#ifdef __SSE2__
#if !defined(UINTPTR_MAX) || !defined(UINT64_MAX) || !defined(UINT32_MAX)
# error "Limit macros are not defined"
#endif
#if UINTPTR_MAX == UINT64_MAX
#define PTR_64
#elif UINTPTR_MAX == UINT32_MAX
#define PTR_32
#else
# error "Current UINTPTR_MAX is not supported"
#endif
template<typename T>
void print_vector(std::ostream& out,const __m128i& vec)
{
static_assert(sizeof(vec) % sizeof(T) == 0,"Invalid element size");
std::cout << '{';
const T* const end = reinterpret_cast<const T*>(&vec)-1;
const T* const upper = end+(sizeof(vec)/sizeof(T));
for(const T* elem = upper;
elem != end;
--elem
)
{
if(elem != upper)
std::cout << ',';
std::cout << +(*elem);
}
std::cout << '}' << std::endl;
}
#define PRINT_VECTOR(_TYPE,_VEC) do{ std::cout << #_VEC << " : "; print_vector<_TYPE>(std::cout,_VEC); } while(0)
///#note SSE2 required (macro: __SSE2__)
///#warning Not tested!
size_t counteq_epi8(const __m128i* a_in,const __m128i* b_in,size_t count)
{
assert(a_in != nullptr && (uintptr_t(a_in) % 16) == 0);
assert(b_in != nullptr && (uintptr_t(b_in) % 16) == 0);
//assert(count > 0);
/*
//maybe not so good with all that branching and additional loop variables
__m128i accumulatorU8 = _mm_set1_epi8(0);
__m128i sum2xU64 = _mm_set1_epi8(0);
for(size_t i = 0;i < count;++i)
{
//this operation could also be unrolled, where multiple result registers would be accumulated
accumulatorU8 = _mm_sub_epi8(accumulatorU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
if(i % 255 == 0)
{
//before overflow of uint8, the counter will be extracted
__m128i sum2xU16 = _mm_sad_epu8(accumulatorU8,_mm_set1_epi8(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
//reset accumulatorU8
accumulatorU8 = _mm_set1_epi8(0);
}
}
//blindly accumulate remaining values
__m128i sum2xU16 = _mm_sad_epu8(accumulatorU8,_mm_set1_epi8(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
//do a horizontal addition of the two counter values
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#if defined PTR_64
return _mm_cvtsi128_si64(sum2xU64);
#elif defined PTR_32
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
*/
__m128i sum2xU64 = _mm_set1_epi32(0);
while(count--)
{
__m128i matches = _mm_sub_epi8(_mm_set1_epi32(0),_mm_cmpeq_epi8(*a_in++,*b_in++));
__m128i sum2xU16 = _mm_sad_epu8(matches,_mm_set1_epi32(0));
sum2xU64 = _mm_add_epi64(sum2xU64,sum2xU16);
#ifndef NDEBUG
PRINT_VECTOR(uint16_t,sum2xU64);
#endif
}
//do a horizontal addition of the two counter values
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#ifndef NDEBUG
std::cout << "----------------------------------------" << std::endl;
PRINT_VECTOR(uint16_t,sum2xU64);
#endif
#if !defined(UINTPTR_MAX) || !defined(UINT64_MAX) || !defined(UINT32_MAX)
# error "Limit macros are not defined"
#endif
#if defined PTR_64
return _mm_cvtsi128_si64(sum2xU64);
#elif defined PTR_32
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
}
#endif
int main(int argc, char* argv[])
{
std::vector<__m128i> a(64); // * 16 bytes
std::vector<__m128i> b(a.size());
const size_t nBytes = a.size() * sizeof(std::vector<__m128i>::value_type);
char* const a_out = reinterpret_cast<char*>(a.data());
char* const b_out = reinterpret_cast<char*>(b.data());
memset(a_out,0,nBytes);
memset(b_out,0,nBytes);
a_out[1023] = 1;
b_out[1023] = 1;
size_t equalBytes = counteq_epi8(a.data(),b.data(),a.size());
std::cout << "equalBytes = " << equalBytes << std::endl;
return 0;
}
The fastest SSE implementation I got for large and small arrays:
size_t counteq_epi8(const __m128i* a_in,const __m128i* b_in,size_t count)
{
assert((count > 0 ? a_in != nullptr : true) && (uintptr_t(a_in) % sizeof(__m128i)) == 0);
assert((count > 0 ? b_in != nullptr : true) && (uintptr_t(b_in) % sizeof(__m128i)) == 0);
//assert(count > 0);
const size_t maxInnerLoops = 255;
const size_t nNestedLoops = count / maxInnerLoops;
const size_t nRemainderLoops = count % maxInnerLoops;
const __m128i zero = _mm_setzero_si128();
__m128i sum16xU8 = zero;
__m128i sum2xU64 = zero;
for(size_t i = 0;i < nNestedLoops;++i)
{
for(size_t j = 0;j < maxInnerLoops;++j)
{
sum16xU8 = _mm_sub_epi8(sum16xU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
}
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_sad_epu8(sum16xU8,zero));
sum16xU8 = zero;
}
for(size_t j = 0;j < nRemainderLoops;++j)
{
sum16xU8 = _mm_sub_epi8(sum16xU8,_mm_cmpeq_epi8(*a_in++,*b_in++));
}
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_sad_epu8(sum16xU8,zero));
sum2xU64 = _mm_add_epi64(sum2xU64,_mm_srli_si128(sum2xU64,64/8));
#if UINTPTR_MAX == UINT64_MAX
return _mm_cvtsi128_si64(sum2xU64);
#elif UINTPTR_MAX == UINT32_MAX
return _mm_cvtsi128_si32(sum2xU64);
#else
# error "macro PTR_(32|64) is not set"
#endif
}
Auto-vectorization in current gcc is a matter of helping the compiler understand that the code is easy to vectorize. In your case, it will understand the vectorization request if you remove the conditional and rewrite the code in a more imperative way:
static inline int count(const char* string1, const char* string2, int size) {
int r = 0;
bool b;
for (int j = 0; j < size; ++j) {
b = (string1[j] == string2[j]);
r += b;
}
return r;
}
In this case:
movdqa 16(%rsp), %xmm1
movl $.LC2, %esi
pxor %xmm2, %xmm2
movzbl 416(%rsp), %edx
movdqa .LC1(%rip), %xmm3
pcmpeqb 224(%rsp), %xmm1
cmpb %dl, 208(%rsp)
movzbl 417(%rsp), %eax
movl $1, %edi
pand %xmm3, %xmm1
movdqa %xmm1, %xmm5
sete %dl
movdqa %xmm1, %xmm4
movzbl %dl, %edx
punpcklbw %xmm2, %xmm5
punpckhbw %xmm2, %xmm4
pxor %xmm1, %xmm1
movdqa %xmm5, %xmm6
movdqa %xmm5, %xmm0
movdqa %xmm4, %xmm5
punpcklwd %xmm1, %xmm6
(etc.)