float mixValue = ... //in range -1.0f to 1.0f
for(... ; ... ; ... ) //long loop
{
float inputLevel = ... //in range -1.0f to 1.0f
if(inputLevel < 0.0 && mixValue < 0.0)
{
mixValue = (mixValue + inputLevel) + (mixValue*inputLevel);
}
else
{
mixValue = (mixValue + inputLevel) - (mixValue*inputLevel);
}
}
Just a simple question: can we calculate mixValue without branching? Or is there any other optimization suggestion, such as using SIMD?
Edit: just for more information, I ended up using this solution, based on the chosen answer:
const float sign[] = {-1, 1};
float mixValue = ... //in range -1.0f to 1.0f
for(... ; ... ; ... ) //long loop
{
float inputLevel = ... //in range -1.0f to 1.0f
unsigned a = *(unsigned*)(&mixValue);
unsigned b = *(unsigned*)(&inputLevel);
float mulValue = mixValue * inputLevel * sign[(a & b) >> (8*sizeof(unsigned)-1)];
float addValue = mixValue + inputLevel;
mixValue = addValue + mulValue;
}
thank you.
How about this:
const float sign[] = {-1, 1};
float mixValue = ... //in range -1.0f to 1.0f
for(... ; ... ; ... ) //long loop
{
float inputLevel = ... //in range -1.0f to 1.0f
int bothNegative = (inputLevel < 0.0) & (mixValue < 0.0);
mixValue = (mixValue + inputLevel) + (sign[bothNegative]*mixValue*inputLevel);
}
Edit: Mike was correct that && would introduce a branch, and thanks to Pedro for proving it. I changed && to & and now GCC (version 4.4.0) generates branch-free code.
Inspired by Roku's answer (which on MSVC++10 branches), this doesn't seem to branch:
#include <iostream>
using namespace std;
const float sign[] = {-1, 1};
int main() {
const int N = 10;
float mixValue = -0.5F;
for(int i = 0; i < N; i++) {
volatile float inputLevel = -0.3F;
int bothNegative = ((((unsigned char*)&inputLevel)[3] & 0x80) & (((unsigned char*)&mixValue)[3] & 0x80)) >> 7;
mixValue = (mixValue + inputLevel) + (sign[bothNegative]*mixValue*inputLevel);
}
std::cout << mixValue << std::endl;
}
Here's the disassembly, as analyzed by IDA Pro (compiled on MSVC++10, Release mode):
Disassembly http://img248.imageshack.us/img248/6865/floattestbranchmine.png
float mixValue = ... //in range -1.0f to 1.0f
for(... ; ... ; ... ) //long loop
{
float inputLevel = ... //in range -1.0f to 1.0f
float mulValue = mixValue * inputLevel;
float addValue = mixValue + inputLevel;
__int32 a = *(__int32*)(&mixValue);
__int32 b = *(__int32*)(&inputLevel);
__int32 c = *(__int32*)(&mulValue);
__int32 d = c & ((a ^ b) | 0x7FFFFFFF);
mixValue = addValue + *(float*)(&d);
}
Just off the top of my head (I'm sure it can be reduced):
mixValue = (mixValue + inputLevel) + (((mixValue / fabs(mixValue)) + (inputLevel / fabs(inputLevel))+1) / fabs(((mixValue / fabs(mixValue)) + (inputLevel / fabs(inputLevel))+1)))*-1*(mixValue*inputLevel);
Just to clarify a bit, I'll calculate sign separately:
float sign = (((mixValue / fabs(mixValue)) + (inputLevel / fabs(inputLevel))+1) / fabs(((mixValue / fabs(mixValue)) + (inputLevel / fabs(inputLevel))+1)))*-1;
mixValue = (mixValue + inputLevel) + sign*(mixValue*inputLevel);
This is floating point math, so you'll likely need to correct for some rounding issues, but that should set you on the right path I think.
If you are worried about excessive branching, look at Duff's Device. This should help unroll the loop somewhat. Truth be told, loop unrolling is something the optimizer will usually do for you, so trying to do it by hand may be a waste of time. Check the assembly output to find out.
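For reference, here is a minimal sketch of the Duff's Device pattern; the body operation is just a placeholder, not the mixing code from the question:
void times_two(float* data, int count) // placeholder operation
{
    if (count <= 0) return;
    int n = (count + 7) / 8;            // number of passes through the unrolled body
    switch (count % 8) {                // jump into the middle of the unrolled loop
    case 0: do { *data++ *= 2.0f;
    case 7:      *data++ *= 2.0f;
    case 6:      *data++ *= 2.0f;
    case 5:      *data++ *= 2.0f;
    case 4:      *data++ *= 2.0f;
    case 3:      *data++ *= 2.0f;
    case 2:      *data++ *= 2.0f;
    case 1:      *data++ *= 2.0f;
            } while (--n > 0);
    }
}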
SIMD will definitely be of assistance, provided you are performing the exact same operation on each item in your array. Be aware that not all hardware supports SIMD, but some compilers like gcc provide intrinsics for SIMD which will save you from dipping into assembler.
If you are using gcc to compile ARM code, the SIMD intrinsics can be found here
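On x86, a hedged sketch of the branch-free update with SSE intrinsics might look like this (it only removes the branch; because each iteration depends on the previous mixValue, it does not vectorize the loop across iterations):
#include <xmmintrin.h>
// One branch-free iteration of the mix, using SSE bit operations (a sketch, not the accepted solution).
static inline float mix_step(float mixValue, float inputLevel)
{
    const __m128 signBit = _mm_set_ss(-0.0f);                     // 0x80000000 in the low lane
    __m128 m    = _mm_set_ss(mixValue);
    __m128 in   = _mm_set_ss(inputLevel);
    __m128 prod = _mm_mul_ss(m, in);
    __m128 bothNeg = _mm_and_ps(_mm_and_ps(m, in), signBit);      // sign bit set only if both are negative
    __m128 corr = _mm_xor_ps(prod, _mm_xor_ps(signBit, bothNeg)); // flip the product's sign unless both were negative
    return _mm_cvtss_f32(_mm_add_ss(_mm_add_ss(m, in), corr));
}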
Have you benchmarked the loop with and without the branch?
At least you could remove one part of the branch, since mixValue is outside of the loop.
float multiplier(float a, float b){
unsigned char c1Neg = reinterpret_cast<unsigned char *>(&a)[3] & 0x80;
unsigned char c2Neg = reinterpret_cast<unsigned char *>(&b)[3] & 0x80;
unsigned char multiplierIsNeg = c1Neg & c2Neg;
float one = 1;
reinterpret_cast<unsigned char *>(&one)[3] |= multiplierIsNeg;
return -one;
}
cout << multiplier(-1,-1) << endl; // +1
cout << multiplier( 1,-1) << endl; // -1
cout << multiplier( 1, 1) << endl; // -1
cout << multiplier(-1, 1) << endl; // -1
Looking at your code, you can see that you always add the absolute value of the product mixValue*inputLevel, except when both operands are positive.
With some bit-fiddling and IEEE floating-point knowledge, you may get rid of the conditional:
// clears the sign bit of f => makes it positive.
void absf( float& f ) {
assert( sizeof( float ) == sizeof( int ) );
reinterpret_cast<int&>( f ) &= ~0x80000000;
}
// returns 0x80000000 if f is positive (sign bit clear), 0 otherwise
int pos( float& f ) {
return ~(reinterpret_cast<int&>(f) & 0x80000000) & 0x80000000;
}
// returns -fabs( f*g ) if f>0 and g>0, fabs(f*g) otherwise.
float prod( float& f, float& g ) {
float p = f*g;
float& rp=p;
int& ri = reinterpret_cast<int&>(rp);
absf(p);
ri |= ( pos(f) & pos(g) & 0x80000000); // first bit = + & +
return p;
}
int main(){
struct T { float f, g, r;
void test() {
float p = prod(f,g);
float d = (p-r)/r;
assert( -1e-15 < d && d < 1e-15 );
}
};
T vals[] = { {1,1,-1},{1,-1,1},{-1,1,1},{-1,-1,1} };
for( T* val=vals; val != vals+4; ++val ) {
val->test();
}
}
And finally: your loop
for( ... ) {
mixedResult += inputLevel + prod(mixedResult,inputLevel);
}
Note: the dimensions of your accumulation don't match. The inputLevel is a dimensionless quantity, while mixedResult is your... result (e.g. in Pascal, in Volts, ...). You cannot add two quantities with different dimensions. Probably you want mixedResult += prod( mixedResult, inputLevel ) as your accumulator.
Some compilers (e.g. MSC) would also require manual sign checking.
Source:
volatile float mixValue;
volatile float inputLevel;
float u = mixValue*inputLevel;
float v = -u;
float a[] = { v, u };
mixValue = (mixValue + inputLevel) + a[ (inputLevel<0.0) & (mixValue<0.0) ];
IntelC 11.1:
movss xmm1, DWORD PTR [12+esp]
mulss xmm1, DWORD PTR [16+esp]
movss xmm6, DWORD PTR [12+esp]
movss xmm2, DWORD PTR [16+esp]
movss xmm3, DWORD PTR [16+esp]
movss xmm5, DWORD PTR [12+esp]
xorps xmm4, xmm4
movaps xmm0, xmm4
subss xmm0, xmm1
movss DWORD PTR [esp], xmm0
movss DWORD PTR [4+esp], xmm1
addss xmm6, xmm2
xor eax, eax
cmpltss xmm3, xmm4
movd ecx, xmm3
neg ecx
cmpltss xmm5, xmm4
movd edx, xmm5
neg edx
and ecx, edx
addss xmm6, DWORD PTR [esp+ecx*4]
movss DWORD PTR [12+esp], xmm6
gcc 4.5:
flds 32(%esp)
flds 16(%esp)
fmulp %st, %st(1)
fld %st(0)
fchs
fstps (%esp)
fstps 4(%esp)
flds 32(%esp)
flds 16(%esp)
flds 16(%esp)
flds 32(%esp)
fxch %st(2)
faddp %st, %st(3)
fldz
fcomi %st(2), %st
fstp %st(2)
fxch %st(1)
seta %dl
xorl %eax, %eax
fcomip %st(1), %st
fstp %st(0)
seta %al
andl %edx, %eax
fadds (%esp,%eax,4)
xorl %eax, %eax
fstps 32(%esp)
Related
For some real-time DSP application I need to compute the absolute values of a complex valued vector.
The straightforward implementation would look like that
void computeAbsolute (std::complex<float>* complexSourceVec,
float* realValuedDestinationVec,
int vecLength)
{
for (int i = 0; i < vecLength; ++i)
realValuedDestinationVec[i] = std::abs (complexSourceVec[i]);
}
I want to replace this implementation with an AVX2-optimized version based on AVX2 intrinsics. What would be the most efficient way to implement it?
Note: The source data is handed to me by an API I have no access to, so there is no chance to change the layout of the complex input vector for better efficiency.
Inspired by Dan M.'s answer, I first implemented his version with some tweaks:
First I changed it to use the wider 256-bit registers, then I marked the temporary re and im arrays with __attribute__((aligned (32))) to be able to use aligned loads:
void computeAbsolute1 (const std::complex<float>* cplxIn, float* absOut, const int length)
{
for (int i = 0; i < length; i += 8)
{
float re[8] __attribute__((aligned (32))) = {cplxIn[i].real(), cplxIn[i + 1].real(), cplxIn[i + 2].real(), cplxIn[i + 3].real(), cplxIn[i + 4].real(), cplxIn[i + 5].real(), cplxIn[i + 6].real(), cplxIn[i + 7].real()};
float im[8] __attribute__((aligned (32))) = {cplxIn[i].imag(), cplxIn[i + 1].imag(), cplxIn[i + 2].imag(), cplxIn[i + 3].imag(), cplxIn[i + 4].imag(), cplxIn[i + 5].imag(), cplxIn[i + 6].imag(), cplxIn[i + 7].imag()};
__m256 x4 = _mm256_load_ps (re);
__m256 y4 = _mm256_load_ps (im);
__m256 b4 = _mm256_sqrt_ps (_mm256_add_ps (_mm256_mul_ps (x4,x4), _mm256_mul_ps (y4,y4)));
_mm256_storeu_ps (absOut + i, b4);
}
}
However, manually shuffling the values this way seemed like something that could be sped up. This is the solution I came up with, which runs 2-3 times faster in a quick test compiled by clang with full optimization:
#include <complex>
#include <immintrin.h>
void computeAbsolute2 (const std::complex<float>* __restrict cplxIn, float* __restrict absOut, const int length)
{
for (int i = 0; i < length; i += 8)
{
// load 8 complex values (--> 16 floats overall) into two SIMD registers
__m256 inLo = _mm256_loadu_ps (reinterpret_cast<const float*> (cplxIn + i ));
__m256 inHi = _mm256_loadu_ps (reinterpret_cast<const float*> (cplxIn + i + 4));
// separates the real and imaginary parts, however the values end up in the wrong order
__m256 re = _mm256_shuffle_ps (inLo, inHi, _MM_SHUFFLE (2, 0, 2, 0));
__m256 im = _mm256_shuffle_ps (inLo, inHi, _MM_SHUFFLE (3, 1, 3, 1));
// do the heavy work on the unordered vectors
__m256 abs = _mm256_sqrt_ps (_mm256_add_ps (_mm256_mul_ps (re, re), _mm256_mul_ps (im, im)));
// reorder values prior to storing
__m256d ordered = _mm256_permute4x64_pd (_mm256_castps_pd(abs), _MM_SHUFFLE(3,1,2,0));
_mm256_storeu_ps (absOut + i, _mm256_castpd_ps(ordered));
}
}
I think I'll go with that implementation if no one comes up with a faster solution
This compiles efficiently with gcc and clang (on the Godbolt compiler explorer).
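One caveat: the loop above assumes length is a multiple of 8. A minimal way to handle an arbitrary length is to run the vector loop on the largest multiple of 8 and finish with a scalar tail (a sketch reusing computeAbsolute2 from above):
void computeAbsoluteAnyLength (const std::complex<float>* __restrict cplxIn, float* __restrict absOut, const int length)
{
    const int vecEnd = length & ~7;              // largest multiple of 8 <= length
    computeAbsolute2 (cplxIn, absOut, vecEnd);   // vectorized part
    for (int i = vecEnd; i < length; ++i)        // 0..7 remaining elements
        absOut[i] = std::abs (cplxIn[i]);
}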
It's really hard (if possible at all) to write a "highly optimized AVX2" version of complex abs, since the way complex numbers are defined in the standard (specifically all the inf/nan corner cases) prevents a lot of optimization.
However, if you don't care about full correctness you can just use -ffast-math and some compilers will optimize the code for you. See the gcc output: https://godbolt.org/z/QbZlBI
You can also take this output and create your own abs function with inline assembly.
But yes, as was already mentioned, if you really need performance, you probably want to swap std::complex for something else.
I was able to get a decent output for your specific case with all the required shuffles by manually filling small re and im arrays. See: https://godbolt.org/z/sWAAXo
This could be trivially extended for ymm registers.
Anyway, here is the ultimate solution adapted from this SO answer which uses intrinsics in combination with clever compiler optimizations:
#include <complex>
#include <cassert>
#include <immintrin.h>
static inline void cabs_soa4(const float *re, const float *im, float *b) {
__m128 x4 = _mm_loadu_ps(re);
__m128 y4 = _mm_loadu_ps(im);
__m128 b4 = _mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(x4,x4), _mm_mul_ps(y4,y4)));
_mm_storeu_ps(b, b4);
}
void computeAbsolute (const std::complex<float>* src,
float* realValuedDestinationVec,
int vecLength)
{
for (int i = 0; i < vecLength; i += 4) {
float re[4] = {src[i].real(), src[i + 1].real(), src[i + 2].real(), src[i + 3].real()};
float im[4] = {src[i].imag(), src[i + 1].imag(), src[i + 2].imag(), src[i + 3].imag()};
cabs_soa4(re, im, realValuedDestinationVec);
}
}
which compiles to this simple loop:
_Z15computeAbsolutePKSt7complexIfEPfi:
test edx, edx
jle .L5
lea eax, [rdx-1]
shr eax, 2
sal rax, 5
lea rax, [rdi+32+rax]
.L3:
vmovups xmm0, XMMWORD PTR [rdi]
vmovups xmm2, XMMWORD PTR [rdi+16]
add rdi, 32
vshufps xmm1, xmm0, xmm2, 136
vmulps xmm1, xmm1, xmm1
vshufps xmm0, xmm0, xmm2, 221
vfmadd132ps xmm0, xmm1, xmm0
vsqrtps xmm0, xmm0
vmovups XMMWORD PTR [rsi], xmm0
cmp rax, rdi
jne .L3
.L5:
ret
https://godbolt.org/z/Yu64Wg
I would like to speed up a part of my code, but I can't think of a better way to do the following calculation:
float invSum = 1.0f / float(sum);
for (int i = 0; i < numBins; ++i)
{
histVec[i] *= invSum;
}
for (int i = 0; i < numBins; ++i)
{
float midPoint = (float)i*binSize + binOffset;
float f = histVec[i];
fmean += f * midPoint;
}
for (int i = 0; i < numBins; ++i)
{
float midPoint = (float)i*binSize + binOffset;
float f = histVec[i];
float diff = midPoint - fmean;
var += f * hwk::sqr(diff);
}
numBins in the for-loops is typically 10, but this bit of code is called very often (at a frequency of 80 frames per second, called at least 8 times per frame).
I tried some SSE methods but it only slightly speeds up this code. I think I could avoid calculating midPoint twice, but I am not sure how. Is there a better way to compute fmean and var?
Here is the SSE code:
// make hist contain a multiple of 4 valid values
for (int i = numBins; i < ((numBins + 3) & ~3); i++)
hist[i] = 0;
// find sum of bins in inHist
__m128i iSum4 = _mm_set1_epi32(0);
for (int i = 0; i < numBins; i += 4)
{
__m128i a = *((__m128i *) &inHist[i]);
iSum4 = _mm_add_epi32(iSum4, a);
}
int iSum = iSum4.m128i_i32[0] + iSum4.m128i_i32[1] + iSum4.m128i_i32[2] + iSum4.m128i_i32[3];
//float stdevB, meanB;
if (iSum == 0.0f)
{
stdev = 0.0;
mean = 0.0;
}
else
{
// Set histVec to normalised values in inHist
__m128 invSum = _mm_set1_ps(1.0f / float(iSum));
for (int i = 0; i < numBins; i += 4)
{
__m128i a = *((__m128i *) &inHist[i]);
__m128 b = _mm_cvtepi32_ps(a);
__m128 c = _mm_mul_ps(b, invSum);
_mm_store_ps(&histVec[i], c);
}
float binSize = 256.0f / (float)numBins;
float halfBinSize = binSize * 0.5f;
float binOffset = halfBinSize;
__m128 binSizeMask = _mm_set1_ps(binSize);
__m128 binOffsetMask = _mm_set1_ps(binOffset);
__m128 fmean4 = _mm_set1_ps(0.0f);
for (int i = 0; i < numBins; i += 4)
{
__m128i idx4 = _mm_set_epi32(i + 3, i + 2, i + 1, i);
__m128 idx_m128 = _mm_cvtepi32_ps(idx4);
__m128 histVec4 = _mm_load_ps(&histVec[i]);
__m128 midPoint4 = _mm_add_ps(_mm_mul_ps(idx_m128, binSizeMask), binOffsetMask);
fmean4 = _mm_add_ps(fmean4, _mm_mul_ps(histVec4, midPoint4));
}
fmean4 = _mm_hadd_ps(fmean4, fmean4); // 01 23 01 23
fmean4 = _mm_hadd_ps(fmean4, fmean4); // 0123 0123 0123 0123
float fmean = fmean4.m128_f32[0];
//fmean4 = _mm_set1_ps(fmean);
__m128 var4 = _mm_set1_ps(0.0f);
for (int i = 0; i < numBins; i+=4)
{
__m128i idx4 = _mm_set_epi32(i + 3, i + 2, i + 1, i);
__m128 idx_m128 = _mm_cvtepi32_ps(idx4);
__m128 histVec4 = _mm_load_ps(&histVec[i]);
__m128 midPoint4 = _mm_add_ps(_mm_mul_ps(idx_m128, binSizeMask), binOffsetMask);
__m128 diff4 = _mm_sub_ps(midPoint4, fmean4);
var4 = _mm_add_ps(var4, _mm_mul_ps(histVec4, _mm_mul_ps(diff4, diff4)));
}
var4 = _mm_hadd_ps(var4, var4); // 01 23 01 23
var4 = _mm_hadd_ps(var4, var4); // 0123 0123 0123 0123
float var = var4.m128_f32[0];
stdev = sqrt(var);
mean = fmean;
}
I might be doing something wrong, since I don't get as much improvement as I was expecting.
Is there something in the SSE code that might be slowing down the process?
(editor's note: the SSE part of this question was originally asked as https://stackoverflow.com/questions/31837817/foor-loop-optimisation-sse-comparison, which was closed as a duplicate.)
I only just realized that your data array starts out as an array of int, since you didn't have declarations in your code. I can see in the SSE version that you start with integers, and only store a float version of it later.
Keeping everything integer will let us do the loop-counter-vector with a simple ivec = _mm_add_epi32(ivec, _mm_set1_epi32(4)); Aki Suihkonen's answer has some transformations that should let it optimize a lot better. Especially, the auto-vectorizer should be able to do more even without -ffast-math. In fact, it does quite well. You could do better with intrinsics, esp. saving some vector 32bit multiplies and shortening the dependency chain.
My old answer, based on just trying to optimize your code as written, assuming FP input:
You may be able to combine all 3 loops into one, using the algorithm #Jason linked to. It might not be profitable, though, since it involves a division. For small numbers of bins, probably just loop multiple times.
Start by reading the guides at http://agner.org/optimize/. A couple of the techniques in his Optimising Assembly guide will speed up your SSE attempt (which I edited into this question for you).
combine your loops where possible, so you do more with the data each time it's loaded / stored.
multiple accumulators to hide the latency of loop-carried dependency chains. (Even FP add takes 3 cycles on recent Intel CPUs.) This won't apply for really short arrays like your case.
instead of int->float conversion on every iteration, use a float loop counter as well as the int loop counter (add a vector of _mm_set1_ps(4.0f) every iteration; see the sketch below). _mm_set... with variable args is something to avoid in loops, when possible: it takes several instructions (especially when each arg to setr has to be calculated separately).
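A hedged sketch of that float-counter idea, adapted from the fmean loop in the question's SSE code (same variable names):
__m128 idx_ps = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);   // float indices 0..3, set up once outside the loop
const __m128 step_ps = _mm_set1_ps(4.0f);
for (int i = 0; i < numBins; i += 4)
{
    __m128 histVec4  = _mm_load_ps(&histVec[i]);
    __m128 midPoint4 = _mm_add_ps(_mm_mul_ps(idx_ps, binSizeMask), binOffsetMask);
    fmean4 = _mm_add_ps(fmean4, _mm_mul_ps(histVec4, midPoint4));
    idx_ps = _mm_add_ps(idx_ps, step_ps);               // no _mm_set_epi32 / cvtdq2ps inside the loop
}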
gcc -O3 manages to auto-vectorize the first loop, but not the others. With -O3 -ffast-math, it auto-vectorizes more. -ffast-math allows it to do FP operations in a different order than the code specifies. e.g. adding up the array in 4 elements of a vector, and only combining the 4 accumulators at the end.
Telling gcc that the input pointer is aligned by 16 lets gcc auto-vectorize with a lot less overhead (no scalar loops for unaligned portions).
// return mean
float fpstats(float histVec[], float sum, float binSize, float binOffset, long numBins, float *variance_p)
{
numBins += 3;
numBins &= ~3; // round up to multiple of 4. This is just a quick hack to make the code fast and simple.
histVec = (float*)__builtin_assume_aligned(histVec, 16);
float invSum = 1.0f / float(sum);
float var = 0, fmean = 0;
for (int i = 0; i < numBins; ++i)
{
histVec[i] *= invSum;
float midPoint = (float)i*binSize + binOffset;
float f = histVec[i];
fmean += f * midPoint;
}
for (int i = 0; i < numBins; ++i)
{
float midPoint = (float)i*binSize + binOffset;
float f = histVec[i];
float diff = midPoint - fmean;
// var += f * hwk::sqr(diff);
var += f * (diff * diff);
}
*variance_p = var;
return fmean;
}
gcc generates some weird code for the 2nd loop.
# broadcasting fmean after the 1st loop
subss %xmm0, %xmm2 # fmean, D.2466
shufps $0, %xmm2, %xmm2 # vect_cst_.16
.L5: ## top of 2nd loop
movdqa %xmm3, %xmm5 # vect_vec_iv_.8, vect_vec_iv_.8
cvtdq2ps %xmm3, %xmm3 # vect_vec_iv_.8, vect__32.9
movq %rcx, %rsi # D.2465, D.2467
addq $1, %rcx #, D.2465
mulps %xmm1, %xmm3 # vect_cst_.11, vect__33.10
salq $4, %rsi #, D.2467
paddd %xmm7, %xmm5 # vect_cst_.7, vect_vec_iv_.8
addps %xmm2, %xmm3 # vect_cst_.16, vect_diff_39.15
mulps %xmm3, %xmm3 # vect_diff_39.15, vect_powmult_53.17
mulps (%rdi,%rsi), %xmm3 # MEM[base: histVec_10, index: _107, offset: 0B], vect__41.18
addps %xmm3, %xmm4 # vect__41.18, vect_var_42.19
cmpq %rcx, %rax # D.2465, bnd.26
ja .L8 #, ### <--- This is insane.
haddps %xmm4, %xmm4 # vect_var_42.19, tmp160
haddps %xmm4, %xmm4 # tmp160, vect_var_42.21
.L2:
movss %xmm4, (%rdx) # var, *variance_p_44(D)
ret
.p2align 4,,10
.p2align 3
.L8:
movdqa %xmm5, %xmm3 # vect_vec_iv_.8, vect_vec_iv_.8
jmp .L5 #
So instead of just jumping back to the top every iteration, gcc decides to jump ahead to copy a register, and then unconditionally jmp back to the top of the loop. The uop loop buffer may remove the front-end overhead of this silliness, but gcc should have structured the loop so it didn't copy xmm5->xmm3 and then xmm3->xmm5 every iteration, because that's silly. It should have the conditional jump just go to the top of the loop.
Also note the technique gcc used to get a float version of the loop counter: start with an integer vector of 1 2 3 4, and add set1_epi32(4). Use that as an input for packed int->float cvtdq2ps. On Intel HW, that instruction runs on the FP-add port, and has 3 cycle latency, same as packed FP add. gcc probably would have done better to just add a vector of set1_ps(4.0), even though this creates a 3-cycle loop-carried dependency chain, instead of a 1-cycle vector int add with a 3-cycle convert forking off on every iteration.
small iteration count
You say this will often be used on exactly 10 bins? A specialized version for just 10 bins could give a big speedup, by avoiding all the loop overhead and keeping everything in registers.
With that small a problem size, you can have the FP weights just sitting there in memory, instead of re-computing them with integer->float conversion every time.
Also, 10 bins is going to mean a lot of horizontal operations relative to the amount of vertical operations, since you only have 2 and a half vectors worth of data.
If exactly 10 is really common, specialize a version for that. If under-16 is common, specialize a version for that. (They can and should share the const float weights[] = { 0.0f, 1.0f, 2.0f, ...}; array.)
You probably will want to use intrinsics for the specialized small-problem versions, rather than auto-vectorization.
Having zero-padding after the end of the useful data in your array might still be a good idea in your specialized version(s). However, you can load the last 2 floats and clear the upper 64b of a vector register with a movq instruction. (__m128i _mm_cvtsi64_si128 (__int64 a)). Cast this to __m128 and you're good to go.
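For example, a sketch of that trick (assuming a 64-bit build; the memcpy is just a strict-aliasing-safe way to get the two floats into a 64-bit integer):
#include <immintrin.h>
#include <cstring>
#include <cstdint>
// Load two floats (e.g. &histVec[8] for 10 bins) and zero the upper 64 bits of the vector; compiles to a movq.
static inline __m128 load_last2(const float* p)
{
    int64_t bits;
    std::memcpy(&bits, p, sizeof(bits));               // the two floats, bit for bit
    return _mm_castsi128_ps(_mm_cvtsi64_si128(bits));  // low 64 bits set, upper 64 bits zeroed
}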
As peterchen mentioned, these operations are very trivial for current desktop processors. The function is linear, i.e. O(n). What's the typical size of numBins? If it's rather large (say, over 1000000), parallelization will help. This could be simple using a library like OpenMP. If numBins starts approaching MAXINT, you may consider GPGPU as an option (CUDA/OpenCL).
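For completeness, a hedged sketch of what the OpenMP route could look like for the fmean loop (only worthwhile when numBins is very large; compile with -fopenmp or /openmp, variable names follow the question):
float mean_omp(const float* histVec, int numBins, float binSize, float binOffset)
{
    float fmean = 0.0f;
    #pragma omp parallel for reduction(+:fmean)   // each thread accumulates a partial sum
    for (int i = 0; i < numBins; ++i)
    {
        float midPoint = (float)i * binSize + binOffset;
        fmean += histVec[i] * midPoint;
    }
    return fmean;
}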
All that considered, you should try profiling your application. Chances are good that, if there is a performance constraint, it's not in this method. Michael Abrash's definition of "high-performance code" has helped me greatly in determining if/when to optimize:
Before we can create high-performance code, we must understand what high performance is. The objective (not always attained) in creating high-performance software is to make the software able to carry out its appointed tasks so rapidly that it responds instantaneously, as far as the user is concerned. In other words, high-performance code should ideally run so fast that any further improvement in the code would be pointless. Notice that the above definition most emphatically does not say anything about making the software as fast as possible.
Reference:
The Graphics Programming Black Book
The overall function to be calculated is
std = sqrt(SUM_i { hist[i]/sum * (midpoint_i - mean_midpoint)^2 })
Using the identity
Var (aX + b) = Var (X) * a^2
one can reduce the complexity of the overall operation considerably:
1) the midpoint of a bin doesn't need the offset b
2) there is no need to prescale the bin indices by the bin width inside the loop
and
3) there is no need to normalize the histogram entries with the reciprocal of sum
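Spelled out with the notation from above (midpoint_i = i*binSize + binOffset, mean_midpoint = binSize*(mid/sum) + binOffset, mid = SUM_i { i*hist[i] }):
std = sqrt(SUM_i { hist[i]/sum * (midpoint_i - mean_midpoint)^2 })
    = sqrt(SUM_i { hist[i]/sum * binSize^2 * (i - mid/sum)^2 })      // the offset cancels, the bin width factors out
    = binSize * sqrt(SUM_i { hist[i] * (i*sum - mid)^2 } / sum^3)    // expand (i - mid/sum)^2 = (i*sum - mid)^2 / sum^2
which is exactly what the code below computes.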
The optimized calculation goes as follows
float calcVariance(int histBin[], float binWidth)
{
int i;
int sum = 0;
int mid = 0;
int var = 0;
for (i = 0; i < 10; i++)
{
sum += histBin[i];
mid += i*histBin[i];
}
float inv_sum = 1.0f / (float)sum;
float mid_sum = mid * inv_sum;
for (i = 0; i < 10; i++)
{
int diff = i * sum - mid; // because mid is prescaled by sum
var += histBin[i] * diff * diff;
}
return sqrt(float(var) / (float)(sum * sum * sum)) * binWidth;
}
Minor changes are required if it's float histBin[];
Also I second padding histBin size to a multiple of 4 for better vectorization.
EDIT
Another way to calculate this with floats in the inner loop:
float inv_sum = 1.0f / (float)sum;
float mid_sum = mid * inv_sum;
float var = 0.0f;
for (i = 0; i < 10; i++)
{
float diff = (float)i - mid_sum;
var += (float)histBin[i] * diff * diff;
}
return sqrt(var * inv_sum) * binWidth;
Perform the scaling on the global results only and keep integers as long as possible.
Group all computation in a single loop, using Σ(X-m)²/N = ΣX²/N - m².
// Accumulate the histogram
int mean= 0, var= 0;
for (int i = 0; i < numBins; ++i)
{
mean+= i * histVec[i];
var+= i * i * histVec[i];
}
// Compute the reduced mean and variance
float fmean= (float(mean) / sum);
float fvar= float(var) / sum - fmean * fmean;
// Rescale
fmean= fmean * binSize + binOffset;
fvar= fvar * binSize * binSize;
The required integer type will depend on the maximum value in the bins. The SSE optimization of the loop can exploit the _mm_madd_epi16 instruction.
If the number of bins is as small as 10, consider fully unrolling the loop. Precompute the i and i² vectors in a table.
In the lucky case that the data fits in 16 bits and the sums in 32 bits, the accumulation is done with something like
static short I[16]= { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0 };
static short I2[16]= { 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 0, 0, 0, 0, 0, 0 };
// First group
__m128i i= _mm_load_si128((__m128i*)&I[0]);
__m128i i2= _mm_load_si128((__m128i*)&I2[0]);
__m128i h= _mm_load_si128((__m128i*)&inHist[0]);
__m128i mean= _mm_madd_epi16(i, h);
__m128i var= _mm_madd_epi16(i2, h);
// Second group
i= _mm_load_si128((__m128i*)&I[8]);
i2= _mm_load_si128((__m128i*)&I2[8]);
h= _mm_load_si128((__m128i*)&inHist[8]);
mean= _mm_add_epi32(mean, _mm_madd_epi16(i, h));
var= _mm_add_epi32(var, _mm_madd_epi16(i2, h));
CAUTION: unchecked
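To get the final scalars, the four 32-bit partial sums in mean and var still need a horizontal reduction, for example (also a sketch, equally unchecked):
#include <emmintrin.h>
// Sum the four 32-bit lanes of a vector into one scalar.
static inline int hsum_epi32(__m128i v)
{
    v = _mm_add_epi32(v, _mm_srli_si128(v, 8));   // add the upper 64 bits onto the lower
    v = _mm_add_epi32(v, _mm_srli_si128(v, 4));   // add the remaining lane
    return _mm_cvtsi128_si32(v);
}
int meanSum = hsum_epi32(mean);
int varSum  = hsum_epi32(var);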
I have two similar programs, one in C++ and another in D.
The compilation is on Windows 7 64-bit, to 64-bit binaries.
C++ version, VS 2013:
#include <iostream>
#include <string>
int main(int argc, char* argv[])
{
float eps = 1.0f;
float f = 0.0f;
while (f + eps != f)
f += 1.0f;
std::cout << "eps = " + std::to_string(eps) + ", max_f = " + std::to_string(f) << std::endl;
return 0;
}
D version, DMD v2.066.1:
import std.stdio;
import std.conv;
int main(string[] argv)
{
float eps = 1.0f;
float f = 0.0f;
while (f + eps != f)
f += 1.0f;
writeln("eps = " ~ to!string(eps) ~ ", max_f = " ~ to!string(f));
return 0;
}
The C++ version works as expected and finds that f + eps == f when f = 16777216.
But the D version hangs forever. When I set a breakpoint I see that in the D version f is also 16777216 (after running for some time), and the Watch window (I use VisualD) shows that (f + eps != f) is 'false', so the loop should terminate, but it does not at runtime.
I think the assembly could give the answer, but I'm not very good with it.
I'm new to D, so it may be that I misused the language/compiler (compiled with DMD as just 'dmd test.d' without additional options, and also from VS with VisualD with default options). Any ideas what could be wrong with the D version of the program? Thanks!
Disassembly:
C++:
000000013F7D1410 mov rax,rsp
000000013F7D1413 push rbp
000000013F7D1414 lea rbp,[rax-5Fh]
000000013F7D1418 sub rsp,0E0h
000000013F7D141F mov qword ptr [rbp+17h],0FFFFFFFFFFFFFFFEh
000000013F7D1427 mov qword ptr [rax+8],rbx
000000013F7D142B movaps xmmword ptr [rax-18h],xmm6
000000013F7D142F xorps xmm1,xmm1
float eps = 1.0f;
float f = 0.0f;
000000013F7D1432 movss xmm6,dword ptr [__real#3f800000 (013F7D67E8h)]
000000013F7D143A nop word ptr [rax+rax]
f += 1.0f;
000000013F7D1440 addss xmm1,xmm6
while (f + eps != f)
000000013F7D1444 movaps xmm0,xmm1
000000013F7D1447 addss xmm0,xmm6
000000013F7D144B ucomiss xmm0,xmm1
000000013F7D144E jp main+30h (013F7D1440h)
000000013F7D1450 jne main+30h (013F7D1440h)
D:
000000013F761002 mov ebp,esp
000000013F761004 sub rsp,50h
{
float eps = 1.0f;
000000013F761008 xor eax,eax
000000013F76100A mov dword ptr [rbp-50h],eax
000000013F76100D movss xmm0,dword ptr [rbp-50h]
000000013F761012 movss dword ptr [f],xmm0
float f = 0.0f;
while (f + eps != f)
f += 1.0f;
000000013F761017 movss xmm1,dword ptr [__NULL_IMPORT_DESCRIPTOR+1138h (013F7C3040h)]
000000013F76101F movss xmm2,dword ptr [f]
000000013F761024 addss xmm2,xmm1
000000013F761028 movss dword ptr [f],xmm2
000000013F76102D fld dword ptr [f]
000000013F761030 fadd dword ptr [__NULL_IMPORT_DESCRIPTOR+1138h (013F7C3040h)]
000000013F761036 fld dword ptr [f]
000000013F761039 fucomip st,st(1)
000000013F76103B fstp st(0)
000000013F76103D jne D main+17h (013F761017h)
000000013F76103F jp D main+17h (013F761017h)
Summary
I accepted harold's answer: the program's behavior is due to the mixed FPU and SSE usage.
Here's a summary of what happens in the D assembly snippet. In fact the loop will run forever.
SSE behaves strictly according to IEEE-754: when f reaches 16777216.0 and we add 1.0 to it (f += 1.0f) we still obtain 16777216.0 in the xmm2 register, and then we store it to memory.
The (f + eps != f) expression is computed on the FPU. Since FPU registers have enough precision, (f + eps) results in 16777217.0. If we stored this result back to memory into a float variable we would get the expected value 16777216.0 (since 16777217.0 is not representable as a float), (f + eps != f) would be 'false' and the loop would terminate. But we do not store any numbers back to memory; we perform the comparison on the FPU, since both operands are already there. That means we compare one number computed strictly according to IEEE-754 single precision (f) with another computed with 80-bit accuracy (f + eps). 16777216.0 != 16777217.0, so the loop runs forever.
I'm not an expert in this area, but to me it looks like doing floating point with SSE instructions is more robust, as demonstrated by the C++ version of the program.
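A small standalone snippet reproduces the effect (a sketch; it assumes long double maps to x87 80-bit extended precision, which is the case for GCC on x86 but not for MSVC):
#include <cstdio>
int main()
{
    float f = 16777216.0f;                      // 2^24
    float sum_f = f + 1.0f;                     // rounds back to 16777216.0f in single precision
    long double sum_x = (long double)f + 1.0L;  // 16777217.0 at extended precision
    std::printf("float:    %d\n", (int)(sum_f != f));              // 0 -> the loop condition is false
    std::printf("extended: %d\n", (int)((long double)f != sum_x)); // 1 -> the loop keeps running
    return 0;
}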
Update
I had a discussion on the D forum http://forum.dlang.org/thread/ucnayusylmpvkpcnbhgh#forum.dlang.org
It turned out that the program behaves correctly: according to the language specification, intermediate calculations can be performed with higher accuracy.
The robust implementation for any D compiler is:
import std.stdio;
int main()
{
const float eps = 1.0f;
const float step = 1.0;
float f = 0.0f;
float fPlusEps = f + eps;
while (f != fPlusEps)
{
f += step;
fPlusEps = f + eps;
}
writeln("eps = ", eps, ", max_f = ", f);
return 0;
}
Mixed FPU and SSE code, that's... really strange. I see absolutely no reason to implement it this way.
But they have, and the result is that f + eps != f is evaluated with 80-bit extended precision, while f += 1.0f is evaluated using 32-bit floats.
That means the loop can never end, since f will stop going up before the value that makes f + eps != f false (which, in 80-bit precision, is huge) is reached.
Trying to break a loop with != or == on floating point values is asking for trouble.
The different behavior is most likely due to the float-to-double-to-80-bit internal floating-point conversion the compiler may adopt when passing values to the FPU.
When extending the mantissa, in particular, some compilers or optimizers can decide to leave the least significant bits "random" instead of zeroed. So 1.0f, when given to the FPU, may become 1.000000000000000000000012134432, which at float precision is still 1.0, but when 1.000000000000000000000012134432 and 1.000000000000000000000089544455 (the two tails are random) are compared by the FPU, they look different.
You should verify how the C++ and D compilers treat floating-point extension/reduction and, if necessary, configure the appropriate switches: if the two compilers are not from the same manufacturer, they have probably made different choices for their respective defaults.
I have a C++ function that looks like:
inline unsigned short function_name(float x, float y, someStruct *cfg)
{
int x_pos = (int)(x*2 + 0.5f);
int y_pos = (int)(y*2 + 0.5f);
int dict_index = x_pos + (y_pos * cfg->subdivisions_adj);
[...]
while someStruct is declared as:
struct someStruct {
int subdivisions;
int subdivisions_adj;
[...]
}
The generated assembly for the third line (int dict_index = [...]) is:
cvttss2si edi,xmm3
imul edi,[ecx+04h]
movss xmm3,[ecx+0ch]
movaps xmm4,xmm3
mulss xmm4,xmm0
addss xmm4,xmm1
cvttss2si eax,xmm4
add edi,eax
(See also the result by AMDCodeAnalyst)
Can anyone explain what this assembly does? I don't know why cvttss2si and movaps are used at all, aren't they for floating point numbers?
I am using Visual Studio 2008 on Windows 7, with the SSE2 instruction set enabled.
What you are seeing is simply that the compiler merges the first three lines together into one intermingled sequence of instructions.
cvttss2si edi,xmm3
Convert the float in xmm3 to a 32-bit int. Presumably xmm3 contains the float value of y_pos, and this is the result of the (int) cast in the calculation of y_pos.
imul edi,[ecx+04h]
Multiply by cfg->subdivisions_adj (ecx = cfg, subdivisions_adj = offset of 4)
movss xmm3,[ecx+0ch]
Would be part of the ... in your cfg variable, I suppose.
movaps xmm4,xmm3
mulss xmm4,xmm0
addss xmm4,xmm1
calculate x_pos = x * 2 + 0.5
cvttss2si eax,xmm4
(int) x_pos;
add edi,eax
Add x_pos to y_pos * cfg->subdivisions_adj;
I'm implementing bilinear interpolation in a tight loop and trying to optimize it with SSE, but I get zero speed-up from it.
Here is the code, the non-SIMD version uses a simple vector structure which could be defined as struct Vec3f { float x, y, z; } with implemented multiplication and addition operators:
#ifdef USE_SIMD
const Color c11 = pixelCache[y1 * size.x + x1];
const Color c12 = pixelCache[y2 * size.x + x1];
const Color c22 = pixelCache[y2 * size.x + x2];
const Color c21 = pixelCache[y1 * size.x + x2];
__declspec(align(16)) float mc11[4] = { 1.0, c11.GetB(), c11.GetG(), c11.GetR() };
__declspec(align(16)) float mc12[4] = { 1.0, c12.GetB(), c12.GetG(), c12.GetR() };
__declspec(align(16)) float mc22[4] = { 1.0, c22.GetB(), c22.GetG(), c22.GetR() };
__declspec(align(16)) float mc21[4] = { 1.0, c21.GetB(), c21.GetG(), c21.GetR() };
// scalars in vector form for SSE
const float s11 = (x2-x)*(y2-y);
const float s12 = (x2-x)*(y-y1);
const float s22 = (x-x1)*(y-y1);
const float s21 = (x-x1)*(y2-y);
__declspec(align(16)) float ms11[4] = {1.0, s11, s11, s11};
__declspec(align(16)) float ms12[4] = {1.0, s12, s12, s12};
__declspec(align(16)) float ms22[4] = {1.0, s22, s22, s22};
__declspec(align(16)) float ms21[4] = {1.0, s21, s21, s21};
__asm {
movaps xmm0, mc11
movaps xmm1, mc12
movaps xmm2, mc22
movaps xmm3, mc21
movaps xmm4, ms11
movaps xmm5, ms12
movaps xmm6, ms22
movaps xmm7, ms21
mulps xmm0, xmm4
mulps xmm1, xmm5
mulps xmm2, xmm6
mulps xmm3, xmm7
addps xmm0, xmm1
addps xmm0, xmm2
addps xmm0, xmm3
movaps mc11, xmm0
}
#else
const Vec3f c11 = toFloat(pixelCache[y1 * size.x + x1]);
const Vec3f c12 = toFloat(pixelCache[y2 * size.x + x1]);
const Vec3f c22 = toFloat(pixelCache[y2 * size.x + x2]);
const Vec3f c21 = toFloat(pixelCache[y1 * size.x + x2]);
const Vec3f colour =
c11*(x2-x)*(y2-y) +
c21*(x-x1)*(y2-y) +
c12*(x2-x)*(y-y1) +
c22*(x-x1)*(y-y1);
#endif
Rearranging the asm code to reuse registers (I ended up with just three xmm registers) had no effect. I've also tried using intrinsics:
// perform bilinear interpolation
const Vec3f c11 = toFloat(pixelCache[y1 * size.x + x1]);
const Vec3f c12 = toFloat(pixelCache[y2 * size.x + x1]);
const Vec3f c22 = toFloat(pixelCache[y2 * size.x + x2]);
const Vec3f c21 = toFloat(pixelCache[y1 * size.x + x2]);
// scalars in vector form for SSE
const float s11 = (x2-x)*(y2-y);
const float s12 = (x2-x)*(y-y1);
const float s22 = (x-x1)*(y-y1);
const float s21 = (x-x1)*(y2-y);
__m128 mc11 = _mm_set_ps(1.f, c11.b, c11.g, c11.r);
__m128 mc12 = _mm_set_ps(1.f, c12.b, c12.g, c12.r);
__m128 mc22 = _mm_set_ps(1.f, c22.b, c22.g, c22.r);
__m128 mc21 = _mm_set_ps(1.f, c21.b, c21.g, c21.r);
__m128 ms11 = _mm_set_ps(1.f, s11, s11, s11);
__m128 ms12 = _mm_set_ps(1.f, s12, s12, s12);
__m128 ms22 = _mm_set_ps(1.f, s22, s22, s22);
__m128 ms21 = _mm_set_ps(1.f, s21, s21, s21);
mc11 = _mm_mul_ps(mc11, ms11);
mc12 = _mm_mul_ps(mc12, ms12);
mc22 = _mm_mul_ps(mc22, ms22);
mc21 = _mm_mul_ps(mc21, ms21);
mc11 = _mm_add_ps(mc11, mc12);
mc11 = _mm_add_ps(mc11, mc22);
mc11 = _mm_add_ps(mc11, mc21);
Vec3f colour;
_mm_storeu_ps(colour.array, mc11);
All to no avail. Am I missing something, or is it impossible to gain any extra speed here?
Why floating point?
Given packed pixel argb for a, b, c, d, and xerr, yerr in the range 0-256, a simple example is:
// =================================================================================================================
// xs_Bilerp
// =================================================================================================================
finline uint32 xs_Bilerp (uint32 a, uint32 b, uint32 c, uint32 d, uint32 xerr, uint32 yerr)
{
#define xs_rbmask 0x00ff00ff
#define xs_agmask 0xff00ff00
if (a==b && c==d && a==d) return a;
const uint32 arb = a & xs_rbmask;
const uint32 crb = c & xs_rbmask;
const uint32 aag = a & xs_agmask;
const uint32 cag = c & xs_agmask;
const uint32 rbdx1 = (b & xs_rbmask) - arb;
const uint32 rbdx2 = (d & xs_rbmask) - crb;
const uint32 agdx1 = ((b & xs_agmask)>>8) - (aag >> 8);
const uint32 agdx2 = ((d & xs_agmask)>>8) - (cag >> 8);
const uint32 rb1 = (arb + ((rbdx1 * xerr) >> 8)) & xs_rbmask;
const uint32 ag1 = (aag + ((agdx1 * xerr) )) & xs_agmask;
const uint32 rbdy = ((crb + ((rbdx2 * xerr) >> 8)) & xs_rbmask) - rb1;
const uint32 agdy = (((cag + ((agdx2 * xerr) )) & xs_agmask)>>8) - (ag1 >> 8);
const uint32 rb = (rb1 + ((rbdy * yerr) >> 8)) & xs_rbmask;
const uint32 ag = (ag1 + ((agdy * yerr) )) & xs_agmask;
return ag | rb;
}
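A hedged usage sketch of how the 0-256 xerr/yerr values could be derived from fractional coordinates (the corner ordering a = top-left, b = top-right, c = bottom-left, d = bottom-right is inferred from the interpolation order above; bounds checking at the image edges is omitted):
uint32 sampleBilinear(const uint32* pixels, int stride, float x, float y)
{
    int x1 = (int)x, y1 = (int)y;
    uint32 xerr = (uint32)((x - (float)x1) * 256.0f);   // fractional part as 0..256 fixed point
    uint32 yerr = (uint32)((y - (float)y1) * 256.0f);
    uint32 a = pixels[ y1      * stride + x1    ];
    uint32 b = pixels[ y1      * stride + x1 + 1];
    uint32 c = pixels[(y1 + 1) * stride + x1    ];
    uint32 d = pixels[(y1 + 1) * stride + x1 + 1];
    return xs_Bilerp(a, b, c, d, xerr, yerr);
}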