I need to compare a large number of similar small images (up to 200x200).
So I am trying to implement the SSIM (structural similarity, see https://en.wikipedia.org/wiki/Structural_similarity ) algorithm.
SSIM requires calculating the covariance of two 8-bit gray images.
A trivial implementation looks like this:
float SigmaXY(const uint8_t * x, const uint8_t * y, size_t size, float averageX, float averageY)
{
float sum = 0;
for(size_t i = 0; i < size; ++i)
sum += (x[i] - averageX) * (y[i] - averageY);
return sum / size;
}
But it has poor performance.
So I hope to improve it using SIMD or CUDA (I heard that this can be done).
Unfortunately, I have no experience with either.
What would that look like, and where should I start?
I have another nice solution!
First, I want to mention some mathematical formulas:
averageX = Sum(x[i])/size;
averageY = Sum(y[i])/size;
And therefore:
Sum((x[i] - averageX)*(y[i] - averageY))/size =
Sum(x[i]*y[i])/size - Sum(x[i]*averageY)/size -
Sum(averageX*y[i])/size + Sum(averageX*averageY)/size =
Sum(x[i]*y[i])/size - averageY*Sum(x[i])/size -
averageX*Sum(y[i])/size + averageX*averageY*Sum(1)/size =
Sum(x[i]*y[i])/size - averageY*averageX -
averageX*averageY + averageX*averageY =
Sum(x[i]*y[i])/size - averageY*averageX;
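In more standard notation, this is just the identity that the covariance equals the mean of the products minus the product of the means:
$$\frac{1}{N}\sum_{i=1}^{N}(x_i-\bar{x})(y_i-\bar{y}) \;=\; \frac{1}{N}\sum_{i=1}^{N}x_i y_i \;-\; \bar{x}\,\bar{y}$$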
This allows us to modify the algorithm:
float SigmaXY(const uint8_t * x, const uint8_t * y, size_t size, float averageX, float averageY)
{
uint32_t sum = 0; // If the images are larger than 256x256 then you have to use uint64_t here.
for(size_t i = 0; i < size; ++i)
sum += x[i]*y[i];
return (float)sum / size - averageY*averageX;
}
And only after that can we use SIMD (I used SSE2):
#include <emmintrin.h>
inline __m128i SigmaXY(__m128i x, __m128i y)
{
__m128i lo = _mm_madd_epi16(_mm_unpacklo_epi8(x, _mm_setzero_si128()), _mm_unpacklo_epi8(y, _mm_setzero_si128()));
__m128i hi = _mm_madd_epi16(_mm_unpackhi_epi8(x, _mm_setzero_si128()), _mm_unpackhi_epi8(y, _mm_setzero_si128()));
return _mm_add_epi32(lo, hi);
}
float SigmaXY(const uint8_t * x, const uint8_t * y, size_t size, float averageX, float averageY)
{
uint32_t sum = 0;
size_t i = 0, alignedSize = size/16*16;
if(size >= 16)
{
__m128i sums = _mm_setzero_si128();
for(; i < alignedSize; i += 16)
{
__m128i _x = _mm_loadu_si128((__m128i*)(x + i));
__m128i _y = _mm_loadu_si128((__m128i*)(y + i));
sums = _mm_add_epi32(sums, SigmaXY(_x, _y));
}
uint32_t _sums[4];
_mm_storeu_si128((__m128i*)_sums, sums);
sum = _sums[0] + _sums[1] + _sums[2] + _sums[3];
}
for(; i < size; ++i)
sum += x[i]*y[i];
return (float)sum / size - averageY*averageX;
}
Here is a SIMD implementation of the algorithm (I used SSE4.1):
#include <smmintrin.h>
template <int shift> inline __m128 SigmaXY(const __m128i & x, const __m128i & y, __m128 & averageX, __m128 & averageY)
{
__m128 _x = _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_srli_si128(x, shift)));
__m128 _y = _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_srli_si128(y, shift)));
return _mm_mul_ps(_mm_sub_ps(_x, averageX), _mm_sub_ps(_y, averageY));
}
float SigmaXY(const uint8_t * x, const uint8_t * y, size_t size, float averageX, float averageY)
{
float sum = 0;
size_t i = 0, alignedSize = size/16*16;
if(size >= 16)
{
__m128 sums = _mm_setzero_ps();
__m128 avgX = _mm_set1_ps(averageX);
__m128 avgY = _mm_set1_ps(averageY);
for(; i < alignedSize; i += 16)
{
__m128i _x = _mm_loadu_si128((__m128i*)(x + i));
__m128i _y = _mm_loadu_si128((__m128i*)(y + i));
sums = _mm_add_ps(sums, SigmaXY<0>(_x, _y, avgX, avgY));
sums = _mm_add_ps(sums, SigmaXY<4>(_x, _y, avgX, avgY));
sums = _mm_add_ps(sums, SigmaXY<8>(_x, _y, avgX, avgY));
sums = _mm_add_ps(sums, SigmaXY<12>(_x, _y, avgX, avgY));
}
float _sums[4];
_mm_storeu_ps(_sums, sums);
sum = _sums[0] + _sums[1] + _sums[2] + _sums[3];
}
for(; i < size; ++i)
sum += (x[i] - averageX) * (y[i] - averageY);
return sum / size;
}
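For completeness, here is a minimal scalar sketch (not optimized) showing where SigmaXY() fits into the full SSIM formula from the Wikipedia article; the constants assume K1 = 0.01, K2 = 0.03 and L = 255, and the variance is obtained by calling SigmaXY() with the same image twice:
// Sketch: SSIM of two single-channel 8-bit images of equal size.
float Average(const uint8_t * p, size_t size)
{
    uint32_t sum = 0;
    for (size_t i = 0; i < size; ++i)
        sum += p[i];
    return (float)sum / size;
}

float SSIM(const uint8_t * x, const uint8_t * y, size_t size)
{
    const float C1 = 6.5025f;  // (0.01 * 255)^2
    const float C2 = 58.5225f; // (0.03 * 255)^2
    float muX = Average(x, size);
    float muY = Average(y, size);
    float varX = SigmaXY(x, x, size, muX, muX); // variance = covariance of x with itself
    float varY = SigmaXY(y, y, size, muY, muY);
    float covXY = SigmaXY(x, y, size, muX, muY);
    return ((2*muX*muY + C1) * (2*covXY + C2)) /
           ((muX*muX + muY*muY + C1) * (varX + varY + C2));
}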
I hope it will be useful for you.
How can I resize a YUV image in CUDA? I tried converting libyuv’s scaling code to CUDA, but the performance is very bad.
void ScalePlaneSimple(int src_width, int src_height,
int dst_width, int dst_height,
int src_stride, int dst_stride,
const Npp8u* src_ptr, Npp8u* dst_ptr) {
int i;
// Initial source x/y coordinate and step values as 16.16 fixed point.
int x = 0;
int y = 0;
int dx = 0;
int dy = 0;
ScaleSlope(src_width, src_height, dst_width, dst_height, kFilterNone,
&x, &y, &dx, &dy);
src_width = Abs(src_width);
if (src_width * 2 == dst_width && x < 0x8000) {
for (i = 0; i < dst_height; ++i) {
ScaleColsUp2_C(dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx);
dst_ptr += dst_stride;
y += dy;
}
}
else
{
for (i = 0; i < dst_height; ++i) {
ScaleCols_C<<<1,1>>>(dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx);
dst_ptr += dst_stride;
y += dy;
}
}
}
__global__ void ScaleCols_C(Npp8u* dst_ptr, const Npp8u* src_ptr,
int dst_width, int x, int dx) {
int j;
for (j = 0; j < dst_width - 1; j += 2) {
dst_ptr[0] = src_ptr[x >> 16];
x += dx;
dst_ptr[1] = src_ptr[x >> 16];
x += dx;
dst_ptr += 2;
}
if (dst_width & 1) {
dst_ptr[0] = src_ptr[x >> 16];
}
}
Maybe I should use parallel computing? Any advice is welcome.
If you want to use CUDA, take a look at the NVIDIA Performance Primitives (NPP). There are image resizing functions there. (The interface is a copy of the Intel Performance Primitives, if you don't want to use the GPU.)
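Alternatively, if you want to keep the libyuv-style code, the main performance problem in the snippet above is the <<<1,1>>> launch: the whole row is scaled by a single CUDA thread. A minimal sketch (untested; nearest-neighbor only, same 16.16 fixed-point stepping, with a hypothetical kernel name) using one thread per output pixel could look like this:
__global__ void ScaleColsParallel(Npp8u* dst_ptr, const Npp8u* src_ptr,
                                  int dst_width, int x, int dx)
{
    // One thread per output pixel of the row; x + j*dx is the 16.16 source coordinate.
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < dst_width)
        dst_ptr[j] = src_ptr[(x + j * dx) >> 16];
}

// Example launch, per destination row:
// ScaleColsParallel<<<(dst_width + 255) / 256, 256>>>(
//     dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx);
Even better would be a single 2D launch over the whole destination image instead of one kernel launch per row, since per-row launches add significant overhead.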
I need to find the largest element in a 1D matrix, along with its column and row indices.
Since I use a 1D matrix, I just need to find the index of the max element first; then it is easy to get the row and column.
My problem is that I cannot get that index.
I have a working function that finds the largest element using SSE, here it is:
float find_largest_element_in_matrix_SSE(float* m, unsigned const int dims)
{
size_t i;
int index = -1;
__m128 max_el = _mm_loadu_ps(m);
__m128 curr;
for (i = 4; i < dims * dims; i += 4)
{
curr = _mm_loadu_ps(m + i);
max_el = _mm_max_ps(max_el, curr);
}
__declspec(align(16))float max_v[4] = { 0 };
_mm_store_ps(max_v, max_el);
return max(max(max(max_v[0], max_v[1]), max_v[2]), max_v[3]);
}
and I also have a non-working function that uses AVX:
float find_largest_element_in_matrix_AVX(float* m, unsigned const int dims)
{
size_t i;
int index = -1;
__m256 max_el = _mm256_loadu_ps(m);
__m256 curr;
for (i = 8; i < dims * dims; i += 8)
{
curr = _mm256_loadu_ps(m + i);
max_el = _mm256_max_ps(max_el, curr);
}
__declspec(align(32))float max_v[8] = { 0 };
_mm256_store_ps(max_v, max_el);
__m256 y = _mm256_permute2f128_ps(max_el, max_el, 1);
__m256 m1 = _mm256_max_ps(max_el, y); // m1[1] = max(max_el[1], max_el[3])
__m256 m2 = _mm256_permute_ps(m1, 5);
__m256 m_res = _mm256_max_ps(m1, m2);
return m[0];
}
Could anyone help me with actually finding the index of the max element, and with making my AVX version work?
Here's a working SSE (SSE 4) implementation that returns the max val and corresponding index, along with a scalar reference implementation and test harness:
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <smmintrin.h> // SSE 4.1
float find_largest_element_in_matrix_ref(const float* m, int dims, int *maxIndex)
{
float maxVal = m[0];
int i;
*maxIndex = 0;
for (i = 1; i < dims * dims; ++i)
{
if (m[i] > maxVal)
{
maxVal = m[i];
*maxIndex = i;
}
}
return maxVal;
}
float find_largest_element_in_matrix_SSE(const float* m, int dims, int *maxIndex)
{
float maxVal = m[0];
float aMaxVal[4];
int32_t aMaxIndex[4];
int i;
*maxIndex = 0;
const __m128i vIndexInc = _mm_set1_epi32(4);
__m128i vMaxIndex = _mm_setr_epi32(0, 1, 2, 3);
__m128i vIndex = vMaxIndex;
__m128 vMaxVal = _mm_loadu_ps(m);
for (i = 4; i < dims * dims; i += 4)
{
__m128 v = _mm_loadu_ps(&m[i]);
__m128 vcmp = _mm_cmpgt_ps(v, vMaxVal);
vIndex = _mm_add_epi32(vIndex, vIndexInc);
vMaxVal = _mm_max_ps(vMaxVal, v);
vMaxIndex = _mm_blendv_epi8(vMaxIndex, vIndex, _mm_castps_si128(vcmp));
}
_mm_storeu_ps(aMaxVal, vMaxVal);
_mm_storeu_si128((__m128i *)aMaxIndex, vMaxIndex);
maxVal = aMaxVal[0];
*maxIndex = aMaxIndex[0];
for (i = 1; i < 4; ++i)
{
if (aMaxVal[i] > maxVal)
{
maxVal = aMaxVal[i];
*maxIndex = aMaxIndex[i];
}
}
return maxVal;
}
int main()
{
const int dims = 1024;
float m[dims * dims];
float maxVal_ref, maxVal_SSE;
int maxIndex_ref = -1, maxIndex_SSE = -1;
int i;
srand(time(NULL));
for (i = 0; i < dims * dims; ++i)
{
m[i] = (float)rand() / RAND_MAX;
}
maxVal_ref = find_largest_element_in_matrix_ref(m, dims, &maxIndex_ref);
maxVal_SSE = find_largest_element_in_matrix_SSE(m, dims, &maxIndex_SSE);
if (maxVal_ref == maxVal_SSE && maxIndex_ref == maxIndex_SSE)
{
printf("PASS: maxVal = %f, maxIndex = %d\n",
maxVal_ref, maxIndex_ref);
}
else
{
printf("FAIL: maxVal_ref = %f, maxVal_SSE = %f, maxIndex_ref = %d, maxIndex_SSE = %d\n",
maxVal_ref, maxVal_SSE, maxIndex_ref, maxIndex_SSE);
}
return 0;
}
Compile and run:
$ gcc -Wall -msse4 Yakovenko.c && ./a.out
PASS: maxVal = 0.999999, maxIndex = 120409
Obviously you can get the row and column indices if needed:
int rowIndex = maxIndex / dims;
int colIndex = maxIndex % dims;
From here it should be fairly straightforward to write an AVX2 implementation.
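For reference, an untested AVX2 sketch of the same approach (it keeps the original assumption that dims * dims is a multiple of the vector width, here 8, and needs -mavx2):
#include <immintrin.h> // AVX2

float find_largest_element_in_matrix_AVX2(const float* m, int dims, int *maxIndex)
{
    float maxVal;
    float aMaxVal[8];
    int32_t aMaxIndex[8];
    int i;
    const __m256i vIndexInc = _mm256_set1_epi32(8);
    __m256i vMaxIndex = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
    __m256i vIndex = vMaxIndex;
    __m256 vMaxVal = _mm256_loadu_ps(m);
    for (i = 8; i < dims * dims; i += 8)
    {
        __m256 v = _mm256_loadu_ps(&m[i]);
        __m256 vcmp = _mm256_cmp_ps(v, vMaxVal, _CMP_GT_OQ);
        vIndex = _mm256_add_epi32(vIndex, vIndexInc);
        vMaxVal = _mm256_max_ps(vMaxVal, v);
        vMaxIndex = _mm256_blendv_epi8(vMaxIndex, vIndex, _mm256_castps_si256(vcmp));
    }
    // Reduce the 8 per-lane maxima and their indices in scalar code.
    _mm256_storeu_ps(aMaxVal, vMaxVal);
    _mm256_storeu_si256((__m256i *)aMaxIndex, vMaxIndex);
    maxVal = aMaxVal[0];
    *maxIndex = aMaxIndex[0];
    for (i = 1; i < 8; ++i)
    {
        if (aMaxVal[i] > maxVal)
        {
            maxVal = aMaxVal[i];
            *maxIndex = aMaxIndex[i];
        }
    }
    return maxVal;
}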
One approach would be to calculate the maximum in the first pass, and find its index by linear search in the second pass. Here is a sample implementation in SSE2:
#define anybit __builtin_ctz //or lookup table with 16 entries...
float find_largest_element_in_matrix_SSE(const float* m, int dims, int *maxIndex) {
//first pass: calculate maximum as usual
__m128 vMaxVal = _mm_loadu_ps(m);
for (int i = 4; i < dims * dims; i += 4)
vMaxVal = _mm_max_ps(vMaxVal, _mm_loadu_ps(&m[i]));
//perform in-register reduction
vMaxVal = _mm_max_ps(vMaxVal, _mm_shuffle_ps(vMaxVal, vMaxVal, _MM_SHUFFLE(2, 3, 0, 1)));
vMaxVal = _mm_max_ps(vMaxVal, _mm_shuffle_ps(vMaxVal, vMaxVal, _MM_SHUFFLE(1, 0, 3, 2)));
//second pass: search for maximal value
for (int i = 0; i < dims * dims; i += 4) {
__m128 vIsMax = _mm_cmpeq_ps(vMaxVal, _mm_loadu_ps(&m[i]));
if (int mask = _mm_movemask_ps(vIsMax)) {
*maxIndex = i + anybit(mask);
return _mm_cvtss_f32(vMaxVal);
}
}
}
Note that the branch in the second loop should be almost perfectly predicted unless your input data is very small.
The solution suffers from several problems, notably:
It may work incorrectly in the presence of weird floating point values, e.g. NaNs.
If your matrix does not fit into CPU cache, then the code would read the matrix twice from the main memory, so it would be two times slower than the single-pass approach. This can be solved for large matrices by block-wise processing.
In the first loop each iteration depends on the previous one (vMaxVal is both modified and read), so it is limited by the latency of _mm_max_ps. It would help to unroll the first loop a bit (2x or 4x), keeping several independent registers for vMaxVal, as sketched below (the second loop would also benefit from unrolling).
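For illustration, a 4x-unrolled version of the first pass could look like this (a sketch, assuming dims * dims is a multiple of 16):
__m128 vMax0 = _mm_loadu_ps(m);
__m128 vMax1 = _mm_loadu_ps(m + 4);
__m128 vMax2 = _mm_loadu_ps(m + 8);
__m128 vMax3 = _mm_loadu_ps(m + 12);
for (int i = 16; i < dims * dims; i += 16) {
    vMax0 = _mm_max_ps(vMax0, _mm_loadu_ps(&m[i]));
    vMax1 = _mm_max_ps(vMax1, _mm_loadu_ps(&m[i + 4]));
    vMax2 = _mm_max_ps(vMax2, _mm_loadu_ps(&m[i + 8]));
    vMax3 = _mm_max_ps(vMax3, _mm_loadu_ps(&m[i + 12]));
}
// Combine the four independent accumulators, then do the in-register reduction as before.
__m128 vMaxVal = _mm_max_ps(_mm_max_ps(vMax0, vMax1), _mm_max_ps(vMax2, vMax3));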
Porting to AVX should be pretty straightforward, except for the in-register reduction:
vMaxVal = _mm256_max_ps(vMaxVal, _mm256_shuffle_ps(vMaxVal, vMaxVal, _MM_SHUFFLE(2, 3, 0, 1)));
vMaxVal = _mm256_max_ps(vMaxVal, _mm256_shuffle_ps(vMaxVal, vMaxVal, _MM_SHUFFLE(1, 0, 3, 2)));
vMaxVal = _mm256_max_ps(vMaxVal, _mm256_permute2f128_ps(vMaxVal, vMaxVal, 1));
Yet another approach:
void find_largest_element_in_matrix_SSE(float * matrix, size_t n, int * row, int * column, float * v){
__m128 indecies = _mm_setr_ps(0, 1, 2, 3);
__m128 update = _mm_setr_ps(4, 4, 4, 4);
__m128 max_indecies = _mm_setr_ps(0, 1, 2, 3);
__m128 max = _mm_load_ps(matrix);
for (int i = 4; i < n * n; i+=4){
indecies = _mm_add_ps(indecies, update);
__m128 pm2 = _mm_load_ps(&matrix[i]);
__m128 mask = _mm_cmpge_ps(max, pm2);
max = _mm_max_ps(max, pm2);
max_indecies = _mm_or_ps(_mm_and_ps(max_indecies, mask), _mm_andnot_ps(mask, indecies));
}
__declspec (align(16)) int max_ind[4];
__m128i maxi = _mm_cvtps_epi32(max_indecies);
_mm_store_si128((__m128i *) max_ind, maxi);
int c = max_ind[0];
for (int i = 1; i < 4; i++)
if (matrix[max_ind[i]] > matrix[c] || (matrix[max_ind[i]] == matrix[c] && max_ind[i] < c)){ // prefer the larger value; break ties by the smaller index
c = max_ind[i];
}
*v = matrix[c];
*row = c / n;
*column = c % n;
}
void find_largest_element_in_matrix_AVX(float * matrix, size_t n, int * row, int * column, float * v){
__m256 indecies = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
__m256 update = _mm256_setr_ps(8, 8, 8, 8, 8, 8, 8, 8);
__m256 max_indecies = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
__m256 max = _mm256_load_ps(matrix);
for (int i = 8; i < n * n; i += 8){
indecies = _mm256_add_ps(indecies, update);
__m256 pm2 = _mm256_load_ps(&matrix[i]);
__m256 mask = _mm256_cmp_ps(max, pm2, _CMP_GE_OQ);
max = _mm256_max_ps(max, pm2);
max_indecies = _mm256_or_ps(_mm256_and_ps(max_indecies, mask), _mm256_andnot_ps(mask, indecies));
}
__declspec (align(32)) int max_ind[8];
__m256i maxi = _mm256_cvtps_epi32(max_indecies);
_mm256_store_si256((__m256i *) max_ind, maxi);
int c = max_ind[0];
for (int i = 1; i < 8; i++)
if (matrix[max_ind[i]] > matrix[c] || (matrix[max_ind[i]] == matrix[c] && max_ind[i] < c)){ // prefer the larger value; break ties by the smaller index
c = max_ind[i];
}
*v = matrix[c];
*row = c / n;
*column = c % n;
}
I have a mask (an 8-bit gray image) and I need to calculate the center of the region with a given index in the mask.
To do this I need to calculate the first-order moments of this mask along the X and Y axes.
Currently I'm using the following code:
void GetCenter(const uint8_t * mask, size_t stride, size_t width, size_t height,
uint8_t index, double * centerX, double * centerY)
{
uint64_t sum = 0, sumX = 0, sumY = 0;
for(size_t y = 0; y < height; ++y)
{
for(size_t x = 0; x < width; ++x)
{
if(mask[x] == index)
{
sum++;
sumX += x;
sumY += y;
}
}
mask += stride;
}
*centerX = sum ? (double)sumX/sum : 0;
*centerY = sum ? (double)sumY/sum : 0;
}
And I have a question: is there any way to improve the performance of this algorithm?
There is a way to greatly (more than ten times) improve the performance of this algorithm.
To do it you need to use the CPU's SIMD instructions (SSE2, AVX2, Altivec, NEON, etc.).
I wrote an example using SSE2 instructions (AVX2 code will be similar):
const __m128i K_0 = _mm_setzero_si128();
const __m128i K8_1 = _mm_set1_epi8(1);
const __m128i K16_1 = _mm_set1_epi16(1);
const __m128i K16_8 = _mm_set1_epi16(8);
const __m128i K16_I = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
inline void AddMoments(const __m128i & mask, const __m128i & x, const __m128i & y,
__m128i & sumX, __m128i & sumY)
{
sumX = _mm_add_epi32(sumX, _mm_madd_epi16(_mm_and_si128(mask, x), K16_1));
sumY = _mm_add_epi32(sumY, _mm_madd_epi16(_mm_and_si128(mask, y), K16_1));
}
inline int ExtractSum(__m128i a)
{
return _mm_cvtsi128_si32(a) + _mm_cvtsi128_si32(_mm_srli_si128(a, 4)) +
_mm_cvtsi128_si32(_mm_srli_si128(a, 8)) + _mm_cvtsi128_si32(_mm_srli_si128(a, 12));
}
void GetCenter(const uint8_t * mask, size_t stride, size_t width, size_t height,
uint8_t index, double * centerX, double * centerY)
{
size_t alignedWidth = width & ~(sizeof(__m128i) - 1);
const __m128i _index = _mm_set1_epi8(index);
uint64_t sum = 0, sumX = 0, sumY = 0;
for(size_t y = 0; y < height; ++y)
{
size_t x = 0;
__m128i _x = K16_I;
__m128i _y = _mm_set1_epi16((short)y);
__m128i _sum = K_0;
__m128i _sumX = K_0;
__m128i _sumY = K_0;
for(; x < alignedWidth; x += sizeof(__m128i))
{
__m128i _mask = _mm_and_si128(_mm_cmpeq_epi8(_mm_loadu_si128((__m128i*)(mask + x)), _index), K8_1);
_sum = _mm_add_epi64(_sum, _mm_sad_epu8(_mask, K_0));
AddMoments(_mm_cmpeq_epi16(_mm_unpacklo_epi8(_mask, K_0), K16_1), _x, _y, _sumX, _sumY);
_x = _mm_add_epi16(_x, K16_8);
AddMoments(_mm_cmpeq_epi16(_mm_unpackhi_epi8(_mask, K_0), K16_1), _x, _y, _sumX, _sumY);
_x = _mm_add_epi16(_x, K16_8);
}
sum += ExtractSum(_sum);
sumX += ExtractSum(_sumX);
sumY += ExtractSum(_sumY);
for(; x < width; ++x)
{
if(mask[x] == index)
{
sum++;
sumX += x;
sumY += y;
}
}
mask += stride;
}
*centerX = sum ? (double)sumX/sum : 0;
*centerY = sum ? (double)sumY/sum : 0;
}
P.S. There is a simpler and cross-platform way to improve performance, using an external library (http://simd.sourceforge.net/):
void GetCenter(const uint8_t * mask, size_t stride, size_t width, size_t height,
uint8_t index, double * centerX, double * centerY)
{
uint64_t sum, sumX, sumY, sumXX, sumXY, sumYY;
::SimdGetMoments(mask, stride, width, height, index,
&sum, &sumX, &sumY, &sumXX, &sumXY, &sumYY);
*centerX = sum ? (double)sumX/sum : 0;
*centerY = sum ? (double)sumY/sum : 0;
}
An implementation using _mm_movemask_epi8 and 8-bit lookup tables:
uint8_t g_sum[1 << 8], g_sumX[1 << 8];
bool Init()
{
for(int i = 0, n = 1 << 8; i < n; ++i)
{
g_sum[i] = 0;
g_sumX[i] = 0;
for(int j = 0; j < 8; ++j)
{
g_sum[i] += (i >> j) & 1;
g_sumX[i] += ((i >> j) & 1)*j;
}
}
return true;
}
bool g_inited = Init();
inline void AddMoments(uint8_t mask, size_t x, size_t y,
uint64_t & sum, uint64_t & sumX, uint64_t & sumY)
{
int value = g_sum[mask];
sum += value;
sumX += x * value + g_sumX[mask];
sumY += y * value;
}
void GetCenter(const uint8_t * mask, size_t stride, size_t width, size_t height,
uint8_t index, double * centerX, double * centerY)
{
size_t alignedWidth = width & ~(sizeof(__m128i) - 1);
const __m128i _index = _mm_set1_epi8(index);
union PackedValue
{
uint8_t u8[4];
uint16_t u16[2];
uint32_t u32;
} _mask;
uint64_t sum = 0, sumX = 0, sumY = 0;
for(size_t y = 0; y < height; ++y)
{
size_t x = 0;
for(; x < alignedWidth; x += sizeof(__m128i))
{
_mask.u32 = _mm_movemask_epi8(_mm_cmpeq_epi8(
_mm_loadu_si128((__m128i*)(mask + x)), _index));
AddMoments(_mask.u8[0], x, y, sum, sumX, sumY);
AddMoments(_mask.u8[1], x + 8, y, sum, sumX, sumY);
}
for(; x < width; ++x)
{
if(mask[x] == index)
{
sum++;
sumX += x;
sumY += y;
}
}
mask += stride;
}
*centerX = sum ? (double)sumX/sum : 0;
*centerY = sum ? (double)sumY/sum : 0;
}
An implementation using _mm_movemask_epi8 and 16-bit lookup tables:
uint16_t g_sum[1 << 16], g_sumX[1 << 16];
bool Init()
{
for(int i = 0, n = 1 << 16; i < n; ++i)
{
g_sum[i] = 0;
g_sumX[i] = 0;
for(int j = 0; j < 16; ++j)
{
g_sum[i] += (i >> j) & 1;
g_sumX[i] += ((i >> j) & 1)*j;
}
}
return true;
}
bool g_inited = Init();
inline void AddMoments(uint16_t mask, size_t x, size_t y,
uint64_t & sum, uint64_t & sumX, uint64_t & sumY)
{
int value = g_sum[mask];
sum += value;
sumX += x * value + g_sumX[mask];
sumY += y * value;
}
void GetCenter(const uint8_t * mask, size_t stride, size_t width, size_t height,
uint8_t index, double * centerX, double * centerY)
{
size_t alignedWidth = width & ~(sizeof(__m128i) - 1);
const __m128i _index = _mm_set1_epi8(index);
union PackedValue
{
uint8_t u8[4];
uint16_t u16[2];
uint32_t u32;
} _mask;
uint64_t sum = 0, sumX = 0, sumY = 0;
for(size_t y = 0; y < height; ++y)
{
size_t x = 0;
for(; x < alignedWidth; x += sizeof(__m128i))
{
_mask.u32 = _mm_movemask_epi8(_mm_cmpeq_epi8(
_mm_loadu_si128((__m128i*)(mask + x)), _index));
AddMoments(_mask.u16[0], x, y, sum, sumX, sumY);
}
for(; x < width; ++x)
{
if(mask[x] == index)
{
sum++;
sumX += x;
sumY += y;
}
}
mask += stride;
}
*centerX = sum ? (double)sumX/sum : 0;
*centerY = sum ? (double)sumY/sum : 0;
}
Performance comparison for a 1920x1080 image:
Base version: 8.261 ms;
1st optimization: 0.363 ms (22 times faster);
2nd optimization: 0.280 ms (29 times faster);
3rd optimization: 0.299 ms (27 times faster);
4th optimization: 0.325 ms (25 times faster).
As you can see above, the code using 8-bit lookup tables performs better than the code using 16-bit lookup tables. But the external library is better anyway, even though it additionally calculates the second-order moments.
Another acceleration technique is run-length coding.
You can decompose the rows into horizontal runs where the mask is active. You can detect the runs on the fly, or precompute them and store the image in that form, if that makes sense.
Then a run can be accumulated as a whole. Let a run of length L start at (X, Y), i.e. cover columns X to X+L-1 of row Y; then use
Sum+= L;
SumX+= (2 * X + L - 1) * L;
SumY+= Y * L;
In the end, divide SumX by 2.
The longer the runs, the more effective the trick is.
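For illustration, here is a scalar sketch of the per-row run-length accumulation (AccumulateRow and sumX2 are just hypothetical names; sumX2 holds twice the X moment and is halved at the end, as described above):
// Accumulate the moments of one mask row using run-length decomposition.
// A run covers columns X .. X+L-1, whose column sum is (2*X + L - 1) * L / 2.
inline void AccumulateRow(const uint8_t * row, size_t width, size_t y, uint8_t index,
    uint64_t & sum, uint64_t & sumX2, uint64_t & sumY)
{
    size_t x = 0;
    while (x < width)
    {
        if (row[x] != index)
        {
            ++x;
            continue;
        }
        size_t start = x;                    // run start
        while (x < width && row[x] == index) // find run end
            ++x;
        uint64_t L = x - start;              // run length
        sum += L;
        sumX2 += (2 * start + L - 1) * L;    // twice the X moment
        sumY += y * L;
    }
}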
Using SSE2 or later, you can try the PMOVMSKB (Move Byte Mask) instruction.
First compare 16 mask pixels to the (replicated) index value to get 16 comparison results. Then pack these into a 16-bit number using that instruction.
Then, using two precomputed lookup tables, perform the accumulations in scalar mode.
One lookup table gives you the count of active mask pixels, and the other gives you the sum of active mask pixels weighted by their abscissa, i.e. the X moment.
Something like
int PackedValue= _mm_movemask_epi8(_mm_cmpeq_epi8(_mm_loadu_si128((__m128i*)&Mask[X]), ReplicatedIndex));
Sum+= Count[PackedValue];
SumX+= X * Count[PackedValue] + MomentX[PackedValue];
SumY+= Y * Count[PackedValue];
Depending on the amount of memory you are willing to spend, the lookup tables can have byte indexes (256 entries, used twice per 16-pixel block) or word indexes (65536 entries). In both cases, the count and moment values fit in a single byte (0 to 8/16 and 0 to 28/120 respectively).
An AVX2 implementation is also possible, packing 32 pixels at a time. Lookup tables with doubleword indexes seem unreasonable, though. :-)
We need to change/reimplement the standard DFT implementation in GSL, which is:
int
FUNCTION(gsl_dft_complex,transform) (const BASE data[],
const size_t stride, const size_t n,
BASE result[],
const gsl_fft_direction sign)
{
size_t i, j, exponent;
const double d_theta = 2.0 * ((int) sign) * M_PI / (double) n;
/* FIXME: check that input length == output length and give error */
for (i = 0; i < n; i++)
{
ATOMIC sum_real = 0;
ATOMIC sum_imag = 0;
exponent = 0;
for (j = 0; j < n; j++)
{
double theta = d_theta * (double) exponent;
/* sum = exp(i theta) * data[j] */
ATOMIC w_real = (ATOMIC) cos (theta);
ATOMIC w_imag = (ATOMIC) sin (theta);
ATOMIC data_real = REAL(data,stride,j);
ATOMIC data_imag = IMAG(data,stride,j);
sum_real += w_real * data_real - w_imag * data_imag;
sum_imag += w_real * data_imag + w_imag * data_real;
exponent = (exponent + i) % n;
}
REAL(result,stride,i) = sum_real;
IMAG(result,stride,i) = sum_imag;
}
return 0;
}
In this implementation, GSL iterates over the input for every output bin, so both loops run over the input size. However, we only need the result for certain frequency bins. For instance, we have 4096 samples, but we need to calculate the DFT for only 128 different frequencies. Could you help me define or implement the required DFT behaviour? Thanks in advance.
EDIT: We are not looking for just the first m frequencies.
Actually, is the approach below correct for finding the DFT result for a given frequency bin number?
N = sample size
B = frequency bin size
X[k] = Sum(i=0..N-1){ x[i]*exp(-j*2*pi*k*i/B) }, k = 0,...,127
EDIT: I might not have explained the DFT problem in enough detail; nevertheless, I am happy to provide the answer below:
void compute_dft(const std::vector<double>& signal,
const std::vector<double>& frequency_band,
std::vector<double>& result,
const double sampling_rate)
{
if(0 == result.size() || result.size() != (frequency_band.size() << 1)){
result.resize(frequency_band.size() << 1, 0.0);
}
//note complex signal assumption
const double d_theta = -2.0 * PI * sampling_rate;
for(size_t k = 0; k < frequency_band.size(); ++k){
const double f_k = frequency_band[k];
double real_sum = 0.0;
double imag_sum = 0.0;
for(size_t n = 0; n < (signal.size() >> 1); ++n){
double theta = d_theta * f_k * (n + 1);
double w_real = cos(theta);
double w_imag = sin(theta);
double d_real = signal[2*n];
double d_imag = signal[2*n + 1];
real_sum += w_real * d_real - w_imag * d_imag;
imag_sum += w_real * d_imag + w_imag * d_real;
}
result[2*k] = real_sum;
result[2*k + 1] = imag_sum;
}
}
Assuming you just want the first m output frequencies:
int
FUNCTION(gsl_dft_complex,transform) (const BASE data[],
const size_t stride,
const size_t n, // input size
const size_t m, // output size (m <= n)
BASE result[],
const gsl_fft_direction sign)
{
size_t i, j, exponent;
const double d_theta = 2.0 * ((int) sign) * M_PI / (double) n;
/* FIXME: check that m <= n and give error */
for (i = 0; i < m; i++) // for each of m output bins
{
ATOMIC sum_real = 0;
ATOMIC sum_imag = 0;
exponent = 0;
for (j = 0; j < n; j++) // for each of n input points
{
double theta = d_theta * (double) exponent;
/* sum = exp(i theta) * data[j] */
ATOMIC w_real = (ATOMIC) cos (theta);
ATOMIC w_imag = (ATOMIC) sin (theta);
ATOMIC data_real = REAL(data,stride,j);
ATOMIC data_imag = IMAG(data,stride,j);
sum_real += w_real * data_real - w_imag * data_imag;
sum_imag += w_real * data_imag + w_imag * data_real;
exponent = (exponent + i) % n;
}
REAL(result,stride,i) = sum_real;
IMAG(result,stride,i) = sum_imag;
}
return 0;
}
Is anybody aware of a method to achieve vec_msum functionality with a vector of float values?
I'm quite new to SIMD, and although I think I'm starting to make sense of it, there are still a few puzzles.
My end goal is to rewrite the function "convolve_altivec" (as found in the accepted answer to this question) such that it accepts its input parameters as floats instead of shorts.
That is, the prototype should be
float convolve_altivec(const float *a, const float *b, int n)
I'm trying to match the functionality provided by the original non-optimised function below:
float convolve(const float *a, const float *b, int n)
{
float out = 0.f;
while (n --)
out += (*(a ++)) * (*(b ++));
return out;
}
My initial efforts have been spent trying to port an existing SSE version of this same function to AltiVec instructions.
For a float version you need vec_madd.
Here's a float version of the previous 16-bit int implementation and test harness I posted in response to your earlier question:
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <altivec.h>
static float convolve_ref(const float *a, const float *b, int n)
{
float sum = 0.0f;
int i;
for (i = 0; i < n; ++i)
{
sum += a[i] * b[i];
}
return sum;
}
static inline float convolve_altivec(const float *a, const float *b, int n)
{
float sum = 0.0f;
vector float vsum = { 0.0f, 0.0f, 0.0f, 0.0f };
union {
vector float v;
float a[4];
} usum;
vector float *pa = (vector float *)a;
vector float *pb = (vector float *)b;
assert(((unsigned long)a & 15) == 0);
assert(((unsigned long)b & 15) == 0);
while (n >= 4)
{
vsum = vec_madd(*pa, *pb, vsum);
pa++;
pb++;
n -= 4;
}
usum.v = vsum;
sum = usum.a[0] + usum.a[1] + usum.a[2] + usum.a[3];
a = (float *)pa;
b = (float *)pb;
while (n --)
{
sum += (*a++ * *b++);
}
return sum;
}
int main(void)
{
const int n = 1002;
vector float _a[n / 4 + 1];
vector float _b[n / 4 + 1];
float *a = (float *)_a;
float *b = (float *)_b;
float sum_ref, sum_test;
int i;
for (i = 0; i < n; ++i)
{
a[i] = (float)rand();
b[i] = (float)rand();
}
sum_ref = convolve_ref(a, b, n);
sum_test = convolve_altivec(a, b, n);
printf("sum_ref = %g\n", sum_ref);
printf("sum_test = %g\n", sum_test);
printf("%s\n", fabsf((sum_ref - sum_test) / sum_ref) < 1.0e-6 ? "PASS" : "FAIL");
return 0;
}