Is it possible to use CUDA to parallelize this nested for loop? - c++

I want to speed up this nested for loop. I have just started learning CUDA; how could I use CUDA to parallelize this C++ code?
#include <algorithm>
#include <cmath>
#include <cstdlib>
#define PI 3.14159265
using namespace std;
int main()
{
    int nbint = 2;
    int hits = 20;
    int nbinp = 2;
    float _theta, _phi, _l, _m, _n, _k = 0, delta = 5;
    float x[20], y[20], z[20], a[20], t[20];
    for (int i = 0; i < hits; ++i)
    {
        x[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        y[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        z[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        a[i] = rand() / (float)(RAND_MAX / 100);
    }
    float maxforall = 1e-6;
    float theta0;
    float phi0;
    for (int i = 0; i < nbint; i++)
    {
        _theta = (0.5 + i) * delta;
        for (int j = 0; j < nbinp; j++)
        {
            _phi = (0.5 + j) * delta / _theta;
            _l = sin(_theta * PI / 180.0) * cos(_phi * PI / 180.0);
            _m = sin(_theta * PI / 180.0) * sin(_phi * PI / 180.0);
            _n = cos(_theta * PI / 180.0);
            for (int k = 0; k < hits; k++)
            {
                _k = -(_l * x[k] + _m * y[k] + _n * z[k]);
                t[k] = a[k] - _k;
            }
            sort(t, t + hits);
            float max = t[0];
            for (int k = 0; k < hits; k++)
            {
                if (max < t[k])
                    max = t[k];
            }
            if (max > maxforall)
            {
                maxforall = max;
            }
        }
    }
    return 0;
}
I want to run the innermost for loop and the sort part (maybe the whole nested loop) in parallel. After sorting those arrays I find the maximum of all of them. I use the maximum here to simplify the code. The reason I need the sort is that the maximum stands in for a continuous time interval (all the arrays contain time information). The sort puts those times in order from lowest to highest, and then I compare against a specific time interval (not a single value). The comparison works almost like picking the maximum, but over a continuous interval rather than a single value.

Your 3 nested loops calculate nbint*nbinp*hits values. Since each of those values is independent of the others, all of them can be calculated in parallel.
You stated in your comments that you have a commutative and associative "filter condition" which reduces the output to a single scalar value. This can be exploited to avoid sorting and storing the temporary values. Instead, we can calculate the values on the fly and then apply a parallel reduction to determine the end result.
This could be done in "raw" CUDA; below I implemented the idea using Thrust. The main idea is to run grid_op nbint*nbinp*hits times in parallel. To recover the three original "loop indices" from the single scalar index passed to grid_op, the algorithm from this SO question is used.
thrust::transform_reduce performs the on-the-fly transformation and the subsequent parallel reduction (here thrust::maximum is used as a stand-in for the actual filter condition).
#include <cmath>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>
// ### BEGIN utility for demo ####
#include <iostream>
#include <thrust/random.h>
thrust::host_vector<float> random_vector(const size_t N)
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
thrust::host_vector<float> temp(N);
for(size_t i = 0; i < N; i++) {
temp[i] = u01(rng);
}
return temp;
}
// ### END utility for demo ####
template <typename... Iterators>
thrust::zip_iterator<thrust::tuple<Iterators...>> zip(Iterators... its)
{
return thrust::make_zip_iterator(thrust::make_tuple(its...));
}
template <typename ZipIterator>
class grid_op
{
public:
grid_op(ZipIterator zipIt, std::size_t dim1, std::size_t dim2) : zipIt(zipIt), dim1(dim1), dim2(dim2){}
__host__ __device__
float operator()(std::size_t index) const
{
const auto coords = unflatten_3d_index(index, dim1, dim2);
const auto values = zipIt[thrust::get<2>(coords)];
const float delta = 5;
const float _theta = (0.5f + thrust::get<0>(coords))*delta;
const float _phi = (0.5f + thrust::get<1>(coords))*delta / _theta;
const float _l = sin(_theta* M_PI / 180.0)*cos(_phi* M_PI / 180.0);
const float _m = sin(_theta* M_PI / 180.0)*sin(_phi* M_PI / 180.0);
const float _n = cos(_theta* M_PI / 180.0);
const float _k = -(_l*thrust::get<0>(values) + _m*thrust::get<1>(values) + _n*thrust::get<2>(values));
return (thrust::get<3>(values) - _k);
}
private:
__host__ __device__
thrust::tuple<std::size_t, std::size_t, std::size_t>
unflatten_3d_index(std::size_t index, std::size_t dim1, std::size_t dim2) const
{
// taken from https://stackoverflow.com/questions/29142417/4d-position-from-1d-index
std::size_t x = index % dim1;
std::size_t y = ( ( index - x ) / dim1 ) % dim2;
std::size_t z = ( ( index - y * dim1 - x ) / (dim1 * dim2) );
return thrust::make_tuple(x,y,z);
}
ZipIterator zipIt;
std::size_t dim1;
std::size_t dim2;
};
template <typename ZipIterator>
grid_op<ZipIterator> make_grid_op(ZipIterator zipIt, std::size_t dim1, std::size_t dim2)
{
return grid_op<ZipIterator>(zipIt, dim1, dim2);
}
int main()
{
const int nbint = 3;
const int nbinp = 4;
const int hits = 20;
const std::size_t N = nbint * nbinp * hits;
thrust::device_vector<float> d_x = random_vector(hits);
thrust::device_vector<float> d_y = random_vector(hits);
thrust::device_vector<float> d_z = random_vector(hits);
thrust::device_vector<float> d_a = random_vector(hits);
auto zipIt = zip(d_x.begin(), d_y.begin(), d_z.begin(), d_a.begin());
auto countingIt = thrust::counting_iterator<std::size_t>(0);
auto unary_op = make_grid_op(zipIt, nbint, nbinp);
auto binary_op = thrust::maximum<float>();
const float init = 0;
float max = thrust::transform_reduce(
countingIt, countingIt+N,
unary_op,
init,
binary_op
);
std::cout << "max = " << max << std::endl;
}
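Since thrust::maximum only stands in for the real filter condition, any commutative and associative combine step can be passed as binary_op instead. A minimal sketch; the functor name and its body are placeholders, not the condition from the comments:
struct filter_condition
{
    __host__ __device__
    float operator()(float a, float b) const
    {
        // replace with the real commutative and associative combine step
        return a > b ? a : b;
    }
};
// usage: thrust::transform_reduce(countingIt, countingIt + N, unary_op, init, filter_condition{});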

Related

Why is multi-threading of matrix calculation not faster than single-core?

This is my first time using multi-threading to speed up a heavy calculation.
Background: The idea is to calculate a Kernel Covariance matrix, by reading a list of 3D points x_test and calculating the corresponding matrix, which has dimensions x_test.size() x x_test.size().
I already sped up the calculations by only computing the lower triangular matrix. Since all the calculations are independent of each other, I tried to speed up the process (x_test.size() = 27000 in my case) by splitting the calculation of the matrix entries row-wise, assigning a range of rows to each thread.
On a single core the calculation took about 280 seconds each time; on 4 cores it took 270-290 seconds.
main.cpp
int main(int argc, char *argv[]) {
double sigma0sq = 1;
double lengthScale [] = {0.7633, 0.6937, 3.3307e+07};
const std::vector<std::vector<double>> x_test = parse2DCsvFile(inputPath);
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size()*x_test.size()/2;
const int numThreads = 4;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for(std::size_t i=1; i<x_test.size()+1; ++i){
int prod = i*(i+1)/2 - j*(j+1)/2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if(indices.size() == numThreads-1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
/* Spreding calculations to multiple threads */
std::vector<std::thread> threads;
for(std::size_t i = 1; i < indices.size(); ++i){
threads.push_back(std::thread(calculateKMatrixCpp, x_test, lengthScale, sigma0sq, i, indices.at(i-1), indices.at(i)));
}
for(auto & th: threads){
th.join();
}
return 0;
}
As you can see, each thread performs the following calculations on the data assigned to it:
void calculateKMatrixCpp(const std::vector<std::vector<double>> xtest, double lengthScale[], double sigma0sq, int threadCounter, int start, int stop){
char buffer[8192];
std::ofstream out("lower_half_matrix_" + std::to_string(threadCounter) +".csv");
out.rdbuf()->pubsetbuf(buffer, 8192);
for(int i = start; i < stop; ++i){
for(int j = 0; j < i+1; ++j){
double kij = seKernel(xtest.at(i), xtest.at(j), lengthScale, sigma0sq);
if (j!=0)
out << ',';
out << kij;
}
if(i!=xtest.size()-1 )
out << '\n';
}
out.close();
}
and
double seKernel(const std::vector<double> x1,const std::vector<double> x2, double lengthScale[], double sigma0sq) {
double sum(0);
for(std::size_t i=0; i<x1.size();i++){
sum += pow((x1.at(i)-x2.at(i))/lengthScale[i],2);
}
return sigma0sq*exp(-0.5*sum);
}
Aspects I considered
locking due to simultaneous access to the data vector -> I don't pass a reference to the threads but a copy of the data. I know this is not optimal in terms of RAM usage, but as far as I know it should prevent simultaneous data access, since every thread has its own copy
Output -> every thread writes its part of the lower triangular matrix to its own file. My task manager doesn't indicate full SSD utilization in the slightest
Compiler and machine
Windows 11
GNU GCC Compiler
Code::Blocks (although I don't think that should be of importance)
There are many details that can be improved in your code, but I think the two biggest issues are:
using vectors of vectors, which leads to fragmented data;
writing each piece of data to file as soon as its value is computed.
The first point is easy to fix: use something like std::vector<std::array<double, 3>>. In the code below I use an alias to make it more readable:
using Point3D = std::array<double, 3>;
std::vector<Point3D> x_test;
The second point is slightly harder to address. I assume you wanted to write to the disk inside each thread because you couldn't manage to write to a shared buffer that you could then write to a file.
Here is a way to do exactly that:
void calculateKMatrixCpp(
std::vector<Point3D> const& xtest, Point3D const& lengthScale, double sigma0sq,
int threadCounter, int start, int stop, std::vector<double>& kMatrix
) {
// ...
double& kij = kMatrix[i * xtest.size() + j];
kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
// ...
}
// ...
threads.push_back(std::thread(
calculateKMatrixCpp, x_test, lengthScale, sigma0sq,
i, indices[i-1], indices[i], std::ref(kMatrix)
));
Here, kMatrix is the shared buffer and represents the whole matrix you are trying to compute. You need to pass it to the thread via std::ref. Each thread will write to a different location in that buffer, so there is no need for any mutex or other synchronization.
Once you make these changes and try to write kMatrix to the disk, you will realize that this is the part that takes the most time, by far.
Below is the full code I tried on my machine, and the computation time was about 2 seconds whereas the writing-to-file part took 300 seconds! No amount of multithreading can speed that up.
If you truly want to write all that data to the disk, you may have some luck with file mapping. Computing the exact size needed should be easy enough if all values have the same number of digits, and it looks like you could write the values with multithreading. I have never done anything like that, so I can't really say much more about it, but it looks to me like the fastest way to write multiple gigabytes of memory to the disk.
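For orientation only, the POSIX shape of that idea would look roughly like the sketch below (on Windows the analogues are CreateFileMapping/MapViewOfFile); this is untested and only meant to show the API shape:
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
// Create a file of `size` bytes and map it into memory; threads can then format
// their rows into disjoint regions of the returned buffer without synchronization.
char* map_output_file(const char* path, std::size_t size)
{
    int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) return nullptr;
    if (ftruncate(fd, static_cast<off_t>(size)) != 0) { close(fd); return nullptr; }
    void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd); // the mapping stays valid after closing the descriptor
    return p == MAP_FAILED ? nullptr : static_cast<char*>(p);
}
// When every thread has finished writing its region: munmap(buffer, size);
The full code referred to above follows.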
#include <vector>
#include <thread>
#include <iostream>
#include <string>
#include <cmath>
#include <array>
#include <random>
#include <fstream>
#include <chrono>
using Point3D = std::array<double, 3>;
auto generateSampleData() -> std::vector<Point3D> {
static std::minstd_rand g(std::random_device{}());
std::uniform_real_distribution<> d(-1.0, 1.0);
std::vector<Point3D> data;
data.reserve(27000);
for (auto i = 0; i < 27000; ++i) {
data.push_back({ d(g), d(g), d(g) });
}
return data;
}
double seKernel(Point3D const& x1, Point3D const& x2, Point3D const& lengthScale, double sigma0sq) {
double sum = 0.0;
for (auto i = 0u; i < 3u; ++i) {
double distance = (x1[i] - x2[i]) / lengthScale[i];
sum += distance*distance;
}
return sigma0sq * std::exp(-0.5*sum);
}
void calculateKMatrixCpp(std::vector<Point3D> const& xtest, Point3D const& lengthScale, double sigma0sq, int threadCounter, int start, int stop, std::vector<double>& kMatrix) {
std::cout << "start of thread " << threadCounter << "\n" << std::flush;
for(int i = start; i < stop; ++i) {
for(int j = 0; j < i+1; ++j) {
double& kij = kMatrix[i * xtest.size() + j];
kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
}
}
std::cout << "end of thread " << threadCounter << "\n" << std::flush;
}
int main() {
double sigma0sq = 1;
Point3D lengthScale = {0.7633, 0.6937, 3.3307e+07};
const std::vector<Point3D> x_test = generateSampleData();
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size()*x_test.size()/2;
const int numThreads = 4;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for(std::size_t i = 1; i < x_test.size()+1; ++i){
int prod = i*(i+1)/2 - j*(j+1)/2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if(indices.size() == numThreads-1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
auto start = std::chrono::system_clock::now();
std::vector<double> kMatrix(x_test.size() * x_test.size(), 0.0);
std::vector<std::thread> threads;
for (std::size_t i = 1; i < indices.size(); ++i) {
threads.push_back(std::thread(calculateKMatrixCpp, x_test, lengthScale, sigma0sq, i, indices[i - 1], indices[i], std::ref(kMatrix)));
}
for (auto& t : threads) {
t.join();
}
auto end = std::chrono::system_clock::now();
auto elapsed_seconds = std::chrono::duration<double>(end - start).count();
std::cout << "computation time: " << elapsed_seconds << "s" << std::endl;
start = std::chrono::system_clock::now();
constexpr int buffer_size = 131072;
char buffer[buffer_size];
std::ofstream out("matrix.csv");
out.rdbuf()->pubsetbuf(buffer, buffer_size);
for (int i = 0; i < x_test.size(); ++i) {
for (int j = 0; j < i + 1; ++j) {
if (j != 0) {
out << ',';
}
out << kMatrix[i * x_test.size() + j];
}
if (i != x_test.size() - 1) {
out << '\n';
}
}
end = std::chrono::system_clock::now();
elapsed_seconds = std::chrono::duration<double>(end - start).count();
std::cout << "writing time: " << elapsed_seconds << "s" << std::endl;
}
Okay, I've written an implementation with optimized formatting.
Using @Nelfeal's code, a run took around 250 seconds on my system, with the write time taking the most by far; or rather, the std::ofstream formatting taking most of the time.
I've written a C++20 version using std::format_to/std::format. It is a multi-threaded version that takes around 25-40 seconds to complete all the computations, formatting, and writing. Run in a single thread, it takes around 70 seconds on my system. The same performance should be achievable with the fmt library on C++11/14/17.
Here is the code:
import <vector>;
import <thread>;
import <iostream>;
import <string>;
import <cmath>;
import <array>;
import <random>;
import <fstream>;
import <chrono>;
import <format>;
import <filesystem>;
using Point3D = std::array<double, 3>;
auto generateSampleData(Point3D scale) -> std::vector<Point3D>
{
static std::minstd_rand g(std::random_device{}());
std::uniform_real_distribution<> d(-1.0, 1.0);
std::vector<Point3D> data;
data.reserve(27000);
for (auto i = 0; i < 27000; ++i)
{
data.push_back({ d(g)* scale[0], d(g)* scale[1], d(g)* scale[2] });
}
return data;
}
double seKernel(Point3D const& x1, Point3D const& x2, Point3D const& lengthScale, double sigma0sq) {
double sum = 0.0;
for (auto i = 0u; i < 3u; ++i) {
double distance = (x1[i] - x2[i]) / lengthScale[i];
sum += distance * distance;
}
return sigma0sq * std::exp(-0.5 * sum);
}
void calculateKMatrixCpp(std::vector<Point3D> const& xtest, Point3D lengthScale, double sigma0sq, int threadCounter, int start, int stop, std::filesystem::path localPath)
{
using namespace std::string_view_literals;
std::vector<char> buffer;
buffer.reserve(15'000);
std::ofstream out(localPath);
std::cout << std::format("starting thread {}: from {} to {}\n"sv, threadCounter, start, stop);
for (int i = start; i < stop; ++i)
{
for (int j = 0; j < i; ++j)
{
double kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
std::format_to(std::back_inserter(buffer), "{:.6g}, "sv, kij);
}
double kii = seKernel(xtest[i], xtest[i], lengthScale, sigma0sq);
std::format_to(std::back_inserter(buffer), "{:.6g}\n"sv, kii);
out.write(buffer.data(), buffer.size());
buffer.clear();
}
}
int main() {
double sigma0sq = 1;
Point3D lengthScale = { 0.7633, 0.6937, 3.3307e+07 };
const std::vector<Point3D> x_test = generateSampleData(lengthScale);
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size() * (x_test.size()+1) / 2;
const int numThreads = 3;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for (std::size_t i = 1; i < x_test.size() + 1; ++i) {
int prod = i * (i + 1) / 2 - j * (j + 1) / 2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if (indices.size() == numThreads - 1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
auto start = std::chrono::system_clock::now();
std::vector<std::thread> threads;
using namespace std::string_view_literals;
for (std::size_t i = 1; i < indices.size(); ++i)
{
threads.push_back(std::thread(calculateKMatrixCpp, std::ref(x_test), lengthScale, sigma0sq, i, indices[i - 1], indices[i], std::format("./matrix_{}.csv"sv, i-1)));
}
for (auto& t : threads)
{
t.join();
}
auto end = std::chrono::system_clock::now();
auto elapsed_seconds = std::chrono::duration<double>(end - start);
std::cout << std::format("total elapsed time: {}"sv, elapsed_seconds);
return 0;
}
Note: I used 6 digits of precision here as it is the default for std::ofstream. More digits means more writing time to disk and lower performance.
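For pre-C++20 toolchains, the inner formatting loop can use the {fmt} library instead of std::format_to. A rough sketch, assuming fmt is available; I have not benchmarked this variant:
#include <fmt/format.h>
#include <iterator>
#include <vector>
// Format one matrix row into a reusable character buffer, mirroring the
// std::format_to loop above.
void format_row(std::vector<char>& buffer, const double* row, int count)
{
    if (count <= 0) return;
    for (int j = 0; j + 1 < count; ++j)
        fmt::format_to(std::back_inserter(buffer), "{:.6g}, ", row[j]);
    fmt::format_to(std::back_inserter(buffer), "{:.6g}\n", row[count - 1]);
}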

CUDA Sort Z-Axis 3D Array C++/Thrust

I'm looking to sort a large 3D array along the z-axis.
Example array is X x Y x Z (1000x1000x5)
I'd like to sort along the z-axis, so I'd perform 1000x1000 sorts of 5 elements each.
Edit/Update: I tried an attempt with thrust below. It's functional and I'd store the output back, but this is very slow since I'm sorting 5 elements at a time per (x,y) location:
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
int main(){
int x = 1000, y = 1000, z = 5;
float*** unsorted_cube = new float** [x];
for (int i = 0; i < x; i++)
{
// Allocate memory blocks for
// rows of each 2D array
unsorted_cube[i] = new float* [y];
for (int j = 0; j < y; j++)
{
// Allocate memory blocks for
// columns of each 2D array
unsorted_cube[i][j] = new float[z];
}
}
for (int i = 0; i < x; i++)
{
for (int j = 0; j < y; j++)
{
unsorted_cube[i][j][0] = 4.0f;
unsorted_cube[i][j][1] = 3.0f;
unsorted_cube[i][j][2] = 1.0f;
unsorted_cube[i][j][3] = 5.0f;
unsorted_cube[i][j][4] = 2.0f;
}
}
for (int i = 0; i < 5; i++)
{
printf("unsorted_cube first 5 elements to sort at (0,0): %f\n", unsorted_cube[0][0][i]);
}
float temp_input[5];
float temp_output[5];
float* raw_ptr;
float raw_ptr_out[5];
cudaMalloc((void**)&raw_ptr, 5 * sizeof(float));
for (int i = 0; i < x; i++)
{
for (int j = 0; j < y; j++)
{
temp_input[0] = unsorted_cube[i][j][0];
temp_input[1] = unsorted_cube[i][j][1];
temp_input[2] = unsorted_cube[i][j][2];
temp_input[3] = unsorted_cube[i][j][3];
temp_input[4] = unsorted_cube[i][j][4];
cudaMemcpy(raw_ptr, temp_input, 5 * sizeof(float), cudaMemcpyHostToDevice);
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(raw_ptr);
thrust::sort(dev_ptr, dev_ptr + 5);
thrust::host_vector<float> host_vec(5);
thrust::copy(dev_ptr, dev_ptr + 5, raw_ptr_out);
if (i == 0 && j == 0)
{
for (int i = 0; i < 5; i++)
{
temp_output[i] = raw_ptr_out[i];
}
printf("sorted_cube[0,0,0] : %f\n", temp_output[0]);
printf("sorted_cube[0,0,1] : %f\n", temp_output[1]);
printf("sorted_cube[0,0,2] : %f\n", temp_output[2]);
printf("sorted_cube[0,0,3] : %f\n", temp_output[3]);
printf("sorted_cube[0,0,4] : %f\n", temp_output[4]);
}
}
}
}
Assuming that the data is in a format where the values in each xy-plane are consecutive in memory, i.e. data[((z * y_length) + y) * x_length + x], which is also best for coalescing memory accesses on the GPU.
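Since the question builds the cube as float***, it would first need to be copied into one contiguous buffer with exactly that layout. A small sketch; the function name is mine, not from the question:
#include <cstddef>
#include <vector>
// Copy cube[x][y][z] into a flat buffer laid out as data[((z * y_length) + y) * x_length + x].
std::vector<float> flatten_cube(float*** cube, int x_length, int y_length, int z_length)
{
    std::vector<float> data(static_cast<std::size_t>(x_length) * y_length * z_length);
    for (int z = 0; z < z_length; ++z)
        for (int y = 0; y < y_length; ++y)
            for (int x = 0; x < x_length; ++x)
                data[(static_cast<std::size_t>(z) * y_length + y) * x_length + x] = cube[x][y][z];
    return data; // copy into a thrust::device_vector<float> to get it onto the GPU
}
With the data in that layout, the following function sorts every z-column in one pass: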
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/zip_iterator.h>
void sort_in_z_dir(thrust::device_vector<float> &data,
int x_length, int y_length) { // z_length == 5
auto z_stride = x_length * y_length;
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
data.begin(),
data.begin() + z_stride,
data.begin() + 2 * z_stride,
data.begin() + 3 * z_stride,
data.begin() + 4 * z_stride)),
thrust::make_zip_iterator(thrust::make_tuple(
data.begin() + z_stride,
data.begin() + 2 * z_stride,
data.begin() + 3 * z_stride,
data.begin() + 4 * z_stride,
data.begin() + 5 * z_stride)),
[] __host__ __device__
(thrust::tuple<float, float, float, float, float> &values) {
float local_data[5] = {thrust::get<0>(values),
thrust::get<1>(values),
thrust::get<2>(values),
thrust::get<3>(values),
thrust::get<4>(values)};
thrust::sort(thrust::seq, local_data, local_data + 5);
thrust::get<0>(values) = local_data[0];
thrust::get<1>(values) = local_data[1];
thrust::get<2>(values) = local_data[2];
thrust::get<3>(values) = local_data[3];
thrust::get<4>(values) = local_data[4];
});
}
This solution is certainly very ugly in terms of hardcoding z_length. One could use some C++ template "magic" to make z_length a template parameter, but that seemed like overkill for this answer about Thrust.
See Convert std::tuple to std::array C++11 and How to convert std::array to std::tuple? for examples on interfacing between arrays and tuples.
The good thing about this solution is that, up to the sorting algorithm itself, it should be pretty much optimal performance-wise. I don't know if thrust::sort is optimized for such small input arrays, but you can replace it with any self-written sorting algorithm, as I proposed in the comments.
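For example, a hand-written insertion sort for such tiny arrays could be dropped in. This is only a sketch and has not been benchmarked against thrust::sort:
// Plain insertion sort for very small, fixed-size arrays; callable from device code.
__host__ __device__
inline void small_sort(float* data, int n)
{
    for (int i = 1; i < n; ++i) {
        float key = data[i];
        int j = i - 1;
        while (j >= 0 && data[j] > key) {
            data[j + 1] = data[j];
            --j;
        }
        data[j + 1] = key;
    }
}
// It would replace thrust::sort(thrust::seq, local_data, local_data + 5) in the functor above.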
If you want to be able to use different z_length without all this hassle, you might prefer this solution, which sorts in global memory, which is far from optimal, and feels a bit hacky because it uses Thrust pretty much only to launch a kernel. Here you want to have the data ordered the other way around: data[((x * y_length) + y) * z_length + z]
#include <thrust/counting_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
void sort_in_z_dir_alternative(thrust::device_vector<float> &data,
int x_length, int y_length, int z_length) {
int n_threads = x_length * y_length;
thrust::for_each(
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(n_threads),
[ddata = thrust::raw_pointer_cast(data.data()), z_length] __host__ __device__ (int idx) {
thrust::sort(thrust::seq,
ddata + z_length * idx,
ddata + z_length * (idx + 1));
});
}
If you are ok with z_length being a template parameter, this might be a solution that combines the best from both worlds (data format like in the first example):
#include <thrust/counting_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
template <int z_length>
void sort_in_z_dir_middle_ground(thrust::device_vector<float> &data,
int x_length, int y_length) {
int n_threads = x_length * y_length; // == z_stride
thrust::for_each(
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(n_threads),
[ddata = thrust::raw_pointer_cast(data.data()),
n_threads] __host__ __device__ (int idx) { // z_length is a template parameter and must not appear in the capture list
float local_data[z_length];
#pragma unroll
for (int i = 0; i < z_length; ++i) {
local_data[i] = ddata[idx + i * n_threads];
}
thrust::sort(thrust::seq,
local_data,
local_data + z_length);
#pragma unroll
for (int i = 0; i < z_length; ++i) {
ddata[idx + i * n_threads] = local_data[i];
}
});
}

Combining integers and floating point numbers: performance considerations

I have a complex set of template functions which do calculations in a loop, combining floating point numbers and the uint32_t loop indices. I was surprised to observe that for this kind of functions, my test code runs faster with double precision floating point numbers than with single precision ones.
As a test, I changed the format of my indices to uint16_t. After this, both the double and float version of the program were faster (as expected), but now the float version was significantly faster than the double version. I also tested the program with uint64_t indices. In this case the double and the float version are equally slow.
I imagine that this is because an uint32_t fits into the mantissa of a double but not into a float. Once the indices type was reduced to uint16_t, they also fit into the mantissa of a float and a conversion should be trivial. In case of uint64_t, the conversion to double also needs rounding, which would explain why both versions perform equally.
Can anybody confirm this explanation?
EDIT: Using int or long as the index type, the program runs as fast as with uint16_t. I guess this speaks against what I suspected at first.
EDIT: I compiled the program for windows on an x86 architecture.
EDIT: Here is a piece of code that reproduces the effect of double being faster than float for uint32_t, and of both cases being equally fast for int. Please do not comment on the usefulness of this code; it is a modified code fragment that reproduces the effect and does nothing sensible.
The main file:
#include "stdafx.h"
typedef short spectraType;
typedef int intermediateValue;
typedef double returnType;
#include "Preprocess_t.h"
#include "Windows.h"
#include <iostream>
int main()
{
const size_t numberOfBins = 10000;
const size_t numberOfSpectra = 500;
const size_t peakWidth = 25;
bool startPeak = false;
short peakHeight;
Preprocess<short, returnType> myPreprocessor;
std::vector<returnType> processedSpectrum;
std::vector<std::vector<short>> spectra(numberOfSpectra, std::vector<short>(numberOfBins));
std::vector<float> peakShape(peakWidth);
LARGE_INTEGER freq, start, stop;
double time_ms;
QueryPerformanceFrequency(&freq);
for (size_t i = 0; i < peakWidth; ++i)
{
peakShape[i] = static_cast<float>(exp(-(i - peakWidth / 2.0) *(i - peakWidth / 2.0) / 10.0));
}
for (size_t i = 0; i < numberOfSpectra; ++i)
{
size_t j = 0;
for (; j < 200; ++j)
{
spectra[i][j] = rand() % 100;
}
for (size_t k = 0; k < 25; ++k)
{
spectra[i][j] = static_cast<short>(16383 * peakShape[k]);
j++;
}
for (; j < numberOfBins; ++j)
{
startPeak = !static_cast<bool>(abs(rand()) % (numberOfBins / 4));
if (startPeak)
{
peakHeight = rand() % 16384;
for (size_t k = 0; k < 25 && j< numberOfBins; ++k)
{
spectra[i][j] = peakHeight * peakShape[k] + rand() % 100;
j++;
}
}
else
{
spectra[i][j] = rand() % 100;
}
}
for (j = 0; j < numberOfBins; ++j)
{
double temp = 1000.0*exp(-(static_cast<float>(j) / (numberOfBins / 3.0)))*sin(static_cast<float>(j) / (numberOfBins / 10.0));
spectra[i][j] -= static_cast<short>(1000.0*exp(-(static_cast<float>(j) / (numberOfBins / 3.0)))*sin(static_cast<float>(j) / (numberOfBins / 10.0)));
}
}
// This is where the critical code is called
QueryPerformanceCounter(&start);
for (int i = 0; i < numberOfSpectra; ++i)
{
myPreprocessor.SetSpectrum(&spectra[i], 1000, &processedSpectrum);
myPreprocessor.CorrectBaseline(30, 2.0);
}
QueryPerformanceCounter(&stop);
time_ms = static_cast<double>(stop.QuadPart - start.QuadPart) / static_cast<double>(freq.QuadPart);
std::cout << "time spend preprocessing: " << time_ms << std::endl;
std::cin.ignore();
return 0;
}
And the included header Preprocess_t.h:
#pragma once
#include <vector>
//typedef unsigned int indexType;
typedef unsigned short indexType;
template<typename T, typename Out_Type>
class Preprocess
{
public:
Preprocess() :threshold(1), sdev(1), laserPeakThreshold(500), a(0), b(0), firstPointUsedAfterLaserPeak(0) {};
~Preprocess() {};
void SetSpectrum(std::vector<T>* input, T laserPeakThreshold, std::vector<Out_Type>* processedSpectrum); ///#note We need the laserPeakThresholdParameter for the baseline correction, not onla for the shift.
void CorrectBaseline(indexType numberOfPoints, Out_Type thresholdFactor);
private:
void LinFitValues(indexType beginPoint);
Out_Type SumOfSquareDiffs(Out_Type x, indexType n);
Out_Type LinResidualSumOfSquareDist(indexType beginPoint);
std::vector<T>* input;
std::vector<Out_Type>* processedSpectrum;
std::vector<indexType> fitWave_X;
std::vector<Out_Type> fitWave;
Out_Type threshold;
Out_Type sdev;
T laserPeakThreshold;
Out_Type a, b;
indexType firstPointUsedAfterLaserPeak;
indexType numberOfPoints;
};
template<typename T, typename Out_Type>
void Preprocess<T, Out_Type>::CorrectBaseline(indexType numberOfPoints, Out_Type thresholdFactor)
{
this->numberOfPoints = numberOfPoints;
indexType numberOfBins = input->size();
indexType firstPointUsedAfterLaserPeak = 0;
indexType positionInFitWave = 0;
positionInFitWave = firstPointUsedAfterLaserPeak;
for (indexType i = firstPointUsedAfterLaserPeak; i < numberOfBins - numberOfPoints; i++) {
LinFitValues(positionInFitWave);
processedSpectrum->at(i + numberOfPoints) = input->at(i + numberOfPoints) - static_cast<Out_Type>(a + b*(i + numberOfPoints));
positionInFitWave++;
fitWave[positionInFitWave + numberOfPoints - 1] = input->at(i + numberOfPoints - 1);
fitWave_X[positionInFitWave + numberOfPoints - 1] = i + numberOfPoints - 1;
}
}
template<typename T, typename Out_Type>
void Preprocess<T, Out_Type>::LinFitValues(indexType beginPoint)
{
Out_Type y_mean, x_mean, SSxy, SSxx, normFactor;
y_mean = x_mean = SSxy = SSxx = normFactor = static_cast<Out_Type>(0);
indexType endPoint = beginPoint + numberOfPoints;
Out_Type temp;
if ((fitWave_X[endPoint - 1] - fitWave_X[beginPoint]) == numberOfPoints)
{
x_mean = (fitWave_X[endPoint - 1] - fitWave_X[beginPoint]) / static_cast<Out_Type>(2);
for (indexType i = beginPoint; i < endPoint; i++) {
y_mean += fitWave[i];
}
y_mean /= numberOfPoints;
SSxx = SumOfSquareDiffs(x_mean, fitWave_X[endPoint - 1]) - SumOfSquareDiffs(x_mean, fitWave_X[beginPoint]);
for (indexType i = beginPoint; i < endPoint; i++)
{
SSxy += (fitWave_X[i] - x_mean)*(fitWave[i] - y_mean);
}
}
else
{
for (indexType i = beginPoint; i < endPoint; i++) {
y_mean += fitWave[i];
x_mean += fitWave_X[i];
}
y_mean /= numberOfPoints;
x_mean /= numberOfPoints;
for (indexType i = beginPoint; i < endPoint; i++)
{
temp = (fitWave_X[i] - x_mean);
SSxy += temp*(fitWave[i] - y_mean);
SSxx += temp*temp;
}
}
b = SSxy / SSxx;
a = y_mean - b*x_mean;
}
template<typename T, typename Out_Type>
inline Out_Type Preprocess<T, Out_Type>::SumOfSquareDiffs(Out_Type x, indexType n)
{
return n*x*x + n*(n - 1)*x + ((n - 1)*n*(2 * n - 1)) / static_cast<Out_Type>(6);
}
template<typename T, typename Out_Type>
Out_Type Preprocess<T, Out_Type>::LinResidualSumOfSquareDist(indexType beginPoint)
{
Out_Type sumOfSquares = 0;
Out_Type temp;
for (indexType i = 0; i < numberOfPoints; ++i) {
temp = fitWave[i + beginPoint] - (a + b*fitWave_X[i + beginPoint]);
sumOfSquares += temp*temp;
}
return sumOfSquares;
}
template<typename T, typename Out_Type>
inline void Preprocess<T, Out_Type>::SetSpectrum(std::vector<T>* input, T laserPeakThreshold, std::vector<Out_Type>* processedSpectrum)
{
this->input = input;
fitWave_X.resize(input->size());
fitWave.resize(input->size());
this->laserPeakThreshold = laserPeakThreshold;
this->processedSpectrum = processedSpectrum;
processedSpectrum->resize(input->size());
}
Are you using MSVC? I had a similar effect when I implemented code that was essentially a matrix multiplication plus a vector addition. There, I thought that floats would be faster because they can be SIMD-parallelized better, as more of them fit into the SSE registers. However, using doubles was much faster.
After some investigation, I figured out from the assembler code that the floats need a conversion from the internal FPU precision, and this rounding was consuming most of the runtime. You can change the FP model to something faster at the cost of reduced precision. There is also some discussion in older threads here on SO.
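A reduced illustration of the pattern in question; how expensive the conversion is depends on the compiler and FP model, so treat this only as the shape of the problem, not as the OP's actual code:
#include <cstdint>
// The loop index takes part in floating-point math, so it has to be converted on
// every iteration; with an unsigned 32/64-bit index this conversion is typically
// more expensive than with a plain signed int (hypothetical example).
float accumulate(const float* data, std::uint32_t n)
{
    float sum = 0.0f;
    for (std::uint32_t i = 0; i < n; ++i)
        sum += data[i] * static_cast<float>(i); // index-to-float conversion in the hot loop
    return sum;
}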

Weighted Variance and Weighted Standard Deviation in C++

Hi, I'm trying to calculate the weighted variance and weighted standard deviation of a series of ints or floats. I found these links:
http://math.tutorvista.com/statistics/standard-deviation.html#weighted-standard-deviation
http://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weightsd.pdf (warning pdf)
Here are my template functions so far. Variance and standard deviation work fine but for the life of me I can't get the weighted versions to match the test case at the bottom of the pdf:
template <class T>
inline float Mean( T samples[], int count )
{
float mean = 0.0f;
if( count >= 1 )
{
for( int i = 0; i < count; i++ )
mean += samples[i];
mean /= (float) count;
}
return mean;
}
template <class T>
inline float Variance( T samples[], int count )
{
float variance = 0.0f;
if( count > 1 )
{
float mean = 0.0f;
for( int i = 0; i < count; i++ )
mean += samples[i];
mean /= (float) count;
for( int i = 0; i < count; i++ )
{
float sum = (float) samples[i] - mean;
variance += sum*sum;
}
variance /= (float) count - 1.0f;
}
return variance;
}
template <class T>
inline float StdDev( T samples[], int count )
{
return sqrtf( Variance( samples, count ) );
}
template <class T>
inline float VarianceWeighted( T samples[], T weights[], int count )
{
float varianceWeighted = 0.0f;
if( count > 1 )
{
float sumWeights = 0.0f, meanWeighted = 0.0f;
int numNonzero = 0;
for( int i = 0; i < count; i++ )
{
meanWeighted += samples[i]*weights[i];
sumWeights += weights[i];
if( ((float) weights[i]) != 0.0f ) numNonzero++;
}
if( sumWeights != 0.0f && numNonzero > 1 )
{
meanWeighted /= sumWeights;
for( int i = 0; i < count; i++ )
{
float sum = samples[i] - meanWeighted;
varianceWeighted += weights[i]*sum*sum;
}
varianceWeighted *= ((float) numNonzero)/((float) count*(numNonzero - 1.0f)*sumWeights); // this should be right but isn't?!
}
}
return varianceWeighted;
}
template <class T>
inline float StdDevWeighted( T samples[], T weights[], int count )
{
return sqrtf( VarianceWeighted( samples, weights, count ) );
}
Test case:
int samples[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };
printf( "%.2f\n", StdDev( samples, 9 ) );
int weights[] = { 1, 1, 0, 0, 4, 1, 2, 1, 0 };
printf( "%.2f\n", StdDevWeighted( samples, weights, 9 ) );
Result:
7.46
1.94
Should be:
7.46
5.82
I think the problem is that weighted variance has a few different interpretations and I don't know which one is standard. I found this variation:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Weighted_incremental_algorithm
template <class T>
inline float VarianceWeighted( T samples[], T weights[], int count )
{
float varianceWeighted = 0.0f;
if( count > 1 )
{
float sumWeights = 0.0f, meanWeighted = 0.0f, m2 = 0.0f;
for( int i = 0; i < count; i++ )
{
float temp = weights[i] + sumWeights,
delta = samples[i] - meanWeighted,
r = delta*weights[i]/temp;
meanWeighted += r;
m2 += sumWeights*delta*r; // Alternatively, m2 += weights[i] * delta * (samples[i] - meanWeighted)
sumWeights = temp;
}
varianceWeighted = (m2/sumWeights)*((float) count/(count - 1));
}
return varianceWeighted;
}
Result:
7.46
5.64
I also tried looking at boost and esutil but they didn't help much:
http://www.boost.org/doc/libs/1_48_0/boost/accumulators/statistics/weighted_variance.hpp
http://esutil.googlecode.com/svn-history/r269/trunk/esutil/stat/util.py
I don't need an entire statistics library, and more importantly, I want to understand the implementation.
Can someone please post functions to calculate these correctly?
Bonus points if your functions can do it in a single pass.
Also, does anyone know if weighted variance gives the same result as ordinary variance with repeated values? For example, would the variance of samples[] = { 1, 2, 3, 3 } be the same as weighted variance of samples[] = { 1, 2, 3 }, weights[] = { 1, 1, 2 }?
Update: here is a google docs spreadsheet I have set up to explore the problem. Unfortunately my answers are nowhere close to the NIST pdf. I think the problem is in the unbias step, but I can't see how to fix it.
https://docs.google.com/spreadsheet/ccc?key=0ApzPh5nRin0ldGNNYjhCUTlWTks2TGJrZW4wQUcyZnc&usp=sharing
The result is a weighted variance of 3.77, which is the square of the weighted standard deviation of 1.94 I got in my c++ code.
I am attempting to install octave on my Mac OS X setup so that I can run their var() function with weights, but it is taking forever to install it with brew. I am deeply into yak shaving now.
#include <stdint.h>
#include <stdio.h>
float mean(uint16_t* x, uint16_t n) {
uint16_t sum_xi = 0;
int i;
for (i = 0; i < n; i++) {
sum_xi += x[i];
}
return (float) sum_xi / n;
}
/**
* http://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weigmean.pdf
*/
float weighted_mean(uint16_t* x, uint16_t* w, uint16_t n) {
int sum_wixi = 0;
int sum_wi = 0;
int i;
for (i = 0; i < n; i++) {
sum_wixi += w[i] * x[i];
sum_wi += w[i];
}
return (float) sum_wixi / (float) sum_wi;
}
float variance(uint16_t* x, uint16_t n) {
float mean_x = mean(x, n);
float dist, dist2;
float sum_dist2 = 0;
int i;
for (i = 0; i < n; i++) {
dist = x[i] - mean_x;
dist2 = dist * dist;
sum_dist2 += dist2;
}
return sum_dist2 / (n - 1);
}
/**
* http://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weighvar.pdf
*/
float weighted_variance(uint16_t* x, uint16_t* w, uint16_t n) {
float xw = weighted_mean(x, w, n);
float dist, dist2;
float sum_wi_times_dist2 = 0;
int sum_wi = 0;
int n_prime = 0;
int i;
for (i = 0; i < n; i++) {
dist = x[i] - xw;
dist2 = dist * dist;
sum_wi_times_dist2 += w[i] * dist2;
sum_wi += w[i];
if (w[i] > 0)
n_prime++;
}
if (n_prime > 1) {
return sum_wi_times_dist2 / ((float) ((n_prime - 1) * sum_wi) / n_prime);
} else {
return 0.0f;
}
}
/**
* http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Weighted_incremental_algorithm
*/
float weighted_incremental_variance(uint16_t* x, uint16_t* w, uint16_t n) {
uint16_t sumweight = 0;
float mean = 0;
float M2 = 0;
int n_prime = 0;
uint16_t temp;
float delta;
float R;
int i;
for (i = 0; i < n; i++) {
if (w[i] == 0)
continue;
temp = w[i] + sumweight;
delta = x[i] - mean;
R = delta * w[i] / temp;
mean += R;
M2 += sumweight * delta * R;
sumweight = temp;
n_prime++;
}
if (n_prime > 1) {
float variance_n = M2 / sumweight;
return variance_n * n_prime / (n_prime - 1);
} else {
return 0.0f;
}
}
int main(void) {
uint16_t n = 9;
uint16_t x[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };
uint16_t w[] = { 1, 1, 0, 0, 4, 1, 2, 1, 0 };
printf("%f\n", weighted_variance(x, w, n)); /* outputs: 33.900002 */
printf("%f\n", weighted_incremental_variance(x, w, n)); /* outputs: 33.900005 */
}
Solution
You accidentally added an extra term "count" in the denominator of the variance update term.
When using the correction below I get your expected answer of
5.82
FYI, one way to pick up on things like this when you are doing a code review is to do a "dimensional analysis". The "units" of the equation were wrong. You were effectively dividing by an order N squared term when it should be an order N term.
Before
template <class T>
inline float VarianceWeighted( T samples[], T weights[], int count )
{
...
varianceWeighted *= ((float) numNonzero)/((float) count*(numNonzero - 1.0f)*sumWeights); // this should be right but isn't?!
...
}
After
Removing "count", this line should be replaced by
template <class T>
inline float VarianceWeighted( T samples[], T weights[], int count )
{
...
varianceWeighted *= ((float) numNonzero)/((float) (numNonzero - 1.0f)*sumWeights); // removed count term
...
}
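Put together with the question's original function, the corrected version reads:
template <class T>
inline float VarianceWeighted( T samples[], T weights[], int count )
{
    float varianceWeighted = 0.0f;
    if( count > 1 )
    {
        float sumWeights = 0.0f, meanWeighted = 0.0f;
        int numNonzero = 0;
        for( int i = 0; i < count; i++ )
        {
            meanWeighted += samples[i]*weights[i];
            sumWeights += weights[i];
            if( ((float) weights[i]) != 0.0f ) numNonzero++;
        }
        if( sumWeights != 0.0f && numNonzero > 1 )
        {
            meanWeighted /= sumWeights;
            for( int i = 0; i < count; i++ )
            {
                float sum = samples[i] - meanWeighted;
                varianceWeighted += weights[i]*sum*sum;
            }
            // NIST formula: divide by (numNonzero - 1) * sumWeights / numNonzero
            varianceWeighted *= ((float) numNonzero)/((float) (numNonzero - 1.0f)*sumWeights);
        }
    }
    return varianceWeighted;
}
With the test case from the question this yields a weighted standard deviation of 5.82.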
Here's a much shorter version with a working demo:
#include <iostream>
#include <vector>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/weighted_variance.hpp>
#include <boost/accumulators/statistics/variance.hpp>
namespace ba = boost::accumulators;
int main() {
std::vector<double> numbers{2, 3, 5, 7, 11, 13, 17, 19, 23};
std::vector<double> weights{1, 1, 0, 0, 4, 1, 2, 1, 0 };
ba::accumulator_set<double, ba::stats<ba::tag::variance > > acc;
ba::accumulator_set<double, ba::stats<ba::tag::weighted_variance > , double > acc_weighted;
double n = numbers.size();
double N = n;
for(size_t i = 0 ; i<numbers.size() ; i++ ) {
acc ( numbers[i] );
acc_weighted( numbers[i] , ba::weight = weights[i] );
if(weights[i] == 0) {
n=n-1;
}
};
std::cout << "Sample Standard Deviation, s: " << std::sqrt(ba::variance(acc) *N/(N-1)) << std::endl;
std::cout << "Weighted Sample Standard Deviation, s: " << std::sqrt(ba::weighted_variance(acc_weighted)*n/(n-1)) << std::endl;
}
Note that n must reflect the number of samples with nonzero weights, hence the extra n = n - 1; line.
Sample Standard Deviation, s: 7.45729
Weighted Sample Standard Deviation, s: 5.82237

'std::vector<double>::iterator' has no member named 'begin'

So I am trying to perform recursion (a very simple code for split-radix recursive butterflies) on a large C++ STL vector, and I am using iterators for the recursive calls, but it isn't working and I keep getting errors.
#include <iostream>
#include <cmath>
#include <vector>
#include <string>
#include <algorithm>
using namespace std;
template <typename T>
class fft_data{
public:
vector<T> re;
vector<T> im;
};
void inline split_radix_rec(vector<double>::iterator r,vector<double>::iterator i, int sgn,int N) {
if (N == 1) {
return;
} else if (N == 2) {
for (int k = 0; k < N/2; k++) {
int index = 2*k;
int index1 = index+1;
double taur = *(r+index1);
double taui = *(i+index1);
*(r+index1) = *(r+index) - taur;
*(i+index1) = *(i+index) - taui;
*(r+index) = *(r+index) + taur;
*(i+index) = *(i+index) + taui;
}
N=N/2;
} else {
int m = N/2;
int p = N/4;
double PI2 = 6.28318530717958647692528676655900577;
double theta = -1.0 * sgn * PI2/N;
double S = sin(theta);
double C = cos(theta);
double PI6 = 3.0*6.28318530717958647692528676655900577;
double theta3 = -1.0 * sgn * PI6/N;
double S3 = sin(theta3);
double C3 = cos(theta3);
double wlr = 1.0;
double wli = 0.0;
//T wl2r = (T) 1.0;
//T wl2i = (T) 0.0;
double wl3r = 1.0;
double wl3i = 0.0;
double tau1r,tau1i,tau2r,tau2i;
double ur,ui,vr,vi;
for (int j = 0; j < p; j++) {
int index1 = j+m;
int index2 = index1+p;
int index3 = j+p;
tau1r = *(r+index1);
tau1i = *(i+index1);
tau2r = *(r+index2);
tau2i = *(i+index2);
ur = tau1r + tau2r;
ui = tau1i + tau2i;
vr = sgn* (tau2r - tau1r);
vi = sgn* (tau2i - tau1i);
*(r+index2) = *(r+index3) - vi;
*(i+index2) = *(i+index3) + vr;
*(r+index1) = *(r+j) - ur;
*(i+index1) = *(i+j) - ui;
*(r+index3) = *(r+index3) + vi;
*(i+index3) = *(i+index3) - vr;
*(r+j) = *(r+j) + ur;
*(i+j) = *(i+j) + ui;
}
split_radix_rec(r.begin(),i.begin(),sgn,m);
split_radix_rec(r.begin()+m,i.begin()+m,sgn,p);
split_radix_rec(r.begin()+m+p,i.begin()+m+p,sgn,p);
}
}
int main() {
vector<double> u,v;
for (int i = 0; i < 256; i++) {
u.push_back(i);
v.push_back(i);
}
int sgn = 1;
int N = 256;
split_radix_rec(u.begin(),v.begin(),sgn,N);
return 0;
}
Here are the errors I am getting
main.cpp:93:21: error: 'std::vector<double>::iterator' has no member named 'begin'
Six identical errors appear, on lines 93, 94, 95 (the three split_radix_rec() calls made from within the split_radix_rec function). This is part of a much larger code base, so I want it to work with STL vectors. What am I doing wrong?
As the error states, you are calling begin() on a std::vector<double>::iterator.
You should call it on a std::vector<double>, which is what returns a std::vector<double>::iterator.
r and i are themselves iterators (begin positions) in your code.
Try:
split_radix_rec(r,i,sgn,m);
split_radix_rec(r+m,i+m,sgn,p);
split_radix_rec(r+m+p,i+m+p,sgn,p);
There is way too much code to give you a concise answer, but the error clearly states that you are calling begin() on a vector iterator instead of a vector. That happens at the recursive split_radix_rec calls. You may have intended this instead:
split_radix_rec(r,i,sgn,m);
split_radix_rec(r+m,i+m,sgn,p);
split_radix_rec(r+m+p,i+m+p,sgn,p);