I have written a program that renders the Mandelbrot set. Now I'm trying to make it multithreaded.
// mandelbrot.cpp
// compile with: g++ -std=c++11 -pthread mandelbrot.cpp -o mandelbrot
// view output with: eog mandelbrot.ppm
#include <fstream>
#include <complex> // if you make use of complex number facilities in C++
#include <iostream>
#include <cstdlib>
#include <ctime> // for time() and difftime()
#include <thread>
#include <mutex>
#include <vector>
using namespace std;
template <class T> struct RGB { T r, g, b; };
template <class T>
class Matrix {
public:
Matrix(const size_t rows, const size_t cols) : _rows(rows), _cols(cols) {
_matrix = new T*[rows];
for (size_t i = 0; i < rows; ++i) {
_matrix[i] = new T[cols];
}
}
Matrix(const Matrix &m) : _rows(m._rows), _cols(m._cols) {
_matrix = new T*[m._rows];
for (size_t i = 0; i < m._rows; ++i) {
_matrix[i] = new T[m._cols];
for (size_t j = 0; j < m._cols; ++j) {
_matrix[i][j] = m._matrix[i][j];
}
}
}
~Matrix() {
for (size_t i = 0; i < _rows; ++i) {
delete [] _matrix[i];
}
delete [] _matrix;
}
T *operator[] (const size_t nIndex)
{
return _matrix[nIndex];
}
size_t width() const { return _cols; }
size_t height() const { return _rows; }
protected:
size_t _rows, _cols;
T **_matrix;
};
// Portable PixMap image
class PPMImage : public Matrix<RGB<unsigned char> >
{
public:
unsigned int size;
PPMImage(const size_t height, const size_t width) : Matrix(height, width) { }
void save(const std::string &filename)
{
std::ofstream out(filename, std::ios_base::binary);
out <<"P6" << std::endl << _cols << " " << _rows << std::endl << 255 << std::endl;
for (size_t y=0; y<_rows; y++)
for (size_t x=0; x<_cols; x++)
out << _matrix[y][x].r << _matrix[y][x].g << _matrix[y][x].b;
}
};
/* Draw Mandelbrot according to the provided parameters */
void draw_Mandelbrot(PPMImage & image, const unsigned width, const unsigned height, double cxmin, double cxmax, double cymin, double cymax,unsigned int max_iterations)
{
for (std::size_t ix = 0; ix < width; ++ix)
for (std::size_t iy = 0; iy < height; ++iy)
{
std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin), cymin + iy / (height - 1.0)*(cymax - cymin));
std::complex<double> z = 0;
unsigned int iterations;
for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
z = z*z + c;
image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
}
}
int main()
{
const unsigned width = 1600;
const unsigned height = 1600;
PPMImage image(height, width);
int parts = 8;
std::vector<int> bnd(parts, image.size);
std::thread *tt = new std::thread[parts - 1];
time_t start, end;
time(&start);
//Launch parts-1 threads
for (int i = 0; i < parts - 1; ++i) {
tt[i] = std::thread(draw_Mandelbrot,ref(image), width, height, -2.0, 0.5, -1.0, 1.0, 10);
}
//Use the main thread to do part of the work !!!
for (int i = parts - 1; i < parts; ++i) {
draw_Mandelbrot(ref(image), width, height, -2.0, 0.5, -1.0, 1.0, 10);
}
//Join parts-1 threads
for (int i = 0; i < parts - 1; ++i)
tt[i].join();
time(&end);
std::cout << difftime(end, start) << " seconds" << std::endl;
image.save("mandelbrot.ppm");
delete[] tt;
return 0;
}
Now every thread draws the complete fractal (look in main()). How can I let the threads draw different parts of the fractal?
You're making this (quite a lot) harder than it needs to be. This is the sort of task to which OpenMP is almost perfectly suited: it gives nearly ideal scaling with a bare minimum of effort.
I modified your draw_Mandelbrot by inserting a pragma before the outer for loop:
#pragma omp parallel for
for (int ix = 0; ix < width; ++ix)
for (int iy = 0; iy < height; ++iy)
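(For the pragma to have any effect, the code must be compiled with -fopenmp; without that flag g++ simply ignores it and the program stays single-threaded.)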
Then I simplified your main down to:
int main() {
const unsigned width = 1600;
const unsigned height = 1600;
PPMImage image(height, width);
clock_t start = clock();
draw_Mandelbrot(image, width, height, -2.0, 0.5, -1.0, 1.0, 10);
clock_t stop = clock();
std::cout << (double(stop - start) / CLOCKS_PER_SEC) << " seconds\n";
image.save("mandelbrot.ppm");
return 0;
}
On my (fairly slow) machine, your original code ran in 4.73 seconds. My modified code ran in 1.38 seconds. That's an improvement of 3.4x out of code that's nearly indistinguishable from a trivial single-threaded version.
Just for what it's worth, I did a bit more rewriting to get this:
// mandelbrot.cpp
// compile with: g++ -std=c++11 -fopenmp mandelbrot.cpp -o mandelbrot
// view output with: eog mandelbrot.ppm
#include <fstream>
#include <complex> // if you make use of complex number facilities in C++
#include <iostream>
#include <cstdlib>
#include <ctime> // for clock() and CLOCKS_PER_SEC
#include <vector>
using namespace std;
template <class T> struct RGB { T r, g, b; };
template <class T>
struct Matrix
{
std::vector<T> data;
size_t rows;
size_t cols;
class proxy {
Matrix &m;
size_t index_1;
public:
proxy(Matrix &m, size_t index_1) : m(m), index_1(index_1) { }
T &operator[](size_t index) { return m.data[index * m.rows + index_1]; }
};
class const_proxy {
Matrix const &m;
size_t index_1;
public:
const_proxy(Matrix const &m, size_t index_1) : m(m), index_1(index_1) { }
T const &operator[](size_t index) const { return m.data[index * m.rows + index_1]; }
};
public:
Matrix(size_t rows, size_t cols) : data(rows * cols), rows(rows), cols(cols) { }
proxy operator[](size_t index) { return proxy(*this, index); }
const_proxy operator[](size_t index) const { return const_proxy(*this, index); }
};
template <class T>
std::ostream &operator<<(std::ostream &out, Matrix<T> const &m) {
out << "P6" << std::endl << m.cols << " " << m.rows << std::endl << 255 << std::endl;
for (size_t y = 0; y < m.rows; y++)
for (size_t x = 0; x < m.cols; x++) {
T pixel = m[y][x];
out << pixel.r << pixel.g << pixel.b;
}
return out;
}
/*Draw Mandelbrot according to the provided parameters*/
template <class T>
void draw_Mandelbrot(T & image, const unsigned width, const unsigned height, double cxmin, double cxmax, double cymin, double cymax, unsigned int max_iterations) {
#pragma omp parallel for
for (int ix = 0; ix < width; ++ix)
for (int iy = 0; iy < height; ++iy)
{
std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin), cymin + iy / (height - 1.0)*(cymax - cymin));
std::complex<double> z = 0;
unsigned int iterations;
for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
z = z*z + c;
image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
}
}
int main() {
const unsigned width = 1600;
const unsigned height = 1600;
Matrix<RGB<unsigned char>> image(height, width);
clock_t start = clock();
draw_Mandelbrot(image, width, height, -2.0, 0.5, -1.0, 1.0, 255);
clock_t stop = clock();
std::cout << (double(stop - start) / CLOCKS_PER_SEC) << " seconds\n";
std::ofstream out("mandelbrot.ppm", std::ios::binary);
out << image;
return 0;
}
On my machine, this code runs in about 0.5 to 0.6 seconds.
As to why I made these changes: mostly to make it faster, cleaner, and simpler. Your Matrix class allocated a separate block of memory for each row (or perhaps each column; I didn't pay very close attention). This version allocates the entire matrix as one contiguous block instead. That eliminates a level of indirection to get to the data and increases locality of reference, thus improving cache usage. It also reduces the total amount of memory used.
Changing from using time to using clock to do the timing was to measure CPU time instead of wall time (and typically improve precision substantially as well).
Getting rid of the PPMImage class was done simply because (IMO) having a PPMImage class that derives from a Matrix class just doesn't make much (if any) sense. I suppose it works (for a sufficiently loose definition of "work"), but it doesn't strike me as good design. If you insist on doing it at all, it should at least be private derivation, because you're just using Matrix to implement your PPMImage class, not (at least I certainly hope not) trying to make assertions about properties of PPM images.
If, for whatever reason, you decide to handle the threading manually, the natural way of dividing the work between threads is still suggested by the loops inside draw_Mandelbrot. The obvious choice is to leave your outer loop alone, but send the computation for each iteration off to a thread pool:
for (int ix = 0; ix < width; ++ix)
compute_thread(ix);
where the body of compute_thread is basically this chunk of code:
for (int iy = 0; iy < height; ++iy)
{
std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin), cymin + iy / (height - 1.0)*(cymax - cymin));
std::complex<double> z = 0;
unsigned int iterations;
for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
z = z*z + c;
image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
}
There would obviously be a little work involved in passing the correct data to each compute thread (each thread should be passed a reference to a slice of the resulting picture), but that would be an obvious and fairly clean place to divide things up. In particular, it divides the job into enough tasks that you semi-automatically get pretty good load balancing (i.e., you can keep all the cores busy), while each task is still large enough that you don't waste massive amounts of time on communication and synchronization between the threads.
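For comparison, here is a minimal sketch of a cruder manual division (my own illustration, not part of the original code): no thread pool, just one std::thread per hardware core, each rendering a contiguous band of columns. draw_band is a hypothetical variant of draw_Mandelbrot restricted to a column range.
void draw_band(PPMImage &image, unsigned width, unsigned height,
               double cxmin, double cxmax, double cymin, double cymax,
               unsigned max_iterations, unsigned ix_begin, unsigned ix_end)
{
    // Same per-pixel loop as draw_Mandelbrot, but only for ix in [ix_begin, ix_end).
    for (unsigned ix = ix_begin; ix < ix_end; ++ix)
        for (unsigned iy = 0; iy < height; ++iy)
        {
            std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin),
                                   cymin + iy / (height - 1.0)*(cymax - cymin));
            std::complex<double> z = 0;
            unsigned iterations;
            for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
                z = z*z + c;
            image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
        }
}
void draw_parallel(PPMImage &image, unsigned width, unsigned height,
                   double cxmin, double cxmax, double cymin, double cymax,
                   unsigned max_iterations)
{
    unsigned n = std::thread::hardware_concurrency();
    if (n == 0) n = 4; // hardware_concurrency() may return 0 if it can't tell
    std::vector<std::thread> workers;
    for (unsigned t = 0; t < n; ++t)
        workers.emplace_back(draw_band, std::ref(image), width, height,
                             cxmin, cxmax, cymin, cymax, max_iterations,
                             width * t / n, width * (t + 1) / n);
    for (auto &w : workers)
        w.join(); // wait for all bands to finish
}
With only n bands, the load balancing is coarser than the per-column scheme sketched above: bands covering the interior of the set take noticeably longer than bands covering the mostly-empty edges.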
As to the result, with the number of iterations set to 255, I get the following (scaled to 25%; image omitted), which is pretty much as I'd expect.
One of the big issues with this approach is that different regions take different amounts of time to calculate.
A more general approach is:
Start 1 source thread.
Start N worker threads.
Start 1 sink thread.
Create 2 thread safe queues (call them the source queue and the sink queue).
Divide the image into M (many more than N) pieces.
The source thread pushes pieces into the source queue.
The workers pull pieces from the source queue, convert each piece into a result fragment, and push those fragments into the sink queue.
The sink thread takes fragments from the sink queue and combines them into the final image.
By dividing up the work this way, all the worker threads will be busy all the time.
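As an illustration of the one piece of machinery this scheme needs, here is a minimal sketch of such a thread-safe queue (my own, assuming C++11; a production version would also need a way to signal shutdown to blocked consumers):
#include <condition_variable>
#include <mutex>
#include <queue>
template <class T>
class safe_queue {
    std::queue<T> items_;
    std::mutex mutex_;
    std::condition_variable ready_;
public:
    void push(T item) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            items_.push(std::move(item));
        }
        ready_.notify_one(); // wake one waiting consumer
    }
    // Blocks until an item is available.
    T pop() {
        std::unique_lock<std::mutex> lock(mutex_);
        ready_.wait(lock, [this] { return !items_.empty(); });
        T item = std::move(items_.front());
        items_.pop();
        return item;
    }
};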
You can divide the fractal into pieces by dividing the span between its start and end by the screen dimension (the snippet below is PHP, but the arithmetic carries over directly):
$this->stepsRe = (double)((($this->startRe * -1) + ($this->endeRe)) / ($this->size_x-1));
$this->stepsIm = (double)((($this->startIm * -1) + ($this->endeIm)) / ($this->size_y-1));
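In the C++ code above, the equivalent per-pixel steps would be (a sketch reusing the question's variable names):
double stepRe = (cxmax - cxmin) / (width - 1.0);  // step along the real axis
double stepIm = (cymax - cymin) / (height - 1.0); // step along the imaginary axis
// pixel (ix, iy) then maps to the complex number
// std::complex<double> c(cxmin + ix * stepRe, cymin + iy * stepIm);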
Related
This is my first time using multithreading to speed up a heavy calculation.
Background: The idea is to calculate a Kernel Covariance matrix, by reading a list of 3D points x_test and calculating the corresponding matrix, which has dimensions x_test.size() x x_test.size().
I already sped up the calculations by only computing the lower triangular matrix. Since all the calculations are independent of each other, I tried to speed up the process (x_test.size() = 27000 in my case) by splitting the calculation of the matrix entries row-wise, assigning a range of rows to each thread.
On a single core the calculations took about 280 seconds each time; on 4 cores it took 270-290 seconds.
main.cpp
int main(int argc, char *argv[]) {
double sigma0sq = 1;
double lengthScale [] = {0.7633, 0.6937, 3.3307e+07};
const std::vector<std::vector<double>> x_test = parse2DCsvFile(inputPath);
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size()*x_test.size()/2;
const int numThreads = 4;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for(std::size_t i=1; i<x_test.size()+1; ++i){
int prod = i*(i+1)/2 - j*(j+1)/2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if(indices.size() == numThreads-1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
/* Spreading calculations to multiple threads */
std::vector<std::thread> threads;
for(std::size_t i = 1; i < indices.size(); ++i){
threads.push_back(std::thread(calculateKMatrixCpp, x_test, lengthScale, sigma0sq, i, indices.at(i-1), indices.at(i)));
}
for(auto & th: threads){
th.join();
}
return 0;
}
As you can see, each thread performs the following calculations on the data assigned to it:
void calculateKMatrixCpp(const std::vector<std::vector<double>> xtest, double lengthScale[], double sigma0sq, int threadCounter, int start, int stop){
char buffer[8192];
std::ofstream out("lower_half_matrix_" + std::to_string(threadCounter) +".csv");
out.rdbuf()->pubsetbuf(buffer, 8192); // size must match the buffer declared above
for(int i = start; i < stop; ++i){
for(int j = 0; j < i+1; ++j){
double kij = seKernel(xtest.at(i), xtest.at(j), lengthScale, sigma0sq);
if (j!=0)
out << ',';
out << kij;
}
if(i!=xtest.size()-1 )
out << '\n';
}
out.close();
}
and
double seKernel(const std::vector<double> x1,const std::vector<double> x2, double lengthScale[], double sigma0sq) {
double sum(0);
for(std::size_t i=0; i<x1.size();i++){
sum += pow((x1.at(i)-x2.at(i))/lengthScale[i],2);
}
return sigma0sq*exp(-0.5*sum);
}
Aspects I considered
Locking caused by simultaneous access to the data vector -> I don't pass a reference to the threads, but a copy of the data. I know this is not optimal in terms of RAM usage, but as far as I know it should prevent simultaneous data access, since every thread has its own copy.
Output -> every thread writes its part of the lower triangular matrix to its own file. My task manager doesn't indicate anywhere near full SSD utilization.
Compiler and machine
Windows 11
GNU GCC Compiler
Code::Blocks (although I don't think that should be of importance)
There are many details that can be improved in your code, but I think the two biggest issues are:
using vectors of vectors, which leads to fragmented data;
writing each piece of data to file as soon as its value is computed.
The first point is easy to fix: use something like std::vector<std::array<double, 3>>. In the code below I use an alias to make it more readable:
using Point3D = std::array<double, 3>;
std::vector<Point3D> x_test;
The second point is slightly harder to address. I assume you wanted to write to the disk inside each thread because you couldn't manage to write to a shared buffer that you could then write to a file.
Here is a way to do exactly that:
void calculateKMatrixCpp(
std::vector<Point3D> const& xtest, Point3D const& lengthScale, double sigma0sq,
int threadCounter, int start, int stop, std::vector<double>& kMatrix
) {
// ...
double& kij = kMatrix[i * xtest.size() + j];
kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
// ...
}
// ...
threads.push_back(std::thread(
calculateKMatrixCpp, x_test, lengthScale, sigma0sq,
i, indices[i-1], indices[i], std::ref(kMatrix)
));
Here, kMatrix is the shared buffer and represents the whole matrix you are trying to compute. You need to pass it to the thread via std::ref. Each thread will write to a different location in that buffer, so there is no need for any mutex or other synchronization.
Once you make these changes and try to write kMatrix to the disk, you will realize that this is the part that takes the most time, by far.
Below is the full code I tried on my machine, and the computation time was about 2 seconds whereas the writing-to-file part took 300 seconds! No amount of multithreading can speed that up.
If you truly want to write all that data to the disk, you may have some luck with file mapping. Computing the exact size needed should be easy enough if all values have the same number of digits, and it looks like you could write the values with multithreading. I have never done anything like that, so I can't really say much more about it, but it looks to me like the fastest way to write multiple gigabytes of memory to the disk.
#include <vector>
#include <thread>
#include <iostream>
#include <string>
#include <cmath>
#include <array>
#include <random>
#include <fstream>
#include <chrono>
using Point3D = std::array<double, 3>;
auto generateSampleData() -> std::vector<Point3D> {
static std::minstd_rand g(std::random_device{}());
std::uniform_real_distribution<> d(-1.0, 1.0);
std::vector<Point3D> data;
data.reserve(27000);
for (auto i = 0; i < 27000; ++i) {
data.push_back({ d(g), d(g), d(g) });
}
return data;
}
double seKernel(Point3D const& x1, Point3D const& x2, Point3D const& lengthScale, double sigma0sq) {
double sum = 0.0;
for (auto i = 0u; i < 3u; ++i) {
double distance = (x1[i] - x2[i]) / lengthScale[i];
sum += distance*distance;
}
return sigma0sq * std::exp(-0.5*sum);
}
void calculateKMatrixCpp(std::vector<Point3D> const& xtest, Point3D const& lengthScale, double sigma0sq, int threadCounter, int start, int stop, std::vector<double>& kMatrix) {
std::cout << "start of thread " << threadCounter << "\n" << std::flush;
for(int i = start; i < stop; ++i) {
for(int j = 0; j < i+1; ++j) {
double& kij = kMatrix[i * xtest.size() + j];
kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
}
}
std::cout << "end of thread " << threadCounter << "\n" << std::flush;
}
int main() {
double sigma0sq = 1;
Point3D lengthScale = {0.7633, 0.6937, 3.3307e+07};
const std::vector<Point3D> x_test = generateSampleData();
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size()*x_test.size()/2;
const int numThreads = 4;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for(std::size_t i = 1; i < x_test.size()+1; ++i){
int prod = i*(i+1)/2 - j*(j+1)/2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if(indices.size() == numThreads-1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
auto start = std::chrono::system_clock::now();
std::vector<double> kMatrix(x_test.size() * x_test.size(), 0.0);
std::vector<std::thread> threads;
for (std::size_t i = 1; i < indices.size(); ++i) {
threads.push_back(std::thread(calculateKMatrixCpp, x_test, lengthScale, sigma0sq, i, indices[i - 1], indices[i], std::ref(kMatrix)));
}
for (auto& t : threads) {
t.join();
}
auto end = std::chrono::system_clock::now();
auto elapsed_seconds = std::chrono::duration<double>(end - start).count();
std::cout << "computation time: " << elapsed_seconds << "s" << std::endl;
start = std::chrono::system_clock::now();
constexpr int buffer_size = 131072;
char buffer[buffer_size];
std::ofstream out("matrix.csv");
out.rdbuf()->pubsetbuf(buffer, buffer_size);
for (int i = 0; i < x_test.size(); ++i) {
for (int j = 0; j < i + 1; ++j) {
if (j != 0) {
out << ',';
}
out << kMatrix[i * x_test.size() + j];
}
if (i != x_test.size() - 1) {
out << '\n';
}
}
end = std::chrono::system_clock::now();
elapsed_seconds = std::chrono::duration<double>(end - start).count();
std::cout << "writing time: " << elapsed_seconds << "s" << std::endl;
}
Okay, I've written an implementation with optimized formatting.
Using @Nelfeal's code, a run took around 250 seconds on my system, with the write time dominating by far; or rather, with the std::ofstream formatting taking most of that time.
I've written a C++20 version using std::format_to/std::format. It is multi-threaded and takes around 25-40 seconds to complete all the computations, formatting, and writing. Run single-threaded, it takes around 70 seconds on my system. The same performance should be achievable with the {fmt} library on C++11/14/17.
Here is the code:
import <vector>;
import <thread>;
import <iostream>;
import <string>;
import <cmath>;
import <array>;
import <random>;
import <fstream>;
import <chrono>;
import <format>;
import <filesystem>;
using Point3D = std::array<double, 3>;
auto generateSampleData(Point3D scale) -> std::vector<Point3D>
{
static std::minstd_rand g(std::random_device{}());
std::uniform_real_distribution<> d(-1.0, 1.0);
std::vector<Point3D> data;
data.reserve(27000);
for (auto i = 0; i < 27000; ++i)
{
data.push_back({ d(g)* scale[0], d(g)* scale[1], d(g)* scale[2] });
}
return data;
}
double seKernel(Point3D const& x1, Point3D const& x2, Point3D const& lengthScale, double sigma0sq) {
double sum = 0.0;
for (auto i = 0u; i < 3u; ++i) {
double distance = (x1[i] - x2[i]) / lengthScale[i];
sum += distance * distance;
}
return sigma0sq * std::exp(-0.5 * sum);
}
void calculateKMatrixCpp(std::vector<Point3D> const& xtest, Point3D lengthScale, double sigma0sq, int threadCounter, int start, int stop, std::filesystem::path localPath)
{
using namespace std::string_view_literals;
std::vector<char> buffer;
buffer.reserve(15'000);
std::ofstream out(localPath);
std::cout << std::format("starting thread {}: from {} to {}\n"sv, threadCounter, start, stop);
for (int i = start; i < stop; ++i)
{
for (int j = 0; j < i; ++j)
{
double kij = seKernel(xtest[i], xtest[j], lengthScale, sigma0sq);
std::format_to(std::back_inserter(buffer), "{:.6g}, "sv, kij);
}
double kii = seKernel(xtest[i], xtest[i], lengthScale, sigma0sq);
std::format_to(std::back_inserter(buffer), "{:.6g}\n"sv, kii);
out.write(buffer.data(), buffer.size());
buffer.clear();
}
}
int main() {
double sigma0sq = 1;
Point3D lengthScale = { 0.7633, 0.6937, 3.3307e+07 };
const std::vector<Point3D> x_test = generateSampleData(lengthScale);
/* Finding data slices of similar size */
//This piece of code works, each thread is assigned roughly the same number of matrix entries
int numElements = x_test.size() * (x_test.size()+1) / 2;
const int numThreads = 3;
int elemsPerThread = numElements / numThreads;
std::vector<int> indices;
int j = 0;
for (std::size_t i = 1; i < x_test.size() + 1; ++i) {
int prod = i * (i + 1) / 2 - j * (j + 1) / 2;
if (prod > elemsPerThread) {
i--;
j = i;
indices.push_back(i);
if (indices.size() == numThreads - 1)
break;
}
}
indices.insert(indices.begin(), 0);
indices.push_back(x_test.size());
auto start = std::chrono::system_clock::now();
std::vector<std::thread> threads;
using namespace std::string_view_literals;
for (std::size_t i = 1; i < indices.size(); ++i)
{
threads.push_back(std::thread(calculateKMatrixCpp, std::ref(x_test), lengthScale, sigma0sq, i, indices[i - 1], indices[i], std::format("./matrix_{}.csv"sv, i-1)));
}
for (auto& t : threads)
{
t.join();
}
auto end = std::chrono::system_clock::now();
auto elapsed_seconds = std::chrono::duration<double>(end - start);
std::cout << std::format("total elapsed time: {}"sv, elapsed_seconds);
return 0;
}
Note: I used 6 digits of precision here as it is the default for std::ofstream. More digits means more writing time to disk and lower performance.
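For pre-C++20 toolchains, the {fmt} library mentioned above exposes essentially the same interface; the inner formatting call would look something like this (a sketch; the exact header and behavior depend on the fmt version):
#include <fmt/format.h>
// ...
// Same format specification as the std::format_to call above.
fmt::format_to(std::back_inserter(buffer), "{:.6g}, ", kij);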
I'm writing a simulation of the Ising model in 2D. The model behaves as predicted except for one thing: the critical temperature is roughly 3.5, while it should be near 2/ln(1 + sqrt(2)) ≈ 2.269.
The project is a C++ program that generates the data, and a shell script that exercises the program. The full code can be found here. Also here's lattice.cpp
#include <iostream>
#include "include/lattice.h"
using namespace std;
/*
Copy assignment operator, too long to include in the header.
*/
lattice &lattice::operator=(const lattice &other) {
size_ = other.size_;
spins_ = other.spins_;
J_ = other.J_;
H_ = other.H_;
delete spins_;
return *this;
}
void lattice::print() {
unsigned int area = size_ * size_;
for (unsigned int i = 0; i < area; i++) {
cout << to_symbol(spins_->at(i));
if (i % size_ == size_ - 1)
cout << endl;
}
cout << endl;
}
/*
Computes the energy associated with a spin at the given point.
It is explicitly float as that would allow the compiler to make use of multiple
registers instead of keeping track of unneeded precision. (typically J, H ~ 1).
*/
float lattice::compute_point_energy(int row, int col) {
int accumulator = get(row + 1, col) + get(row - 1, col) + get(row, col - 1) +
get(row, col + 1);
return -get(row, col) * (accumulator * J_ + H_);
}
/*
Computes total magnetisation in O(n^2). Thread safe
*/
int lattice::total_magnetisation() {
int sum = 0;
#pragma omp parallel for reduction(+ : sum)
for (unsigned int i = 0; i < size_ * size_; i++) {
sum += spins_->at(i);
}
return sum;
}
int inline to_periodic(int row, int col, int size) {
if (row < 0 || row >= size)
row = abs(size - abs(row));
if (col < 0 || col >= size)
col = abs(size - abs(col));
return row * size + col;
}
with lattice.h
#ifndef lattice_h
#define lattice_h
#include <cmath>
#include <vector>
/* Converts spin up/down to easily printable symbols. */
char inline to_symbol(int in) { return in == -1 ? '-' : '+'; }
/* Converts given pair of indices to those with periodic boundary conditions. */
int inline to_periodic(int row, int col, int size) {
if (row < 0 || row >= size)
row = abs(size - abs(row));
if (col < 0 || col >= size)
col = abs(size - abs(col));
return row * size + col;
}
class lattice {
private:
unsigned int size_;
// vector<bool> would be more space efficient, but it would not allow
// multithreading
std::vector<short> *spins_;
float J_;
float H_;
public:
lattice() noexcept : size_(0), spins_(NULL), J_(1.0), H_(0.0) {}
lattice(int new_size, double new_J, double new_H) noexcept
: size_(new_size), spins_(new std::vector<short>(size_ * size_, 1)),
J_(new_J), H_(new_H) {}
lattice(const lattice &other) noexcept
: lattice(other.size_, other.J_, other.H_) {
#pragma omp parallel for
for (unsigned int i = 0; i < size_ * size_; i++)
spins_->at(i) = other.spins_->at(i);
}
lattice &operator=(const lattice &);
~lattice() { delete spins_; }
void print();
short get(int row, int col) {
return spins_->at(to_periodic(row, col, size_));
}
unsigned int get_size() { return size_; }
void flip(int row, int col) { spins_->at(to_periodic(row, col, size_)) *= -1; }
int total_magnetisation();
float compute_point_energy(int row, int col);
};
#endif
and simulation.cpp
#include <iostream>
#include <math.h>
#include "include/simulation.h"
using namespace std;
/*
Advances the simulation a given number of steps, and updates/prints the statistics
into the given file pointer.
Defaults to stdout.
The number of time_steps is explicitly unsigned, so that linters/IDEs remind
the end user of the file that extra care needs to be taken, as well as to allow
advancing the simulation a larger number of times.
*/
void simulation::advance(unsigned int time_steps, FILE *output) {
unsigned int area = spin_lattice_.get_size() * spin_lattice_.get_size();
for (unsigned int i = 0; i < time_steps; i++) {
// If we don't update mean_energy_ every time, we might get incorrect
// thermodynamic behaviour.
total_energy_ = compute_energy(spin_lattice_);
double temperature_delta = total_energy_/area - mean_energy_;
if (abs(temperature_delta) < 1/area){
cerr<<temperature_delta<<"! Reached equilibrium "<<endl;
}
temperature_ += temperature_delta;
mean_energy_ = total_energy_ / area;
if (time_ % print_interval_ == 0) {
total_magnetisation_ = spin_lattice_.total_magnetisation();
mean_magnetisation_ = total_magnetisation_ / area;
print_status(output);
}
advance();
}
}
/*
Advances the simulation a single step.
DOES NOT KEEP TRACK OF STATISTICS. Hence private.
*/
void simulation::advance() {
#pragma omp parallel for collapse(2)
for (unsigned int row = 0; row < spin_lattice_.get_size(); row++) {
for (unsigned int col = 0; col < spin_lattice_.get_size(); col++) {
double dE = compute_dE(row, col);
double p = r_.random_uniform();
float rnd = rand() / (RAND_MAX + 1.);
if (exp(-dE / temperature_) > rnd) {
spin_lattice_.flip(row, col);
}
}
}
time_++;
}
/*
Computes change in energy due to flipping one single spin.
The function returns a single-precision floating-point number, as data cannot under
most circumstances make use of greater precision than that (save J is set to a
non-machine-representable value).
The code modifies the spin lattice, as an alternative (copying the neighborhood
of a given point), would make the code run slower by a factor of 2.25
*/
float simulation::compute_dE(int row, int col) {
float e_0 = spin_lattice_.compute_point_energy(row, col);
return -4*e_0;
}
/*
Computes the total energy associated with spins in the spin_lattice_.
I originally used this function to test the code that tracked energy as the lattice
itself was modified, but that code turned out to be only marginally faster, and
not thread-safe. This is due to a race condition: when one thread uses a neighborhood
of a point, while another thread was computing the energy of one such point in
the neighborhood of (row, col).
*/
double simulation::compute_energy(lattice &other) {
double energy_sum = 0;
unsigned int max = other.get_size();
#pragma omp parallel for reduction(+ : energy_sum)
for (unsigned int i = 0; i < max; i++) {
for (unsigned int j = 0; j < max; j++) {
energy_sum += other.compute_point_energy(i, j);
}
}
return energy_sum;
}
void simulation::set_to_chequerboard(int step){
if (time_ !=0){
return;
}else{
for (unsigned int i=0; i< spin_lattice_.get_size(); ++i){
for (unsigned int j=0; j<spin_lattice_.get_size(); ++j){
if ((i/step)%2-(j/step)%2==0){
spin_lattice_.flip(i, j);
}
}
}
}
}
with simulation.h
#ifndef simulation_h
#define simulation_h
#include "lattice.h"
#include "rng.h"
#include <gsl/gsl_rng.h>
/*
The logic of the entire simulation of the Ising model of magnetism.
This simulation will run and print statistics at a given time interval.
A simulation can be advanced a single time step, or many at a time,
*/
class simulation {
private:
unsigned int time_ = 0; // Current time of the simulation.
rng r_ = rng();
lattice spin_lattice_;
double temperature_;
double mean_magnetisation_ = 1;
double mean_energy_;
double total_magnetisation_;
double total_energy_;
unsigned int print_interval_ = 1;
void advance();
public:
void set_print_interval(unsigned int new_print_interval) { print_interval_ = new_print_interval; }
simulation(int new_size, double new_temp, double new_J, double new_H)
: time_(0), spin_lattice_(lattice(new_size, new_J, new_H)), temperature_(new_temp),
mean_energy_(new_J * (-4)), total_magnetisation_(new_size * new_size),
total_energy_(compute_energy(spin_lattice_)) {}
void print_status(FILE *f) {
f = f==NULL? stdout : f;
fprintf(f, "%4d\t%e \t%e\t%e\n", time_, mean_magnetisation_,
mean_energy_, temperature_);
}
void advance(unsigned int time_steps, FILE *output);
double compute_energy(lattice &other);
float compute_dE(int row, int col);
void set_to_chequerboard(int step);
void print_lattice(){
spin_lattice_.print();
};
// void load_custom(const lattice& custom);
};
#endif
The output right now shows the transition at roughly 3.5 (plot omitted), while it should be a step down near 2.26.
I have found a few issues in your code:
The compute_dE method returns the wrong energy, as the factor of 2 shouldn't be there. The Hamiltonian of the Ising system is

H = -J * sum_<i,j>(s_i * s_j) - H * sum_j(s_j)

while you are effectively using

H = -2J * sum_<i,j>(s_i * s_j) - 2H * sum_j(s_j)

because compute_dE returns -4*e_0 where the energy change from a single spin flip is only -2*e_0.
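Concretely, the fix is a one-line change (my reading of the code above; compute_point_energy already includes the J and H factors):
float simulation::compute_dE(int row, int col) {
    // Flipping the spin at (row, col) negates its point energy, so the
    // change in energy is (-e_0) - e_0 = -2*e_0, not -4*e_0.
    float e_0 = spin_lattice_.compute_point_energy(row, col);
    return -2 * e_0;
}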
The compute_energy method returns the wrong energy. The method should iterate over each spin pair only once. Something like this should do the trick:
for (unsigned int i = 0; i < max; i++) {
for (unsigned int j = i + 1; j < max; j++) {
energy_sum += other.compute_point_energy(i, j);
}
}
You use a temperature that is updated on the fly instead of using the target temperature. I do not really understand the purpose of that.
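For a standard Metropolis update, the acceptance test would use the fixed target temperature passed to the constructor; a sketch based on the advance() method above:
double dE = compute_dE(row, col);
double rnd = r_.random_uniform();
// temperature_ stays at the value passed to the constructor.
// The dE <= 0 test just skips the exp() for moves that are always accepted.
if (dE <= 0 || exp(-dE / temperature_) > rnd) {
    spin_lattice_.flip(row, col);
}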
I'm getting started in multithreaded programming so please excuse me if the following seems obvious. I am adding multithreading to an image processing program and the speedup isn't exactly the one I expected.
I'm currently getting a speedup of about 4x on a CPU with 4 physical cores and hyperthreading (8 logical cores), so I'd like to know if this kind of speedup is expected. The only thing I can think of is that it may make sense if both hyperthreads of a single physical core have to share some sort of memory bus.
Being new to multithreading, it's not entirely clear to me whether this would be considered an I/O-bound program, considering that all memory is allocated in RAM (I understand that the virtual memory manager of my OS will be the one deciding to page in/out this supposed memory amount from the heap). My machine has 16 GB of RAM, in case that helps decide whether paging/swapping can be an issue.
I've written a test program showcasing the serial case and two parallel cases, using QThreadPool and tbb::parallel_for.
As you can see, the current program has no real operations other than setting a supposed image from black to white; this is done on purpose to establish the baseline before any real operations are applied to the image.
I'm attaching the program in the hope that someone can explain whether my quest for a roughly 8x speedup is a lost cause in this kind of processing algorithm. Note that I'm not interested in other kinds of optimizations such as SIMD, as my real concern is not just to make it faster, but to make it faster using purely multithreading, without getting into SSE or processor-cache-level optimizations.
#include <iostream>
#include <algorithm> // std::find
#include <cstdint>   // uint32_t, uint64_t
#include <cstring>   // memset
#include <string>
#include <sys/time.h>
#include <vector>
#include <QThreadPool>
#include "/usr/local/include/tbb/tbb.h"
#define LOG(x) (std::cout << x << std::endl)
struct col4
{
unsigned char r, g, b, a;
};
class QTileTask : public QRunnable
{
public:
void run()
{
for(uint32_t y = m_yStart; y < m_yEnd; y++)
{
int rowStart = y * m_width;
for(uint32_t x = m_xStart; x < m_xEnd; x++)
{
int index = rowStart + x;
m_pData[index].r = 255;
m_pData[index].g = 255;
m_pData[index].b = 255;
m_pData[index].a = 255;
}
}
}
col4* m_pData;
uint32_t m_xStart;
uint32_t m_yStart;
uint32_t m_xEnd;
uint32_t m_yEnd;
uint32_t m_width;
};
struct TBBTileTask
{
void operator()()
{
for(uint32_t y = m_yStart; y < m_yEnd; y++)
{
int rowStart = y * m_width;
for(uint32_t x = m_xStart; x < m_xEnd; x++)
{
int index = rowStart + x;
m_pData[index].r = 255;
m_pData[index].g = 255;
m_pData[index].b = 255;
m_pData[index].a = 255;
}
}
}
col4* m_pData;
uint32_t m_xStart;
uint32_t m_yStart;
uint32_t m_xEnd;
uint32_t m_yEnd;
uint32_t m_width;
};
struct TBBCaller
{
TBBCaller(std::vector<TBBTileTask>& t)
: m_tasks(t)
{}
TBBCaller(TBBCaller& e, tbb::split)
: m_tasks(e.m_tasks)
{}
void operator()(const tbb::blocked_range<size_t>& r) const
{
for (size_t i=r.begin();i!=r.end();++i)
m_tasks[i]();
}
std::vector<TBBTileTask>& m_tasks;
};
inline double getcurrenttime( void )
{
timeval t;
gettimeofday(&t, NULL);
return static_cast<double>(t.tv_sec)+(static_cast<double>(t.tv_usec) / 1000000.0);
}
char* getCmdOption(char ** begin, char ** end, const std::string & option)
{
char ** itr = std::find(begin, end, option);
if (itr != end && ++itr != end)
{
return *itr;
}
return 0;
}
bool cmdOptionExists(char** begin, char** end, const std::string& option)
{
return std::find(begin, end, option) != end;
}
void baselineSerial(col4* pData, int resolution)
{
double t = getcurrenttime();
for(int y = 0; y < resolution; y++)
{
int rowStart = y * resolution;
for(int x = 0; x < resolution; x++)
{
int index = rowStart + x;
pData[index].r = 255;
pData[index].g = 255;
pData[index].b = 255;
pData[index].a = 255;
}
}
LOG((getcurrenttime() - t) * 1000 << " ms. (Serial)");
}
void baselineParallelQt(col4* pData, int resolution, uint32_t tileSize)
{
double t = getcurrenttime();
QThreadPool pool;
for(int y = 0; y < resolution; y+=tileSize)
{
for(int x = 0; x < resolution; x+=tileSize)
{
uint32_t xEnd = std::min<uint32_t>(x+tileSize, resolution);
uint32_t yEnd = std::min<uint32_t>(y+tileSize, resolution);
QTileTask* t = new QTileTask;
t->m_pData = pData;
t->m_xStart = x;
t->m_yStart = y;
t->m_xEnd = xEnd;
t->m_yEnd = yEnd;
t->m_width = resolution;
pool.start(t);
}
}
pool.waitForDone();
LOG((getcurrenttime() - t) * 1000 << " ms. (QThreadPool)");
}
void baselineParallelTBB(col4* pData, int resolution, uint32_t tileSize)
{
double t = getcurrenttime();
std::vector<TBBTileTask> tasks;
for(int y = 0; y < resolution; y+=tileSize)
{
for(int x = 0; x < resolution; x+=tileSize)
{
uint32_t xEnd = std::min<uint32_t>(x+tileSize, resolution);
uint32_t yEnd = std::min<uint32_t>(y+tileSize, resolution);
TBBTileTask t;
t.m_pData = pData;
t.m_xStart = x;
t.m_yStart = y;
t.m_xEnd = xEnd;
t.m_yEnd = yEnd;
t.m_width = resolution;
tasks.push_back(t);
}
}
TBBCaller caller(tasks);
tbb::task_scheduler_init init;
tbb::parallel_for(tbb::blocked_range<size_t>(0, tasks.size()), caller);
LOG((getcurrenttime() - t) * 1000 << " ms. (TBB)");
}
int main(int argc, char** argv)
{
int resolution = 1;
uint32_t tileSize = 64;
char * pResText = getCmdOption(argv, argv + argc, "-r");
if (pResText)
{
resolution = atoi(pResText);
}
char * pTileSizeChr = getCmdOption(argv, argv + argc, "-b");
if (pTileSizeChr)
{
tileSize = atoi(pTileSizeChr);
}
if(resolution > 16)
resolution = 16;
resolution = resolution << 10;
uint32_t tileCount = resolution/tileSize + 1;
tileCount *= tileCount;
LOG("Resolution: " << resolution << " Tile Size: "<< tileSize);
LOG("Tile Count: " << tileCount);
uint64_t pixelCount = resolution*resolution;
col4* pData = new col4[pixelCount];
memset(pData, 0, sizeof(col4)*pixelCount);
baselineSerial(pData, resolution);
memset(pData, 0, sizeof(col4)*pixelCount);
baselineParallelQt(pData, resolution, tileSize);
memset(pData, 0, sizeof(col4)*pixelCount);
baselineParallelTBB(pData, resolution, tileSize);
delete[] pData;
return 0;
}
Yes, a 4x speedup is expected. Hyperthreading is a kind of time sharing implemented in hardware, so you can't expect to benefit from it when one thread is already using all the superscalar pipelines available on the core, as is the case here. The other thread necessarily has to wait.
You can expect an even lower speedup when the memory bus bandwidth is saturated by threads running on fewer than the total number of cores available. That usually happens when you have many cores, as in this question:
Why doesn't this code scale linearly?
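A quick back-of-the-envelope shows why this particular workload is memory-bound (the numbers are illustrative, not measured): at -r 16 the buffer is 16384 x 16384 x 4 bytes = 1 GiB, and the loops do nothing but write it once. A machine sustaining on the order of 10-20 GB/s of DRAM write bandwidth finishes that in roughly 50-100 ms regardless of how many cores issue the writes, so once the bus is saturated, additional threads cannot help.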
I have an assignment to finish a Mandelbrot program in C++. I'm not that good at C++; I prefer Java or C#, but this has to be done in C++. I got some sample code which I have to finish. I'm trying to move the drawing code in main (between the "works" comments) into a method (draw_Mandelbrot). The code in the main method works and gives me a nice Mandelbrot image, but when I use the draw_Mandelbrot method (and comment out the drawing code in main), I get a grey rectangle as output. How can I make the draw_Mandelbrot method work? The code above the draw_Mandelbrot method is all sample code and not created by myself.
// mandelbrot.cpp
// compile with: g++ -std=c++11 mandelbrot.cpp -o mandelbrot
// view output with: eog mandelbrot.ppm
#include <fstream>
#include <complex> // if you make use of complex number facilities in C++
#include <iostream>
#include <cstdlib>
using namespace std;
template <class T> struct RGB { T r, g, b; };
template <class T>
class Matrix {
public:
Matrix(const size_t rows, const size_t cols) : _rows(rows), _cols(cols) {
_matrix = new T*[rows];
for (size_t i = 0; i < rows; ++i) {
_matrix[i] = new T[cols];
}
}
Matrix(const Matrix &m) : _rows(m._rows), _cols(m._cols) {
_matrix = new T*[m._rows];
for (size_t i = 0; i < m._rows; ++i) {
_matrix[i] = new T[m._cols];
for (size_t j = 0; j < m._cols; ++j) {
_matrix[i][j] = m._matrix[i][j];
}
}
}
~Matrix() {
for (size_t i = 0; i < _rows; ++i) {
delete [] _matrix[i];
}
delete [] _matrix;
}
T *operator[] (const size_t nIndex)
{
return _matrix[nIndex];
}
size_t width() const { return _cols; }
size_t height() const { return _rows; }
protected:
size_t _rows, _cols;
T **_matrix;
};
// Portable PixMap image
class PPMImage : public Matrix<RGB<unsigned char> >
{
public:
PPMImage(const size_t height, const size_t width) : Matrix(height, width) { }
void save(const std::string &filename)
{
std::ofstream out(filename, std::ios_base::binary);
out <<"P6" << std::endl << _cols << " " << _rows << std::endl << 255 << std::endl;
for (size_t y=0; y<_rows; y++)
for (size_t x=0; x<_cols; x++)
out << _matrix[y][x].r << _matrix[y][x].g << _matrix[y][x].b;
}
};
void draw_Mandelbrot(PPMImage image, const unsigned width, const unsigned height, double cxmin, double cxmax, double cymin, double cymax,unsigned int max_iterations)
{
for (std::size_t ix = 0; ix < width; ++ix)
for (std::size_t iy = 0; iy < height; ++iy)
{
std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin), cymin + iy / (height - 1.0)*(cymax - cymin));
std::complex<double> z = 0;
unsigned int iterations;
for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
z = z*z + c;
image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
}
}
int main()
{
const unsigned width = 1600;
const unsigned height = 1600;
PPMImage image(height, width);
//image[y][x].r = image[y][x].g = image[y][x].b = 255; // white pixel
//image[y][x].r = image[y][x].g = image[y][x][b] = 0; // black pixel
//image[y][x].r = image[y][x].g = image[y][x].b = 0; // black pixel
//// red pixel
//image[y][x].r = 255;
//image[y][x].g = 0;
//image[y][x].b = 0;
draw_Mandelbrot(image, width, height, -2.0, 0.5, -1.0, 1.0, 10);
//works
//double cymin = -1.0;
//double cymax = 1.0;
//double cxmin = -2.0;
//double cxmax = 0.5;
//unsigned int max_iterations = 100;
//for (std::size_t ix = 0; ix < width; ++ix)
// for (std::size_t iy = 0; iy < height; ++iy)
// {
// std::complex<double> c(cxmin + ix / (width - 1.0)*(cxmax - cxmin), cymin + iy / (height - 1.0)*(cymax - cymin));
// std::complex<double> z = 0;
// unsigned int iterations;
// for (iterations = 0; iterations < max_iterations && std::abs(z) < 2.0; ++iterations)
// z = z*z + c;
// image[iy][ix].r = image[iy][ix].g = image[iy][ix].b = iterations;
// }
//works
image.save("mandelbrot.ppm");
return 0;
}
Output image when using the code in the main method (image omitted).
You're passing the image by value, so the function works on a copy, and the image in main is left in its initial state.
Either pass by reference:
void draw_Mandelbrot(PPMImage & image, ...)
or return a value:
PPMImage draw_Mandelbrot(...) {
PPMImage image(height, width);
// your code here
return image;
}
// in main
PPMImage image = draw_Mandelbrot(...);
I had a previous question about a stack overflow error and switched to vectors for my arrays of objects. That question can be referenced here if needed: How to get rid of stack overflow error
My current question, however, is how to speed up the initialization of the vectors. My current method takes ~15 seconds; using arrays instead of vectors (small enough not to throw the stack overflow error), it took about a second.
Here is how I am initializing it:
In main.cpp I initialize my dungeon object:
dungeon = Dungeon(0, &textureHandler, MIN_X, MAX_Y);
In my Dungeon(...) constructor, I initialize my 5x5 vector of rooms and call loadDungeon:
Dungeon::Dungeon(int dungeonID, TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
currentRoomRow = 0;
currentRoomCol = 0;
for (int r = 0; r < MAX_RM_ROWS; ++r)
{
rooms.push_back(vector<Room>());
for (int c = 0; c < MAX_RM_COLS; ++c)
{
rooms[r].push_back(Room());
}
}
loadDungeon(dungeonID, textureHandler, topLeftX, topLeftY);
}
My Room constructor populates my 30x50 vector of cells (so I can set them up in the loadDungeon function):
Room::Room()
{
for (int r = 0; r < MAX_ROWS; ++r)
{
cells.push_back(vector<Cell>());
for (int c = 0; c < MAX_COLS; ++c)
{
cells[r].push_back(Cell());
}
}
}
My default cell constructor is simple and isn't doing much but I'll post it anyway:
Cell::Cell()
{
x = 0;
y = 0;
width = 16;
height = 16;
solid = false;
texCoords.push_back(0);
texCoords.push_back(0);
texCoords.push_back(1);
texCoords.push_back(0);
texCoords.push_back(1);
texCoords.push_back(1);
texCoords.push_back(0);
texCoords.push_back(1);
}
And lastly, my loadDungeon() function sets up the cells. Eventually this will read from a file and load the cells, but for now I would like to optimize it a bit if possible.
void Dungeon::loadDungeon(int dungeonID, TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
int startX = topLeftX + (textureHandler->getSpriteWidth()/2);
int startY = topLeftY - (textureHandler->getSpriteHeight()/2);
int xOffset = 0;
int yOffset = 0;
for (int r = 0; r < MAX_RM_ROWS; ++r)
{
for (int c = 0; c < MAX_RM_COLS; ++c)
{
for (int cellRow = 0; cellRow < rooms[r][c].getMaxRows(); ++cellRow)
{
xOffset = 0;
for (int cellCol = 0; cellCol < rooms[r][c].getMaxCols(); ++cellCol)
{
rooms[r][c].setupCell(cellRow, cellCol, startX + xOffset, startY - yOffset, textureHandler->getSpriteWidth(), textureHandler->getSpriteHeight(), false, textureHandler->getSpriteTexCoords("grass"));
xOffset += textureHandler->getSpriteWidth();
}
yOffset += textureHandler->getSpriteHeight();
}
}
}
currentDungeon = dungeonID;
currentRoomRow = 0;
currentRoomCol = 0;
}
So how can I speed this up so it doesn't take ~15 seconds to load every time? I feel like it shouldn't take 15 seconds to load a simple 2D game.
SOLUTION
Well, my solution was to use the std::vector::reserve call (rooms.reserve in my code), and it ended up working well. I changed my function Dungeon::loadDungeon to Dungeon::loadDefaultDungeon because it now loads from a save file.
Anyway, here is the code (I got it down to about 4-5 seconds from ~15+ seconds in debug mode):
Dungeon::Dungeon()
{
rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS);
currentDungeon = 0;
currentRoomRow = 0;
currentRoomCol = 0;
}
void Dungeon::loadDefaultDungeon(TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
int startX = topLeftX + (textureHandler->getSpriteWidth()/2);
int startY = topLeftY - (textureHandler->getSpriteHeight()/2);
int xOffset = 0;
int yOffset = 0;
cerr << "Loading default dungeon..." << endl;
for (int roomRow = 0; roomRow < MAX_RM_ROWS; ++roomRow)
{
for (int roomCol = 0; roomCol < MAX_RM_COLS; ++roomCol)
{
rooms.push_back(Room());
int curRoom = roomRow * MAX_RM_COLS + roomCol;
for (int cellRow = 0; cellRow < rooms[curRoom].getMaxRows(); ++cellRow)
{
for (int cellCol = 0; cellCol < rooms[curRoom].getMaxCols(); ++cellCol)
{
rooms[curRoom].setupCell(cellRow, cellCol, startX + xOffset, startY - yOffset, textureHandler->getSpriteWidth(), textureHandler->getSpriteHeight(), false, textureHandler->getSpriteTexCoords("default"), "default");
xOffset += textureHandler->getSpriteWidth();
}
yOffset += textureHandler->getSpriteHeight();
xOffset = 0;
}
cerr << " room " << curRoom << " complete" << endl;
}
}
cerr << "default dungeon loaded" << endl;
}
Room::Room()
{
cells.reserve(MAX_ROWS * MAX_COLS);
for (int r = 0; r < MAX_ROWS; ++r)
{
for (int c = 0; c < MAX_COLS; ++c)
{
cells.push_back(Cell());
}
}
}
void Room::setupCell(int row, int col, float x, float y, float width, float height, bool solid, /*std::array<float, 8>*/ vector<float> texCoords, string texName)
{
cells[row * MAX_COLS + col].setup(x, y, width, height, solid, texCoords, texName);
}
void Cell::setup(float x, float y, float width, float height, bool solid, /*std::array<float,8>*/ vector<float> t, string texName)
{
this->x = x;
this->y = y;
this->width = width;
this->height = height;
this->solid = solid;
for (int i = 0; i < t.size(); ++i)
this->texCoords.push_back(t[i]);
this->texName = texName;
}
It seems wasteful to have so many dynamic allocations. You can get away with one single allocation by flattening out your vector and accessing it in strides:
std::vector<Room> rooms;
rooms.resize(MAX_RM_ROWS * MAX_RM_COLS);
for (unsigned int i = 0; i != MAX_RM_ROWS; ++i)
{
for (unsigned int j = 0; j != MAX_RM_COLS; ++j)
{
Room & r = rooms[i * MAX_RM_COLS + j];
// use `r` ^^^^^^^^^^^^^^^^^^^-----<< strides!
}
}
Note how resize is performed exactly once, incurring only one single allocation, as well as default-constructing each element. If you'd rather construct each element specifically, use rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS); instead and populate the vector in the loop.
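For instance, a minimal sketch of that reserve-based variant:
std::vector<Room> rooms;
rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS); // one allocation, no elements constructed yet
for (unsigned int i = 0; i != MAX_RM_ROWS; ++i)
{
    for (unsigned int j = 0; j != MAX_RM_COLS; ++j)
    {
        rooms.push_back(Room()); // construct each element explicitly, in row-major order
    }
}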
You may also wish to profile with rows and columns swapped and see which is faster.
Since it seems that your vectors have their size defined at compile time, if you can use C++11, you may consider using std::array instead of std::vector. std::array cannot be resized and lacks many of the operations in std::vector, but is much more lightweight and it seems a good fit for what you are doing.
As an example, you could declare cells as:
#include <array>
/* ... */
std::array<std::array<Cell, MAX_COLS>, MAX_ROWS> cells;
UPDATE: since a locally defined std::array allocates its internal array on the stack, the OP will experience a stack overflow due to the considerably large size of the arrays. Still, it is possible to use an std::array (and its benefits compared to using std::vector), by allocating the array on the heap. That can be done by doing something like:
typedef std::array<std::array<Cell, MAX_COLS>, MAX_ROWS> Map;
Map* cells;
/* ... */
cells = new Map();
Even better, smart pointers can be used:
#include <memory>
/* ... */
std::unique_ptr<Map> cells;
cells = std::unique_ptr<Map>(new Map());
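With C++14 or later, that last line can be written more simply as cells = std::make_unique<Map>(); (std::make_unique is not available in C++11).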