I'm trying to create my own CFD solver in C++. I have watched some videos on YouTube about the lattice Boltzmann method, but I can't get my simulations to look like the ones in the videos, which implement lattice Boltzmann in Python.
I use SDL2 to draw an image to the screen. I am not trying to create anything fast, just something that will make pretty simulations on the CPU.
Here is my class for each cell:
// cell class
class cell {
public:
    double Fi[nL] = { 0,0,0,0,0,0,0,0,0 };
    double density = 0;
    double momentumX = 0;
    double momentumY = 0;
    double velocityX = 0;
    double velocityY = 0;
    double Fieq[nL] = { 0,0,0,0,0,0,0,0,0 };
    // obstacle flag
    bool obstacle = false;

    // macroscopic density = sum of the populations
    // (reset first, otherwise it accumulates across time steps)
    void densityOperator() {
        density = 0;
        for (int i = 0; i < nL; i++) {
            density += Fi[i];
        }
    }

    // momentum = populations weighted by the discrete velocities (reset first)
    void momentumOperator() {
        momentumX = 0;
        momentumY = 0;
        for (int i = 0; i < nL; i++) {
            momentumX += Fi[i] * cX[i];
            momentumY += Fi[i] * cY[i];
        }
    }

    // velocity = momentum / density; a single division, so no loop is needed
    void velocityOperator() {
        if (density == 0) {
            density += 0.001; // avoid division by zero
        }
        velocityX = momentumX / density;
        velocityY = momentumY / density;
    }

    // second-order D2Q9 equilibrium distribution; note the first-order
    // term divides by Cs^2, not Cs
    void FieqOperator() {
        for (int i = 0; i < nL; i++) {
            double cu = cX[i] * velocityX + cY[i] * velocityY;
            double uu = velocityX * velocityX + velocityY * velocityY;
            Fieq[i] = weights[i] * density *
                (
                    1 +
                    cu / (Cs * Cs) +
                    (cu * cu) / (2 * pow(Cs, 4)) -
                    uu / (2 * Cs * Cs)
                );
        }
    }

    // BGK collision step: relax Fi toward equilibrium
    void FiOperator() {
        for (int i = 0; i < nL; i++) {
            Fi[i] = Fi[i] - (timestep / tau) * (Fi[i] - Fieq[i]);
        }
    }

    // initialize populations with a bias in the +x direction (population 3)
    void addRightVelocity() {
        for (int i = 0; i < nL; i++) {
            Fi[i] = 1.0;
        }
        Fi[3] = 6.0;
    }
};
Please note that I am using a vector for my cells instead of a 2D array, with an index function to map from (x, y) to a 1D coordinate:
int index(int x, int y) {
    return x * nY + y;
}
Variables:
// box
const int nX = 400;
const int nY = 100;
// relaxation time (controls viscosity)
float tau = 0.5; // 0.53
// delta time per iteration
float timestep = 1;
// distance between cells (usually 1 in lattice units)
float dist = 1000;
// speed of sound
float Cs = 1 / sqrt(3) * (dist / timestep);
// kinematic viscosity
float v = pow(Cs, 2) * (tau - timestep / 2); // tau will need to be much smaller
// time steps
int nT = 3000;
// lattice speeds and weights
const int nL = 9;
// Ci vector directions, discrete velocities
int cX[9] = { 0, 0, 1, 1, 1, 0, -1, -1, -1 };
int cY[9] = { 0, 1, 1, 0, -1, -1, -1, 0, 1 };
// D2Q9 weights; these must be floating-point divisions (4 / 9 is integer
// division and evaluates to 0), and the weight for population 7 (an axis
// direction) is 1/9, not 1/4
float weights[9] = { 4.f/9, 1.f/9, 1.f/36, 1.f/9, 1.f/36, 1.f/9, 1.f/36, 1.f/9, 1.f/36 };
// opposite populations (for bounce-back)
int cO[9] = { 0, 5, 6, 7, 8, 1, 2, 3, 4 };
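The cells and TempCells containers used below are not declared in the snippets above; this is the minimal global declaration I'm assuming:
// assumed global storage: the grid, plus a scratch buffer for streaming
std::vector<cell> cells;
std::vector<cell> TempCells;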
My main function:
int main() {
    // init vector of cells
    for (int x = 0; x < nX; x++) {
        for (int y = 0; y < nY; y++) {
            cell cellUnit;
            cells.push_back(cellUnit);
            TempCells.push_back(cellUnit);
        }
    }
    // SDL
    //-------------------------------------------------------------
    SDL_Window* window = nullptr;
    SDL_Renderer* renderer = nullptr;
    SDL_Init(SDL_INIT_VIDEO);
    SDL_CreateWindowAndRenderer(nX * 3, nY * 3, 0, &window, &renderer);
    SDL_RenderSetScale(renderer, 3, 3);
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderClear(renderer);
    //-------------------------------------------------------------
    // circle obstacle generation
    for (int x = 0; x < nX; x++) {
        for (int y = 0; y < nY; y++) {
            // circle position
            int circleX = 5;
            int circleY = 50;
            // circle radius
            float radius = 10;
            // distance between cell and circle center
            float distance = sqrt(pow(circleX - x, 2) + pow(circleY - y, 2));
            cells[index(x, y)].obstacle = (distance < radius);
        }
    }
    // add velocity
    for (int x = 0; x < nX; x++) {
        for (int y = 0; y < nY; y++) {
            cells[index(x, y)].addRightVelocity();
            // random perturbation; divide by 100.0 so the result is not
            // truncated to 0 or 1 by integer division
            for (int i = 0; i < nL; i++) {
                cells[index(x, y)].Fi[i] += (rand() % 200) / 100.0;
            }
        }
    }
    for (int t = 0; t < nT; t++) {
        // SDL: clear the renderer every 20th step
        //--------------------------------------------------------------
        if (t % 20 == 0) {
            SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
            SDL_RenderClear(renderer);
        }
        //--------------------------------------------------------------
        // streaming:
        // stream into TempCells so we never overwrite a population that
        // has not been moved yet
        for (int x = 0; x < nX; x++) {
            for (int y = 0; y < nY; y++) {
                if (x == 0) {
                    // inflow: push the east-moving population at the left wall
                    cells[index(x, y)].Fi[3] += 0.4;
                }
                // for populations
                for (int i = 0; i < nL; i++) {
                    // check whether the cell is fluid or obstacle
                    if (cells[index(x, y)].obstacle == false) {
                        // fluid: target cell
                        int cellX = x + cX[i];
                        int cellY = y + cY[i];
                        // periodic boundaries: wrap to the other side
                        // (nX - 1 / nY - 1, not nX / nY, which would index
                        // out of bounds)
                        if (cellX < 0)   cellX = nX - 1;
                        if (cellX >= nX) cellX = 0;
                        if (cellY < 0)   cellY = nY - 1;
                        if (cellY >= nY) cellY = 0;
                        if (cells[index(cellX, cellY)].obstacle == true) {
                            // bounce-back: reflect the population into this
                            // cell's opposite direction instead of streaming
                            // into the wall https://youtu.be/jfk4feD7rFQ?t=2821
                            TempCells[index(x, y)].Fi[cO[i]] = cells[index(x, y)].Fi[i];
                        }
                        else {
                            // stream to the neighboring fluid cell; the else is
                            // needed so we do not also write into the obstacle
                            TempCells[index(cellX, cellY)].Fi[i] = cells[index(x, y)].Fi[i];
                        }
                    }
                    else {
                        // wall: SDL graphics
                        if (t % 20 == 0) {
                            SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                            SDL_RenderDrawPoint(renderer, x, y);
                        }
                    }
                }
            }
        }
        // copy the streamed populations back; no opposite-index flip here,
        // since streaming already wrote each population to its destination
        // index (flipping again would undo the bounce-back)
        for (int x = 0; x < nX; x++) {
            for (int y = 0; y < nY; y++) {
                for (int i = 0; i < nL; i++) {
                    cells[index(x, y)].Fi[i] = TempCells[index(x, y)].Fi[i];
                }
            }
        }
        // collision:
        for (int x = 0; x < nX; x++) {
            for (int y = 0; y < nY; y++) {
                // density:
                cells[index(x, y)].densityOperator();
                // momentum:
                cells[index(x, y)].momentumOperator();
                // velocity:
                cells[index(x, y)].velocityOperator();
                // Fieq (the operator already loops over all populations,
                // so it is called once, not nL times):
                cells[index(x, y)].FieqOperator();
                // SDL graphics; density is used directly as a colour
                // channel, so values above 255 wrap around
                if (t % 20 == 0) {
                    if (cells[index(x, y)].obstacle == false) {
                        SDL_SetRenderDrawColor(renderer, cells[index(x, y)].density, cells[index(x, y)].density, 255, 255);
                        SDL_RenderDrawPoint(renderer, x, y);
                    }
                }
            }
        }
        for (int x = 0; x < nX; x++) {
            for (int y = 0; y < nY; y++) {
                cells[index(x, y)].FiOperator();
            }
        }
        // SDL graphics
        if (t % 20 == 0) {
            SDL_RenderPresent(renderer);
        }
    }
    return 0;
}
I do realize my code might be a bit messy and not easy to understand at first, and it is definitely not optimal.
If anyone has any experience programming their own LBM in C++, I would like to hear your input.
It seems like my simulation is working, but I do not get those beautiful animations like in https://youtu.be/ZUXmO4hu-20?t=3394
Thanks for any help.
Edit:
I have edited my script to reset density, velocity X/Y and momentum X/Y each iteration.
[Simulation visualised by density; pink is higher. The colour loops if density exceeds the 255 colour range.]
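One way to stop that colour wrap-around is to clamp (or scale) the density before handing it to SDL. A small sketch; the reference density of 1.0 and the scale factor of 128 are assumptions that will need tuning for your setup:
// map density to a 0-255 colour channel instead of letting it wrap
Uint8 densityToColor(double density) {
    double scaled = (density - 1.0) * 128.0 + 128.0; // centre around mid-grey
    if (scaled < 0.0)   scaled = 0.0;
    if (scaled > 255.0) scaled = 255.0;
    return static_cast<Uint8>(scaled);
}
Then pass densityToColor(cells[index(x, y)].density) to SDL_SetRenderDrawColor instead of the raw density.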
I'm working on a program that requires calculating the inverse of an 8x8 matrix as fast as possible. Here's the code I wrote:
class matrix
{
public:
    int w, h;
    std::vector<std::vector<float>> cell;
    matrix(int width, int height)
    {
        w = width;
        h = height;
        cell.resize(width);
        for (size_t i = 0; i < cell.size(); i++)
        {
            cell[i].resize(height);
        }
    }
};

matrix transponseMatrix(matrix M)
{
    matrix A(M.h, M.w);
    for (int i = 0; i < M.h; i++)
    {
        for (int j = 0; j < M.w; j++)
        {
            A.cell[i][j] = M.cell[j][i];
        }
    }
    return A;
}
float getMatrixDeterminant(matrix M)
{
    if (M.w != M.h)
    {
        std::cout << "ERROR! Matrix isn't of nXn type.\n";
        return 0.0f; // returning NULL from a float function is just 0 with a warning
    }
    float determinante = 0;
    if (M.w == 1)
    {
        determinante = M.cell[0][0];
    }
    // must be else-if: with a plain if, the 1x1 case also falls into the
    // else branch below and recurses on a 0x0 matrix
    else if (M.w == 2)
    {
        determinante = M.cell[0][0] * M.cell[1][1] - M.cell[1][0] * M.cell[0][1];
    }
    else
    {
        // Laplace (cofactor) expansion along the first row: O(n!)
        for (int i = 0; i < M.w; i++)
        {
            matrix A(M.w - 1, M.h - 1);
            int cy = 0;
            for (int y = 1; y < M.h; y++)
            {
                int cx = 0;
                for (int x = 0; x < M.w; x++)
                {
                    if (x != i)
                    {
                        A.cell[cx][cy] = M.cell[x][y];
                        cx++;
                    }
                }
                cy++;
            }
            determinante += M.cell[i][0] * pow(-1, i) * getMatrixDeterminant(A);
        }
    }
    return determinante;
}
float getComplementOf(matrix M, int X, int Y)
{
    float det;
    if (M.w != M.h)
    {
        std::cout << "ERROR! Matrix isn't of nXn type.\n";
        return 0.0f;
    }
    if (M.w == 2)
    {
        det = M.cell[1 - X][1 - Y];
    }
    else
    {
        matrix A(M.w - 1, M.h - 1);
        int cy = 0;
        for (int y = 0; y < M.h; y++)
        {
            if (y != Y)
            {
                int cx = 0;
                for (int x = 0; x < M.w; x++)
                {
                    if (x != X)
                    {
                        A.cell[cx][cy] = M.cell[x][y];
                        cx++;
                    }
                }
                cy++;
            }
        }
        det = getMatrixDeterminant(A);
    }
    return (pow(-1, X + Y) * det);
}
matrix invertMatrix(matrix M)
{
    matrix A(M.w, M.h);
    float det = getMatrixDeterminant(M);
    if (det == 0)
    {
        std::cout << "ERROR! Matrix inversion impossible (determinant is equal to 0).\n";
        return A;
    }
    for (int i = 0; i < M.h; i++)
    {
        for (int j = 0; j < M.w; j++)
        {
            A.cell[j][i] = getComplementOf(M, j, i) / det;
        }
    }
    A = transponseMatrix(A);
    return A;
}
While it does work, it does so way too slowly for my purposes, managing to calculate an 8x8 matrix's inverse only about 6 times per second.
I've tried searching for more efficient ways to invert a matrix but was unsuccessful in finding solutions for matrices of these dimensions.
However, I did find conversations in which people claimed that for matrices below 50x50 or even 1000x1000, time shouldn't be a problem, so I was wondering if I have missed something, either a faster method or some unnecessary calculations in my code.
Does anyone have experience regarding this and/or advice?
Sorry for broken English.
Your implementation has problems, as others commented on the question. The largest bottleneck is the algorithm itself: cofactor expansion calculates tons of determinants recursively, and it's O(n!).
If you want a simple implementation, just implement Gaussian elimination. See finding the inverse of a matrix and the pseudocode at Wikipedia. It will perform fast enough for small sizes such as 8x8.
If you want a more complex but more efficient implementation, use a library optimized for LU decomposition (Gaussian elimination), QR decomposition, etc., such as LAPACK or OpenCV.
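For reference, here is a minimal sketch of Gauss-Jordan elimination with partial pivoting, which is O(n^3) instead of O(n!). It uses plain nested vectors rather than your matrix class, and the singularity tolerance of 1e-9 is an arbitrary choice:
#include <cmath>
#include <stdexcept>
#include <utility>
#include <vector>

// invert an n x n matrix via Gauss-Jordan elimination with partial
// pivoting; m is row-major, m[row][col]
std::vector<std::vector<float>> gaussJordanInverse(std::vector<std::vector<float>> m)
{
    const int n = static_cast<int>(m.size());
    // start inv as the identity; the row operations that turn m into I
    // turn inv into m's inverse
    std::vector<std::vector<float>> inv(n, std::vector<float>(n, 0.0f));
    for (int i = 0; i < n; i++)
        inv[i][i] = 1.0f;
    for (int col = 0; col < n; col++)
    {
        // partial pivoting: pick the row with the largest entry in this column
        int pivot = col;
        for (int r = col + 1; r < n; r++)
            if (std::fabs(m[r][col]) > std::fabs(m[pivot][col]))
                pivot = r;
        if (std::fabs(m[pivot][col]) < 1e-9f)
            throw std::runtime_error("matrix is singular (or nearly so)");
        std::swap(m[col], m[pivot]);
        std::swap(inv[col], inv[pivot]);
        // scale the pivot row so the pivot element becomes 1
        const float scale = 1.0f / m[col][col];
        for (int c = 0; c < n; c++)
        {
            m[col][c] *= scale;
            inv[col][c] *= scale;
        }
        // eliminate this column from every other row
        for (int r = 0; r < n; r++)
        {
            if (r == col)
                continue;
            const float f = m[r][col];
            for (int c = 0; c < n; c++)
            {
                m[r][c] -= f * m[col][c];
                inv[r][c] -= f * inv[col][c];
            }
        }
    }
    return inv;
}
For n = 8 this is only a few hundred multiply-adds per inversion, so it should run many thousands of times per second rather than 6.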
I'm trying to do a normalization of data for a polynomial interpolation with a perceptron. I'm using the following formula:
Z = (xi - x̄) / s
Where:
xi is a data point (x1, x2, ..., xn),
x̄ is the sample mean,
s is the sample standard deviation,
and Z is my new input value for the perceptron.
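As a quick standalone check of the formula (the sample values here are made up):
#include <cmath>
#include <iostream>
#include <vector>

int main() {
    std::vector<double> xs = { 2.0, 4.0, 6.0, 8.0 }; // made-up sample
    double mean = 0;
    for (double x : xs) mean += x;
    mean /= xs.size();                               // x̄ = 5
    double var = 0;
    for (double x : xs) var += (x - mean) * (x - mean);
    double s = std::sqrt(var / xs.size());           // population std deviation
    for (double x : xs)
        std::cout << (x - mean) / s << "\n";         // the Z values
}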
I'm programming in C++ and plotting the graph with freeglut.
My function to normalize:
vector<double> Perceptron::normalizar(double x) {
    vector<double> aux;
    aux.push_back(1.0); // bias input
    // note: this must read the same vectors that mean() and deviation()
    // fill (weights/means/deviations); the original mixed in the names
    // pesos and devianation
    for (unsigned i = 1; i < weights.size(); i++) {
        double t = (pow(x, i) - means[i]) / deviations[i];
        aux.push_back(t);
    }
    return aux;
}
The problem is: before I did the normalization, the polynomial was converging to the points. But after normalization, the polynomial is converging to other points, and I do not know where it is converging.
The formula for the polynomial would be as follows (with W being the weights of the perceptron):
y = W0 + W1*x + W2*x^2 + ... + Wn*x^n
Since each input is a power of x, I used a mean formula for each power of x.
See the code:
void Perceptron::mean(Points P) { // P is a struct with all x and y values of the points
    means.clear(); // vector that stores the means
    for (unsigned i = 0; i < weights.size(); i++) {
        double m = 0;
        for (unsigned j = 0; j < P.size(); j++) {
            m += pow(P[j].x, i); // P[j].x, not P[i].x: the inner loop
                                 // runs over the points, not the powers
        }
        means.push_back(m / P.size());
    }
}
void Perceptron::deviation(Points P) {
    deviations.clear(); // vector that stores the deviations
    for (unsigned i = 0; i < weights.size(); i++) {
        double sd = 0;
        for (unsigned j = 0; j < P.size(); j++) {
            sd += pow(pow(P[j].x, i) - means[i], 2);
        }
        deviations.push_back(sqrt(sd / P.size()));
    }
}
Here is my code for creating the Hough accumulator for lines in an image:
void hough_lines_acc(cv::Mat img_a_edges, std::vector<std::vector<int> > &hough_acc) {
    for (int r = 0; r < img_a_edges.rows; r++) {
        for (int c = 0; c < img_a_edges.cols; c++) {
            int theta = static_cast<int>(std::atan2(r, c) * 180 / M_PI);
            int rho = static_cast<int>((c * cos(theta)) + (r * sin(theta)));
            if (theta < -90) theta = -90;
            if (theta > 89) theta = 89;
            ++hough_acc[abs(rho)][theta];
        }
    }
    cv::Mat img_mat(hough_acc.size(), hough_acc[0].size(), CV_8U);
    std::cout << hough_acc.size() << " " << hough_acc[0].size() << std::endl;
    for (size_t i = 0; i < hough_acc.size(); i++) {
        for (size_t j = 0; j < hough_acc[0].size(); j++) {
            img_mat.at<int>(i, j) = hough_acc[i][j];
        }
    }
    imwrite("../output/ps1-2-b-1.png", img_mat);
}
theta varies from -90 to 89. I am getting negative rho values. Right now I am just replacing the negative rho with a positive one, but I am not getting a correct answer. What do I do with the negative rho? Please explain the answer.
theta = arctan(y / x)
rho = x * cos(theta) + y * sin(theta)
Edited code:
bool hough_lines_acc(cv::Mat img_a_edges, std::vector<std::vector<int> > &hough_acc,
                     std::vector<double> thetas, std::vector<double> rhos,
                     int rho_resolution, int theta_resolution) {
    int img_w = img_a_edges.cols;
    int img_h = img_a_edges.rows;
    int max_votes = 0;
    int min_votes = INT_MAX;
    for (int r = 0; r < img_h; r++) {
        for (int c = 0; c < img_w; c++) {
            if (img_a_edges.at<int>(r, c) == 255) {
                for (size_t i = 0; i < thetas.size(); i++) {
                    thetas[i] = (thetas[i] * M_PI / 180);
                    double rho = ((c * cos(thetas[i])) + (r * sin(thetas[i])));
                    int buff = ++hough_acc[static_cast<int>(abs(rho))][static_cast<int>(i)];
                    if (buff > max_votes) {
                        max_votes = buff;
                    }
                    if (buff < min_votes) {
                        min_votes = buff;
                    }
                }
            }
        }
    }
    double div = static_cast<double>(max_votes) / 255;
    int threshold = 10;
    int possible_edge = round(static_cast<double>(max_votes) / div) - threshold;
    props({
        {"max votes", max_votes},
        {"min votes", min_votes},
        {"scale", div}
    });
    // needed for scaling intensity for contrast
    // not sure if I am doing it correctly
    for (size_t r = 0; r < hough_acc.size(); r++) {
        for (size_t c = 0; c < hough_acc[0].size(); c++) {
            double val = hough_acc[r][c] / div;
            if (val < 0) {
                val = 0;
            }
            hough_acc[r][c] = static_cast<int>(val);
        }
    }
    cv::Mat img_mat = cv::Mat(hough_acc.size(), hough_acc[0].size(), CV_8UC1, cv::Scalar(0));
    for (size_t i = 0; i < hough_acc.size(); i++) {
        for (size_t j = 0; j < hough_acc[0].size(); j++) {
            img_mat.at<uint8_t>(i, j) = static_cast<uint8_t>(hough_acc[i][j]);
        }
    }
    imwrite("../output/ps1-2-b-1.png", img_mat);
    return true;
}
Still not correct output. What is the error here?
atan2 of two positive numbers should not be giving you negative angles; it should only give you a range of 0-90.
Also, for the Hough transform I think you want everything relative to one point (i.e. (0,0) in this case). I think for that you would actually want theta = 90 - atan2(r, c).
Admittedly, though, I am a bit confused, as I thought you had to encode line direction rather than just the edge point, i.e. at each edge point you provide a discrete array of guessed line trajectories and calculate rho and theta for each one, throwing all of those into your accumulator. As is, I am not sure what you are calculating.
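The conventional fix for negative rho is to keep it and shift the index: rho for a line through the image lies in [-diag, +diag], so size the accumulator 2*diag+1 rows and add diag to the index. Likewise, convert each theta to radians in a local variable; the edited code overwrites thetas[i] in place inside the pixel loop, so the values get re-multiplied by pi/180 for every edge pixel. A sketch of this, assuming an 8-bit binary edge image where edge pixels are 255:
#include <cmath>
#include <vector>
#include <opencv2/opencv.hpp>

// vote over all candidate angles for each edge pixel; rho is offset by
// the diagonal length so negative values get valid (non-negative) indices
void hough_lines_acc_sketch(const cv::Mat& edges,
                            std::vector<std::vector<int>>& acc) {
    const int diag = static_cast<int>(
        std::ceil(std::sqrt(double(edges.rows) * edges.rows +
                            double(edges.cols) * edges.cols)));
    acc.assign(2 * diag + 1, std::vector<int>(180, 0));
    for (int r = 0; r < edges.rows; r++) {
        for (int c = 0; c < edges.cols; c++) {
            if (edges.at<uchar>(r, c) != 255) continue; // edge pixels only
            // one vote per candidate angle, not one per pixel
            for (int t = -90; t < 90; t++) {
                double rad = t * CV_PI / 180.0; // local conversion to radians
                int rho = static_cast<int>(std::round(c * std::cos(rad) +
                                                      r * std::sin(rad)));
                ++acc[rho + diag][t + 90]; // shift both indices to >= 0
            }
        }
    }
}
Note also that edges.at<uchar> is used rather than at<int>, since Canny-style edge images are CV_8U.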
I am trying to make an alpha-trimmed filter with the OpenCV library. My code is not working properly and the resulting image does not look like an image after filtering.
The filter should work in the following way:
Choose some (array of) pixels; in my example it is a 9-pixel (3x3) window.
Order them in increasing order.
Cut alpha/2 elements off both ends of the sorted 'array'.
Calculate the arithmetic mean of the remaining pixels and insert it in the proper place.
int alphatrimmed(Mat img, int alpha)
{
    Mat img9 = img.clone();
    const int start = alpha / 2;
    const int end = 9 - (alpha / 2);
    // going through the whole image
    for (int i = 1; i < img.rows - 1; i++)
    {
        for (int j = 1; j < img.cols - 1; j++)
        {
            uchar element[9] = { 0 }; // zero-initialize before summing into it
            Vec3b element3[9];
            int k = 0;
            int a = 0;
            // selecting elements for the 3x3 window
            for (int m = i - 1; m < i + 2; m++)
            {
                for (int n = j - 1; n < j + 2; n++)
                {
                    element3[a] = img.at<Vec3b>(m * img.cols + n);
                    a++;
                    for (int c = 0; c < img.channels(); c++)
                    {
                        element[k] += img.at<Vec3b>(m * img.cols + n)[c];
                    }
                    k++;
                }
            }
            // comparing and sorting elements in the window (uchar element[9])
            for (int b = 0; b < end; b++)
            {
                int min = b;
                for (int d = b + 1; d < 9; d++)
                {
                    if (element[d] < element[min])
                    {
                        min = d;
                        const uchar temp = element[b];
                        element[b] = element[min];
                        element[min] = temp;
                        const Vec3b temporary = element3[b];
                        element3[b] = element3[min];
                        element3[min] = temporary;
                    }
                }
            }
            // index in the resultant image (after the alpha-trimmed filter)
            int result = (i - 1) * (img.cols - 2) + j - 1;
            for (int l = start; l < end; l++)
                img9.at<Vec3b>(result) += element3[l];
            img9.at<Vec3b>(result) /= (9 - alpha);
        }
    }
    namedWindow("AlphaTrimmed Filter", WINDOW_AUTOSIZE);
    imshow("AlphaTrimmed Filter", img9);
    return 0;
}
Without actual data it's somewhat of a guess, but a uchar can't hold the sum of 3 channels; the sum works modulo 256 (at least on any platform OpenCV supports).
The proper solution is std::sort with a proper comparator for your Vec3b (note it must return bool, not void):
bool L1(Vec3b a, Vec3b b) { return a[0] + a[1] + a[2] < b[0] + b[1] + b[2]; }
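A usage sketch putting the pieces together (the helper names here are mine, not from your code): sorting by channel sum uses int arithmetic, so nothing wraps modulo 256, and the trimmed mean also accumulates in int before converting back to uchar:
#include <algorithm>
#include <opencv2/opencv.hpp>

using cv::Vec3b;

// comparator: order pixels by their channel sum (promoted to int, no wrap)
bool sumLess(const Vec3b& a, const Vec3b& b) {
    return a[0] + a[1] + a[2] < b[0] + b[1] + b[2];
}

// sort the 3x3 window, drop alpha/2 values from each end, average the rest
Vec3b alphaTrimmedMean(Vec3b window[9], int alpha) {
    std::sort(window, window + 9, sumLess);
    const int start = alpha / 2, end = 9 - alpha / 2;
    int sum[3] = { 0, 0, 0 }; // accumulate in int, not uchar
    for (int i = start; i < end; i++)
        for (int c = 0; c < 3; c++)
            sum[c] += window[i][c];
    const int kept = end - start;
    return Vec3b(static_cast<uchar>(sum[0] / kept),
                 static_cast<uchar>(sum[1] / kept),
                 static_cast<uchar>(sum[2] / kept));
}
In the filter loop you would then assign the returned pixel with img9.at<Vec3b>(i, j) = alphaTrimmedMean(element3, alpha); instead of summing Vec3b values in place, which also overflows per channel.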