I used ArUco to build an AR project that registers four points (an SVD-based rigid registration, similar to ICP). The position of each pasted marker is obtained from the tvec returned by solvePnP, and the virtual object is placed relative to points picked in the VTK world coordinate system. However, the registration result is wrong, and I have not been able to solve it so far. The following is my core code.
I tested the transformation matrix and the picked coordinates separately, and they both seem to be correct.
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
{
ui.setupUi(this);
initTitle();
initBackGround();
initToolBar();
initConnect();
initTableRightMenu();
initTable();
//ui.toolRB->setVisible(true);
timer = new QTimer(this);
movingPoints.push_back(new PointSelf());
movingPoints.push_back(new PointSelf());
movingPoints.push_back(new PointSelf());
movingPoints.push_back(new PointSelf());
}
void MainWindow::normalSyle()
{
vtkSmartPointer<vtkInteractorStyleTrackballCamera> style =
vtkSmartPointer<vtkInteractorStyleTrackballCamera>::New();
this->ui.openGLWidgetRB->GetRenderWindow()->GetInteractor()->SetInteractorStyle(style);
this->ui.openGLWidgetRB->GetRenderWindow()->GetInteractor()->RemoveObservers(vtkCommand::RightButtonPressEvent);
}
void MainWindow::initTable()
{
ui.tableMeasure->setColumnCount(4);
ui.tableMeasure->setRowCount(8);
ui.tableMeasure->horizontalHeader()->setStyleSheet("QHeaderView::section {background-color:rgb(181, 184, 182);color: black;}");
ui.tableMeasure->horizontalHeader()->setDisabled(true);
ui.tableMeasure->setHorizontalHeaderLabels(QStringList() << "name" << "X" << "Y" << "Z");
ui.tableMeasure->setItem(0, 0, new QTableWidgetItem(QStringLiteral("markerId=0")));
ui.tableMeasure->setItem(1, 0, new QTableWidgetItem(QStringLiteral("markerId=1")));
ui.tableMeasure->setItem(2, 0, new QTableWidgetItem(QStringLiteral("markerId=2")));
ui.tableMeasure->setItem(3, 0, new QTableWidgetItem(QStringLiteral("markerId=3")));
ui.tableMeasure->setItem(4, 0, new QTableWidgetItem(QStringLiteral("markerId=4")));
ui.tableMeasure->setItem(5, 0, new QTableWidgetItem(QStringLiteral("markerId=5")));
ui.tableMeasure->setItem(6, 0, new QTableWidgetItem(QStringLiteral("Point1")));
ui.tableMeasure->setItem(7, 0, new QTableWidgetItem(QStringLiteral("Point2")));
}
double* MainWindow::dealPointPick()
{
QSize size;
size = this->ui.openGLWidgetRB->size();
double *picked = new double[3];
vtkSmartPointer<vtkCellPicker> cellPicker =
vtkSmartPointer<vtkCellPicker>::New();
cellPicker->SetTolerance(0.005);
cellPicker->Pick(rightClikPoint.x(), size.height() - rightClikPoint.y() - 1, 0, ui.openGLWidgetRB->GetRenderWindow()->GetRenderers()->GetFirstRenderer());
cellPicker->GetPickPosition(picked);
if (cellPicker->GetPointId() < 0) { delete[] picked; return nullptr; } // free the buffer when nothing was picked
return picked;
}
void MainWindow::dealMarkerId0()
{
double* position = dealPointPick();
if (position == nullptr) return; // check before storing, otherwise a null pointer ends up in pickedPoints
pickedPoints.push_back(position);
for (int i = 0; i < 3; i++)
{
ui.tableMeasure->setItem(0, 1 + i, new QTableWidgetItem(QString::number(position[i])));
}
}
void MainWindow::dealMarkerId1()
{
double* position = dealPointPick();
if (position == nullptr) return;
pickedPoints.push_back(position);
for (int i = 0; i < 3; i++)
{
ui.tableMeasure->setItem(1, 1 + i, new QTableWidgetItem(QString::number(position[i])));
}
}
void MainWindow::dealMarkerId2()
{
double* position = dealPointPick();
if (position == nullptr) return;
pickedPoints.push_back(position);
for (int i = 0; i < 3; i++)
{
ui.tableMeasure->setItem(2, 1 + i, new QTableWidgetItem(QString::number(position[i])));
}
}
void MainWindow::dealMarkerId3()
{
double* position = dealPointPick();
if (position == nullptr) return;
pickedPoints.push_back(position);
for (int i = 0; i < 3; i++)
{
ui.tableMeasure->setItem(3, 1 + i, new QTableWidgetItem(QString::number(position[i])));
}
}
void MainWindow::openCamera()
{
capture.open(0);
capture.set(3, 1920); // property 3 = CAP_PROP_FRAME_WIDTH
capture.set(4, 1080); // property 4 = CAP_PROP_FRAME_HEIGHT
if (!capture.isOpened())
{
cout << "camera is not open" << endl;
}
else
{
cout << "camera is open" << endl;
connect(timer, SIGNAL(timeout()), this, SLOT(updateWindow()));
timer->start(20);
}
}
Mat cameraMatrix(3, 3, cv::DataType<double>::type);
Mat distCoeffs(4, 1, cv::DataType<double>::type);
void MainWindow::updateWindow()
{
capture >> frame;
cameraMatrix.at<double>(0, 0) = 720;
cameraMatrix.at<double>(0, 1) = 0;
cameraMatrix.at<double>(0, 2) = 640;
cameraMatrix.at<double>(1, 0) = 0;
cameraMatrix.at<double>(1, 1) = 720;
cameraMatrix.at<double>(1, 2) = 360;
cameraMatrix.at<double>(2, 0) = 0;
cameraMatrix.at<double>(2, 1) = 0;
cameraMatrix.at<double>(2, 2) = 1; // the intrinsic matrix must have 1 here, not 0
distCoeffs.at<double>(0, 0) = 0;
distCoeffs.at<double>(1, 0) = 0;
distCoeffs.at<double>(2, 0) = 0;
distCoeffs.at<double>(3, 0) = 0;
cv::cvtColor(frame, image, COLOR_BGR2GRAY); // camera frames are BGR, not RGBA
aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejectPoint);
if (ids.size() > 0)
{
cv::aruco::drawDetectedMarkers(frame, corners, ids);
for (int i = 0; i < ids.size(); i++)
{
for (int j = 0; j < 4; j++)
{
circle(frame, Point(corners[i][j].x, corners[i][j].y), 3, Scalar(255, 0, 0));
}
std::vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-15, -15, 0));
objectPoints.push_back(Point3f(15, -15, 0));
objectPoints.push_back(Point3f(15, 15, 0));
objectPoints.push_back(Point3f(-15, 15, 0));
Mat rvec(3, 1, cv::DataType<double>::type);
Mat tvec(3, 1, cv::DataType<double>::type);
Mat rotationMatrix(3, 3, cv::DataType<double>::type);
Mat totM;
cv::solvePnP(objectPoints, corners[i], cameraMatrix, distCoeffs, rvec, tvec);
drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec, tvec, 50);
if (ids[i] == 0)
{
Mat rvec0(3, 1, cv::DataType<double>::type);
Mat tvec0(3, 1, cv::DataType<double>::type);
Mat rotationMatrix0(3, 3, cv::DataType<double>::type);
Mat totM0;
cv::solvePnP(objectPoints, corners[i], cameraMatrix, distCoeffs, rvec0, tvec0);
drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec0, tvec0, 50);
/*for (int i = 0; i < 3; i++)
{
double temp0 = tvec0.at<double>(i, 0);
movingPoints[0][i] = temp0;
}*/
movingPoints[0]->x = tvec0.at<double>(0, 0);
movingPoints[0]->y = tvec0.at<double>(1, 0);
movingPoints[0]->z = tvec0.at<double>(2, 0);
}
if (ids[i] == 1)
{
Mat rvec1(3, 1, cv::DataType<double>::type);
Mat tvec1(3, 1, cv::DataType<double>::type);
Mat rotationMatrix1(3, 3, cv::DataType<double>::type);
Mat totM1;
cv::solvePnP(objectPoints, corners[i], cameraMatrix, distCoeffs, rvec1, tvec1);
drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec1, tvec1, 50);
movingPoints[1]->x = tvec1.at<double>(0, 0);
movingPoints[1]->y = tvec1.at<double>(1, 0);
movingPoints[1]->z = tvec1.at<double>(2, 0);
}
if (ids[i] == 2)
{
Mat rvec2(3, 1, cv::DataType<double>::type);
Mat tvec2(3, 1, cv::DataType<double>::type);
Mat rotationMatrix1(3, 3, cv::DataType<double>::type);
Mat totM2;
cv::solvePnP(objectPoints, corners[i], cameraMatrix, distCoeffs, rvec2, tvec2);
drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec2, tvec2, 50);
movingPoints[2]->x = tvec2.at<double>(0, 0);
movingPoints[2]->y = tvec2.at<double>(1, 0);
movingPoints[2]->z = tvec2.at<double>(2, 0);
}
if (ids[i] == 3)
{
Mat rvec3(3, 1, cv::DataType<double>::type);
Mat tvec3(3, 1, cv::DataType<double>::type);
Mat rotationMatrix1(3, 3, cv::DataType<double>::type);
Mat totM3;
cv::solvePnP(objectPoints, corners[i], cameraMatrix, distCoeffs, rvec3, tvec3);
drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec3, tvec3, 50);
movingPoints[3]->x = tvec3.at<double>(0, 0);
movingPoints[3]->y = tvec3.at<double>(1, 0);
movingPoints[3]->z = tvec3.at<double>(2, 0);
}
cv::Rodrigues(rvec, rotationMatrix);
camPointMatrix->Identity();
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
camPointMatrix->SetElement(i, j, rotationMatrix.at<double>(i, j));
}
camPointMatrix->SetElement(i, 3, tvec.at<double>(i, 0));
}
}
}
//frame already carries the drawn markers and axes; converting it to grey again is not needed
cv::imshow("image", frame);
cv::waitKey(10);
}
QString dirname;
vtkSmartPointer<vtkDICOMImageReader> dicomReader =
vtkSmartPointer< vtkDICOMImageReader>::New();
vtkSmartPointer<vtkSmartVolumeMapper> volumeMapper =
vtkSmartPointer<vtkSmartVolumeMapper>::New();
vtkSmartPointer<vtkVolumeProperty> volumeProperty =
vtkSmartPointer<vtkVolumeProperty>::New();
vtkSmartPointer<vtkPiecewiseFunction> compositeOpacity =
vtkSmartPointer<vtkPiecewiseFunction>::New();
vtkSmartPointer<vtkPiecewiseFunction> volumeGradientOpacity =
vtkSmartPointer<vtkPiecewiseFunction>::New();
vtkSmartPointer<vtkColorTransferFunction> color =
vtkSmartPointer<vtkColorTransferFunction>::New();
vtkSmartPointer<vtkVolume> volume1 =
vtkSmartPointer<vtkVolume>::New();
void MainWindow::dicomImport()
{
dirname = QFileDialog::getExistingDirectory(this, "readDicom", "../CT");
if (dirname == "") return;
dicomReader->SetDataByteOrderToLittleEndian();
dicomReader->SetDirectoryName(dirname.toLatin1().data());
dicomReader->Update();
volumeMapper->SetInputData(dicomReader->GetOutput());
volumeProperty->SetInterpolationTypeToLinear();
volumeProperty->SetAmbient(0.4);
volumeProperty->SetDiffuse(0.6);
volumeProperty->SetSpecular(0.2);
compositeOpacity->AddPoint(120, 0.00);
compositeOpacity->AddPoint(200, 0.40);
compositeOpacity->AddPoint(1500, 0.60);
volumeProperty->SetScalarOpacity(compositeOpacity);
volumeGradientOpacity->AddPoint(100, 0.0);
volumeGradientOpacity->AddPoint(190, 0.5);
volumeGradientOpacity->AddPoint(300, 1.0);
color->AddRGBPoint(0.000, 0.00, 0.00, 0.00);
color->AddRGBPoint(64.00, 1.00, 0.52, 0.30);
color->AddRGBPoint(4000.0, 1.00, 1.00, 1.00);
color->AddRGBPoint(1200.0, 0.20, 0.20, 0.20);
volumeProperty->SetColor(color);
volume1->SetMapper(volumeMapper);
volume1->SetProperty(volumeProperty);
render->AddVolume(volume1);
render->ResetCamera();
render->GetActiveCamera()->ParallelProjectionOn();
render->ResetCameraClippingRange();
this->ui.openGLWidgetRB->GetRenderWindow()->GetInteractor()->RemoveObservers(vtkCommand::RightButtonPressEvent);
this->ui.openGLWidgetRB->GetRenderWindow()->Render();
}
void MainWindow::SVDRegistration()
{
double firstArrayPoints[3][4] = { 0.0 };
double OriginPoints[3][4] = { 0.0 };
double TargetPoint[3][4] = { 0.0 };
// fixedPoints is assumed to be a member array double fixedPoints[4][3]
for (int i = 0; i < 4; i++)
{
fixedPoints[i][0] = pickedPoints[i][0];
fixedPoints[i][1] = pickedPoints[i][1];
fixedPoints[i][2] = pickedPoints[i][2];
}
for (int i = 0; i < 4; i++)
{
OriginPoints[0][i] = fixedPoints[i][0];
OriginPoints[1][i] = fixedPoints[i][1];
OriginPoints[2][i] = fixedPoints[i][2];
TargetPoint[0][i] = movingPoints[i]->x;
TargetPoint[1][i] = movingPoints[i]->y;
TargetPoint[2][i] = movingPoints[i]->z;
}
Eigen::MatrixXd OriginMatrix(3, 4);
Eigen::MatrixXd TargetMatrix(3, 4);
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 4; j++)
{
OriginMatrix(i, j) = OriginPoints[i][j];
TargetMatrix(i, j) = TargetPoint[i][j];
}
}
double MassPointOrigin[3] = { 0.0 };
double MassPointTarget[3] = { 0.0 };
for (int i = 0; i < 4; i++)
{
MassPointOrigin[0] += OriginPoints[0][i];
MassPointOrigin[1] += OriginPoints[1][i];
MassPointOrigin[2] += OriginPoints[2][i];
MassPointTarget[0] += TargetPoint[0][i];
MassPointTarget[1] += TargetPoint[1][i];
MassPointTarget[2] += TargetPoint[2][i];
}
MassPointOrigin[0] = MassPointOrigin[0] / 4;
MassPointOrigin[1] = MassPointOrigin[1] / 4;
MassPointOrigin[2] = MassPointOrigin[2] / 4;
MassPointTarget[0] = MassPointTarget[0] / 4;
MassPointTarget[1] = MassPointTarget[1] / 4;
MassPointTarget[2] = MassPointTarget[2] / 4;
double OriginPointsAfterM[3][4] = { 0.0 };
double TargetPointAfterM[3][4] = { 0.0 };
for (int i = 0; i < 4; i++)
{
OriginPointsAfterM[0][i] = OriginPoints[0][i] - MassPointOrigin[0];
OriginPointsAfterM[1][i] = OriginPoints[1][i] - MassPointOrigin[1];
OriginPointsAfterM[2][i] = OriginPoints[2][i] - MassPointOrigin[2];
TargetPointAfterM[0][i] = TargetPoint[0][i] - MassPointTarget[0];
TargetPointAfterM[1][i] = TargetPoint[1][i] - MassPointTarget[1];
TargetPointAfterM[2][i] = TargetPoint[2][i] - MassPointTarget[2];
}
Eigen::MatrixXd MatrixOriginAM(3, 4);
Eigen::MatrixXd MatrixTarget(3, 4);
Eigen::MatrixXd MatrixTargetAM(4, 3);
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 4; j++)
{
MatrixOriginAM(i, j) = OriginPointsAfterM[i][j];
MatrixTarget(i, j) = TargetPointAfterM[i][j];
}
}
MatrixTargetAM = MatrixTarget.transpose();
Eigen::MatrixXd WMatrix = MatrixOriginAM * MatrixTargetAM;
Eigen::JacobiSVD <Eigen::MatrixXd> svd(WMatrix, Eigen::ComputeThinU | Eigen::ComputeThinV);
Eigen::MatrixXd UMatrix = svd.matrixU();
Eigen::MatrixXd right_singular_vectors = svd.matrixV();
Eigen::MatrixXd VTMatrix = right_singular_vectors.transpose();
Eigen::MatrixXd RotationMatrix = UMatrix * VTMatrix;
double Rotate[3][3] = { 0.0 };
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
Rotate[i][j] = RotationMatrix(i, j);
}
}
Eigen::MatrixXd MassCentreOfTarget(3, 1);
Eigen::MatrixXd MassCentreOfOrigin(3, 1);
for (int i = 0; i < 3; i++)
{
MassCentreOfTarget(i, 0) = MassPointTarget[i];
MassCentreOfOrigin(i, 0) = MassPointOrigin[i];
}
Eigen::MatrixXd TranslationMatrix = MassCentreOfOrigin - RotationMatrix * MassCentreOfTarget;
double Translation[3] = { 0.0 };
for (int i = 0; i < 3; i++)
{
Translation[i] = TranslationMatrix(i, 0);
}
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
ReturnMatrix[i][j] = Rotate[i][j];
}
}
for (int i = 0; i < 3; i++)
{
ReturnMatrix[i][3] = Translation[i];
}
ReturnMatrix[3][0] = ReturnMatrix[3][1] = ReturnMatrix[3][2] = 0.0; // bottom row must be 0 0 0 1
ReturnMatrix[3][3] = 1;
double deepCopyMatrix[16] = {0.0 };
for (int i = 0; i < 4; i++)
{
deepCopyMatrix[i * 4 + 0] = ReturnMatrix[i][0];
deepCopyMatrix[i * 4 + 1] = ReturnMatrix[i][1];
deepCopyMatrix[i * 4 + 2] = ReturnMatrix[i][2];
deepCopyMatrix[i * 4 + 3] = ReturnMatrix[i][3];
}
transMatrix->DeepCopy(deepCopyMatrix);
volume1->SetUserMatrix(transMatrix);
render->AddVolume(volume1);
render->Render();
this->ui.openGLWidgetRB->GetRenderWindow()->GetInteractor()->RemoveObservers(vtkCommand::RightButtonPressEvent);
this->ui.openGLWidgetRB->GetRenderWindow()->Render();
}
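As a side note, Eigen ships a closed-form implementation of the same rigid (Kabsch/Umeyama) fit in its Geometry module. A minimal sketch, assuming the goal is the transform that maps the solvePnP tvec points (movingPoints) onto the picked VTK points (fixedPoints), with both point sets packed one point per column (the function name is illustrative):
#include <Eigen/Geometry> // for Eigen::umeyama
// cameraPts / vtkPts: 3x4 matrices, one corresponding point per column, same order.
// umeyama(src, dst, false) returns the 4x4 rigid transform T with dst ~= T * src (no scaling).
Eigen::Matrix4d rigidFit(const Eigen::MatrixXd &cameraPts, const Eigen::MatrixXd &vtkPts)
{
Eigen::Matrix4d T = Eigen::umeyama(cameraPts, vtkPts, false);
return T;
}
Comparing its 3x3 rotation block and last column against RotationMatrix and TranslationMatrix computed above is a quick way to confirm the hand-rolled SVD and to check that the matrix is applied in the intended direction before it is passed to SetUserMatrix.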
void MainWindow::stlImport()
{
stlReader = vtkSmartPointer<vtkSTLReader>::New();
QString filename;
filename = QFileDialog::getOpenFileName(this, QStringLiteral("Read STL"), "../", "*.stl;*.STL");
if (filename == "") return;
stlReader->SetFileName(filename.toUtf8().data());
stlReader->Update();
displayClipData(stlReader->GetOutput());
}
void MainWindow::displayClipData(vtkPolyData *input)
{
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputData(input);
mapper->ScalarVisibilityOff();
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->GetProperty()->SetColor(1.0, 1.0, 1.0);
actor->GetProperty()->SetSpecular(0.5);
actor->VisibilityOn();
render->AddActor(actor);
//render->SetBackground(0.157, 0.157, 0.157);
render->ResetCamera();
render->GetActiveCamera()->ParallelProjectionOn();
render->ResetCameraClippingRange();
this->ui.openGLWidgetRB->GetRenderWindow()->GetInteractor()->RemoveObservers(vtkCommand::RightButtonPressEvent);
this->ui.openGLWidgetRB->GetRenderWindow()->Render();
}
Related
I want to implement a region-growing algorithm for the components Cr and Cb (YCbCr), separately and combined, with a manually chosen seed point (mouse click).
At the moment I have two functions that implement region growing for the H component in the HSV color space.
bool isOk(int new_x, int new_y, int width, int height)
{
if (new_x < 0 || new_y < 0 || new_x >= width || new_y >= height)
return false;
return true;
}
void lab04_MouseCallback(int event, int x, int y, int flags, void* param)
{
Mat* src = (Mat*)param;
int height = (*src).rows;
int width = (*src).cols;
if (event == CV_EVENT_LBUTTONDOWN)
{
printf("Seed point(x,y): %d,%d\n", x, y);
Mat labels = Mat::zeros((*src).size(), CV_16UC1);
int w = 3,
hue_avg = 0,
inf_x, sup_x,
inf_y, sup_y,
cnt = 0;
inf_x = (x - w < 0) ? 0 : x - w;
inf_y = (y - w < 0) ? 0 : y - w;
sup_x = (x + w >= width) ? (width - 1) : x + w;
sup_y = (y + w >= height) ? (height - 1) : y + w;
printf("inf x: %d sup x: %d --- inf y: %d sup y: %d\n", inf_x, sup_x, inf_y, sup_y);
for (int i = inf_y; i <= sup_y; ++i)
{
for (int j = inf_x; j <= sup_x; ++j)
{
hue_avg += (*src).data[i * width + j];
//printf("H at <%d, %d> is %d\n", i, j, (*src).data[i * width + j]);
}
}
hue_avg /= (sup_x - inf_x + 1) * (sup_y - inf_y + 1);
printf("Hue average: %d\n\n", hue_avg);
int k = 1, N = 1, hue_std = 10;
int konst = 3;
int T = konst * (float)hue_std;
queue<Point> Q;
Q.push(Point(x, y));
while (!Q.empty())
{
int dx[8] = { -1, 0, 1, 1, 1, 0, -1, -1 };
int dy[8] = { -1, -1, -1, 0, 1, 1, 1, 0 };
Point temp = Q.front();
Q.pop();
for (int dir = 0; dir < 8; ++dir)
{
int new_x = temp.x + dx[dir];
int new_y = temp.y + dy[dir];
if (isOk(new_x, new_y, width, height))
{
//printf("(%d, %d)\n", new_x, new_y);
if (labels.at<ushort>(new_y, new_x) == 0)
{
//printf("labels(%d, %d) = %hu\n", new_x, new_y, labels.at<ushort>(new_y, new_x));
if (abs((*src).at<uchar>(new_y, new_x) - hue_avg) < T)
{
//printf("this one\n");
Q.push(Point(new_x, new_y));
labels.at<ushort>(new_y, new_x) = k;
hue_avg = ((N * hue_avg) + (*src).at<uchar>(new_y, new_x)) / (N + 1);
++N;
}
}
}
}
}
Mat dst = (*src).clone();
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (labels.at<ushort>(i, j) == 1)
{
dst.at<uchar>(i, j) = 255;
}
else
{
dst.at<uchar>(i, j) = 0;
}
}
}
imshow("dst", dst);
}
}
void lab04_MouseClick()
{
Mat src;
Mat hsv;
// Read image from file
char fname[MAX_PATH];
while (openFileDlg(fname))
{
src = imread(fname);
int height = src.rows;
int width = src.cols;
//Create a window
namedWindow("My Window", 1);
// Apply a Gaussian low-pass filter to remove noise: it is essential to apply it
GaussianBlur(src, src, Size(5, 5), 0, 0);
// The Hue colour component of the HSV model
Mat H = Mat(height, width, CV_8UC1);
// define a pointer to the (8 bits/pixel) matrix used to store
// the individual H component
uchar* lpH = H.data;
cvtColor(src, hsv, CV_BGR2HSV); // BGR -> HSV conversion
// define a pointer to the (24 bits/pixel) matrix of the HSV image
uchar* hsvDataPtr = hsv.data;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
// index into the hsv matrix (24 bits/pixel)
int hi = i * width * 3 + j * 3;
int gi = i * width + j; // index into the H matrix (8 bits/pixel)
lpH[gi] = hsvDataPtr[hi] * 510 / 360; // lpH = 0 .. 255
}
}
//set the callback function for any mouse event
setMouseCallback("My Window", lab04_MouseCallback, &H);
//show the image
imshow("My Window", src);
// Wait until user press some key
waitKey(0);
}
}
How can I change this code to be for components Cr and Cb?
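A minimal sketch of the change on the preprocessing side, assuming the rest of the region-growing callback stays the same (only the channel handed to it changes; for a combined Cr/Cb criterion you would pass both planes, e.g. via a small struct, and test both differences against T inside the callback):
// Convert to YCrCb and pull out the Cr and Cb planes as 8-bit single-channel images.
Mat ycrcb;
cvtColor(src, ycrcb, CV_BGR2YCrCb); // BGR -> YCrCb (Y = channel 0, Cr = 1, Cb = 2)
vector<Mat> planes;
split(ycrcb, planes);
Mat Cr = planes[1].clone();
Mat Cb = planes[2].clone();
// Grow on Cr (or Cb) exactly as before:
setMouseCallback("My Window", lab04_MouseCallback, &Cr);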
I'm trying to create my own CFD in C++. I have watched some videos on YouTube about the Lattice Boltzmann method, but I can't get my simulations to look like the ones performed in the videos with lattice Boltzmann implemented in Python.
I use SDL2 to create an image on my screen. I am not trying to create anything fast, just something that will make pretty simulations on the CPU.
Here is my class for each cell:
//cell class
class cell {
public:
double Fi[nL] = {0,0,0,0,0,0,0,0,0};
double density = 0;
double momentumX = 0;
double momentumY = 0;
double velocityX = 0;
double velocityY = 0;
double Fieq[nL] = {0,0,0,0,0,0,0,0,0};
//obstacle
bool obstacle = false;
void densityOperator() {
for (int i = 0; i < nL; i++) {
density += Fi[i];
}
}
void momentumOperator() {
for (int i = 0; i < nL; i++) {
momentumX += Fi[i] * cX[i];
momentumY += Fi[i] * cY[i];
}
}
void velocityOperator() {
for (int i = 0; i < nL; i++) {
if (density == 0) {
density += 0.001;
}
velocityX += momentumX / density; // prolly very slow
velocityY += momentumY / density;
//velocityX += cX[i];
//velocityY += cY[i];
}
}
void FieqOperator() {
for (int i = 0; i < nL; i++) {
Fieq[i] = weights[i] * density *
(
1 +
(cX[i] * velocityX + cY[i] * velocityY) / (Cs * Cs) + // first-order term divides by Cs^2, not Cs
pow((cX[i] * velocityX + cY[i] * velocityY), 2) / (2 * pow(Cs, 4)) -
(velocityX * velocityX + velocityY * velocityY) / (2 * pow(Cs, 2))
);
}
}
void FiOperator() {
for (int i = 0; i < nL; i++) {
Fi[i] = Fi[i] - (timestep / tau) * (Fi[i] - Fieq[i]);
}
}
void addRightVelocity() {
Fi[0] = 1.f;
Fi[1] = 1.f;
Fi[2] = 1.f;
Fi[3] = 6.f;
Fi[4] = 1.f;
Fi[5] = 1.f;
Fi[6] = 1.f;
Fi[7] = 1.f;
Fi[8] = 1.f;
}
};
Please note that I am using a vector for my cells instead of a 2D array. I am using an index function to go from (x, y) to a 1D coordinate.
int index(int x, int y) {
return x * nY + y;
}
Variables:
//box
const int nX = 400;
const int nY = 100;
//viscosity
float tau = 0.5; // 0.53
//time delta time per iteration
float timestep = 1;
//distance between cells
float dist = 1000;
//Speed of sound
float Cs = 1 / sqrt(3) * (dist / timestep);
//viscosity
float v = pow(Cs, 2) * (tau - timestep / 2); // tau will need to be much smaller
//time steps
int nT = 3000;
//lattice speeds and weights
const int nL = 9;
//Ci vector direction, discrete velocity
int cX[9] = { 0, 0, 1, 1, 1, 0, -1, -1, -1 };
int cY[9] = { 0, 1, 1, 0, -1, -1, -1, 0 , 1 };
//D2Q9 lattice weights
float weights[9] = { 4.f / 9, 1.f / 9, 1.f / 36, 1.f / 9, 1.f / 36, 1.f / 9, 1.f / 36, 1.f / 9, 1.f / 36 }; // use float division; direction 7 = (-1,0) is an axis direction, so 1/9
//opposite populations
int cO[9] = { 0, 5, 6, 7, 8, 1, 2, 3, 4 };
My main function:
int main() {
//init vector cells
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cell cellUnit;
cells.push_back(cellUnit);
TempCells.push_back(cellUnit);
}
}
//SDL
//SDL
//-------------------------------------------------------------
SDL_Window* window = nullptr;
SDL_Renderer* renderer = nullptr;
SDL_Init(SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(nX* 3, nY * 3, 0, &window, &renderer);
SDL_RenderSetScale(renderer, 3, 3);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderClear(renderer);
//-------------------------------------------------------------//
//Circle Object Gen
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
//circle position
int circleX = 5;
int circleY = 50;
//circle radius
float radius = 10;
//distance between cell and circle pos
float distance = sqrt(pow(circleX - x, 2) + pow(circleY - y, 2));
if (distance < radius) {
cells[index(x,y)].obstacle = true;
}
else {
cells[index(x, y)].obstacle = false;
}
}
}
//add velocity
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cells[index(x, y)].addRightVelocity();
//random velocity
for (int i = 0; i < nL; i++) {
cells[index(x,y)].Fi[i] += (rand() % 200) / 100;
}
}
}
for (int t = 0; t < nT; t++) {
//SDL
//--------------------------------------------------------------
//clear renderer
if (t % 20 == 0) {
SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
SDL_RenderClear(renderer);
}
//--------------------------------------------------------------
//streaming:
//because we will loop over the same populations we do not want to switch the same population twice
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
if (x == 0) {
cells[index(x, y)].Fi[3] += 0.4;
}
//for populations
for (int i = 0; i < nL; i++) {
//boundary
//checks if the cell is an obstacle or air
if (cells[index(x, y)].obstacle == false) {
//air
//targeted cell
int cellX = x + cX[i];
int cellY = y + cY[i];
//out of bounds check + wrap around to the other side
if (cellX < 0) {
//left to right
cellX = nX - 1; // nX itself would be out of range for index()
}
if (cellX >= nX) {
//right to left: populations leaving the right edge are dropped rather than wrapped
cellX = 0;
continue;
}
if (cellY < 0) {
//top to bottom
cellY = nY - 1; // nY itself would be out of range for index()
}
if (cellY >= nY) {
//bottom to top
cellY = 0;
}
//if the neighboring cell is an obstacle --> bounce the population back into this cell
if (cells[index(cellX, cellY)].obstacle == true) {
//Boundary handling https://youtu.be/jfk4feD7rFQ?t=2821
TempCells[index(x,y)].Fi[cO[i]] = cells[index(x, y)].Fi[i];
}
else {
//otherwise stream to the neighboring air cell, stored under the opposite index
TempCells[index(cellX, cellY)].Fi[cO[i]] = cells[index(x, y)].Fi[i];
}
}
else {
//wall
//SDL GRAPHICS
if (t % 20 == 0) {
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderDrawPoint(renderer, x, y);
}
}
}
}
}
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
for (int i = 0; i < nL; i++) {
cells[index(x, y)].Fi[i] = TempCells[index(x, y)].Fi[cO[i]];
}
}
}
//collision:
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
//density:
cells[index(x, y)].densityOperator();
//momentum:
cells[index(x, y)].momentumOperator();
//velocity:
cells[index(x, y)].velocityOperator();
//Fieq (FieqOperator already loops over all nL populations, so call it once)
cells[index(x, y)].FieqOperator();
//SDL Graphics
if (t % 20 == 0) {
if (cells[index(x, y)].obstacle == false) {
SDL_SetRenderDrawColor(renderer, cells[index(x, y)].density, cells[index(x, y)].density , 255 , 255);
SDL_RenderDrawPoint(renderer, x, y);
}
}
}
}
for (int x = 0; x < nX; x++) {
for (int y = 0; y < nY; y++) {
cells[index(x, y)].FiOperator();
}
}
//SDL Graphics
if (t % 20 == 0 ) {
SDL_RenderPresent(renderer);
}
}
return 0;
}
I do realize my code might be a bit messy and not easy to understand at first. And it is definitely not optimal.
If anyone has any experience in programming their own LBM in C++, I would like to hear your input.
It seems like my simulation is working, but I do not get those beautiful animations like in https://youtu.be/ZUXmO4hu-20?t=3394
Thanks for any help.
Edit:
I have edited my script to reset density, velocity X/Y and momentum X/Y.
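For reference, a minimal sketch of that reset, folded into the operators themselves so the accumulators start from zero on every time step (same members as the cell class above; velocity needs no loop at all):
void densityOperator() {
density = 0; // reset before accumulating this time step
for (int i = 0; i < nL; i++) {
density += Fi[i];
}
}
void momentumOperator() {
momentumX = 0;
momentumY = 0;
for (int i = 0; i < nL; i++) {
momentumX += Fi[i] * cX[i];
momentumY += Fi[i] * cY[i];
}
}
void velocityOperator() {
double d = (density == 0) ? 0.001 : density; // avoid division by zero
velocityX = momentumX / d;
velocityY = momentumY / d;
}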
(Images: simulation visualised by density; pink is higher; the colour loops if the density exceeds the range of 255.)
I'm making a ray tracer and the code was failing, so I decided to redo the whole thing and change the logic of the objects. Now for some reason it keeps giving me an error, and I've already tried editing it multiple times. ImageMagick gives me an error saying that there isn't enough data to produce the PPM.
Other than the bit about the objects, the main logic for how the PPM is produced hasn't changed much, so I can't really figure out where the error is.
main.cpp
vec color(const ray& r, float t, vec a, vec centre)
{
vector <Light> lighting;
lighting.push_back(Light(vec(0, 0, 50), vec(0, 0, -1)));
vec totalLight{0, 0, 0};
for(int i = 0; i <lighting.size(); i++){
if(t > 0.0){
vec hit = unit_vector(r.p_at_par(t) - centre);
vec L = unit_vector(lighting[i].position() - r.p_at_par(t));
vec R = L - 2.0*dot(L, hit)*hit;
vec S = vec(1, 1, 1)*pow(max(0.f, dot(R, vec(0, 0, -1))), 50);//Specular component
vec D = (a * max(0.f, dot(L, hit)) * 1.0);//Diffuse component
totalLight += S + D;
return totalLight;
}
}
}
float clamp(float a)
{
return (a > 255)? 255: (a < 0)? 0: a;
}
int main()
{
const int w = 200, h = 100;
FILE *fp;
fp = fopen("img.ppm", "wb");
fprintf(fp, "P6\n%d %d\n255\n", w, h);
vec lower_corner(-2.0, -1.0, -1.0);
vec horizontal(4.0, 0.0, 0.0);
vec vertical(0.0, 2.0, 0.0);
vec origin(0.0, 0.0, 0.0);
vector <sphere> objects;
objects.push_back(sphere(vec(0,-100.5,-1), 100, vec(0, 1, 0)));
objects.push_back(sphere(vec(0, 0, -1), 0.5, vec(1, 0, 0)));
objects.push_back(sphere(vec(5, 5,-2), 3, vec(1, 0, 0)));
for(int j = h - 1; j >= 0; j--)
{
for(int i = 0; i < w; i++)
{
vec col(0, 0, 0);
static unsigned char pixel[3];
sphere* ClosestObject = NULL;
float u = float(i + random_double())/float(w);
float v = float(j + random_double())/float(h);
ray r(origin, lower_corner + u*horizontal + v*vertical);
float t = 0.0;
float t_near = 200000.0;
vec pixelColor(0.52 , 0.52 ,0.48);
for(int j = 0; j < objects.size(); j++)
{
if(t = objects[j].intersect(r))
{
if(t < t_near)
{
ClosestObject = &objects[j];
t_near = t;
}
}
if( t = 200000.0)
col = pixelColor;
else
col = color(r, t, ClosestObject->color, ClosestObject->centre);
pixel[0] = int(clamp(col.r() * 255));
pixel[1] = int(clamp(col.g() * 255));
pixel[2] = int(clamp(col.b() * 255));
fwrite(pixel, 3, 1, fp);
}
}
fclose(fp);
return 0;
}
}
Actually, a comment but too much text for that…
Your indentation is broken.
Hence, you didn't notice that the return 0; appears in the outer for loop, this one:
for(int j = h - 1; j >= 0; j--)
{
ends with:
fclose(fp);
return 0;
}
Additionally, the
fwrite(pixel, 3, 1, fp);
appears in the innermost loop
for (int j = 0; j < objects.size(); j++)
which is IMHO wrong as well.
So, the produced .ppm file claims to have w×h pixels but instead it provides w×objects.size() pixels.
If objects.size() < h (which I would expect) then you will have too few pixels in the .ppm file and ImageMagick will notice and complain.
Your source code, auto-formatted in my VS2017:
int main()
{
const int w = 200, h = 100;
FILE *fp;
fp = fopen("img.ppm", "wb");
fprintf(fp, "P6\n%d %d\n255\n", w, h);
vec lower_corner(-2.0, -1.0, -1.0);
vec horizontal(4.0, 0.0, 0.0);
vec vertical(0.0, 2.0, 0.0);
vec origin(0.0, 0.0, 0.0);
vector <sphere> objects;
objects.push_back(sphere(vec(0, -100.5, -1), 100, vec(0, 1, 0)));
objects.push_back(sphere(vec(0, 0, -1), 0.5, vec(1, 0, 0)));
objects.push_back(sphere(vec(5, 5, -2), 3, vec(1, 0, 0)));
for (int j = h - 1; j >= 0; j--)
{
for (int i = 0; i < w; i++)
{
vec col(0, 0, 0);
static unsigned char pixel[3];
sphere* ClosestObject = NULL;
float u = float(i + random_double()) / float(w);
float v = float(j + random_double()) / float(h);
ray r(origin, lower_corner + u * horizontal + v * vertical);
float t = 0.0;
float t_near = 200000.0;
vec pixelColor(0.52, 0.52, 0.48);
for (int j = 0; j < objects.size(); j++)
{
if (t = objects[j].intersect(r))
{
if (t < t_near)
{
ClosestObject = &objects[j];
t_near = t;
}
}
if (t = 200000.0)
col = pixelColor;
else
col = color(r, t, ClosestObject->color, ClosestObject->centre);
pixel[0] = int(clamp(col.r() * 255));
pixel[1] = int(clamp(col.g() * 255));
pixel[2] = int(clamp(col.b() * 255));
fwrite(pixel, 3, 1, fp);
}
}
fclose(fp);
return 0;
}
}
Please, check indentation and place closing curly-brackets correctly. Then it should work as before…
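To make the intended layout explicit, here is just the loop and bracket skeleton (not compilable on its own, the per-pixel body is elided):
for (int j = h - 1; j >= 0; j--)
{
for (int i = 0; i < w; i++)
{
// build the ray, loop over ALL objects to find the closest hit,
// then shade the pixel into pixel[0..2] ...
fwrite(pixel, 3, 1, fp); // exactly one write per pixel, after the object loop
}
}
fclose(fp); // after the outer loop: w*h pixels have been written
return 0;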
I am trying to make a classifier using OpenCV 3.0.0's SVM (cv::ml::SVM) and a color histogram. I already tried to make my own using the following code to build the datasets:
int labels[510];
if (label.compare("raw") == 0){ // compare() returns 0 on a match
for (int i = 0; i < 510; i++){ // fill all 510 labels, not just 509
labels[i] = 1;
}
}
else if (label.compare("ripe") == 0){
for (int i = 0; i < 510; i++){
labels[i] = 2;
}
}
else if (label.compare("rotten") == 0){
for (int i = 0; i < 510; i++){
labels[i] = 3;
}
}
float trainingData[510][2];
for (int i = 0; i < 254; i++){
trainingData[i][0] = r_hist.at<float>(i); // columns are 0 and 1; index 2 is out of bounds
trainingData[i][1] = i;
}
int j = 0;
for (int i = 255; i < 509; i++){
trainingData[i][0] = g_hist.at<float>(j); // at<float>(j - 1) with j = 0 reads out of bounds
trainingData[i][1] = i;
j++;
}
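For completeness, r_hist and g_hist above are assumed to be per-channel histograms of the input image; a minimal sketch of how they could be computed (the variable name image and the 256-bin layout are illustrative):
// Split the BGR image and build one 256-bin histogram per channel.
std::vector<cv::Mat> bgr;
cv::split(image, bgr);
int histSize = 256;
float range[] = { 0, 256 };
const float* histRange = { range };
cv::Mat b_hist, g_hist, r_hist;
cv::calcHist(&bgr[0], 1, 0, cv::Mat(), b_hist, 1, &histSize, &histRange);
cv::calcHist(&bgr[1], 1, 0, cv::Mat(), g_hist, 1, &histSize, &histRange);
cv::calcHist(&bgr[2], 1, 0, cv::Mat(), r_hist, 1, &histSize, &histRange);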
And this code for the SVM:
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
Mat labelsMat(510, 1, CV_32SC1, labels);
Mat trainingDataMat(510, 2, CV_32FC1, trainingData);
Ptr < cv::ml::SVM > svm = SVM::create();
svm = cv::Algorithm::load<ml::SVM>("svm.xml");
svm->setC(0.01);
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setTermCriteria((cvTermCriteria(TermCriteria::MAX_ITER, 100, 1e6)));
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
svm->save("svm.xml");
The problem with the code above is that it won't save properly. Is there a better way to do it?
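For reference, with the OpenCV 3 ml module the usual order is create, configure, train, then save; load is only needed when reusing an already trained model in another run. A minimal sketch (the term-criteria epsilon here is illustrative):
cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
svm->setType(cv::ml::SVM::C_SVC);
svm->setKernel(cv::ml::SVM::LINEAR);
svm->setC(0.01);
svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat);
svm->save("svm.xml");
// Later, in the program that does the prediction:
cv::Ptr<cv::ml::SVM> loaded = cv::Algorithm::load<cv::ml::SVM>("svm.xml");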
I am new to C++ and I have no real idea why my program crashes, only guesses.
The following program suddenly started to crash on line 49, at the void saveSig(cv::Mat *frame) line itself,
without even stepping into the function itself.
It ran fine before.
The program is supposed to track a person in a video under certain circumstances, which I will not go over since they haven't been implemented yet.
I can only guess that I have run out of stack and I'm not sure why; again, it might be a leak that I missed, or maybe I just ran out of stack space, or maybe it's something else entirely and very stupid.
PS: sorry if the code is not "pretty", I'm really new to C++ and OpenCV and I will appreciate any comments about "bad coding practice".
#include "myCVFunctions.h"
#include <vector>
#define LOADING_VIDEO_ERROR -1
#define LOADING_BACKGROUND_IMAGE_ERROR -2
#define FRAME_BUFFER_SIZE 10
#define SIG_BUFFER_SIZE 6
const cv::string g_c_videoFilePath = "res/tmp.mp4";
const cv::string g_c_bgFilePath = "res/bg.jpg";
const cv::Mat g_c_bg = cv::imread(g_c_bgFilePath);
const cv::Rect g_c_entranceROIRect(869, 999, 345, 80);
const cv::Rect g_c_largeEntranceROIRect(869, 340, 345, 740);
const cv::Rect g_c_sigROI(869,539,345,541);
cv::Mat g_currFrameBackup;
cv::Point g_clickCoords(-1,-1);
cv::Rect g_markedROI;
bool g_trace = false;
bool g_personInside = false;
bool g_useSig = false;
char g_sigCount = 0;
double g_sig[SIG_BUFFER_SIZE];
double g_newSig[SIG_BUFFER_SIZE];
cv::Point g_inSigHeadCoords[SIG_BUFFER_SIZE];
cv::Point g_inNewSigHeadCoords[SIG_BUFFER_SIZE];
long double av1 = 0;
long double av2 = 0;
double minDiff = 9999999999.999999;
void onMouse(int event, int x, int y, int flags, void* userdata){
if(event == CV_EVENT_LBUTTONDOWN){
g_clickCoords.x = x;
g_clickCoords.y = y;
}
if(event == CV_EVENT_MOUSEMOVE && g_clickCoords.x>=0){
g_markedROI = cv::Rect(g_clickCoords, cv::Point(x,y));
g_currFrameBackup.copyTo(*((cv::Mat*)userdata));
cv::rectangle(*((cv::Mat*)userdata), g_markedROI, cv::Scalar(0,255,0));
}
if(event == CV_EVENT_LBUTTONUP){
g_trace = true;
g_useSig = true;
g_clickCoords = cv::Point(-1,-1);
}
}
void saveSig(cv::Mat *frame){ //the crash occurs here
double fftData[512*512];
cv::Mat sigROI, sigHSV, resized;
sigROI = (*frame)(g_c_sigROI);
cv::cvtColor(sigROI, sigHSV, CV_BGR2HSV);
resized = my_cv::resize_zeros(sigHSV, cv::Size(512,512));
cv::MatIterator_<cv::Vec3b> m_it = resized.begin<cv::Vec3b>();
for(int i=0; m_it!=resized.end<cv::Vec3b>(); m_it++, i++){
fftData[i] = (*m_it)[2];
}
my_cv::FFTR fft = my_cv::createFFTR<double>(fftData, 512, 512, FFT_TYPE_2D);
//cv::flip(sigHSV, sigHSV, -1);
//cv::transpose(sigHSV, sigHSV);
//cv::flip(sigHSV, sigHSV, 0);
//cv::imshow("1", sigROI);
//cv::imshow("", sigHSV);
//cv::waitKey();
//resized = my_cv::resize_zeros(sigHSV, cv::Size(512,512));
//m_it = resized.begin<cv::Vec3b>();
//for(int i=0; m_it!=resized.end<cv::Vec3b>(); m_it++, i++){
// fftData[i] = (*m_it)[2];
//}
//my_cv::FFTR fft180 = my_cv::createFFTR<double>(fftData, 512, 512, FFT_TYPE_2D);
my_cv::FFTR multFFT = my_cv::multFFT(fft, fft);
my_cv::m_reverseFFTR(multFFT, FFT_TYPE_2D);
if(g_useSig){
g_newSig[g_sigCount] = my_cv::getFFTAverege(multFFT);
}else{
g_sig[g_sigCount] = my_cv::getFFTAverege(multFFT);
}
g_sigCount++;
if(g_sigCount>=SIG_BUFFER_SIZE&&g_useSig){
av1 = ((g_sig[0]+g_sig[1]+g_sig[2]+g_sig[3]+g_sig[4]+g_sig[5])/6)/1000000.0;
av2 = ((g_newSig[0]+g_newSig[1]+g_newSig[2]+g_newSig[3]+g_newSig[4]+g_newSig[5])/6)/1000000.0;
/*for(int i=0; i<SIG_BUFFER_SIZE; i++){
for(int j=0; j<SIG_BUFFER_SIZE; j++){
double diff = abs(g_newSig[i]-g_sig[j]);
minDiff = (diff<minDiff ? diff : minDiff);
}
}*/
my_cv::deleteFFTR(fft);
//my_cv::deleteFFTR(fft180);
my_cv::deleteFFTR(multFFT);
}
}
void proccesFrame(cv::Mat *frame){
cv::Mat grayFrame, negativeFrame, bwFrame, entranceROI;
negativeFrame = g_c_bg - *frame;
cv::cvtColor(negativeFrame, grayFrame, CV_BGR2GRAY);
cv::threshold(grayFrame, bwFrame, 30, 255, cv::THRESH_BINARY);
cv::Mat erode = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7,7));
cv::Mat dilate = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(10,10));
cv::erode(bwFrame, bwFrame, erode);
cv::dilate(bwFrame, bwFrame, dilate);
entranceROI = bwFrame(g_c_largeEntranceROIRect);
cv::MatIterator_<uchar> m_it = entranceROI.begin<uchar>();
for(g_personInside = false; m_it!=entranceROI.end<uchar>(); m_it++){
if(*m_it==255){
g_personInside = true;
break;
}
}
if(!g_personInside){
g_trace = false;
g_sigCount = 0;
av1 = 0;
av2 = 0;
minDiff = 9999999999.999999;
}else{
if(g_sigCount<SIG_BUFFER_SIZE){
cv::Mat ROI = bwFrame(g_c_entranceROIRect);
cv::MatIterator_<uchar> bw_it = bwFrame.begin<uchar>();
if(!g_useSig){
for(int i=0; bw_it!=bwFrame.end<uchar>(); bw_it++, i++){
if(*bw_it==255){
g_inSigHeadCoords[g_sigCount] = cv::Point(i%bwFrame.cols, i/bwFrame.cols);
break;
}
}
}else{
for(int i=0; bw_it!=bwFrame.end<uchar>(); bw_it++, i++){
if(*bw_it==255){
g_inNewSigHeadCoords[g_sigCount] = cv::Point(i%bwFrame.cols, i/bwFrame.cols);
break;
}
}
}
saveSig(frame);
}
cv::putText(*frame, "Person inside", cv::Point(20,120), CV_FONT_HERSHEY_PLAIN, 3.0, cv::Scalar(0,255,0), 2);
if(g_useSig&&g_sigCount>=SIG_BUFFER_SIZE){
g_sig;
g_newSig;
g_sigCount++;
//g_trace = true;
}
if(g_trace){
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
findContours(bwFrame, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
std::vector<std::vector<cv::Point>>::iterator o_it = contours.begin();
for(; o_it!=contours.end(); o_it++){
std::vector<cv::Point>::iterator i_it = (*o_it).begin();
for(; i_it!=(*o_it).end()-1; i_it++){
cv::line(*frame, *i_it, *(i_it+1), cv::Scalar(0,255,0) , 3);
}
}
}
}
}
int main(int argc, char* argv[]){
//init//////////////////////////////////////////////////////////////////////
cv::VideoCapture videoBuffer(g_c_videoFilePath);
if(!videoBuffer.isOpened()){
std::cerr << "Can't load video please check the paths\n";
return LOADING_VIDEO_ERROR;
}
if(!g_c_bg.data){
std::cerr << "Can't load background image please check the paths\n";
return LOADING_BACKGROUND_IMAGE_ERROR;
}
std::vector<cv::Mat> frameBuffer;
frameBuffer.resize(FRAME_BUFFER_SIZE);
const std::vector<cv::Mat>::iterator currFrame = frameBuffer.begin();
const cv::string mainWindow = "Object Tracker";
cv::namedWindow(mainWindow, CV_WINDOW_AUTOSIZE);
cv::setMouseCallback(mainWindow, onMouse, (void*)&(*currFrame));
//init end/////////////////////////////////////////////////////////////////////////////
//video loop///////////////////////////////////////////////////////////////////////////
for(char paused = 0;;){
paused = (cv::waitKey(20)==' ' ? 1 : 0);
while(paused){
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
cv::imshow(mainWindow, *currFrame);
paused = (cv::waitKey(20)==' ' ? 0 : 1);
}
cv::Mat frame;
videoBuffer.read(frame);
frame.copyTo(g_currFrameBackup);
frameBuffer.pop_back();
frameBuffer.insert(frameBuffer.begin(), frame);
std::stringstream ss;
ss << "Frame: " << videoBuffer.get(CV_CAP_PROP_POS_FRAMES);
cv::putText(*currFrame, ss.str().c_str(), cv::Point(20,70), CV_FONT_HERSHEY_PLAIN, 3.0, cv::Scalar(0,255,0), 2);
proccesFrame(&(*currFrame));
/*if(g_personInside){
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
while(cv::waitKey(40)!=' ')
cv::imshow(mainWindow, *currFrame);
}*/
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
cv::imshow(mainWindow, *currFrame);
}
//video loop end///////////////////////////////////////////////////////////////////////
return 0;
}
and the "myCVFunctions.h" file:
#pragma once
#include "opencv\cv.h"
#include "opencv\highgui.h"
#include "fftw3.h"
#define FFT_TYPE_1D 1
#define FFT_TYPE_2D 2
namespace my_cv{
struct myComplex{
double real;
double imag;
};
struct FFTR{
myComplex** data;
int cols;
int rows;
};
struct ENTROPR{
double** data;
int cols;
int rows;
};
void printFFTR(FFTR fft);
FFTR createFFTR(cv::Mat mGrey, int type){
FFTR result;
result.rows = mGrey.rows, result.cols = mGrey.cols;
result.data = new myComplex*[result.cols];
for(int i = 0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
fftw_complex *in, *out;
fftw_plan p;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(result.rows*result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(result.rows, result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
}
cv::MatIterator_<uchar> mGrey_it = mGrey.begin<uchar>();
for(int i=0; mGrey_it != mGrey.end<uchar>(); mGrey_it++, i++){
in[i][0] = *mGrey_it;
in[i][1] = 0;
}
fftw_execute(p);
for(int i=0; i<result.rows*result.cols; i++){
int x = i%result.cols, y = i/result.cols;
result.data[x][y].real = out[i][0];
result.data[x][y].imag = out[i][1];
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
return result;
}
template<class T> FFTR createFFTR(const T* const mat, int cols, int rows, int type){
FFTR result;
result.rows = rows, result.cols = cols;
result.data = new myComplex*[result.cols];
for(int i = 0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
fftw_complex *in, *out;
fftw_plan p;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(result.rows*result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(result.rows, result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
}
for(int i=0; i<cols*rows; i++){
in[i][0] = mat[i];
in[i][1] = 0;
}
fftw_execute(p);
for(int i=0; i<result.rows*result.cols; i++){
int x = i%result.cols, y = i/result.cols;
result.data[x][y].real = out[i][0];
result.data[x][y].imag = out[i][1];
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
return result;
}
void m_reverseFFTR(FFTR fft, int type){
fftw_complex *in, *out;
fftw_plan p;
int scaleFactor = fft.cols*fft.rows;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * fft.rows * fft.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * fft.rows * fft.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(fft.rows*fft.cols, in, out, FFTW_BACKWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(fft.rows, fft.cols, in, out, FFTW_BACKWARD, FFTW_ESTIMATE);
break;
}
for(int j=0; j<fft.rows; j++)
for(int i=0; i<fft.cols; i++){
int index = j*fft.cols+i;
in[index][0] = fft.data[i][j].real;
in[index][1] = fft.data[i][j].imag;
}
fftw_execute(p);
for(int i=0; i<fft.rows*fft.cols; i++){
int x = i%fft.cols, y = i/fft.cols;
fft.data[x][y].real = out[i][0]/scaleFactor;
fft.data[x][y].imag = out[i][1]/scaleFactor;
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
}
FFTR multFFT(const FFTR fft1, const FFTR fft2){
FFTR result;
result.cols = fft1.cols;
result.rows = fft1.rows;
result.data = new myComplex*[result.cols];
for(int i=0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
for(int i=0; i<result.cols; i++){
for(int j=0; j<result.rows; j++){
result.data[i][j].real = (fft1.data[i][j].real*fft2.data[i][j].real)-(fft1.data[i][j].imag*fft2.data[i][j].imag);
result.data[i][j].imag = (fft1.data[i][j].real*fft2.data[i][j].imag)+(fft1.data[i][j].imag*fft2.data[i][j].real);
}
}
return result;
}
long double getFFTAverege(FFTR fft){
long double result = 0;
for(int i=0; i<fft.cols; i++){
long double sum=0;
for(int j=0; j<fft.rows; j++){
sum += fft.data[i][j].real;
}
result += sum/fft.rows;
}
return result/fft.rows;
}
void deleteFFTR(FFTR fftr){
for(int i=0; i<fftr.cols; i++)
if(fftr.data[i]) delete [] fftr.data[i];
if(fftr.data) delete [] fftr.data;
}
void printFFTR(FFTR fft){
for(int j=0; j<fft.rows; j++){
for(int i=0; i<fft.cols; i++){
printf("%f%si%f\n", fft.data[i][j].real, (fft.data[i][j].imag<0 ? "-" : "+"), abs(fft.data[i][j].imag));
}
}
}
cv::Mat resize_zeros(const cv::Mat src, cv::Size newSize){
cv::Mat srcROI, result, resultROI;
result.create(newSize, src.type());
srcROI = src(cv::Rect(0,0,(src.cols>result.cols ? result.cols : src.cols), (src.rows>result.rows ? result.rows : src.rows)));
result = 0;
resultROI = result(cv::Rect(0,0, srcROI.cols, srcROI.rows));
srcROI.copyTo(resultROI);
return result;
}
//otsu's threshhold
template<class T> T getThreshold(cv::Mat mGrey){
uchar* image = mGrey.data;
int columns = mGrey.cols;
int rows = mGrey.rows;
const T SIGMA = 0.000001;
const int num_bins = 257;
int counts[num_bins] = {0};
T p[num_bins] = {0};
T mu[num_bins] = {0};
T omega[num_bins] = {0};
T sigma_b_squared[num_bins] = {0};
int sumC;
// calculate histogram
for(int i = 0; i < rows*columns; i++)
counts[image[i]]++;
sumC = 0;
for(int i = 0; i < num_bins; i++)
sumC += counts[i];
for(int i = 0; i < num_bins; i++)
p[i] = ((T)counts[i])/sumC;
mu[0] = omega[0] = p[0];
for(int i = 1; i < num_bins; i++){
omega[i] = omega[i-1] + p[i];
mu[i] = mu[i-1] + p[i]*(i+1);
}
T mu_t = mu[num_bins-1];
T maxval = -1.0;
for(int i = 0; i < num_bins; i++){
T v = mu_t * omega[i] - mu[i];
if (omega[i] > SIGMA && abs(1.0-omega[i]) > SIGMA){
sigma_b_squared[i] = v*v/(omega[i]* (1.0 - omega[i]));
maxval = std::max(maxval,sigma_b_squared[i]);
}
}
// Find the location of the maximum value of sigma_b_squared.
// The maximum may extend over several bins, so average together the
// locations.
// If maxval == -1, sigma_b_squared is not defined, then return 0.
T level = 0;
if (maxval > 0){
T idx = 0;
int maxNumbers = 0;
for(int i = 0; i < num_bins; i++){
if (sigma_b_squared[i] == maxval){
idx += i;
maxNumbers++;
}
}
if (maxNumbers >= 0){
idx /= maxNumbers;
// Normalize the threshold to the range [0, 1].
// level = (idx - 1) / (num_bins - 1);
level = idx / (num_bins - 1);
}
}
return level;
}
}
double fftData[512*512];
That's (probably) 2MB of data, which is (probably) too big to fit on the stack. The simplest fix is to use a dynamic array instead:
std::vector<double> fftData(512*512);
Alternatively, if dynamic allocation is too expensive, you could use a static or global array. This is usually a bad idea, since it makes the function non-reentrant and awkward to use in a multi-threaded program; however, you already have so many globals that one more probably won't hurt.
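Concretely, in saveSig that change would look something like this (the rest of the function stays as it is):
void saveSig(cv::Mat *frame){
std::vector<double> fftData(512 * 512); // heap-allocated buffer instead of a ~2 MB stack array
cv::Mat sigROI, sigHSV, resized;
sigROI = (*frame)(g_c_sigROI);
cv::cvtColor(sigROI, sigHSV, CV_BGR2HSV);
resized = my_cv::resize_zeros(sigHSV, cv::Size(512, 512));
cv::MatIterator_<cv::Vec3b> m_it = resized.begin<cv::Vec3b>();
for(int i = 0; m_it != resized.end<cv::Vec3b>(); m_it++, i++){
fftData[i] = (*m_it)[2];
}
// createFFTR takes a const T*, so pass the vector's backing array
my_cv::FFTR fft = my_cv::createFFTR<double>(fftData.data(), 512, 512, FFT_TYPE_2D);
// ... rest of the function unchanged ...
}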