Anisotropic Diffusion - C++

I have converted this Matlab Anisotropic Diffusion code to C++ but I am not getting the desired output. All I am getting is a black image. Can someone please check my code and give any suggestions? Below is my code:
const double lambda = 1 / 7;
const double k = 30;
const int iter = 1;
int ahN[3][3] = { {0, 1, 0}, {0, -1, 0}, {0, 0, 0} };
int ahS[3][3] = { {0, 0, 0}, {0, -1, 0}, {0, 1, 0} };
int ahE[3][3] = { {0, 0, 0}, {0, -1, 1}, {0, 0, 0} };
int ahW[3][3] = { {0, 0, 0}, {1, -1, 0}, {0, 0, 0} };
int ahNE[3][3] = { {0, 0, 1}, {0, -1, 0}, {0, 0, 0} };
int ahSE[3][3] = { {0, 0, 0}, {0, -1, 0}, {0, 0, 1} };
int ahSW[3][3] = { {0, 0, 0}, {0, -1, 0}, {1, 0, 0} };
int ahNW[3][3] = { {1, 0, 0}, {0, -1, 0}, {0, 0, 0} };
Mat hN = Mat(3, 3, CV_32FC1, &ahN);
Mat hS = Mat(3, 3, CV_32FC1, &ahS);
Mat hE = Mat(3, 3, CV_32FC1, &ahE);
Mat hW = Mat(3, 3, CV_32FC1, &ahW);
Mat hNE = Mat(3, 3, CV_32FC1, &ahNE);
Mat hSE = Mat(3, 3, CV_32FC1, &ahSE);
Mat hSW = Mat(3, 3, CV_32FC1, &ahSW);
Mat hNW = Mat(3, 3, CV_32FC1, &ahNW);
void anisotropicDiffusion(Mat &output, int width, int height) {
//mat initialisation
Mat nablaN, nablaS, nablaW, nablaE, nablaNE, nablaSE, nablaSW, nablaNW;
Mat cN, cS, cW, cE, cNE, cSE, cSW, cNW;
//depth of filters
int ddepth = -1;
//center pixel distance
double dx = 1, dy = 1, dd = sqrt(2);
double idxSqr = 1.0 / (dx * dx), idySqr = 1.0 / (dy * dy), iddSqr = 1 / (dd * dd);
for (int i = 0; i < iter; i++) {
//filters
filter2D(output, nablaN, ddepth, hN);
filter2D(output, nablaS, ddepth, hS);
filter2D(output, nablaW, ddepth, hW);
filter2D(output, nablaE, ddepth, hE);
filter2D(output, nablaNE, ddepth, hNE);
filter2D(output, nablaSE, ddepth, hSE);
filter2D(output, nablaSW, ddepth, hSW);
filter2D(output, nablaNW, ddepth, hNW);
//exponential flux
cN = nablaN / k;
cN.mul(cN);
cN = 1.0 / (1.0 + cN);
//exp(-cN, cN);
cS = nablaS / k;
cS.mul(cS);
cS = 1.0 / (1.0 + cS);
//exp(-cS, cS);
cW = nablaW / k;
cW.mul(cW);
cW = 1.0 / (1.0 + cW);
//exp(-cW, cW);
cE = nablaE / k;
cE.mul(cE);
cE = 1.0 / (1.0 + cE);
//exp(-cE, cE);
cNE = nablaNE / k;
cNE.mul(cNE);
cNE = 1.0 / (1.0 + cNE);
//exp(-cNE, cNE);
cSE = nablaSE / k;
cSE.mul(cSE);
cSE = 1.0 / (1.0 + cSE);
//exp(-cSE, cSE);
cSW = nablaSW / k;
cSW.mul(cSW);
cSW = 1.0 / (1.0 + cSW);
//exp(-cSW, cSW);
cNW = nablaNW / k;
cNW.mul(cNW);
cNW = 1.0 / (1.0 + cNW);
//exp(-cNW, cNW);
output = output + lambda * (idySqr * cN.mul(nablaN) + idySqr * cS.mul(nablaS) +
idxSqr * cW.mul(nablaW) + idxSqr * cE.mul(nablaE) +
iddSqr * cNE.mul(nablaNE) + iddSqr * cSE.mul(nablaSE) +
iddSqr * cNW.mul(nablaNW) + iddSqr * cSW.mul(nablaSW));
}
}

Resolved in C#. It is easy to translate to C++.
You need these variables:
IMAGE[height, width] = integer array holding the image
height = image height in pixels
width = image width in pixels
/// <summary>Perona & Malik anisotropic diffusion filter (squared formula).</summary>
/// <param name="data">Image data</param>
/// <param name="dt">Heat diffusion value. Higher = faster convergence.</param>
/// <param name="lambda">The shape of the diffusion coefficient g(), controlling the Perona-Malik diffusion: g(delta) = 1 / (1 + delta^2 / lambda^2). Higher = more blurring and more noise removed.</param>
/// <param name="interations">Maximum number of iteration steps of the filter. Higher = slower but more noise removed.</param>
private void PeronaMalik(int[,] image, double dt, int lambda, int interations)
{
try
{
//test parameters
if (dt < 0)
throw new Exception("DT negative value not allowed");
if (lambda < 0)
throw new Exception("lambda must be upper of 0");
if (interations <= 0)
throw new Exception("Iterations must be upper of 0");
//Make temp image
int[,] temp = new int[height, width];
Array.Copy(image, temp, image.Length);
//Precalculate tables (for speed up)
double[] precal = new double[512];
double lambda2 = lambda * lambda;
for (int f = 0; f < 512; f++)
{
int diff = f - 255;
precal[f] = -dt * diff * lambda2 / (lambda2 + diff * diff);
}
//Apply the filter
for (int n = 0; n < interations; n++)
{
for (int h = 0; h < height; h++)
for (int w = 0; w < width; w++)
{
int current = temp[h, w];
int px = w - 1;
int nx = w + 1;
int py = h - 1;
int ny = h + 1;
if (px < 0)
px = 0;
if (nx >= width)
nx = width - 1;
if (py < 0)
py = 0;
if (ny >= height)
ny = height - 1;
image[h, w] = (int)(precal[255 + current - temp[h, px]] +
precal[255 + current - temp[h, nx]] +
precal[255 + current - temp[py, w]] +
precal[255 + current - temp[ny, w]]) +
temp[h, w];
}
}
}
catch (Exception ex) { throw new Exception(ex.Message + "\r\nIn PeronaMalik"); }
}
The solution above implements equation 2. If you want equation 1 (the exponential form), change the expression in the precal table to:
precal[f] = -dt * diff * Math.Exp(-(diff * diff / lambda2));
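Since the answer says this is easy to translate to C++, here is a minimal sketch of that translation (untested; the function name, the std::vector storage and the row-major indexing are my own assumptions, not part of the original answer):
#include <algorithm>
#include <vector>
// Sketch of the C# PeronaMalik answer in C++. The image is assumed to be an
// 8-bit grayscale picture stored row-major in a std::vector<int> of size width*height.
void peronaMalik(std::vector<int>& image, int width, int height,
                 double dt, int lambda, int iterations)
{
    std::vector<double> precal(512);
    const double lambda2 = static_cast<double>(lambda) * lambda;
    // Precompute the flux for every possible intensity difference (-255..255).
    for (int f = 0; f < 512; ++f) {
        const int diff = f - 255;
        precal[f] = -dt * diff * lambda2 / (lambda2 + diff * diff);
    }
    for (int n = 0; n < iterations; ++n) {
        // Snapshot of the previous iteration (refreshed every pass, which is a
        // slight deviation from the C# snippet above, where it is copied only once).
        const std::vector<int> temp(image);
        for (int h = 0; h < height; ++h) {
            for (int w = 0; w < width; ++w) {
                const int px = std::max(w - 1, 0);           // clamp neighbours at the borders
                const int nx = std::min(w + 1, width - 1);
                const int py = std::max(h - 1, 0);
                const int ny = std::min(h + 1, height - 1);
                const int current = temp[h * width + w];
                image[h * width + w] = static_cast<int>(
                    precal[255 + current - temp[h * width + px]] +
                    precal[255 + current - temp[h * width + nx]] +
                    precal[255 + current - temp[py * width + w]] +
                    precal[255 + current - temp[ny * width + w]]) + current;
            }
        }
    }
}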

Looks like you need to assign the multiplication result:
Mat C = A.mul(B);
And
int ahN[3][3] ....
should be
float ahN[3][3] ....
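Putting both fixes together, one direction of the original code would look like this (a sketch only; the same pattern applies to the other seven kernels). Note also that const double lambda = 1 / 7; is integer division and evaluates to 0, so it needs to be written as 1.0 / 7.0:
const double lambda = 1.0 / 7.0;                              // avoid integer division
float ahN[3][3] = { {0, 1, 0}, {0, -1, 0}, {0, 0, 0} };       // float array to match CV_32FC1
Mat hN = Mat(3, 3, CV_32FC1, &ahN);
// ... inside the iteration loop ...
filter2D(output, nablaN, ddepth, hN);
cN = nablaN / k;
cN = cN.mul(cN);                                              // mul() returns the product, so assign it
cN = 1.0 / (1.0 + cN);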

Related

Rotating a raytracer camera

I'm writing a small raytracer and I want to rotate my camera around some object. I can achieve this with the following code,
where vecTemp is the camera position vector, vecT is the initial position (I run this snippet in a for loop), and yaw is the angle, in radians, that the camera has moved:
double yaw = degree * (PI / 180.f);
vecTemp.x() = cos(yaw) * vecT.x() - sin(yaw) * vecT.z();
vecTemp.z() = cos(yaw) * vecT.z() + sin(yaw) * vecT.x();
vecTemp.y() = 0;
sett.camera.buildCameraToWorld(vecTemp, { 0, 0, 0} );
sett.camera.rotateY(2*degree);
void buildCameraToWorld(const arma::dvec3& from, const arma::dvec3& to, const arma::dvec3& tmp = arma::dvec3{ 0, 1, 0 })
{
arma::dvec3 forward = arma::normalise(from - to);
arma::dvec3 right = arma::cross(arma::normalise(tmp), forward);
arma::dvec3 up = arma::cross(forward, right);
this->cameraToWorld.zeros();
this->cameraToWorld(0, 0) = right.x();
this->cameraToWorld(0, 1) = right.y();
this->cameraToWorld(0, 2) = right.z();
this->cameraToWorld(1, 0) = up.x();
this->cameraToWorld(1, 1) = up.y();
this->cameraToWorld(1, 2) = up.z();
this->cameraToWorld(2, 0) = forward.x();
this->cameraToWorld(2, 1) = forward.y();
this->cameraToWorld(2, 2) = forward.z();
this->cameraToWorld(3, 0) = from.x();
this->cameraToWorld(3, 1) = from.y();
this->cameraToWorld(3, 2) = from.z();
this->cameraToWorld = this->cameraToWorld.t();
}
void rotateY(double yaw)
{
yaw = yaw * (PI / 180);
this->cameraToWorld = this->cameraToWorld * arma::dmat44{ { cos(yaw), 0, sin(yaw), 0},
{ 0, 1, 0, 0},
{-sin(yaw), 0, cos(yaw), 0},
{ 0, 0, 0, 1} };
}
So now I don't understand why I have to rotate around the Y axis after the translation. After buildCameraToWorld I should already get a camera matrix looking at the origin, right? Why do I need to rotate by twice the degree value?
I found another way to do it: I can rotate the whole world and keep the camera fixed, like the camera in OpenGL. But is that a good idea for a ray tracer?
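For reference, a minimal sketch of the orbit loop as I read it (the Armadillo types and the sett.camera object are taken from the question; the start position, step size and index-based element access are illustrative assumptions). The idea shown is that buildCameraToWorld already encodes the full orientation toward the target, so only the position is recomputed each frame:
#include <armadillo>
#include <cmath>
const double PI = 3.14159265358979323846;
arma::dvec3 vecT = { 0.0, 0.0, 10.0 };                            // initial camera position (illustrative)
for (double degree = 0.0; degree < 360.0; degree += 2.0) {
    const double yaw = degree * (PI / 180.0);
    arma::dvec3 vecTemp;
    vecTemp(0) = std::cos(yaw) * vecT(0) - std::sin(yaw) * vecT(2);   // rotate the position about Y
    vecTemp(2) = std::cos(yaw) * vecT(2) + std::sin(yaw) * vecT(0);
    vecTemp(1) = 0.0;
    // Rebuild the whole camera-to-world matrix from the new position;
    // the look-at construction already contains the yaw.
    sett.camera.buildCameraToWorld(vecTemp, { 0.0, 0.0, 0.0 });
}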

Snake active contour algorithm with C++ and OpenCV 3

I am trying to implement the snake algorithm for active contours using C++ and OpenCV 3. I am working with the version that uses gradient descent. As a base test I am trying to draw the contour of a lip. This is the base image.
This is the evolution of the contour without external forces (alpha = 0.001, beta = 3, step-size=0.3).
When I add the external force, this is the result.
As the external force I have used just edge detection with the Sobel derivative.
This is the code I use for points update.
array<Mat, 2> edges = edgeMatrices(croppedImage);
const float ALPHA = 0.001, BETA = 3, GAMMA = 0.3, // Gamma is step size.
a = GAMMA * ALPHA, b = GAMMA * BETA;
const uint16_t CYCLES = 1000;
const float p = b, q = -a - 4 * b, r = 1 + 2 * a + 6 * b;
Mat pMatrix = pentadiagonalMatrix(POINTS_NUM, p, q, r).inv();
for (uint16_t i = 0; i < CYCLES; ++i) {
// Extract the x and y derivatives for current points.
auto externalForces = external(edges, x, y);
x = pMatrix * (x + GAMMA * externalForces[0]);
y = pMatrix * (y + GAMMA * externalForces[1]);
// Draw the points.
if (i % 200 == 0 && i > 0)
drawPoints(croppedImage, x, y, { 0.2f * i, 0.2f * i, 0 });
}
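The pentadiagonalMatrix helper is not shown in the question, so the layout below is an assumption on my part: for a closed contour the internal-energy matrix usually has r on the diagonal, q at offsets of one and p at offsets of two, with cyclic wrap-around. A sketch:
// Hypothetical sketch (not the asker's code) of the internal-energy matrix for a closed snake.
Mat pentadiagonalMatrix(int n, float p, float q, float r) {
    Mat A = Mat::zeros(n, n, CV_32FC1);
    for (int i = 0; i < n; ++i) {
        A.at<float>(i, i) = r;                      // diagonal: 1 + 2*a + 6*b
        A.at<float>(i, (i + 1) % n) = q;            // first neighbours: -a - 4*b
        A.at<float>(i, (i + n - 1) % n) = q;
        A.at<float>(i, (i + 2) % n) = p;            // second neighbours: b
        A.at<float>(i, (i + n - 2) % n) = p;
    }
    return A;
}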
This is the code for computing the derivatives.
array<Mat, 2> edgeMatrices(Mat &img) {
// Convert image.
Mat gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
// Apply scharr filter.
Mat grad_x, grad_y, blurred_x, blurred_y;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
int kernSize = 3;
Sobel(gray, grad_x, ddepth, 1, 0, kernSize, scale, delta, BORDER_DEFAULT);
Sobel(gray, grad_y, ddepth, 0, 1, kernSize, scale, delta, BORDER_DEFAULT);
GaussianBlur(grad_x, blurred_x, Size(5, 5), 30);
GaussianBlur(grad_y, blurred_y, Size(5, 5), 30);
return { blurred_x, blurred_y };
}
array<Mat, 2> external(array<Mat, 2> &edgeMat, Mat &x, Mat &y) {
array<Mat, 2> ext;
ext[0] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
ext[1] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
for (size_t i = 0; i < POINTS_NUM; ++i) {
ext[0].at<float>(0, i) = - edgeMat[0].at<short>(y.at<float>(0, i), x.at<float>(0, i));
ext[1].at<float>(0, i) = - edgeMat[1].at<short>(y.at<float>(0, i), x.at<float>(0, i));
}
return ext;
}
As you can see, the contour points converge in a very strange way and not towards the edge of the lip (which is the result I would expect).
I am not able to tell whether it is an implementation error, a matter of tuning the parameters, or simply normal behaviour and I have misunderstood something about the algorithm.
I have some doubts about the derivative matrices: I think they should be regularized in some way, but I am not sure which way is right. Can someone help me?
The only implementations I have found are of the greedy method.
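On the regularization doubt: one common choice, stated here only as an assumption and not as the asker's method, is to scale both gradient components by the maximum gradient magnitude so the external force stays in a bounded range before it is multiplied by GAMMA. A sketch (the function name is mine; the result would then be read with at<float>() instead of at<short>()):
// Sketch: convert the Sobel outputs to float and normalise them by the
// largest gradient magnitude so the external force lies roughly in [-1, 1].
array<Mat, 2> normalisedEdges(const Mat &grad_x, const Mat &grad_y) {
    Mat fx, fy;
    grad_x.convertTo(fx, CV_32FC1);
    grad_y.convertTo(fy, CV_32FC1);
    Mat mag;
    cv::magnitude(fx, fy, mag);
    double maxMag = 0.0;
    cv::minMaxLoc(mag, nullptr, &maxMag);
    if (maxMag > 0.0) {
        fx /= static_cast<float>(maxMag);
        fy /= static_cast<float>(maxMag);
    }
    return { fx, fy };
}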

Initialize a vector of Rect with some Rect objects - OpenCV C++

I'm working on a face tracking project with a Kalman filter. Basically, I want to store the result of my tracking application (int x, int y, int width, int height) in a vector of rectangles, i.e. each face will be stored in a Rect, and all the Rects will then be stored in a vector of Rect.
The following code is what I tried to do:
Rect faceTracked(Estimated_int.at<int>(0, 0), Estimated_int.at<int>(
1, 0), Estimated_int.at<int>(2, 0), Estimated_int.at<int>(3, 0));
std::vector <Rect> facesVector;
facesVector[i] = faceTracked;
Where "Estimated_int" is the result matrix (4,1) of KF. When I run this code the following error is displayed on Android Studio Logcat, then the app crashes:
11-21 17:36:43.729 10735-11321/com.example.android.ndkopencvtest1 A/libc: Fatal signal 11 (SIGSEGV), code 1, fault addr 0x0 in tid 11321 (Thread-5)
That error only happens when the statement facesVector[i] = faceTracked is called. What am I doing wrong? The entire function code is shown below:
void trackFace (Mat& frame, std::vector<Rect> faces) {
for (size_t i = 0; i < faces.size(); i++) {
X = A * X_p;
transpose(A, A_transpose);
P = A * P_p * A_transpose;
if (faces.size() > 0) {
Mat Z = (Mat_<float>(4, 1) << faces[i].x, faces[i].y, faces[i].x + faces[i].width,
faces[i].y + faces[i].height);
Y = Z - H * X;
transpose(H, H_transpose);
S = H * P * H_transpose + R;
invert(S, S_inverse);
K = P * H_transpose * S_inverse;
X_p = X + K * Y;
Estimated = H * X_p;
P_p = (Ident - K * H) * P;
Mat Estimated_int = (Mat_<int>(4, 1) << cvRound(Estimated.at<float>(0, 0)), cvRound(
Estimated.at<float>(1, 0)), cvRound(Estimated.at<float>(2, 0)), cvRound(
Estimated.at<float>(3, 0)));
rectangle(frame, Point((Estimated_int.at<int>(0, 0)), (Estimated_int.at<int>(1, 0))),
Point((Estimated_int.at<int>(2, 0)), (Estimated_int.at<int>(3, 0))),
Scalar(255, 255, 102, 255), 2, 8, 0);
Rect faceTracked(Estimated_int.at<int>(0, 0), Estimated_int.at<int>(
1, 0), Estimated_int.at<int>(2, 0), Estimated_int.at<int>(3, 0));
std::vector <Rect> facesVector;
facesVector[i] = faceTracked;
}
}
}
Edit: all matrices were properly initialized in a header file; this was tested before and is working.
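For comparison, here is a sketch of how the rectangles could be collected without indexing into an empty vector (indexing element i of a vector of size zero is undefined behaviour); the container is declared before the loop and grown with push_back, and everything else mirrors the question's code:
std::vector<Rect> facesVector;                       // declared once, outside the loop
for (size_t i = 0; i < faces.size(); i++) {
    // ... Kalman filter update exactly as in the question ...
    Rect faceTracked(Estimated_int.at<int>(0, 0), Estimated_int.at<int>(1, 0),
                     Estimated_int.at<int>(2, 0), Estimated_int.at<int>(3, 0));
    facesVector.push_back(faceTracked);              // append instead of facesVector[i] = ...
}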

Image transformation by homography matrix in C++ using OpenCV (warpPerspective)

I'm trying a perspective transformation of an image using a homography matrix.
Given a translation and a rotation, I built a homography matrix and applied it to the perspective transformation as follows:
Mat srcImg = imread("tests/image3.jp2", IMREAD_COLOR);
Mat dstImg, H;
Get_Homography(H, srcImg.size());
warpPerspective(srcImg, dstImg, H, dstImg.size());
imshow("output", dstImg);
and
#define FL_x 1000.0
#define FL_y 1000.0
void Get_Homography(Mat &H_out, cv::Size size)
{
static float H_uc[9], C[9], C_inv[9], H[9], C_inv_H_uc[9];
static float H33[3][3];
static float R[9];
static float T[3];
static float n[3];
static float d;
static float nTd[9];
static float phi, the, psi;
n[0] = n[1] = 0.0;
n[2] = -1.0;
T[0] = -500;
T[1] = -500;
T[2] = 0.0;
d = 100.0;
phi = 0.0*D2R;
the = 0.0*D2R;
psi = 0.0*D2R;
matMult(T, n, 3, 1, 3, nTd);
matMult(nTd, &d, 9, 1, 1, nTd);
getDCM_I2B(phi, the, psi, R);
matAdd(R, nTd, 3, 3, H_uc);
C[0] = FL_x; C[1] = 0.0; C[2] = size.width / 2.0;
C[3] = 0.0; C[4] = FL_y; C[5] = size.height / 2.0;
C[6] = 0.0; C[7] = 0.0; C[8] = 1.0;
matInv33(C, C_inv);
matMult(C_inv, H_uc, 3, 3, 3, C_inv_H_uc);
matMult(C_inv_H_uc, C, 3, 3, 3, H);
H33[0][0] = H[0];
H33[0][1] = H[1];
H33[0][2] = H[2];
H33[1][0] = H[3];
H33[1][1] = H[4];
H33[1][2] = H[5];
H33[2][0] = H[6];
H33[2][1] = H[7];
H33[2][2] = H[8];
H_out = Mat(3, 3, CV_32F, H33);
return;
}
Rotations about the z-axis (any value of "psi") work all right.
But when I put any value (even 1.0 degree) into "the" or "phi", the resulting image is distorted beyond recognition.
And when I put [-500, -500, 0] into T (translation), it produces a shifted image as if it were taken from a different position (shifted to the right), but I think -500, -500 is far too large; with d = 1.0 the resulting image shows only a shift of a few pixels (barely noticeable).
I thought my construction of the homography matrix was right, but the results look wrong.
Could you give me some advice on this?
Thank you.
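For reference (background only, not part of the question): the usual plane-induced homography for a plane with normal n at distance d from the first camera, with intrinsics K, is

H = K * (R - t * n^T / d) * K^(-1)

where the sign in front of the t * n^T / d term depends on the direction conventions chosen for n and t. Note that the translation term is divided by d, and the conjugation uses K on the left and K^(-1) on the right.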

Principal range of object orientation using image moments

I am trying to extract the angle of a shape in my image using moments in OpenCV/C++. I am able to extract the angle, but the issue is that its principal range is 180 degrees, which makes the orientation of the object ambiguous with respect to 180-degree rotations. The code I am currently using to extract the angle is:
findContours(frame, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
vector<vector<Point2i> > hull(contours.size());
int maxArea = 0;
int maxI = -1;
int M20 = 0;
int M02 = 0;
int M11 = 0;
for (int i = 0; i < contours.size(); i++)
{
convexHull(contours[i], hull[i], false);
approxPolyDP(hull[i], contourVertices, arcLength(hull[i], true)*0.1, true);
shapeMoments = moments(hull[i], false);
if(shapeMoments.m00 <= areaThreshold || shapeMoments.m00 >= MAX_AREA)
continue;
if(contourVertices.size() <= 3 || contourVertices.size() >= 7)
continue;
if(shapeMoments.m00 >= maxArea)
{
maxArea = shapeMoments.m00;
maxI = i;
}
}
if(maxI == -1)
return false;
fabricContour = hull[maxI];
approxPolyDP(hull[maxI], contourVertices, arcLength(hull[maxI], true)*0.02,true);
shapeMoments = moments(hull[maxI], false);
centerOfMass = Point2f(shapeMoments.m10/shapeMoments.m00, shapeMoments.m01/shapeMoments.m00);
drawContours(contourFrame, hull, maxI, Scalar(24, 35, 140), CV_FILLED, CV_AA);
drawContours(originalFrame, hull, maxI, Scalar(255, 0, 0), 8, CV_AA);
circle(contourFrame, centerOfMass, 4, Scalar(0, 0, 0), 10, 8, 0);
posX = centerOfMass.x;
posY = centerOfMass.y;
M11 = shapeMoments.mu11/shapeMoments.m00;
M20 = shapeMoments.mu20/shapeMoments.m00;
M02 = shapeMoments.mu02/shapeMoments.m00;
num = double(2)*M11;
den = M20 - M02;
angle = (int(-1*(180/(2*M_PI))*atan2(num, den)) + 45 + 180)%180;
//angle = int(-1*(180/(2*M_PI))*atan2(num, den));
area = shapeMoments.m00;
Is there any way I can remove the ambiguity from this extracted angle? I tried using the third-order moments, but they do not seem to be very reliable.
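For illustration only, here is one common way the 180-degree ambiguity is handled (this is an assumption on my part, not something from the question, and the function is hypothetical): look at the sign of the third central moment of the contour points projected onto the principal axis, and flip the angle by 180 degrees when the shape's mass is skewed toward the negative side of that axis.
// Hypothetical sketch: disambiguate the orientation using the skewness of the
// contour points along the principal axis. angleRad is the angle computed above,
// in radians; pts is the contour (e.g. hull[maxI]); com is the centre of mass.
double disambiguateAngle(const std::vector<cv::Point2i> &pts, cv::Point2f com, double angleRad) {
    const double c = std::cos(angleRad), s = std::sin(angleRad);
    double skew = 0.0;
    for (const auto &p : pts) {
        const double proj = (p.x - com.x) * c + (p.y - com.y) * s;   // signed distance along the axis
        skew += proj * proj * proj;
    }
    return (skew < 0.0) ? angleRad + CV_PI : angleRad;               // extend to the full 0..360 range
}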