Snake active contour algorithm with C++ and OpenCV 3

I am trying to implement the snake algorithm for active contours using C++ and OpenCV 3. I am working with the version that uses gradient descent. As a base test, I am trying to draw the contour of a lip. This is the base image.
This is the evolution of the contour without external forces (alpha = 0.001, beta = 3, step-size=0.3).
When I add the external force, this is the result.
As the external force I have used just edge detection with the Sobel derivative.
This is the code I use for the points update.
array<Mat, 2> edges = edgeMatrices(croppedImage);
const float ALPHA = 0.001, BETA = 3, GAMMA = 0.3, // Gamma is step size.
            a = GAMMA * ALPHA, b = GAMMA * BETA;
const uint16_t CYCLES = 1000;
const float p = b, q = -a - 4 * b, r = 1 + 2 * a + 6 * b;
Mat pMatrix = pentadiagonalMatrix(POINTS_NUM, p, q, r).inv();
for (uint16_t i = 0; i < CYCLES; ++i) {
    // Extract the x and y derivatives for current points.
    auto externalForces = external(edges, x, y);
    x = pMatrix * (x + GAMMA * externalForces[0]);
    y = pMatrix * (y + GAMMA * externalForces[1]);
    // Draw the points.
    if (i % 200 == 0 && i > 0)
        drawPoints(croppedImage, x, y, { 0.2f * i, 0.2f * i, 0 });
}
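The helper pentadiagonalMatrix is not shown here; below is a minimal sketch of how such a helper might look (an assumed reconstruction, not my actual code), for a closed contour with cyclic wrap-around, r on the main diagonal, q on the ±1 bands, and p on the ±2 bands, consistent with how it is called above:
// Assumed helper, sketch only.
Mat pentadiagonalMatrix(int n, float p, float q, float r) {
    Mat A = Mat::zeros(n, n, CV_32FC1);
    for (int i = 0; i < n; ++i) {
        A.at<float>(i, i) = r;                 // main diagonal
        A.at<float>(i, (i + 1) % n) = q;       // +1 band (wraps around for a closed contour)
        A.at<float>(i, (i + n - 1) % n) = q;   // -1 band
        A.at<float>(i, (i + 2) % n) = p;       // +2 band
        A.at<float>(i, (i + n - 2) % n) = p;   // -2 band
    }
    return A;
}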
This is the code for computing the derivatives.
array<Mat, 2> edgeMatrices(Mat &img) {
    // Convert the image to grayscale.
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    // Apply the Sobel filter in x and y, then smooth the gradients.
    Mat grad_x, grad_y, blurred_x, blurred_y;
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    int kernSize = 3;
    Sobel(gray, grad_x, ddepth, 1, 0, kernSize, scale, delta, BORDER_DEFAULT);
    Sobel(gray, grad_y, ddepth, 0, 1, kernSize, scale, delta, BORDER_DEFAULT);
    GaussianBlur(grad_x, blurred_x, Size(5, 5), 30);
    GaussianBlur(grad_y, blurred_y, Size(5, 5), 30);
    return { blurred_x, blurred_y };
}
array<Mat, 2> external(array<Mat, 2> &edgeMat, Mat &x, Mat &y) {
    array<Mat, 2> ext;
    ext[0] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
    ext[1] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
    // Sample the negated gradient at each contour point
    // (the float coordinates are truncated to integer pixel positions).
    for (size_t i = 0; i < POINTS_NUM; ++i) {
        ext[0].at<float>(0, i) = -edgeMat[0].at<short>(y.at<float>(0, i), x.at<float>(0, i));
        ext[1].at<float>(0, i) = -edgeMat[1].at<short>(y.at<float>(0, i), x.at<float>(0, i));
    }
    return ext;
}
As you can see, the contour points converge in a very strange way, not towards the edge of the lip (which is the result I would expect).
I cannot tell whether this is an implementation error, a parameter-tuning problem, or just normal behaviour because I have misunderstood something about the algorithm.
I have some doubts about the derivative matrices: I think they should be regularized in some way, but I am not sure which way is right. Can someone help me?
The only implementations I have found are of the greedy method.
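One common form of regularization (a sketch of general practice, not code from this post) is to normalize the gradient images by their maximum magnitude so that the sampled external force stays roughly in [-1, 1]:
// Sketch only: scale both gradient images by the largest gradient magnitude.
// Note the matrices become CV_32F, so external() would then need to read them
// with at<float> instead of at<short>.
void normalizeGradients(Mat &gradX, Mat &gradY) {
    Mat fx, fy, mag;
    gradX.convertTo(fx, CV_32FC1);
    gradY.convertTo(fy, CV_32FC1);
    magnitude(fx, fy, mag);            // per-pixel gradient magnitude
    double maxMag = 0;
    minMaxLoc(mag, nullptr, &maxMag);  // largest magnitude in the image
    if (maxMag > 0) {
        gradX = fx / maxMag;
        gradY = fy / maxMag;
    }
}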

Related

Slow motion in C++

I want to do slow motion. I've seen an implementation here: https://github.com/vaibhav06891/SlowMotion
I modified the code to generate only one frame.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <string>
using namespace cv;
using namespace std;
#define CLAMP(x,min,max) ( ((x) < (min)) ? (min) : ( ((x) > (max)) ? (max) : (x) ) )
int main(int argc, char** argv)
{
    Mat frame, prevframe;
    prevframe = imread("img1.png");
    frame = imread("img2.png");

    Mat prevgray, gray;
    Mat fflow, bflow;

    Mat flowf(frame.rows, frame.cols, CV_8UC3); // the forward co-ordinates for interpolation
    flowf.setTo(Scalar(255, 255, 255));
    Mat flowb(frame.rows, frame.cols, CV_8UC3); // the backward co-ordinates for interpolation
    flowb.setTo(Scalar(255, 255, 255));
    Mat final(frame.rows, frame.cols, CV_8UC3);

    int fx, fy, bx, by;

    cvtColor(prevframe, prevgray, COLOR_BGR2GRAY); // convert to gray space for optical flow calculation
    cvtColor(frame, gray, COLOR_BGR2GRAY);

    calcOpticalFlowFarneback(prevgray, gray, fflow, 0.5, 3, 15, 3, 3, 1.2, 0); // forward optical flow
    calcOpticalFlowFarneback(gray, prevgray, bflow, 0.5, 3, 15, 3, 3, 1.2, 0); // backward optical flow

    for (int y = 0; y < frame.rows; y++)
    {
        for (int x = 0; x < frame.cols; x++)
        {
            const Point2f fxy = fflow.at<Point2f>(y, x);
            fy = CLAMP(y + fxy.y * 0.5, 0, frame.rows);
            fx = CLAMP(x + fxy.x * 0.5, 0, frame.cols);
            flowf.at<Vec3b>(fy, fx) = prevframe.at<Vec3b>(y, x);

            const Point2f bxy = bflow.at<Point2f>(y, x);
            by = CLAMP(y + bxy.y * (1 - 0.5), 0, frame.rows);
            bx = CLAMP(x + bxy.x * (1 - 0.5), 0, frame.cols);
            flowb.at<Vec3b>(by, bx) = frame.at<Vec3b>(y, x);
        }
    }

    final = flowf * (1 - 0.5) + flowb * 0.5; // combination of forward and backward matrices
    cv::medianBlur(final, final, 3);
    imwrite("output.png", final);

    return 0;
}
But the result is not as expected.
For the images:
The result is:
Does anyone know what the problem is?
The optical flow algorithm won't work for your test images.
The first problem is that your test images have very little variation in neighbouring pixel values. The completely black lines and the single-colour square give the optical flow algorithm no clues about where the image areas moved, because the algorithm cannot process the whole image at once and instead computes flow within a small 15x15-pixel window (as you set it in calcOpticalFlowFarneback).
The second problem is that your test images differ too much: the distance between the positions of the brown square is too big, and again Farneback is not able to detect it.
Try the code with some real-life video frames, or edit your tests to be less monotonous (add some texture to the square, the background, and the rectangle lines) and bring the squares closer together in the images (try a 2-10 px distance). You can also tune the calcOpticalFlowFarneback arguments to suit your conditions.
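For example (illustrative starting values only, not settings prescribed here), a larger window and more pyramid levels tolerate bigger displacements:
// Hypothetical parameter set to experiment with; tune it for your own footage.
calcOpticalFlowFarneback(prevgray, gray, fflow,
                         0.5,   // pyr_scale: image scale (<1) between pyramid layers
                         5,     // levels: more layers help with larger motion
                         31,    // winsize: a larger window gives smoother, more robust flow
                         5,     // iterations per pyramid level
                         7,     // poly_n: neighbourhood size for the polynomial expansion
                         1.5,   // poly_sigma: Gaussian sigma for that expansion
                         0);    // flags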
You can use this code to save the optical flow you get to an image for debugging:
Mat debugImage = Mat::zeros(fflow.size(), CV_8UC3);
float hsvHue, magnitude;

for (int x = 0; x < fflow.cols; x++)
{
    for (int y = 0; y < fflow.rows; y++)
    {
        auto& item = fflow.at<Vec2f>(y, x);
        magnitude = sqrtf(item[0] * item[0] + item[1] * item[1]);
        hsvHue = atan2f(item[1], item[0]) / static_cast<float>(CV_PI) * 180.f;
        // div 2 to fit the 0..255 range
        hsvHue = (hsvHue >= 0. ? hsvHue : (360.f + hsvHue)) / 2.f;

        debugImage.at<Vec3b>(y, x)[0] = static_cast<uchar>(hsvHue);
        debugImage.at<Vec3b>(y, x)[1] = 255;
        debugImage.at<Vec3b>(y, x)[2] = static_cast<uchar>(255.f * magnitude);
    }
}
cvtColor(debugImage, debugImage, COLOR_HSV2BGR);
imwrite("OpticalFlow.png", debugImage);
Here, the pixel flow direction is represented by colour (hue) and the pixel move distance by brightness.
Try using these images I created:
Also note that
for (int y = 0; y < frame.rows; y++)
{
    for (int x = 0; x < frame.cols; x++)
    {
        const Point2f fxy = fflow.at<Point2f>(y, x);
        fy = CLAMP(y + fxy.y * 0.5, 0, frame.rows);
        fx = CLAMP(x + fxy.x * 0.5, 0, frame.cols);
        flowf.at<Vec3b>(fy, fx) = prevframe.at<Vec3b>(y, x);
        ...
this code will leave some flowf pixels uncoloured when no source pixel maps onto them, and the optical flow algorithm can produce such situations. I would change it to:
for (int y = 0; y < frame.rows; y++)
{
    for (int x = 0; x < frame.cols; x++)
    {
        const Point2f fxy = fflow.at<Point2f>(y, x);
        fy = CLAMP(y - fxy.y * 0.5, 0, frame.rows);
        fx = CLAMP(x - fxy.x * 0.5, 0, frame.cols);
        flowf.at<Vec3b>(y, x) = prevframe.at<Vec3b>(fy, fx);

        const Point2f bxy = bflow.at<Point2f>(y, x);
        by = CLAMP(y - bxy.y * (1 - 0.5), 0, frame.rows);
        bx = CLAMP(x - bxy.x * (1 - 0.5), 0, frame.cols);
        flowb.at<Vec3b>(y, x) = frame.at<Vec3b>(by, bx);
    }
}
With this changed code and my tests I get this output:

Hough Circular Transform

I'm trying to implement the Hough transform using the gradient direction. I know there is an implementation in OpenCV, but I want to do it myself.
I'm using Sobel to get the X and Y gradients. Then, for every pixel:
magnitude --> sqrt(sobelX^2 + sobelY^2)
direction --> atan2(sobelY, sobelX) * 180/PI
If the magnitude is higher than 220 (so almost black), the pixel is treated as an edge, and the direction is then used in the circle equation.
But the results are not acceptable. Any help?
I know there are cv::polar and cv::cartToPolar, but I want to optimize the code so that all equations are calculated on the fly, without extra loops.
cv::Mat sobelX, sobelY;
Sobel(mat, sobelX, CV_32F, 1, 0, kernelSize, 1, 0, cv::BORDER_REPLICATE);
Sobel(mat, sobelY, CV_32F, 0, 1, kernelSize, 1, 0, cv::BORDER_REPLICATE);
//cv::Canny(mat, mat, 100, 200, kernelSize, false);

debug::showImage("sobelX", sobelX);
debug::showImage("SobelY", sobelY);
debug::showImage("MAT", mat);

cv::Mat magnitudeMap, angleMap;
magnitudeMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
angleMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());

std::vector<cv::Mat> hough_spaces(max);
for (int i = 0; i < max; ++i)
{
    hough_spaces[i] = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
}

for (int x = 0; x < mat.rows; ++x)
{
    for (int y = 0; y < mat.cols; ++y)
    {
        const float magnitude = sqrt(sobelX.at<uchar>(x, y) * sobelX.at<uchar>(x, y) + sobelY.at<uchar>(x, y) * sobelY.at<uchar>(x, y));
        const float theta = atan2(sobelY.at<uchar>(x, y), sobelX.at<uchar>(x, y)) * 180 / CV_PI;
        magnitudeMap.at<uchar>(x, y) = magnitude;
        if (magnitude > 225) //mat.at<const uchar>(x,y) == 255)
        {
            for (int radius = min; radius < max; ++radius)
            {
                const int a = x - radius * cos(theta); //lookup::cosArray[static_cast<int>(theta)]; //+ 0.5f;
                const int b = y - radius * sin(theta); //lookup::sinArray[static_cast<int>(theta)]; //+ 0.5f;
                if (a >= 0 && a < hough_spaces[radius].rows && b >= 0 && b < hough_spaces[radius].cols) {
                    hough_spaces[radius].at<uchar>(a, b) += 10;
                }
            }
        }
    }
}

debug::showImage("magnitude", magnitudeMap);

for (int radius = min; radius < max; ++radius)
{
    double min_f, max_f;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(hough_spaces[radius], &min_f, &max_f, &min_loc, &max_loc);
    if (max_f >= treshold)
    {
        circles.emplace_back(cv::Point3f(max_loc.x, max_loc.y, radius));
        // debug::showImage(std::to_string(radius).c_str(), hough_spaces[radius]);
    }
}
circles.shrink_to_fit();
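One thing worth noting (an observation about the snippet, not part of it): sobelX and sobelY are created as CV_32F but read back with .at<uchar>(), and cos()/sin() expect radians while theta was converted to degrees. A sketch of the per-pixel computation with float access might look like this:
// Sketch only: read the CV_32F Sobel outputs as floats and keep theta in radians.
// (The 225 threshold would then need rescaling for float-valued gradients.)
const float gx = sobelX.at<float>(x, y);   // x = row, y = column, as in the loops above
const float gy = sobelY.at<float>(x, y);
const float magnitude = std::sqrt(gx * gx + gy * gy);
const float theta = std::atan2(gy, gx);    // radians, usable directly in cos()/sin()
const int a = x - static_cast<int>(radius * std::cos(theta));
const int b = y - static_cast<int>(radius * std::sin(theta));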

Camera pose estimation from essential matrix

I am trying to estimate the camera motion from a pair of images. I found the essential matrix E and decomposed it into its rotation and translation components.
Here is the C++ code:
cv::SVD svd(E);
cv::Matx33d W{0, -1, 0, 1, 0, 0, 0, 0, 1};
cv::Mat_<double> R = svd.u * cv::Mat(W) * svd.vt;
cv::Mat_<double> t = svd.u.col(2);

if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
    t = -svd.u.col(2);
    if (!posEstimator.infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
        R = svd.u * cv::Mat(W.t()) * svd.vt;
        t = svd.u.col(2);
        if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
            t = -svd.u.col(2);
            if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
                std::cout << "Incorrect SVD decomposition" << std::endl;
            }
        }
    }
}
The function infrontOfBothCameras checks whether the points are in front of both cameras.
bool infrontOfBothCameras(std::vector<cv::Point2f>& points1, std::vector<cv::Point2f>& points2,
                          cv::Mat_<double>& R, cv::Mat_<double>& t) {
    cv::Mat r1 = R.row(0);
    cv::Mat r2 = R.row(1);
    cv::Mat r3 = R.row(2);
    for (size_t i = 0; i < points1.size(); ++i) {
        cv::Matx13d uv{ points2[i].x, points2[i].y, 1 };
        double z = (r1 - points2[i].x * r3).dot(t.t()) / ((r1 - points2[i].x * r3).dot(cv::Mat_<double>(uv)));
        cv::Matx31d point3d_first{ points1[i].x * z, points1[i].y * z, z };
        cv::Mat_<double> point3d_second = R.t() * (cv::Mat_<double>(point3d_first) - t);
        if (point3d_first(2) < 0 || point3d_second(2) < 0) {
            return false;
        }
    }
    return true;
}
After that, I want to estimate the new pose of the camera. How can I use t and R for this?
For example, if the old camera pose is old_pose = (0, 0, 0), I try to calculate the new pose as:
new_pose = old_pose + R * t
Is that correct?
I believe it should be:
new_pose = R*(old_pose-t);
The rest looks ok, but I haven't checked every little detail.
If you want a reference to compare to, you can look at:
https://github.com/MasteringOpenCV/code/blob/master/Chapter4_StructureFromMotion/FindCameraMatrices.cpp
Specifically, the functions DecomposeEtoRandT and FindCameraMatrices.
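As an illustrative sketch only (assuming R and t are the cv::Mat_<double> values computed above):
// Accumulate the camera position with the relation new_pose = R * (old_pose - t).
cv::Mat_<double> old_pose = (cv::Mat_<double>(3, 1) << 0, 0, 0);
cv::Mat_<double> new_pose = R * (old_pose - t);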

Is there an easy way/algorithm to match 2 clouds of 2D points?

I am wondering if there is an easy way to match (register) 2 clouds of 2d points.
Let's say I have an object represented by points, and a cluttered second image containing the object points plus noise (noise in the sense of extra points that are useless).
Basically, the object can be rotated in 2D as well as translated and scaled.
I know about the ICP algorithm, but I think it is not a good approach due to the high noise.
I hope you understand what I mean. Please ask if anything is unclear (I'm sure something is).
Cheers
Here is a function that finds the translation and rotation. Generalization to scaling, weighted points, and RANSAC is straightforward. I used the OpenCV library for visualization and SVD. The function below combines data generation, a unit test, and the actual solution.
// rotation and translation in 2D from point correspondences
void rigidTransform2D(const int N) {
// Algorithm: http://igl.ethz.ch/projects/ARAP/svd_rot.pdf
const bool debug = false; // print more debug info
const bool add_noise = true; // add noise to input and output
srand(time(NULL)); // randomize each time
/*********************************
* Create data with some noise
**********************************/
// Simulated transformation
Point2f T(1.0f, -2.0f);
float a = 30.0; // [-180, 180], see atan2(y, x)
float noise_level = 0.1f;
cout<<"True parameters: rot = "<<a<<"deg., T = "<<T<<
"; noise level = "<<noise_level<<endl;
// noise
vector<Point2f> noise_src(N), noise_dst(N);
for (int i=0; i<N; i++) {
noise_src[i] = Point2f(randf(noise_level), randf(noise_level));
noise_dst[i] = Point2f(randf(noise_level), randf(noise_level));
}
// create data with noise
vector<Point2f> src(N), dst(N);
float Rdata = 10.0f; // radius of data
float cosa = cos(a*DEG2RAD);
float sina = sin(a*DEG2RAD);
for (int i=0; i<N; i++) {
// src
float x1 = randf(Rdata);
float y1 = randf(Rdata);
src[i] = Point2f(x1,y1);
if (add_noise)
src[i] += noise_src[i];
// dst
float x2 = x1*cosa - y1*sina;
float y2 = x1*sina + y1*cosa;
dst[i] = Point2f(x2,y2) + T;
if (add_noise)
dst[i] += noise_dst[i];
if (debug)
cout<<i<<": "<<src[i]<<"---"<<dst[i]<<endl;
}
// Calculate data centroids
Scalar centroid_src = mean(src);
Scalar centroid_dst = mean(dst);
Point2f center_src(centroid_src[0], centroid_src[1]);
Point2f center_dst(centroid_dst[0], centroid_dst[1]);
if (debug)
cout<<"Centers: "<<center_src<<", "<<center_dst<<endl;
/*********************************
* Visualize data
**********************************/
// Visualization
namedWindow("data", 1);
float w = 400, h = 400;
Mat Mdata(w, h, CV_8UC3); Mdata = Scalar(0);
Point2f center_img(w/2, h/2);
float scl = 0.4*min(w/Rdata, h/Rdata); // compensate for noise
scl/=sqrt(2); // compensate for rotation effect
Point2f dT = (center_src+center_dst)*0.5; // compensate for translation
for (int i=0; i<N; i++) {
Point2f p1(scl*(src[i] - dT));
Point2f p2(scl*(dst[i] - dT));
// invert Y axis
p1.y = -p1.y; p2.y = -p2.y;
// add image center
p1+=center_img; p2+=center_img;
circle(Mdata, p1, 1, Scalar(0, 255, 0));
circle(Mdata, p2, 1, Scalar(0, 0, 255));
line(Mdata, p1, p2, Scalar(100, 100, 100));
}
/*********************************
* Get 2D rotation and translation
**********************************/
markTime();
// subtract centroids from data
for (int i=0; i<N; i++) {
src[i] -= center_src;
dst[i] -= center_dst;
}
// compute a covariance matrix
float Cxx = 0.0, Cxy = 0.0, Cyx = 0.0, Cyy = 0.0;
for (int i=0; i<N; i++) {
Cxx += src[i].x*dst[i].x;
Cxy += src[i].x*dst[i].y;
Cyx += src[i].y*dst[i].x;
Cyy += src[i].y*dst[i].y;
}
Mat Mcov = (Mat_<float>(2, 2)<<Cxx, Cxy, Cyx, Cyy);
if (debug)
cout<<"Covariance Matrix "<<Mcov<<endl;
// SVD
cv::SVD svd;
svd = SVD(Mcov, SVD::FULL_UV);
if (debug) {
cout<<"U = "<<svd.u<<endl;
cout<<"W = "<<svd.w<<endl;
cout<<"V transposed = "<<svd.vt<<endl;
}
// rotation = V*Ut
Mat V = svd.vt.t();
Mat Ut = svd.u.t();
float det_VUt = determinant(V*Ut);
Mat W = (Mat_<float>(2, 2)<<1.0, 0.0, 0.0, det_VUt);
float rot[4];
Mat R_est(2, 2, CV_32F, rot);
R_est = V*W*Ut;
if (debug)
cout<<"Rotation matrix: "<<R_est<<endl;
float cos_est = rot[0];
float sin_est = rot[2];
float ang = atan2(sin_est, cos_est);
// translation = mean_dst - R*mean_src
Point2f center_srcRot = Point2f(
cos_est*center_src.x - sin_est*center_src.y,
sin_est*center_src.x + cos_est*center_src.y);
Point2f T_est = center_dst - center_srcRot;
// RMSE
double RMSE = 0.0;
for (int i=0; i<N; i++) {
Point2f dst_est(
cos_est*src[i].x - sin_est*src[i].y,
sin_est*src[i].x + cos_est*src[i].y);
RMSE += SQR(dst[i].x - dst_est.x) + SQR(dst[i].y - dst_est.y);
}
if (N>0)
RMSE = sqrt(RMSE/N);
// Final estimate msg
cout<<"Estimate = "<<ang*RAD2DEG<<"deg., T = "<<T_est<<"; RMSE = "<<RMSE<<endl;
// show image
printTime(1);
imshow("data", Mdata);
waitKey(-1);
return;
} // rigidTransform2D()
// --------------------------- 3DOF
// calculates squared error from two point mapping; assumes rotation around Origin.
inline float sqErr_3Dof(Point2f p1, Point2f p2,
float cos_alpha, float sin_alpha, Point2f T) {
float x2_est = T.x + cos_alpha * p1.x - sin_alpha * p1.y;
float y2_est = T.y + sin_alpha * p1.x + cos_alpha * p1.y;
Point2f p2_est(x2_est, y2_est);
Point2f dp = p2_est-p2;
float sq_er = dp.dot(dp); // squared distance
//cout<<dp<<endl;
return sq_er;
}
// calculate RMSE for point-to-point metrics
float RMSE_3Dof(const vector<Point2f>& src, const vector<Point2f>& dst,
const float* param, const bool* inliers, const Point2f center) {
const bool all_inliers = (inliers==NULL); // handy when we run QUADRATIC with all inliers
unsigned int n = src.size();
assert(n>0 && n==dst.size());
float ang_rad = param[0];
Point2f T(param[1], param[2]);
float cos_alpha = cos(ang_rad);
float sin_alpha = sin(ang_rad);
double RMSE = 0.0;
int ninliers = 0;
for (unsigned int i=0; i<n; i++) {
if (all_inliers || inliers[i]) {
RMSE += sqErr_3Dof(src[i]-center, dst[i]-center, cos_alpha, sin_alpha, T);
ninliers++;
}
}
//cout<<"RMSE = "<<RMSE<<endl;
if (ninliers>0)
return sqrt(RMSE/ninliers);
else
return LARGE_NUMBER;
}
// Sets inliers and returns their count
inline int setInliers3Dof(const vector<Point2f>& src, const vector <Point2f>& dst,
bool* inliers,
const float* param,
const float max_er,
const Point2f center) {
float ang_rad = param[0];
Point2f T(param[1], param[2]);
// set inliers
unsigned int ninliers = 0;
unsigned int n = src.size();
assert(n>0 && n==dst.size());
float cos_ang = cos(ang_rad);
float sin_ang = sin(ang_rad);
float max_sqErr = max_er*max_er; // comparing squared values
if (inliers==NULL) {
// just get the number of inliers (e.g. after QUADRATIC fit only)
for (unsigned int i=0; i<n; i++) {
float sqErr = sqErr_3Dof(src[i]-center, dst[i]-center, cos_ang, sin_ang, T);
if ( sqErr < max_sqErr)
ninliers++;
}
} else {
// get the number of inliers and set them (e.g. for RANSAC)
for (unsigned int i=0; i<n; i++) {
float sqErr = sqErr_3Dof(src[i]-center, dst[i]-center, cos_ang, sin_ang, T);
if ( sqErr < max_sqErr) {
inliers[i] = 1;
ninliers++;
} else {
inliers[i] = 0;
}
}
}
return ninliers;
}
// fits 3DOF (rotation and translation in 2D) with least squares.
float fit3DofQUADRATICold(const vector<Point2f>& src, const vector<Point2f>& dst,
float* param, const bool* inliers, const Point2f center) {
const bool all_inliers = (inliers==NULL); // handy when we run QUADRATIC with all inliers
unsigned int n = src.size();
assert(dst.size() == n);
// count inliers
int ninliers;
if (all_inliers) {
ninliers = n;
} else {
ninliers = 0;
for (unsigned int i=0; i<n; i++){
if (inliers[i])
ninliers++;
}
}
// under-determined system
if (ninliers<2) {
// param[0] = 0.0f; // ?
// param[1] = 0.0f;
// param[2] = 0.0f;
return LARGE_NUMBER;
}
/*
 * x1*cos(a) - y1*sin(a) + Tx = X1
 * x1*sin(a) + y1*cos(a) + Ty = Y1
 *
 * approximation for small angle a (radians): sin(a) = a, cos(a) = 1
 *
 * x1*1 - y1*a + Tx = X1
 * x1*a + y1*1 + Ty = Y1
 *
 * in matrix form M1*h = M2
 *
 *    2n x 4             4 x 1     2n x 1
 *
 *  [ -y1  1  0  x1 ] * [ a  ] = [ X1 ]
 *  [  x1  0  1  y1 ]   [ Tx ]   [ Y1 ]
 *                      [ Ty ]
 *                      [ 1=Z]
 *  -------------------------------------
 *      src1              res      src2
 */
// 4 x 1
float res_ar[4]; // alpha, Tx, Ty, 1
Mat res(4, 1, CV_32F, res_ar); // 4 x 1
// 2n x 4
Mat src1(2*ninliers, 4, CV_32F); // 2n x 4
// 2n x 1
Mat src2(2*ninliers, 1, CV_32F); // 2n x 1: [X1, Y1, X2, Y2, X3, Y3]'
for (unsigned int i=0, row_cnt = 0; i<n; i++) {
// use inliers only
if (all_inliers || inliers[i]) {
float x = src[i].x - center.x;
float y = src[i].y - center.y;
// first row
// src1
float* rowPtr = src1.ptr<float>(row_cnt);
rowPtr[0] = -y;
rowPtr[1] = 1.0f;
rowPtr[2] = 0.0f;
rowPtr[3] = x;
// src2
src2.at<float> (0, row_cnt) = dst[i].x - center.x;
// second row
row_cnt++;
// src1
rowPtr = src1.ptr<float>(row_cnt);
rowPtr[0] = x;
rowPtr[1] = 0.0f;
rowPtr[2] = 1.0f;
rowPtr[3] = y;
// src2
src2.at<float> (0, row_cnt) = dst[i].y - center.y;
}
}
cv::solve(src1, src2, res, DECOMP_SVD);
// estimators
float alpha_est;
Point2f T_est;
// original
alpha_est = res.at<float>(0, 0);
T_est = Point2f(res.at<float>(1, 0), res.at<float>(2, 0));
float Z = res.at<float>(3, 0);
if (abs(Z-1.0) > 0.1) {
//cout<<"Bad Z in fit3DOF(), Z should be close to 1.0 = "<<Z<<endl;
//return LARGE_NUMBER;
}
param[0] = alpha_est; // rad
param[1] = T_est.x;
param[2] = T_est.y;
// calculate RMSE
float RMSE = RMSE_3Dof(src, dst, param, inliers, center);
return RMSE;
} // fit3DofQUADRATICOLd()
// fits 3DOF (rotation and translation in 2D) with least squares.
float fit3DofQUADRATIC(const vector<Point2f>& src_, const vector<Point2f>& dst_,
float* param, const bool* inliers, const Point2f center) {
const bool debug = false; // print more debug info
const bool all_inliers = (inliers==NULL); // handy when we run QUADRATIC with all inliers
assert(dst_.size() == src_.size());
int N = src_.size();
// collect inliers
vector<Point2f> src, dst;
int ninliers;
if (all_inliers) {
ninliers = N;
src = src_; // copy constructor
dst = dst_;
} else {
ninliers = 0;
for (int i=0; i<N; i++){
if (inliers[i]) {
ninliers++;
src.push_back(src_[i]);
dst.push_back(dst_[i]);
}
}
}
if (ninliers<2) {
param[0] = 0.0f; // default return when there is not enough points
param[1] = 0.0f;
param[2] = 0.0f;
return LARGE_NUMBER;
}
/* Algorithm: Least-Square Rigid Motion Using SVD by Olga Sorkine
* http://igl.ethz.ch/projects/ARAP/svd_rot.pdf
*
* Subtract centroids, calculate SVD(cov),
* R = V[1, det(VU')]'U', T = mean_q-R*mean_p
*/
// Calculate data centroids
Scalar centroid_src = mean(src);
Scalar centroid_dst = mean(dst);
Point2f center_src(centroid_src[0], centroid_src[1]);
Point2f center_dst(centroid_dst[0], centroid_dst[1]);
if (debug)
cout<<"Centers: "<<center_src<<", "<<center_dst<<endl;
// subtract centroids from data
for (int i=0; i<ninliers; i++) {
src[i] -= center_src;
dst[i] -= center_dst;
}
// compute a covariance matrix
float Cxx = 0.0, Cxy = 0.0, Cyx = 0.0, Cyy = 0.0;
for (int i=0; i<ninliers; i++) {
Cxx += src[i].x*dst[i].x;
Cxy += src[i].x*dst[i].y;
Cyx += src[i].y*dst[i].x;
Cyy += src[i].y*dst[i].y;
}
Mat Mcov = (Mat_<float>(2, 2)<<Cxx, Cxy, Cyx, Cyy);
Mcov /= (ninliers-1);
if (debug)
cout<<"Covariance-like Matrix "<<Mcov<<endl;
// SVD of covariance
cv::SVD svd;
svd = SVD(Mcov, SVD::FULL_UV);
if (debug) {
cout<<"U = "<<svd.u<<endl;
cout<<"W = "<<svd.w<<endl;
cout<<"V transposed = "<<svd.vt<<endl;
}
// rotation (V*Ut)
Mat V = svd.vt.t();
Mat Ut = svd.u.t();
float det_VUt = determinant(V*Ut);
Mat W = (Mat_<float>(2, 2)<<1.0, 0.0, 0.0, det_VUt);
float rot[4];
Mat R_est(2, 2, CV_32F, rot);
R_est = V*W*Ut;
if (debug)
cout<<"Rotation matrix: "<<R_est<<endl;
float cos_est = rot[0];
float sin_est = rot[2];
float ang = atan2(sin_est, cos_est);
// translation (mean_dst - R*mean_src)
Point2f center_srcRot = Point2f(
cos_est*center_src.x - sin_est*center_src.y,
sin_est*center_src.x + cos_est*center_src.y);
Point2f T_est = center_dst - center_srcRot;
// Final estimate msg
if (debug)
cout<<"Estimate = "<<ang*RAD2DEG<<"deg., T = "<<T_est<<endl;
param[0] = ang; // rad
param[1] = T_est.x;
param[2] = T_est.y;
// calculate RMSE
float RMSE = RMSE_3Dof(src_, dst_, param, inliers, center);
return RMSE;
} // fit3DofQUADRATIC()
// RANSAC fit in 3DOF: 1D rot and 2D translation (maximizes the number of inliers)
// NOTE: no data normalization is currently performed
float fit3DofRANSAC(const vector<Point2f>& src, const vector<Point2f>& dst,
float* best_param, bool* inliers,
const Point2f center ,
const float inlierMaxEr,
const int niter) {
const int ITERATION_TO_SETTLE = 2; // iterations to settle inliers and param
const float INLIERS_RATIO_OK = 0.95f; // stopping criterion
// size of data vector
unsigned int N = src.size();
assert(N==dst.size());
// unrealistic case
if(N<2) {
best_param[0] = 0.0f; // ?
best_param[1] = 0.0f;
best_param[2] = 0.0f;
return LARGE_NUMBER;
}
unsigned int ninliers; // current number of inliers
unsigned int best_ninliers = 0; // number of inliers
float best_rmse = LARGE_NUMBER; // error
float cur_rmse; // current distance error
float param[3]; // rad, Tx, Ty
vector <Point2f> src_2pt(2), dst_2pt(2);// min set of 2 points (1 correspondence generates 2 equations)
srand (time(NULL));
// iterations
for (int iter = 0; iter<niter; iter++) {
#ifdef DEBUG_RANSAC
cout<<"iteration "<<iter<<": ";
#endif
// 1. Select a random set of 2 points (not obligatory inliers but valid)
int i1, i2;
i1 = rand() % N; // [0, N[
i2 = i1;
while (i2==i1) {
i2 = rand() % N;
}
src_2pt[0] = src[i1]; // corresponding points
src_2pt[1] = src[i2];
dst_2pt[0] = dst[i1];
dst_2pt[1] = dst[i2];
bool two_inliers[] = {true, true};
// 2. Quadratic fit for 2 points
cur_rmse = fit3DofQUADRATIC(src_2pt, dst_2pt, param, two_inliers, center);
// 3. Recalculate to settle params and inliers using a larger set
for (int iter2=0; iter2<ITERATION_TO_SETTLE; iter2++) {
ninliers = setInliers3Dof(src, dst, inliers, param, inlierMaxEr, center); // changes inliers
cur_rmse = fit3DofQUADRATIC(src, dst, param, inliers, center); // changes cur_param
}
// potential ill-condition or large error
if (ninliers<2) {
#ifdef DEBUG_RANSAC
cout<<" !!! less than 2 inliers "<<endl;
#endif
continue;
} else {
#ifdef DEBUG_RANSAC
cout<<" "<<ninliers<<" inliers; ";
#endif
}
#ifdef DEBUG_RANSAC
cout<<"; recalculate: RMSE = "<<cur_rmse<<", "<<ninliers <<" inliers";
#endif
// 4. found a better solution?
if (ninliers > best_ninliers) {
best_ninliers = ninliers;
best_param[0] = param[0];
best_param[1] = param[1];
best_param[2] = param[2];
best_rmse = cur_rmse;
#ifdef DEBUG_RANSAC
cout<<" --- Solution improved: "<<
best_param[0]<<", "<<best_param[1]<<", "<<param[2]<<endl;
#endif
// exit condition
float inlier_ratio = (float)best_ninliers/N;
if (inlier_ratio > INLIERS_RATIO_OK) {
#ifdef DEBUG_RANSAC
cout<<"Breaking early after "<< iter+1<<
" iterations; inlier ratio = "<<inlier_ratio<<endl;
#endif
break;
}
} else {
#ifdef DEBUG_RANSAC
cout<<endl;
#endif
}
} // iterations
// 5. recreate inliers for the best parameters
ninliers = setInliers3Dof(src, dst, inliers, best_param, inlierMaxEr, center);
return best_rmse;
} // fit3DofRANSAC()
Let me first make sure I'm interpreting your question correctly. You have two sets of 2D points, one of which contains all "good" points corresponding to some object of interest, and one of which contains those points under an affine transformation with noisy points added. Right?
If that's correct, then there is a fairly reliable and efficient way to both reject noisy points and determine the transformation between your points of interest. The algorithm that is usually used to reject noisy points ("outliers") is known as RANSAC, and the algorithm used to determine the transformation can take several forms, but the most current state of the art is known as the five-point algorithm and can be found here -- a MATLAB implementation can be found here.
Unfortunately I don't know of a mature implementation of both of those combined; you'll probably have to do some work of your own to implement RANSAC and integrate it with the five point algorithm.
Edit:
Actually, OpenCV has an implementation that is overkill for your task (meaning it will work but will take more time than necessary) but is ready to work out of the box. The function of interest is called cv::findFundamentalMat.
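For reference, a rough sketch of that out-of-the-box route (the variable names are placeholders, and it assumes you already have candidate correspondences stored as std::vector<cv::Point2f>):
std::vector<cv::Point2f> objectPoints, scenePoints;  // candidate correspondences, filled elsewhere
std::vector<uchar> inlierMask;
cv::Mat F = cv::findFundamentalMat(objectPoints, scenePoints,
                                   cv::FM_RANSAC,
                                   3.0,          // max distance (in pixels) from the epipolar line for an inlier
                                   0.99,         // confidence
                                   inlierMask);  // inlierMask[i] != 0 marks correspondence i as an inlier (not noise)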
I believe you are looking for something like David Lowe's SIFT (Scale-Invariant Feature Transform). Another option is SURF (SIFT is patent-protected). The OpenCV library provides a SURF implementation.
I would try to use distance geometry (http://en.wikipedia.org/wiki/Distance_geometry) for this.
Generate a scalar for each point by summing its distances to all neighbours within a certain radius (see the sketch at the end of this answer). Though not perfect, this will be a good discriminator for each point.
Then put all the scalars in a map that allows a point (p) to be retrieved by its scalar (s) plus/minus some delta:
M(s ± delta) = p, e.g. a k-d tree (http://en.wikipedia.org/wiki/Kd-tree)
Put all the reference set of 2D points in the map.
On the other (test) set of 2D points:
foreach test scaling S (especially if you have a good idea what typical scaling values are)
    scale each point by S
    recompute the scalars of the test set of points
        for each point P in the test set (or perhaps a sample, for a faster method)
            look up the point in the reference scalar map within some delta
            discard P if no mapping is found
            else, foreach point P' found
                examine the neighbours of P and see if they have corresponding scalars in the reference map within some delta (i.e. the reference point has neighbours with approximately the same value)
            if all points tested have a mapping in the reference set, you have found a mapping of test point P onto reference point P' -> record the mapping of test point to reference point
        discard the scaling if no mappings were recorded
Note that this is trivially parallelizable in several different places.
This is off the top of my head, drawing from research I did years ago. It lacks fine details, but the general idea is clear: find points in the noisy (test) graph whose distances to their closest neighbours are roughly the same as in the reference set. Noisier graphs will have to measure the distances with a larger allowed error than less noisy graphs.
The algorithm works perfectly for graphs with no noise.
Edit: there is a refinement of the algorithm that doesn't require looking at different scalings. When computing the scalar for each point, use a relative distance measure instead. This will be invariant under the transform.
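For concreteness, here is a small sketch of the per-point scalar described above (the sum of distances to all neighbours within a radius); the type, names, and radius value are illustrative only:
#include <cmath>
#include <vector>

struct Pt { float x, y; };

// For each point, sum the distances to every other point that lies within `radius`.
// The resulting scalar is the rough neighbourhood "signature" used for the lookup map.
std::vector<float> pointScalars(const std::vector<Pt>& points, float radius)
{
    std::vector<float> scalars(points.size(), 0.0f);
    for (size_t i = 0; i < points.size(); ++i) {
        for (size_t j = 0; j < points.size(); ++j) {
            if (i == j) continue;
            const float dx = points[i].x - points[j].x;
            const float dy = points[i].y - points[j].y;
            const float d = std::sqrt(dx * dx + dy * dy);
            if (d <= radius)
                scalars[i] += d;
        }
    }
    return scalars;
}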
From C++, you could use ITK to do the image registration. It includes many registration functions that will work in the presence of noise.
The KLT (Kanade-Lucas-Tomasi) Feature Tracker makes an Affine Consistency Check of tracked features. The Affine Consistency Check takes translation, rotation, and scaling into account. I don't know whether it is of help to you, because you can't use the function (which calculates the affine transformation of a rectangular region) directly. But maybe you can learn from the documentation and source code how the affine transformation is calculated and adapt it to your problem (clouds of points instead of a rectangular region).
You want the Denton-Beveridge point matching algorithm. Source code is at the bottom of the page linked below, and there is also a paper that explains the algorithm and why RANSAC is a bad choice for this problem.
http://jasondenton.me/pntmatch.html

Can normal maps be generated from a texture?

If I have a texture, is it then possible to generate a normal-map for this texture, so it can be used for bump-mapping?
Or how are normal maps usually made?
Yes. Well, sort of. Normal maps can be accurately made from height-maps. Generally, you can also put a regular texture through and get decent results as well. Keep in mind there are other methods of making a normal map, such as taking a high-resolution model, making it low resolution, then doing ray casting to see what the normal should be for the low-resolution model to simulate the higher one.
For height-map to normal-map, you can use the Sobel Operator. This operator can be run in the x-direction, telling you the x-component of the normal, and then the y-direction, telling you the y-component. You can calculate z with 1.0 / strength where strength is the emphasis or "deepness" of the normal map. Then, take that x, y, and z, throw them into a vector, normalize it, and you have your normal at that point. Encode it into the pixel and you're done.
Here's some older incomplete-code that demonstrates this:
// pretend types, something like this
struct pixel
{
uint8_t red;
uint8_t green;
uint8_t blue;
};
struct vector3d; // a 3-vector with doubles
struct texture; // a 2d array of pixels
// determine intensity of pixel, from 0 - 1
const double intensity(const pixel& pPixel)
{
const double r = static_cast<double>(pPixel.red);
const double g = static_cast<double>(pPixel.green);
const double b = static_cast<double>(pPixel.blue);
const double average = (r + g + b) / 3.0;
return average / 255.0;
}
const int clamp(int pX, int pMax)
{
if (pX > pMax)
{
return pMax;
}
else if (pX < 0)
{
return 0;
}
else
{
return pX;
}
}
// transform -1 - 1 to 0 - 255
const uint8_t map_component(double pX)
{
return (pX + 1.0) * (255.0 / 2.0);
}
texture normal_from_height(const texture& pTexture, double pStrength = 2.0)
{
// assume square texture, not necessarily true in real code
texture result(pTexture.size(), pTexture.size());
const int textureSize = static_cast<int>(pTexture.size());
for (int row = 0; row < textureSize; ++row)
{
for (int column = 0; column < textureSize; ++column)
{
// surrounding pixels (indices clamped to the valid range 0 .. textureSize - 1)
const pixel topLeft = pTexture(clamp(row - 1, textureSize - 1), clamp(column - 1, textureSize - 1));
const pixel top = pTexture(clamp(row - 1, textureSize - 1), clamp(column, textureSize - 1));
const pixel topRight = pTexture(clamp(row - 1, textureSize - 1), clamp(column + 1, textureSize - 1));
const pixel right = pTexture(clamp(row, textureSize - 1), clamp(column + 1, textureSize - 1));
const pixel bottomRight = pTexture(clamp(row + 1, textureSize - 1), clamp(column + 1, textureSize - 1));
const pixel bottom = pTexture(clamp(row + 1, textureSize - 1), clamp(column, textureSize - 1));
const pixel bottomLeft = pTexture(clamp(row + 1, textureSize - 1), clamp(column - 1, textureSize - 1));
const pixel left = pTexture(clamp(row, textureSize - 1), clamp(column - 1, textureSize - 1));
// their intensities
const double tl = intensity(topLeft);
const double t = intensity(top);
const double tr = intensity(topRight);
const double r = intensity(right);
const double br = intensity(bottomRight);
const double b = intensity(bottom);
const double bl = intensity(bottomLeft);
const double l = intensity(left);
// sobel filter
const double dX = (tr + 2.0 * r + br) - (tl + 2.0 * l + bl);
const double dY = (bl + 2.0 * b + br) - (tl + 2.0 * t + tr);
const double dZ = 1.0 / pStrength;
math::vector3d v(dX, dY, dZ);
v.normalize();
// convert to rgb
result(row, column) = pixel(map_component(v.x), map_component(v.y), map_component(v.z));
}
}
return result;
}
There are probably many ways to generate a normal map, but as others said, you can do it from a height map, and 3D packages like XSI, 3ds Max, or Blender can output one for you as an image.
You can then output an RGB image with the Nvidia plugin for Photoshop, use an algorithm to convert it, or you might be able to output it directly from those 3D packages with third-party plugins.
Be aware that in some cases you might need to invert channels (R, G, or B) of the generated normal map.
Here's some resources link with examples and more complete explanation:
http://developer.nvidia.com/object/photoshop_dds_plugins.html
http://en.wikipedia.org/wiki/Normal_mapping
http://www.vrgeo.org/fileadmin/VRGeo/Bilder/VRGeo_Papers/jgt2002normalmaps.pdf
I don't think normal maps are generated from a texture; they are generated from a model.
Just as texturing allows you to define complex colour detail with minimal polys (as opposed to using millions of polys and vertex colours alone to define the colour of your mesh), a normal map allows you to define complex normal detail with minimal polys.
I believe normal maps are usually generated from a higher-resolution mesh and then used with a low-resolution mesh.
I'm sure 3D tools such as 3ds Max or Maya, as well as more specific tools, will do this for you. Unlike textures, I don't think they are usually made by hand.
But they are generated from the mesh, not the texture.
I suggest starting with OpenCV, due to its richness in algorithms. Here's a routine I wrote that iteratively blurs the normal map and blends the blurred versions back into the overall value, essentially creating more of a topological map.
#define ROW_PTR(img, y) ((uchar*)((img).data + (img).step * y))
cv::Mat normalMap(const cv::Mat& bwTexture, double pStrength)
{
// assume square texture, not necessarily true in real code
int scale = 1.0;
int delta = 127;
cv::Mat sobelZ, sobelX, sobelY;
cv::Sobel(bwTexture, sobelX, CV_8U, 1, 0, 13, scale, delta, cv::BORDER_DEFAULT);
cv::Sobel(bwTexture, sobelY, CV_8U, 0, 1, 13, scale, delta, cv::BORDER_DEFAULT);
sobelZ = cv::Mat(bwTexture.rows, bwTexture.cols, CV_8UC1);
for(int y=0; y<bwTexture.rows; y++) {
const uchar *sobelXPtr = ROW_PTR(sobelX, y);
const uchar *sobelYPtr = ROW_PTR(sobelY, y);
uchar *sobelZPtr = ROW_PTR(sobelZ, y);
for(int x=0; x<bwTexture.cols; x++) {
double Gx = double(sobelXPtr[x]) / 255.0;
double Gy = double(sobelYPtr[x]) / 255.0;
double Gz = pStrength * sqrt(Gx * Gx + Gy * Gy);
uchar value = uchar(Gz * 255.0);
sobelZPtr[x] = value;
}
}
std::vector<cv::Mat>planes;
planes.push_back(sobelX);
planes.push_back(sobelY);
planes.push_back(sobelZ);
cv::Mat normalMap;
cv::merge(planes, normalMap);
cv::Mat originalNormalMap = normalMap.clone();
cv::Mat normalMapBlurred;
for (int i=0; i<3; i++) {
cv::GaussianBlur(normalMap, normalMapBlurred, cv::Size(13, 13), 5, 5);
addWeighted(normalMap, 0.4, normalMapBlurred, 0.6, 0, normalMap);
}
addWeighted(originalNormalMap, 0.3, normalMapBlurred, 0.7, 0, normalMap);
return normalMap;
}
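A possible usage sketch ("texture.png" and "normal.png" are placeholder file names):
#include <opencv2/opencv.hpp>

int main()
{
    // Load the texture as grayscale, build the normal map, and save it.
    cv::Mat bwTexture = cv::imread("texture.png", cv::IMREAD_GRAYSCALE);
    if (bwTexture.empty())
        return 1;
    cv::Mat result = normalMap(bwTexture, 2.0 /* strength */);
    cv::imwrite("normal.png", result);
    return 0;
}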