Getting fingerprint orientation using a gradient-based method - C++

I'm trying to get the orientation image of a fingerprint using the method proposed in this paper.
I tried implementing the steps described in Section 3.1.1 of the paper, but I don't get the desired result.
Here is my OpenCV code:
Mat calculate_orientation(Mat img, Mat &coherence) {
    Mat image = img.clone();
    Mat orient_im = Mat::zeros(image.size(), image.type());
    Mat grad_x, grad_y;
    Sobel(image, grad_x, CV_32F, 1, 0, 3, 1, 0, BORDER_DEFAULT);
    Sobel(image, grad_y, CV_32F, 0, 1, 3, 1, 0, BORDER_DEFAULT);

    // Iterate per BLOCKSIZE and use BLOCKSIZE/2 as the center
    for (int i = BLOCKSIZE/2; i <= image.rows - BLOCKSIZE/2; i += BLOCKSIZE) {
        for (int j = BLOCKSIZE/2; j <= image.cols - BLOCKSIZE/2; j += BLOCKSIZE) {
            // Iterate each pixel in the block
            float vx = 0.0f, vy = 0.0f, angle;
            // Coherence
            float gx = 0.0f, gy = 0.0f, gxy = 0.0f;
            for (int u = i - BLOCKSIZE/2; u < i + BLOCKSIZE/2; u++) {
                for (int v = j - BLOCKSIZE/2; v < j + BLOCKSIZE/2; v++) {
                    gx = 2 * grad_x.at<float>(u,v) * grad_y.at<float>(u,v);
                    gy = pow(grad_x.at<float>(u,v), 2) - pow(grad_y.at<float>(u,v), 2);
                    vx += gx;
                    vy += gy;
                    gxy += sqrt(pow(gx,2) + pow(gy,2));
                }
            }
            if (vy == 0) {
                angle = 90;
            } else {
                angle = 0.5 * atan(vx/vy) * 180.0f/CV_PI;
            }
            // The angle above is the angle perpendicular to ridge direction
            orient_im.at<float>(i,j) = angle + 90;
            // Coherence
            float coh = sqrt(pow(vx,2) + pow(vy,2)) / gxy;
            coherence.at<float>(i,j) = coh;
        }
    }
    return orient_im;
}
This is the input image.
And this is the result. The blue lines are orientations with a coherence value above 0.5, and the red lines are orientations with a coherence value below 0.5.
Only around half of the orientations seem right.
I know there are already a few questions about this, but I still haven't gotten the correct results, so pardon me for asking. Any help would be appreciated.
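For reference, here is a minimal sketch of the widely used least-squares form of this block estimate, written with atan2 so the quadrant does not have to be handled as a special case. The helper name is hypothetical, it is not the paper's exact notation, and the block iteration simply mirrors the code above:
#include <opencv2/core.hpp>
#include <cmath>

// Sketch: least-squares block orientation from Sobel gradients.
// vx accumulates 2*Gx*Gy, vy accumulates Gx^2 - Gy^2 over the block.
float blockOrientationDeg(const cv::Mat &grad_x, const cv::Mat &grad_y,
                          int i, int j, int blockSize)
{
    float vx = 0.0f, vy = 0.0f;
    for (int u = i - blockSize/2; u < i + blockSize/2; u++) {
        for (int v = j - blockSize/2; v < j + blockSize/2; v++) {
            float gx = grad_x.at<float>(u, v);
            float gy = grad_y.at<float>(u, v);
            vx += 2.0f * gx * gy;
            vy += gx * gx - gy * gy;
        }
    }
    // atan2 keeps the angle in the correct quadrant even when vy == 0
    float angle = 0.5f * std::atan2(vx, vy) * 180.0f / static_cast<float>(CV_PI);
    return angle + 90.0f;   // ridge direction is perpendicular to the gradient direction
}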

Related

OpenCV: rotating video frames

I am trying to rotate video frames captured with libcamera-vid (from libcamera-apps) by an arbitrary number of degrees using OpenCV's warpAffine().
As far as I can tell, the frames are in YUV420 planar format.
The gist of what I'm doing is:
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>

cv::Point2f Ycenter = cv::Point2f(width / 2.0f, height / 2.0f);
cv::Point2f UVcenter = cv::Point2f(width / 4.0f, height / 4.0f);
double rotation = 0;
cv::Mat Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
cv::Mat UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
int Uoffset = height*width;
int Voffset = 5*info.height*info.width/4;
cv::Size Ysize(info.height, info.width);
cv::Size UVsize(info.height / 2, info.width / 2);

for (unsigned int count = 0; ; count++)
{
    // ...
    // Wait for frame here
    // ...

    // Acquire buffer in which frame is stored:
    uint8_t* buffer = getFrameBuffer(); // Simplification, but not important

    double rot = floor(count / 10);
    if (10*rot != rotation)
    {
        rotation = 10*rot;
        Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
        UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
    }

    cv::Mat Y(Ysize.height, Ysize.width, CV_8UC1, buffer);
    cv::Mat U(UVsize.height, UVsize.width, CV_8UC1, buffer + Uoffset);
    cv::Mat V(UVsize.height, UVsize.width, CV_8UC1, buffer + Voffset);

    cv::warpAffine(Y, Y, Ytransform, Ysize);
    cv::warpAffine(U, U, UVtransform, UVsize);
    cv::warpAffine(V, V, UVtransform, UVsize);

    sendFrameToEncoder(buffer); // Also a simplification, also not important as far as I know
}
Where height, width are the height and width of the video frame.
However, this produces weird, warped images. Something is clearly rotated, but not correctly.
rotation = 0 produces:
rotation = 10 produces:
rotation = 20 produces:
rotation = 30 produces:
So, it clearly isn't working correctly. Does anyone know what's going wrong here?
I'm using OpenCV version 4.5.1 on a Raspberry Pi Zero W running Raspberry Pi OS Bullseye.
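For reference, a minimal sketch of how the three planes of a tightly packed YUV420 planar buffer are usually laid out and wrapped in cv::Mat (rows = height, cols = width). The function name is hypothetical and this is an illustration of the layout, not a tested fix:
#include <opencv2/core.hpp>
#include <cstdint>

// Assumes tightly packed planes (no stride padding): Y is width*height bytes,
// U and V are each (width/2)*(height/2) bytes, with V following U.
void wrapYuv420(uint8_t *buffer, int width, int height,
                cv::Mat &Y, cv::Mat &U, cv::Mat &V)
{
    const int uOffset = width * height;
    const int vOffset = width * height + (width / 2) * (height / 2);
    Y = cv::Mat(height, width, CV_8UC1, buffer);                    // rows, cols
    U = cv::Mat(height / 2, width / 2, CV_8UC1, buffer + uOffset);
    V = cv::Mat(height / 2, width / 2, CV_8UC1, buffer + vOffset);
}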

Convert OpenGL coord system to custom and back

I have a task to draw 3D objects on the ground using OpenGL. I use OpenGL's left-handed coordinate system where the Y axis is up. But the 3D objects and the camera orbiting them should use a different coordinate system with the following properties:
XY plane is a ground plane;
Z-axis is up;
Y-axis is "North";
X-axis is "East";
The azimuth (or horizontal) angle is [0, 360] degrees;
The elevation (or vertical) angle is [0, 90] degrees from XY plane.
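For concreteness, here is a small illustration (not from the original post) of how azimuth and elevation would map to a unit direction vector in this ground frame, assuming azimuth is measured from North toward East:
#include <QVector3D>
#include <QtMath>
#include <cmath>

// Hypothetical helper: direction in the ground frame (X = East, Y = North, Z = up).
QVector3D directionFromAzEl(float azimuthDeg, float elevationDeg)
{
    const float az = qDegreesToRadians(azimuthDeg);
    const float el = qDegreesToRadians(elevationDeg);
    return QVector3D(std::sin(az) * std::cos(el),   // East  (X)
                     std::cos(az) * std::cos(el),   // North (Y)
                     std::sin(el));                 // Up    (Z)
}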
The end user uses azimuth and elevation to rotate the camera around some center. So I wrote the following code to convert from spherical coordinates to a quaternion:
//polar: x - radius, y - horizontal angle, z - vertical
QQuaternion CoordinateSystemGround::fromPolarToQuat(const QVector3D& polar) const {
    QQuaternion dest = QQuaternion::fromEulerAngles({0, polar.y(), polar.z()});
    //convert user coord system back to OpenGL by rotation around X-axis
    QQuaternion orig = QQuaternion::fromAxisAndAngle(1, 0, 0, -90);
    return dest * orig;
}
and back:
QVector3D CoordinateSystemGround::fromQuatToPolar(const QQuaternion& q) const {
    //convert OpenGL coord system to destination by rotation around X-axis
    QQuaternion dest = QQuaternion::fromAxisAndAngle(1, 0, 0, 90);
    QQuaternion out = q * dest;
    QVector3D euler = out.toEulerAngles();
    float hor = euler.y();
    if(hor < 0.0f)
        hor += 360.0f;
    float ver = euler.z();
    if(ver > 90.0f)
        ver = 90.0f;
    else if(ver < 0.0f)
        ver = 0.0f;
    //x changes later
    return QVector3D(0, hor, ver);
}
But it doesn't work right. I suppose the fromPolarToQuat conversion has a mistake somewhere, but I can't understand where.
It seems like I found the solution. So, to get polar angles from a quaternion:
QVector3D CoordinateSystemGround::fromQuatToPolar(const QQuaternion& q) const {
    QQuaternion coord1 = QQuaternion::fromAxisAndAngle(0, 1, 0, -180);
    QQuaternion coord2 = QQuaternion::fromAxisAndAngle(1, 0, 0, -90);
    QQuaternion out = coord1 * coord2 * q;
    QVector3D euler = out.toEulerAngles();
    float hor = euler.y();
    if(hor < 0.0f)
        hor += 360.0f;
    float ver = euler.x();
    return QVector3D(0, hor, -ver);
}
And back:
QQuaternion CoordinateSystemGround::fromPolarToQuat(const QVector3D& polar) const {
    QQuaternion dest = QQuaternion::fromEulerAngles({-polar.z(), polar.y(), 0});
    QQuaternion coord1 = QQuaternion::fromAxisAndAngle(1, 0, 0, 90);
    QQuaternion coord2 = QQuaternion::fromAxisAndAngle(0, 1, 0, 180);
    return coord1 * coord2 * dest;
}
Not sure it's an optimal solution though, but it works as it should.
Edited
After some research I've found a few mistakes and made an optimized and, I hope, correct version of the conversion:
QVector3D CoordinateSystemGround::fromQuatToPolar(const QQuaternion& q) const {
    // back to OpenGL coord system: just multiplication on inverted destination coord system
    QQuaternion dest = QQuaternion::fromAxes({-1, 0, 0}, {0, 0, 1}, {0, 1, 0}).inverted();
    QVector3D euler = (dest * q).toEulerAngles();
    float hor = euler.y();
    if(hor < 0.0f)
        hor += 360.0f;
    float ver = euler.x();
    return QVector3D(0, hor, -ver);
}
And back:
QQuaternion CoordinateSystemGround::fromPolarToQuat(const QVector3D& polar) const {
    //just rotate if we were in OpenGL coord system
    QQuaternion orig = QQuaternion::fromEulerAngles({-polar.z(), polar.y(), 0});
    //and then multiply on destination coord system
    QQuaternion dest = QQuaternion::fromAxes({-1, 0, 0}, {0, 0, 1}, {0, 1, 0});
    return dest * orig;
}
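As a usage note, a simple round-trip check could be used to verify the pair of functions. This is a hypothetical sketch that assumes the CoordinateSystemGround class above and Qt's QQuaternion API:
#include <QQuaternion>
#include <QVector3D>
#include <QDebug>

// Hypothetical check: polar -> quaternion -> polar should reproduce the
// original azimuth/elevation (the radius component is ignored by these functions).
void checkRoundTrip(const CoordinateSystemGround &cs)
{
    const QVector3D polarIn(0.0f, 45.0f, 30.0f);    // azimuth 45 deg, elevation 30 deg
    const QQuaternion q = cs.fromPolarToQuat(polarIn);
    const QVector3D polarOut = cs.fromQuatToPolar(q);
    qDebug() << "in:" << polarIn << "out:" << polarOut;
}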

How to make a line following algorithm for an AR.Drone 2.0?

I am trying to develop a line following algorithm where a drone will detect a region of interest by color filtering. I am filtering out all colors so that only white is visible. Once that color is detected, I want the drone to go in a straight line from one end of the region to the other, essentially following the line. Maybe it would be easier using the draw-line function in OpenCV, but I am not sure. Anyway, my biggest problem is telling the drone to follow the color, or in other words the detected stationary object. I am using this repository from GitHub: https://github.com/tekkies/cvdrone/blob/master/src/main.cpp.
#include "ardrone/ardrone.h"

int main(int argc, char *argv[])
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        std::cout << "Failed to initialize." << std::endl;
        return -1;
    }

    // Thresholds
    int minH = 0, maxH = 255;
    int minS = 0, maxS = 255;
    int minV = 0, maxV = 255;

    // XML save data
    std::string filename("thresholds.xml");
    cv::FileStorage fs(filename, cv::FileStorage::READ);

    // If there is a save file then read it
    if (fs.isOpened()) {
        maxH = fs["H_MAX"];
        minH = fs["H_MIN"];
        maxS = fs["S_MAX"];
        minS = fs["S_MIN"];
        maxV = fs["V_MAX"];
        minV = fs["V_MIN"];
        fs.release();
    }

    // Create a window
    cv::namedWindow("binalized");
    cv::createTrackbar("H max", "binalized", &maxH, 255);
    cv::createTrackbar("H min", "binalized", &minH, 255);
    cv::createTrackbar("S max", "binalized", &maxS, 255);
    cv::createTrackbar("S min", "binalized", &minS, 255);
    cv::createTrackbar("V max", "binalized", &maxV, 255);
    cv::createTrackbar("V min", "binalized", &minV, 255);
    cv::resizeWindow("binalized", 0, 0);

    // Kalman filter
    cv::KalmanFilter kalman(4, 2, 0);

    // Sampling time [s]
    const double dt = 1.0;

    // Transition matrix (x, y, vx, vy)
    cv::Mat1f A(4, 4);
    A << 1.0, 0.0,  dt, 0.0,
         0.0, 1.0, 0.0,  dt,
         0.0, 0.0, 1.0, 0.0,
         0.0, 0.0, 0.0, 1.0;
    kalman.transitionMatrix = A;

    // Measurement matrix (x, y)
    cv::Mat1f H(2, 4);
    H << 1, 0, 0, 0,
         0, 1, 0, 0;
    kalman.measurementMatrix = H;

    // Process noise covariance (x, y, vx, vy)
    cv::Mat1f Q(4, 4);
    Q << 1e-5,  0.0,  0.0,  0.0,
          0.0, 1e-5,  0.0,  0.0,
          0.0,  0.0, 1e-5,  0.0,
          0.0,  0.0,  0.0, 1e-5;
    kalman.processNoiseCov = Q;

    // Measurement noise covariance (x, y)
    cv::Mat1f R(2, 2);
    R << 1e-1,  0.0,
          0.0, 1e-1;
    kalman.measurementNoiseCov = R;

    char textBuffer[80];
    cv::Scalar green = CV_RGB(0, 255, 0);
    float speed = 0.0;
    bool learnMode = false;

    // Main loop
    while (1) {
        // Key input
        int key = cv::waitKey(33);
        if (key == 0x1b) break;

        // Get an image
        cv::Mat image = ardrone.getImage();

        // HSV image
        cv::Mat hsv;
        cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV_FULL);

        // Binalize
        cv::Mat binalized;
        cv::Scalar lower(minH, minS, minV);
        cv::Scalar upper(maxH, maxS, maxV);
        cv::inRange(hsv, lower, upper, binalized);

        // Show result
        cv::imshow("binalized", binalized);

        // De-noising
        cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
        cv::morphologyEx(binalized, binalized, cv::MORPH_CLOSE, kernel);
        //cv::imshow("morphologyEx", binalized);

        // Detect contours
        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(binalized.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);

        // Find largest contour
        int contour_index = -1;
        double max_area = 0.0;
        for (size_t i = 0; i < contours.size(); i++) {
            double area = fabs(cv::contourArea(contours[i]));
            if (area > max_area) {
                contour_index = i;
                max_area = area;
            }
        }

        // Object detected
        if (contour_index >= 0) {
            // Moments
            cv::Moments moments = cv::moments(contours[contour_index], true);
            double marker_y = (int)(moments.m01 / moments.m00);
            double marker_x = (int)(moments.m10 / moments.m00);

            // Measurements
            cv::Mat measurement = (cv::Mat1f(2, 1) << marker_x, marker_y);

            // Correction
            cv::Mat estimated = kalman.correct(measurement);

            // Show result
            cv::Rect rect = cv::boundingRect(contours[contour_index]);
            cv::rectangle(image, rect, cv::Scalar(0, 255, 0));
        }

        // Prediction
        cv::Mat1f prediction = kalman.predict();
        int radius = 1e+3 * kalman.errorCovPre.at<float>(0, 0);

        // Calculate object heading fraction
        float heading = -((image.cols / 2) - prediction(0, 0)) / (image.cols / 2);
        sprintf(textBuffer, "heading = %+3.2f", heading);
        putText(image, textBuffer, cvPoint(30, 30), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, green, 1, CV_AA);

        // Show predicted position
        cv::circle(image, cv::Point(prediction(0, 0), prediction(0, 1)), radius, green, 2);

        // Speed
        if ((key >= '0') && (key <= '9')) {
            speed = (key - '0') * 0.1;
            //printf("speed = %3.2f\n", speed);
        }
        sprintf(textBuffer, "speed = %3.2f", speed);
        putText(image, textBuffer, cvPoint(30, 60), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, green, 1, CV_AA);

        // Drone control
        double vx = 0.0, vy = 0.0, vz = 0.0, vr = 0.0;

        // Auto-follow
        vx = speed;
        vr = -heading;
        if (key == 0x260000) vx =  1.0;
        if (key == 0x280000) vx = -1.0;
        if (key == 0x250000) vr =  1.0;
        if (key == 0x270000) vr = -1.0;
        if (key == 'q')      vz =  1.0;
        if (key == 'a')      vz = -1.0;
        ardrone.move3D(vx, vy, vz, vr);
    }

    // See you
    ardrone.close();

    return 0;
}
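One possible way to get an actual line to follow from the binarized image, rather than only a centroid, is to fit a line to the largest white contour and steer from its angle and lateral offset. This is a hedged sketch, not part of the repository above; the function name and gains are hypothetical:
#include <opencv2/imgproc.hpp>
#include <vector>
#include <cmath>

// Sketch: fit a line to the largest white contour and derive a normalized
// steering term from its angle and lateral offset (could replace "vr = -heading").
double lineFollowCommand(const std::vector<cv::Point> &contour, int imageWidth,
                         double k_angle = 0.5, double k_offset = 0.5)
{
    cv::Vec4f line;   // (vx, vy, x0, y0): unit direction plus a point on the line
    cv::fitLine(contour, line, cv::DIST_L2, 0, 0.01, 0.01);
    // Angle of the line relative to the image's vertical (forward) axis, in radians
    double lineAngle = std::atan2(line[0], line[1]);
    // Lateral offset of the line from the image center, normalized to roughly [-1, 1]
    double lateralOffset = (line[2] - imageWidth / 2.0) / (imageWidth / 2.0);
    // Simple proportional steering command
    return -(k_angle * lineAngle + k_offset * lateralOffset);
}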

OpenCV C++: declaring a new variable leads to stack corruption

I have been looking through this code for the last two days, and I have no idea what's happening.
void filterKeypoints(std::vector<KeyPoint> &keypoints, Mat orientation){
    unsigned int rows = orientation.rows;
    unsigned int cols = orientation.cols;
    unsigned int N = keypoints.size();
    Mat newOrientation;
    vector<KeyPoint> newKeyPoints;
    // Function to create n points in an interval, equally spaced
    vector<float> t = divideInterval(-3.14159265, 3.14159265, 6);
    for (unsigned int i = 0; i < N; i++){
        Mat x_c, y_c;
        getKeyPoint(keypoints[i], x_c, y_c);
        float radius_sq = (72 * pow(keypoints[i].size, 2));
        float radius = 6 * sqrt(2) * keypoints[i].size;
        float x_min = round(keypoints[i].pt.x - radius);
        float x_max = round(keypoints[i].pt.x + radius);
        float y_min = round(keypoints[i].pt.y - radius);
        float y_max = round(keypoints[i].pt.y + radius);
        cout << i << " of " << N << endl;
        if ((x_min >= 0) && (x_max < cols) && (y_min >= 0) && (y_max < rows)){
            Mat X, Y;
            meshGrid(X, Y, x_min, x_max, y_min, y_max);
            X = X.reshape(0, 1); // row vector
            Y = Y.reshape(0, 1);
            int nr = y_max - y_min + 1;
            int nc = x_max - x_min + 1;
            Mat D_array = Mat::zeros(128, 1, CV_32FC1);
            float radius_sq_small = radius_sq / 16;
            //////////// IMPLEMENTATION STOPPED HERE
            for (unsigned int j = 0; j < 16; j++){
                Mat X_sq, Y_sq;
                cv::pow(X - (float) x_c.at<float>(j), 2, X_sq);
                cv::pow(Y - (float) y_c.at<float>(j), 2, Y_sq);
                Mat Distance_Square = X_sq + Y_sq;
                Mat If = Mat::zeros(rows, cols, CV_8UC1);
                Mat Binary;
                //cv::compare(Distance_Square, radius_sq_small, Binary, CMP_LE);
                //Mat Binary = Distance_Square <= radius_sq_small;
                //Binary = Binary.reshape(0, nr);
                //Mat roi_img(If(cvRect(x_min, y_min, nc, nr)));
                //Binary.copyTo(roi_img);
            }
        }
    }
    cout << "Everything run well." << endl;
}
If the line where Mat Binary is created is uncommented, the program breaks with an unhandled exception: it runs the for loop, reaches the cout, but never returns from the void function, crashing with ".exe stopped working".
If I comment that line out, everything goes as expected. So I am guessing there is some kind of stack corruption, but I don't quite understand why...
Note: the function is not fully implemented yet; I am just not sure why this error is thrown.
The keypoints come from OpenCV SIFT and orientation is an image with gradient orientations. I am using OpenCV 3.1.
The original function that calls it is:
void getImageDescriptors(Mat image, Mat orientation){
    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
    std::vector<KeyPoint> keypoints;
    f2d->detect(image, keypoints);
    Mat descriptors;
    f2d->compute(image, keypoints, descriptors);
    adjustAngle(keypoints);
    filterKeypoints(keypoints, orientation);
    cout << "Got here Correctly" << endl;
}
Hope anyone can help me!
Best Regards.
EDIT 1:
Here's the getKeyPoint function, which applies some expressions according to the article I'm trying to implement. It never returns an error, so I don't think the problem comes from here.
void getKeyPoint(KeyPoint keypoint, Mat &x_c, Mat &y_c){
    float xc_array[16] = { -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5 };
    x_c = Mat(1, 16, CV_32FC1, &xc_array).clone();
    float yc_array[16] = { -1.5, -1.5, -1.5, -1.5, -0.5, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5 };
    y_c = Mat(1, 16, CV_32FC1, &yc_array).clone();
    float SBP = 3 * keypoint.size;
    float c = cos(keypoint.angle * 3.14159265 / 180); // Convert to radians
    float s = sin(keypoint.angle * 3.14159265 / 180);
    x_c = SBP*(c*x_c - s*y_c) + keypoint.pt.x; // See this in future
    y_c = SBP*(s*x_c + c*y_c) + keypoint.pt.y;
}
EDIT 2
By replacing Mat Binary; with Mat Binary = Mat::zeros(Distance_Square.rows, Distance_Square.cols, CV_8UC1); the error goes away (creating a matrix of zeros instead of just default initialization).
My question is: why was this variable messing up the memory allocation while other ones like newOrientation, X_sq, and Y_sq were not causing any trouble?
Resolved, thank you to everyone who helped. Basically, not all paths of the divideInterval function returned a value, so when deallocating memory it threw an exception. It was a silly implementation error that I had overlooked.
Still, I do not know why the error was thrown only sometimes.
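The original divideInterval is not shown, so the following is purely an assumption about its intended behavior, but a version that returns a value on every path might look like this:
#include <vector>

// Assumed behavior: n equally spaced values covering [start, end] inclusive.
std::vector<float> divideInterval(float start, float end, int n)
{
    std::vector<float> values;
    if (n <= 0)
        return values;            // every path returns a value
    if (n == 1) {
        values.push_back(start);
        return values;
    }
    const float step = (end - start) / (n - 1);
    for (int k = 0; k < n; k++)
        values.push_back(start + k * step);
    return values;
}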

Transform a frame to be as if it was taken from above using OpenCV

I am working on a project for estimating a UAV (quadcopter) location using the optical-flow technique. I currently have code that uses the Farneback algorithm from OpenCV. The current code works fine when the camera is always pointing at the ground.
Now I want to add support for the case when the camera is not pointing straight down, meaning that the quadcopter now has pitch / roll / yaw (Euler angles). The quadcopter's Euler angles are known, and I am searching for a method to compute and apply the transformation needed based on the known current Euler angles, so that the resulting image will be as if it was taken from above (see image below).
I found methods that calculate the transformation when you have two sets (source and destination) of 4 corners, via the findHomography or getPerspectiveTransform functions from OpenCV, but I couldn't find any method that can do it knowing only the Euler angles (because I don't know the destination image corners).
So my question is: what method can I use, and how, in order to transform a frame to be as if it was taken from above, using only the Euler angles and the camera height from the ground if necessary?
In order to demonstrate what I need:
The relevant part of my current code is below:
for(;;)
{
    Mat m, disp, warp;
    vector<Point2f> corners;

    // take out frame - still distorted
    cap >> origFrame;

    // undistort the frame using the calibration parameters
    cv::undistort(origFrame, undistortFrame, cameraMatrix, distCoeffs, noArray());

    // lower the process effort by transforming the picture to gray
    cvtColor(undistortFrame, gray, COLOR_BGR2GRAY);

    if( !prevgray.empty() )
    {
        // calculate flow
        calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3/*def 3 */, 10/* def 15*/, 3, 3, 1.2 /* def 1.2*/, 0);
        uflow.copyTo(flow);

        // get average
        calcAvgOpticalFlow(flow, 16, corners);

        // calculate range of view - 2*tan(fov/2)*distance
        rovX = 2*0.44523*distanceSonar*100;    // 2 * tan(48/2) * dist(cm)
        rovY = 2*0.32492*distanceSonar*100;    // 2 * tan(36/2) * dist(cm)

        // calculate final x, y location
        location[0] += (currLocation.x/WIDTH_RES)*rovX;
        location[1] += (currLocation.y/HEIGHT_RES)*rovY;
    }

    // break conditions
    if(waitKey(1) >= 0)
        break;
    if(end_run)
        break;

    std::swap(prevgray, gray);
}
UPDATE:
After successfully adding the rotation, I still need my image to be centered (and not to go outside of the frame window as shown below). I guess I need some kind of translation. I want the center of the source image to be at the center of the destination image. How can I add this as well?
The rotation function that works:
void rotateFrame(const Mat &input, Mat &output, Mat &A, double roll, double pitch, double yaw){
    Mat Rx = (Mat_<double>(3, 3) <<
              1,         0,          0,
              0, cos(roll), -sin(roll),
              0, sin(roll),  cos(roll));
    Mat Ry = (Mat_<double>(3, 3) <<
               cos(pitch), 0, sin(pitch),
               0,          1, 0,
              -sin(pitch), 0, cos(pitch));
    Mat Rz = (Mat_<double>(3, 3) <<
              cos(yaw), -sin(yaw), 0,
              sin(yaw),  cos(yaw), 0,
              0,         0,        1);
    Mat R = Rx*Ry*Rz;
    Mat trans = A*R*A.inv();
    warpPerspective(input, output, trans, input.size());
}
When I run it with rotateFrame(origFrame, processedFrame, cameraMatrix, 0, 0, 0); I get the image as expected:
But when I run it with a roll angle, rotateFrame(origFrame, processedFrame, cameraMatrix, 20*(M_PI/180), 0, 0);, the image goes outside the frame window:
If you have a calibration intrinsics matrix A (3x3), and there is no translation between camera poses, all you need to find the homography H (3x3) is to construct the rotation matrix R (3x3) from the Euler angles and apply the following formula:
H = A * R * A.inv()
Where .inv() is matrix inversion.
UPDATED:
If you want to center the image, you should just add a translation this way
(this finds the warped position of the center and translates that point back to the center):

|dx|        | 320 / 2 |
|dy| = H *  | 240 / 2 |
|1 |        |    1    |

    | 1  0  (320/2 - dx) |
W = | 0  1  (240/2 - dy) | * H
    | 0  0       1       |
W is your final transformation.
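In OpenCV terms, the centering step above could be written roughly like this; a sketch that assumes a 3x3 CV_64F homography and a 320x240 frame, with the homogeneous division made explicit:
#include <opencv2/core.hpp>

// Sketch: warp the image center with H, then translate the warped center
// back to the original center. The frame size is an assumption.
cv::Mat centerHomography(const cv::Mat &H, double width = 320.0, double height = 240.0)
{
    const double cx = width / 2.0, cy = height / 2.0;
    cv::Mat c = (cv::Mat_<double>(3, 1) << cx, cy, 1.0);
    cv::Mat wc = H * c;                                 // warped center (homogeneous)
    const double dx = wc.at<double>(0) / wc.at<double>(2);
    const double dy = wc.at<double>(1) / wc.at<double>(2);
    cv::Mat T = (cv::Mat_<double>(3, 3) <<
                 1, 0, cx - dx,
                 0, 1, cy - dy,
                 0, 0, 1);
    return T * H;                                       // final transformation W
}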
I came to the conclusion that I had to use a 4x4 homography matrix in order to get what I wanted. In order to find the right homography matrix we need:
3D Rotation matrix R.
Camera calibration intrinsics matrix A2 and its inverted matrix A1.
Translation matrix T.
We can compose the 3D rotation matrix R by multiplying the rotation matrices around axes X,Y,Z:
Mat R = RZ * RY * RX
In order to apply the transformation to the image and keep it centered, we need to add a translation given by a 4x4 matrix, where dx = 0, dy = 0, dz = 1:
Mat T = (Mat_<double>(4, 4) <<
         1, 0, 0, dx,
         0, 1, 0, dy,
         0, 0, 1, dz,
         0, 0, 0, 1);
Given all these matrices we can compose our homography matrix H:
Mat H = A2 * (T * (R * A1))
With this homography matrix we can then use the warpPerspective function from OpenCV to apply the transformation.
warpPerspective(input, output, H, input.size(), INTER_LANCZOS4);
For the conclusion and completeness of this solution, here is the full code:
void rotateImage(const Mat &input, UMat &output, double roll, double pitch, double yaw,
                 double dx, double dy, double dz, double f, double cx, double cy)
{
    // Camera Calibration Intrinsics Matrix
    Mat A2 = (Mat_<double>(3,4) <<
              f, 0, cx, 0,
              0, f, cy, 0,
              0, 0, 1,  0);

    // Inverted Camera Calibration Intrinsics Matrix
    Mat A1 = (Mat_<double>(4,3) <<
              1/f, 0,   -cx/f,
              0,   1/f, -cy/f,
              0,   0,    0,
              0,   0,    1);

    // Rotation matrices around the X, Y, and Z axis
    Mat RX = (Mat_<double>(4, 4) <<
              1, 0,          0,          0,
              0, cos(roll), -sin(roll),  0,
              0, sin(roll),  cos(roll),  0,
              0, 0,          0,          1);
    Mat RY = (Mat_<double>(4, 4) <<
               cos(pitch), 0, sin(pitch), 0,
               0,          1, 0,          0,
              -sin(pitch), 0, cos(pitch), 0,
               0,          0, 0,          1);
    Mat RZ = (Mat_<double>(4, 4) <<
              cos(yaw), -sin(yaw), 0, 0,
              sin(yaw),  cos(yaw), 0, 0,
              0,         0,        1, 0,
              0,         0,        0, 1);

    // Translation matrix
    Mat T = (Mat_<double>(4, 4) <<
             1, 0, 0, dx,
             0, 1, 0, dy,
             0, 0, 1, dz,
             0, 0, 0, 1);

    // Compose rotation matrix from (RX, RY, RZ)
    Mat R = RZ * RY * RX;

    // Final transformation matrix
    Mat H = A2 * (T * (R * A1));

    // Apply matrix transformation
    warpPerspective(input, output, H, input.size(), INTER_LANCZOS4);
}
Result:
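For reference, a hypothetical call to rotateImage might look like this, assuming the focal length and principal point come from the camera matrix, the Euler angles are in radians, and dz = 1 as described above:
// Hypothetical usage; cameraMatrix, origFrame and the Euler angles are assumed
// to exist as in the earlier code.
double f  = cameraMatrix.at<double>(0, 0);
double cx = cameraMatrix.at<double>(0, 2);
double cy = cameraMatrix.at<double>(1, 2);
cv::UMat processed;
rotateImage(origFrame, processed, roll, pitch, yaw, 0, 0, 1, f, cx, cy);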
This is how I do it in Eigen, using 4 corners:
// Desired four corners
std::vector<Eigen::Vector2f> Normalized_Reference_Pattern = { Eigen::Vector2f(0, 0), Eigen::Vector2f(0, 2), Eigen::Vector2f(2, 0), Eigen::Vector2f(2, 2) };

// Current four points
std::vector<Eigen::Vector2f> CurrentCentroids = { /* Whatever four corners you want, but in the same relative sequence as above */ };

// Transform from current to desired
auto Master_Transform = get_perspective_transform(CurrentCentroids, Normalized_Reference_Pattern);

// Ability to use the same transformation for other points (other than the corners) in the image,
// applied in homogeneous coordinates
Eigen::Vector2f Master_Transform_Centroid =
    (Master_Transform * Eigen::Vector3f(currentX, currentY, 1.0f)).hnormalized();
And here is my black box:
Eigen::Matrix3f get_perspective_transform(const std::vector<Eigen::Vector2f>& points_from,
                                          const std::vector<Eigen::Vector2f>& points_to)
{
    cv::Mat transform_cv = cv::getPerspectiveTransform(
        convert::to_cv(points_from),
        convert::to_cv(points_to));
    Eigen::Matrix3f transform_eigen;
    cv::cv2eigen(transform_cv, transform_eigen);
    return transform_eigen;
}