OpenCV: rotating video frames - C++

I am trying to rotate video frames captured using libcamera-vid from libcamera-apps by an arbitrary number of degrees using OpenCV's warpAffine().
As far as I can tell, the frames are in YUV420 planar format.
The gist of what I'm doing is:
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>

cv::Point2f Ycenter = cv::Point2f(width / 2.0f, height / 2.0f);
cv::Point2f UVcenter = cv::Point2f(width / 4.0f, height / 4.0f);
double rotation = 0;
cv::Mat Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
cv::Mat UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
int Uoffset = height * width;
int Voffset = 5 * height * width / 4;
cv::Size Ysize(height, width);
cv::Size UVsize(height / 2, width / 2);
for (unsigned int count = 0; ; count++)
{
    // ...
    // Wait for frame here
    // ...
    // Acquire buffer in which frame is stored:
    uint8_t* buffer = getFrameBuffer(); // Simplification, but not important
    // Step the rotation by 10 degrees every 10 frames
    double rot = floor(count / 10);
    if (10 * rot != rotation)
    {
        rotation = 10 * rot;
        Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
        UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
    }
    // Wrap the Y, U and V planes of the buffer (no copies)
    cv::Mat Y(Ysize.height, Ysize.width, CV_8UC1, buffer);
    cv::Mat U(UVsize.height, UVsize.width, CV_8UC1, buffer + Uoffset);
    cv::Mat V(UVsize.height, UVsize.width, CV_8UC1, buffer + Voffset);
    cv::warpAffine(Y, Y, Ytransform, Ysize);
    cv::warpAffine(U, U, UVtransform, UVsize);
    cv::warpAffine(V, V, UVtransform, UVsize);
    sendFrameToEncoder(buffer); // Also a simplification, also not important as far as I know
}
Here height and width are the height and width of the video frame.
However, this produces weird, warped images. Something is clearly rotated, but not correctly.
(Example output images for rotation = 0, 10, 20 and 30 degrees not shown.)
So, it clearly isn't working correctly. Does anyone know what's going wrong here?
I'm using OpenCV version 4.5.1 on a Raspberry Pi Zero W running Raspberry Pi OS Bullseye.
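For reference, the memory layout this code assumes is the standard planar YUV420 one: a full-resolution Y plane of width*height bytes followed by quarter-size U and V planes, which matches the Uoffset and Voffset values above. Below is a minimal sketch of wrapping such a buffer (the helper names are mine, and it assumes tightly packed planes with no row padding, which libcamera buffers are not guaranteed to have); note that cv::Size takes (width, height) while cv::Mat takes (rows, cols):

#include <opencv2/core.hpp>
#include <cstdint>

// Hypothetical helper: wrap the three planes of a packed YUV420 buffer
// without copying. Assumes stride == width (no row padding).
struct Yuv420Planes { cv::Mat Y, U, V; };

Yuv420Planes wrapYuv420(uint8_t* buffer, int width, int height)
{
    Yuv420Planes p;
    p.Y = cv::Mat(height,     width,     CV_8UC1, buffer);  // Mat wants (rows, cols)
    p.U = cv::Mat(height / 2, width / 2, CV_8UC1, buffer + width * height);
    p.V = cv::Mat(height / 2, width / 2, CV_8UC1, buffer + width * height * 5 / 4);
    return p;
}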

Related

Create a rotated rectangle above a skew line using OpenCV in C++

I am trying to draw a rectangle whose rotation matches the slope of a line (the rectangle is defined by four points).
(Image: basic rectangle, not shown.)
The white overlay in the image was created with a rectangle. I want to make it rotate and stand above the red rectangle.
Here is my red-rectangle code:
std::vector<cv::Point> imagePoints;
imagePoints.push_back(it->rect_tl());
imagePoints.push_back(it->rect_tr());
imagePoints.push_back(it->rect_br());
imagePoints.push_back(it->rect_bl());
imagePoints.push_back(it->rect_tl());
polylines(cam_view, imagePoints, false, Scalar(0, 0, 255), 2);
Thanks for your help.
I assume you have the red rectangle already given. So I calculate the angle of the top line of the red rectangle and create a new rotated rectangle using cv::RotatedRect.
Here is the example code:
#include <iostream>
#include <cmath>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Function to calculate the angle from 0 to 180° between two lines
float getClockwiseAngle0to180(cv::Point2f x_axis1, cv::Point2f x_axis2, cv::Point2f tl, cv::Point2f tr) {
    float dot = (x_axis2.x - x_axis1.x) * (tr.x - tl.x) + (tr.y - tl.y) * (x_axis2.y - x_axis1.y);
    float det = (x_axis2.x - x_axis1.x) * (tr.y - tl.y) - (x_axis2.y - x_axis1.y) * (tr.x - tl.x);
    float angle = std::atan2(det, dot);
    angle = angle * (180 / (float)CV_PI);
    if (angle < 0) {
        angle = angle + 360;
    }
    if (angle >= 180) {
        angle = angle - 180;
    }
    return angle;
}

int main(int argc, char** argv) {
    cv::Mat test_image(400, 400, CV_8UC3, cv::Scalar(0));
    // You created the red rectangle with some detection algorithm and it seems that
    // you already have the topleft (tl), topright (tr)... coordinates of the red rectangle
    std::vector<cv::Point2f> red_rect_points;
    cv::Point2f tl(200.0, 200.0);
    cv::Point2f tr(300.0, 150.0);
    cv::Point2f br(350.0, 220.0);
    cv::Point2f bl(250.0, 300.0);
    red_rect_points.push_back(tl);
    red_rect_points.push_back(tr);
    red_rect_points.push_back(br);
    red_rect_points.push_back(bl);
    // Get the angle between the tl and tr point with the given function
    // (passing tr, tl gives the same result modulo 180°)
    float rotation = getClockwiseAngle0to180(cv::Point2f(0, 0), cv::Point2f(1, 0), tr, tl);
    std::cout << rotation << std::endl;
    // Create a new white rectangle with the same rotation angle.
    // Construct it using center, size and angle.
    cv::RotatedRect white_rectangle(cv::Point2f(200, 150), cv::Size2f(80, 50), rotation);
    cv::Point2f white_vertices[4];
    white_rectangle.points(white_vertices);
    // Draw both rectangles
    for (int i = 0; i < 4; ++i) {
        cv::line(test_image, red_rect_points[i], red_rect_points[(i+1)%4], cv::Scalar(0, 0, 255), 1, 8, 0);
        cv::line(test_image, white_vertices[i], white_vertices[(i+1)%4], cv::Scalar(255, 255, 255), 1, 8, 0);
    }
    cv::imshow("Rectangles", test_image);
    cv::waitKey(0);
}

Getting fingerprint orientation using gradient based method

I'm trying to get the orientation image of a fingerprint using the method proposed in this paper.
I tried implementing the steps described in Section 3.1.1 of the paper, but I don't get the desired result.
Here is my OpenCV code:
Mat calculate_orientation(Mat img, Mat &coherence) {
    Mat image = img.clone();
    Mat orient_im = Mat::zeros(image.size(), image.type());
    Mat grad_x, grad_y;
    Sobel(image, grad_x, CV_32F, 1, 0, 3, 1, 0, BORDER_DEFAULT);
    Sobel(image, grad_y, CV_32F, 0, 1, 3, 1, 0, BORDER_DEFAULT);
    // Iterate per BLOCKSIZE and use BLOCKSIZE/2 as the center
    for (int i = BLOCKSIZE/2; i <= image.rows - BLOCKSIZE/2; i += BLOCKSIZE) {
        for (int j = BLOCKSIZE/2; j <= image.cols - BLOCKSIZE/2; j += BLOCKSIZE) {
            // Iterate over each pixel in the block
            float vx = 0.0f, vy = 0.0f, angle;
            // Coherence
            float gx = 0.0f, gy = 0.0f, gxy = 0.0f;
            for (int u = i - BLOCKSIZE/2; u < i + BLOCKSIZE/2; u++) {
                for (int v = j - BLOCKSIZE/2; v < j + BLOCKSIZE/2; v++) {
                    gx = 2 * grad_x.at<float>(u,v) * grad_y.at<float>(u,v);
                    gy = pow(grad_x.at<float>(u,v), 2) - pow(grad_y.at<float>(u,v), 2);
                    vx += gx;
                    vy += gy;
                    gxy += sqrt(pow(gx,2) + pow(gy,2));
                }
            }
            if (vy == 0) {
                angle = 90;
            } else {
                angle = 0.5 * atan(vx/vy) * 180.0f/CV_PI;
            }
            // The angle above is the angle perpendicular to ridge direction
            orient_im.at<float>(i,j) = angle + 90;
            // Coherence
            float coh = sqrt(pow(vx,2) + pow(vy,2))/gxy;
            coherence.at<float>(i,j) = coh;
        }
    }
    return orient_im;
}
(Input and result images not shown.) In the result, the blue lines are orientations with a coherence value above 0.5, and the red lines are orientations with a coherence value below 0.5.
Only around half of the orientations seem right.
I know there are already a few questions about this, but I still haven't gotten the correct results, so pardon me for asking. Any help would be appreciated.
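Not a full answer, but one thing worth checking: descriptions of this gradient-based method usually compute the angle with atan2 over the block sums rather than atan of their ratio, since atan(vx/vy) collapses opposite quadrants together, which could explain roughly half the orientations coming out wrong. A hedged sketch of just that step, using the same vx/vy sums as the code above:

#include <cmath>
#include <opencv2/core/core.hpp>

// Sketch: block orientation from vx = sum(2*Gx*Gy) and vy = sum(Gx^2 - Gy^2),
// using atan2 so the signs of both sums (the full quadrant) are kept.
static float blockOrientationDegrees(float vx, float vy)
{
    float angle = 0.5f * std::atan2(vx, vy) * 180.0f / static_cast<float>(CV_PI);
    return angle + 90.0f; // perpendicular to the averaged gradient = ridge direction
}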

Fill circle with gradient

I want to fill a circle with a gradient color, like the example shown below, and I can't figure out an easy way to do it.
I can draw several concentric circles, but the transitions between them are visible:
cv::circle(img, center, circle_radius * 1.5, cv::Scalar(1.0, 1.0, 0.3), CV_FILLED);
cv::circle(img, center, circle_radius * 1.2, cv::Scalar(1.0, 1.0, 0.6), CV_FILLED);
cv::circle(img, center, circle_radius, cv::Scalar(1.0, 1.0, 1.0), CV_FILLED);
All you need to do is create a function which takes in a central point and a new point, calculates the distance, and returns a grayscale value for that point. Alternatively you could just return the distance, store the distance at that point, and then scale the whole thing later with cv::normalize().
So let's say you have the central point as (50, 50) in a (100, 100) image. Here's pseudocode for what you'd want to do:
function euclideanDistance(center, point)   # returns a float
    return sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )

center = (50, 50)
rows = 100
cols = 100
gradient = new Mat(rows, cols)   # should be of type float
for row < rows:
    for col < cols:
        point = (col, row)
        gradient[row, col] = euclideanDistance(center, point)
normalize(gradient, 0, 255, NORM_MINMAX, uint8)
gradient = 255 - gradient
Note the steps here:
1. Create the Euclidean distance function to calculate distance.
2. Create a floating-point matrix to hold the distance values.
3. Loop through all rows and columns and assign a distance value.
4. Normalize to the range you want (you could stick with a float here instead of casting to uint8, but you do you).
5. Invert the gradient, since larger distances will be brighter---but you want the opposite.
Now, in your exact example image the gradient sits inside a circle, whereas this method makes the whole image a gradient. If you want a specific radius, just modify the Euclidean distance function so that beyond some distance it returns 0 (the value at the center of the circle, which will eventually be flipped to white):
function euclideanDistance(center, point, radius)   # returns a float
    distance = sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )
    if distance > radius:
        return 0
    else:
        return distance
Here is the above in actual C++ code:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cmath>

float euclidean_distance(cv::Point center, cv::Point point, int radius){
    float distance = std::sqrt(
        std::pow(center.x - point.x, 2) + std::pow(center.y - point.y, 2));
    if (distance > radius) return 0;
    return distance;
}

int main(){
    int h = 400;
    int w = 400;
    int radius = 100;
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_32F);
    cv::Point center(150, 200);
    cv::Point point;
    for(int row = 0; row < h; ++row){
        for(int col = 0; col < w; ++col){
            point.x = col;
            point.y = row;
            gradient.at<float>(row, col) = euclidean_distance(center, point, radius);
        }
    }
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_not(gradient, gradient);
    cv::imshow("gradient", gradient);
    cv::waitKey();
}
A completely different method (though achieving the same thing) is to use distanceTransform(). This function maps each pixel inside a white blob to its distance from the nearest black pixel, producing the same kind of grayscale ramp we built above. This code is more concise and does the same thing. However, it works on arbitrary shapes, not just circles, so that's cool.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(){
    int h = 400;
    int w = 400;
    cv::Point center(150, 200);
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_8U);
    cv::rectangle(gradient, cv::Point(115, 100), cv::Point(270, 350), cv::Scalar(255), -1, 8);
    cv::Mat gradient_padding;
    cv::bitwise_not(gradient, gradient_padding);
    // CV_DIST_L2 / CV_DIST_MASK_PRECISE in OpenCV 2.x
    cv::distanceTransform(gradient, gradient, cv::DIST_L2, cv::DIST_MASK_PRECISE);
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_or(gradient, gradient_padding, gradient);
    cv::imshow("gradient-distxform.png", gradient);
    cv::waitKey();
}
You have to draw many circles, where the color of each circle depends on its distance from the center. Here is a simple example:
void printGradient(cv::Mat &_input, const cv::Point &_center, const double radius)
{
    cv::circle(_input, _center, radius, cv::Scalar(0, 0, 0), -1);
    for (double i = 1; i < radius; i++)
    {
        const int color = 255 - int(i/radius * 255); // or some other color calculation
        cv::circle(_input, _center, i, cv::Scalar(color, color, color), 2);
    }
}
And the result (image not shown).
Another approach not mentioned yet is to precompute a circle gradient image (with one of the approaches mentioned, like the accepted solution) and use affine warping with linear interpolation to create other such circles of different sizes. This can be faster if warping and interpolation are optimized and perhaps hardware-accelerated.
The result might be slightly less than perfect.
I once used this to create an individual vignetting-mask circle for each frame in endoscopic imaging. It was faster than computing the distances "manually".
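A rough sketch of that idea (the function name is mine, not from OpenCV): build one gradient circle at a reference radius using any method above, then rescale it with linear interpolation for other radii:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch: rescale a precomputed gradient-circle template to a new radius.
// `tmpl` is a square image containing one gradient circle of templateRadius.
cv::Mat scaledGradientCircle(const cv::Mat& tmpl, int templateRadius, int newRadius)
{
    double scale = static_cast<double>(newRadius) / templateRadius;
    cv::Mat dst;
    // Linear interpolation keeps the ramp smooth; cv::INTER_AREA may look
    // better when shrinking by a large factor.
    cv::resize(tmpl, dst, cv::Size(), scale, scale, cv::INTER_LINEAR);
    return dst;
}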

AR with OpenCV & OpenGL

Here's the problem: I wrote code to display the OpenGL teapot on a sheet of paper with a drawing on it. For this, I track the 4 corners of the paper (using SURF detection and matching followed by computing the homography matrix, then a moving average of the corner positions to reduce jitter). The corner coordinates are used to compute the intrinsic and extrinsic matrices of the camera (using calibrateCamera() and solvePnP(), respectively). The rotation matrix is then computed using Rodrigues(). Afterwards, I compute the rotation angles using decomposeProjectionMatrix(). Here's the OpenCV part of the code:
...
objPoints.push_back(objCorners);
scenePoints.push_back(sceneCorners);
calibrateCamera(objPoints, scenePoints, Size(640,480), camMtx, distortCoeff, RVecs, tVecs);
solvePnP(objCorners, sceneCorners, camMtx, distortCoeff, RVec, tVec);
Rodrigues(RVec, rotMtx);
getAngles(rotMtx, rotAngles);
objCorners are the corner coordinates in the template image ([1 1], [img width 1], [img width img height], [1 img height]). sceneCorners are the corner coordinates in the webcam frame, computed using the homography matrix. The function getAngles() is as follows:
void getAngles(Mat &rotCamMtx, Vec3d &angles)
{
    Mat camMtx, rotMtx, transVec, rotMtxX, rotMtxY, rotMtxZ;
    double *r = rotCamMtx.ptr<double>();
    double projMtx[12] = {r[0], r[1], r[2], 0,
                          r[3], r[4], r[5], 0,
                          r[6], r[7], r[8], 0};
    decomposeProjectionMatrix(Mat(3, 4, CV_64FC1, projMtx), camMtx, rotMtx, transVec,
                              rotMtxX, rotMtxY, rotMtxZ, angles);
}
Then I set the elements of the OpenGL model view matrix as follows:
modelViewMat[0] = 1.0;
modelViewMat[1] = 0.0;
modelViewMat[2] = 0.0;
modelViewMat[3] = 0.0;
modelViewMat[4] = 0.0;
modelViewMat[5] = 1.0;
modelViewMat[6] = 0.0;
modelViewMat[7] = 0.0;
modelViewMat[8] = 0.0;
modelViewMat[9] = 0.0;
modelViewMat[10] = 1.0;
modelViewMat[11] = 0.0;
modelViewMat[12] = 2*matCenter.x/639 - 641.0/639;
modelViewMat[13] = 481.0/479 - 2*matCenter.y/479;
modelViewMat[14] = -0.25;
modelViewMat[15] = 1.0;
matCenter is the center coordinate of the paper, obtained by averaging the 4 corners. The values in modelViewMat[12] and modelViewMat[13] come from mapping the pixel ranges ([1 640], [1 480]) to ([-1 1], [1 -1]); for example, x' = 2(x - 1)/639 - 1 = 2x/639 - 641/639. The OpenGL part of the code:
...
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadMatrixd(modelViewMat);
glRotated(-45, 1.0, 0.0, 0.0);
glRotated(rotAngles[2], 0.0, 1.0, 0.0);
glShadeModel(GL_SMOOTH);
glColor3f(1.0, 1.0, 1.0);
glutSolidTeapot(0.3);
I rotated the teapot -45 degrees around the x-axis to make it appear to be "sitting" on the paper.
The result is this: if I translate the paper on the desk, the location of the teapot on the paper is more or less correct (it stays on the same spot). If I rotate the paper, the teapot follows the rotation correctly (around the y-axis), but its location is no longer correct. The question is: how do I "pin" the teapot to the same spot on the paper? I've tried using the result of Rodrigues() and solvePnP() directly in the OpenGL model view matrix (as suggested in OpenCV + OpenGL: proper camera pose using solvePnP), but the result is incorrect.
I solved this problem several days ago, based on the code from http://blog.yarrago.com/2011/08/introduction-to-augmented-reality.html. To display the 3D object correctly, the OpenGL projection matrix is set first, followed by the OpenGL model view matrix. The elements of the projection matrix are computed from the intrinsic matrix of the camera as follows:
calibrateCamera(objPoints, scenePoints, Size(640,480), camMtx, distortCoeff, RVecs, tVecs);
...
projectionMat[0] = 2*camMtx.at<double>(0,0)/frameW;
projectionMat[1] = 0;
projectionMat[2] = 0;
projectionMat[3] = 0;
projectionMat[4] = 0;
projectionMat[5] = 2*camMtx.at<double>(1,1)/frameH;
projectionMat[6] = 0;
projectionMat[7] = 0;
projectionMat[8] = 1 - 2*camMtx.at<double>(0,2)/frameW;
projectionMat[9] = -1 + (2*camMtx.at<double>(1,2) + 2)/frameH;
projectionMat[10] = (zNear + zFar)/(zNear - zFar);
projectionMat[11] = -1;
projectionMat[12] = 0;
projectionMat[13] = 0;
projectionMat[14] = 2*zNear*zFar/(zNear - zFar);
projectionMat[15] = 0;
frameW and frameH are 640 and 480, respectively. zNear is 0.1 and zFar is 100.
The elements of the OpenGL model view matrix are computed from the rotation matrix and the translation vector (obtained from solvePnP() and Rodrigues()). To get a correct positioning of the 3D object, the translation vector needs to be transformed before computing the model view matrix.
// Offset value to move the translation vector
double offsetC[3][1] = {{424}, {600}, {0}};
Mat offset(3, 1, CV_64F, offsetC);
...
solvePnP(objCorners, sceneCorners, camMtx, distortCoeff, RVec, tVec);
Rodrigues(RVec, rotMtx);
tVec = tVec + rotMtx*offset; // Move tVec to refer to the center of the paper
tVec = tVec / 250.0;         // Convert pixel coordinates to OpenGL world coordinates
...
modelviewMat[0] = rotMtx.at<double>(0,0);
modelviewMat[1] = -rotMtx.at<double>(1,0);
modelviewMat[2] = -rotMtx.at<double>(2,0);
modelviewMat[3] = 0;
modelviewMat[4] = rotMtx.at<double>(0,1);
modelviewMat[5] = -rotMtx.at<double>(1,1);
modelviewMat[6] = -rotMtx.at<double>(2,1);
modelviewMat[7] = 0;
modelviewMat[8] = rotMtx.at<double>(0,2);
modelviewMat[9] = -rotMtx.at<double>(1,2);
modelviewMat[10] = -rotMtx.at<double>(2,2);
modelviewMat[11] = 0;
modelviewMat[12] = tVec.at<double>(0,0);
modelviewMat[13] = -tVec.at<double>(1,0);
modelviewMat[14] = -tVec.at<double>(2,0);
modelviewMat[15] = 1;
The numerical values in offsetC are the pixel coordinates of the center of the paper. The OpenGL part of the code is then:
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMat);
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelviewMat);
glRotatef(90, -1.0, 0.0, 0.0); // Rotate the teapot first so that it will be displayed correctly on the paper
glutSolidTeapot(1.0);
One important thing for the correct positioning of the teapot is the transformation of tVec.

How to implement MATLAB's imrotate in OpenCV?

For rotation in OpenCV, I used the following code:
Mat rotate(Mat src, double angle)
{
    Mat dst;
    Point2f pt(src.cols/2., src.rows/2.);
    Mat r = getRotationMatrix2D(pt, angle, 1.0);
    warpAffine(src, dst, r, Size(src.cols, src.rows));
    return dst;
}
And in MATLAB I used:
im = imrotate(img, angle, 'bilinear', 'crop');
But the results are not equivalent.
How can I fix that, or otherwise implement MATLAB's imrotate in C++?
I got this code around a year ago. I don't remember where I got it from, so I don't take any credit for it, but I modified it a little.
The result does not exactly match MATLAB's, but it is good enough to use.
cv::Mat imRotate(const cv::Mat source, double angle) {
    cv::Mat dst;
    // Special case: multiples of 360° need no work
    if (std::fmod(angle, 360.0) == 0.0)
        dst = source;
    else {
        cv::Point2f center(source.cols / 2.0F, source.rows / 2.0F);
        cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
        // determine bounding rectangle
        cv::Rect bbox = cv::RotatedRect(center, source.size(), angle).boundingRect();
        // adjust transformation matrix
        rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
        rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
        cv::warpAffine(source, dst, rot, bbox.size(), cv::INTER_LINEAR);
    }
    return dst;
}
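One likely source of the remaining mismatch (my guess, not verified against every MATLAB version): imrotate rotates about the image center at ((cols+1)/2, (rows+1)/2) in MATLAB's 1-based coordinates, which is ((cols-1)/2, (rows-1)/2) in OpenCV's 0-based coordinates, half a pixel away from the (cols/2, rows/2) used above. A sketch of the 'crop' behavior with that center convention:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch of imrotate(img, angle, 'bilinear', 'crop'): output has the same
// size as the input, and the rotation center matches MATLAB's convention.
cv::Mat imRotateCrop(const cv::Mat& src, double angle)
{
    cv::Point2f center((src.cols - 1) / 2.0f, (src.rows - 1) / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    cv::Mat dst;
    cv::warpAffine(src, dst, rot, src.size(), cv::INTER_LINEAR,
                   cv::BORDER_CONSTANT, cv::Scalar());
    return dst;
}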