For rotation in OpenCV, I used the following code:
Mat rotate(Mat src, double angle)
{
    Mat dst;
    Point2f pt(src.cols/2., src.rows/2.);
    Mat r = getRotationMatrix2D(pt, angle, 1.0);
    warpAffine(src, dst, r, Size(src.cols, src.rows));
    return dst;
}
and in MATLAB I used:
im = imrotate(img, angle, 'bilinear', 'crop');
but the results are not equivalent.
How can I fix this, or implement MATLAB's imrotate in C++?
I got this code around a year ago, and I do not remember where, so I do not take any credit for it; I only modified it a little. The result does not exactly match MATLAB's, but it is good enough to use.
cv::Mat imRotate(const cv::Mat source, double angle) {
    cv::Mat dst;
    // Special case: multiples of 360 degrees need no rotation
    if (std::fmod(angle, 360.0) == 0.0)
        dst = source;
    else {
        cv::Point2f center(source.cols / 2.0F, source.rows / 2.0F);
        cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
        // determine the bounding rectangle of the rotated image
        cv::Rect bbox = cv::RotatedRect(center, source.size(), angle).boundingRect();
        // adjust the transformation matrix so the result is centered in the bounding box
        rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
        rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
        cv::warpAffine(source, dst, rot, bbox.size(), cv::INTER_LINEAR);
    }
    return dst;
}
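Note that this expands the canvas to fit the rotated image, which corresponds to MATLAB's 'loose' option rather than the 'crop' option used in the question. For 'crop' behavior, here is a minimal sketch of that variant; the half-pixel center offset is an assumption about MATLAB's convention (imrotate rotates about the geometric center of the pixel grid), not a verified exact match:

cv::Mat imRotateCrop(const cv::Mat& source, double angle)
{
    // 'crop' keeps the output the same size as the input, so no
    // bounding-box adjustment is needed. The (cols - 1) / 2 center is
    // half a pixel off from cols / 2 and is assumed to match MATLAB.
    cv::Point2f center((source.cols - 1) / 2.0f, (source.rows - 1) / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    cv::Mat dst;
    cv::warpAffine(source, dst, rot, source.size(), cv::INTER_LINEAR);
    return dst;
}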
I am trying to rotate video frames captured using libcamera-vid (from libcamera-apps) by an arbitrary number of degrees using OpenCV's warpAffine().
The frames are, as far as I can tell, in planar YUV420 format.
The gist of what I'm doing is:
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>
cv::Point2f Ycenter = cv::Point2f(width / 2.0f, height / 2.0f);
cv::Point2f UVcenter = cv::Point2f(width / 4.0f, height / 4.0f);
double rotation = 0;
cv::Mat Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
cv::Mat UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
int Uoffset = height * width;
int Voffset = 5 * height * width / 4;
cv::Size Ysize(height, width);
cv::Size UVsize(height / 2, width / 2);
for (unsigned int count = 0; ; count++)
{
    // ...
    // Wait for frame here
    // ...
    // Acquire buffer in which frame is stored:
    uint8_t* buffer = getFrameBuffer(); // Simplification, but not important
    double rot = floor(count / 10);
    if (10 * rot != rotation)
    {
        rotation = 10 * rot;
        Ytransform = cv::getRotationMatrix2D(Ycenter, rotation, 1.0);
        UVtransform = cv::getRotationMatrix2D(UVcenter, rotation, 1.0);
    }
    cv::Mat Y(Ysize.height, Ysize.width, CV_8UC1, buffer);
    cv::Mat U(UVsize.height, UVsize.width, CV_8UC1, buffer + Uoffset);
    cv::Mat V(UVsize.height, UVsize.width, CV_8UC1, buffer + Voffset);
    cv::warpAffine(Y, Y, Ytransform, Ysize);
    cv::warpAffine(U, U, UVtransform, UVsize);
    cv::warpAffine(V, V, UVtransform, UVsize);
    sendFrameToEncoder(buffer); // Also a simplification, also not important as far as I know
}
Where height, width are the height and width of the video frame.
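For reference, a minimal sketch of the planar YUV420 (I420) buffer layout these offsets assume; the layout itself is standard, but whether libcamera-vid pads its rows is an assumption not verified here:

// I420 planar layout for a width x height frame (no row padding assumed):
//   Y plane: width * height bytes,           at offset 0
//   U plane: (width/2) * (height/2) bytes,   at offset width * height
//   V plane: (width/2) * (height/2) bytes,   at offset 5 * width * height / 4
// Total buffer size: 3 * width * height / 2 bytes.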
However, this produces weird, warped images. Something is clearly rotated, but not correctly.
(Output images omitted: example frames for rotation = 0, 10, 20, and 30 degrees.)
So, it clearly isn't working correctly. Does anyone know what's going wrong here?
I'm using OpenCV version 4.5.1 on a Raspberry Pi Zero W running Raspberry Pi OS Bullseye.
I want to fill a circle with a gradient color, as shown below, but I can't figure out an easy way to do that.
I can draw more circles, but the transitions between them are visible:
cv::circle(img, center, circle_radius * 1.5, cv::Scalar(1.0, 1.0, 0.3), CV_FILLED);
cv::circle(img, center, circle_radius * 1.2, cv::Scalar(1.0, 1.0, 0.6), CV_FILLED);
cv::circle(img, center, circle_radius, cv::Scalar(1.0, 1.0, 1.0), CV_FILLED);
All you need to do is create a function which takes in a central point and a new point, calculates the distance, and returns a grayscale value for that point. Alternatively you could just return the distance, store the distance at that point, and then scale the whole thing later with cv::normalize().
So let's say you have the central point as (50, 50) in a (100, 100) image. Here's pseudocode for what you'd want to do:
function euclideanDistance(center, point)   # returns a float
    return sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )

center = (50, 50)
rows = 100
cols = 100
gradient = new Mat(rows, cols)   # should be of type float

for row < rows:
    for col < cols:
        point = (col, row)
        gradient[row, col] = euclideanDistance(center, point)

normalize(gradient, 0, 255, NORM_MINMAX, uint8)
gradient = 255 - gradient
Note the steps here:
Create the Euclidean distance function to calculate distance
Create a floating point matrix to hold the distance values
Loop through all rows and columns and assign a distance value
Normalize to the range you want (you could stick with a float here instead of casting to uint8, but you do you)
Flip the gradient, since distances farther away will be brighter, but you want the opposite.
Now, your exact example image has the gradient only inside a circle, whereas this method makes the whole image a gradient. If you want a specific radius, just modify the function which calculates the Euclidean distance: if the distance is beyond the radius, return 0 (the value at the center of the circle, which will eventually be flipped to white):
function euclideanDistance(center, point, radius)   # returns a float
    distance = sqrt( (center.x - point.x)^2 + (center.y - point.y)^2 )
    if distance > radius:
        return 0
    else:
        return distance
Here is the above in actual C++ code:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cmath>

float euclidean_distance(cv::Point center, cv::Point point, int radius){
    float distance = std::sqrt(
        std::pow(center.x - point.x, 2) + std::pow(center.y - point.y, 2));
    if (distance > radius) return 0;
    return distance;
}

int main(){
    int h = 400;
    int w = 400;
    int radius = 100;
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_32F);
    cv::Point center(150, 200);
    cv::Point point;
    for(int row = 0; row < h; ++row){
        for(int col = 0; col < w; ++col){
            point.x = col;
            point.y = row;
            gradient.at<float>(row, col) = euclidean_distance(center, point, radius);
        }
    }
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_not(gradient, gradient);
    cv::imshow("gradient", gradient);
    cv::waitKey();
}
A completely different method (though producing the same result) is to use cv::distanceTransform(). For each pixel inside a white blob, it computes the distance to the nearest black (zero) pixel, which we then map to a grayscale value just as above. This code is more concise and does the same thing. However, it works on arbitrary shapes, not just circles, so that's cool.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(){
    int h = 400;
    int w = 400;
    int radius = 100;
    cv::Point center(150, 200);
    cv::Mat gradient = cv::Mat::zeros(h, w, CV_8U);
    cv::rectangle(gradient, cv::Point(115, 100), cv::Point(270, 350), cv::Scalar(255), -1, 8);
    cv::Mat gradient_padding;
    cv::bitwise_not(gradient, gradient_padding);
    // cv::DIST_L2 / cv::DIST_MASK_PRECISE in OpenCV 3+ (formerly CV_DIST_L2 / CV_DIST_MASK_PRECISE)
    cv::distanceTransform(gradient, gradient, cv::DIST_L2, cv::DIST_MASK_PRECISE);
    cv::normalize(gradient, gradient, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::bitwise_or(gradient, gradient_padding, gradient);
    cv::imshow("gradient-distxform.png", gradient);
    cv::waitKey();
}
You have to draw many circles, where the color of each circle depends on its distance from the center. Here is a simple example:
void printGradient(cv::Mat &_input, const cv::Point &_center, const double radius)
{
    cv::circle(_input, _center, radius, cv::Scalar(0, 0, 0), -1);
    for (double i = 1; i < radius; i += 1)
    {
        const int color = 255 - int(i / radius * 255); // or some other color calculation
        cv::circle(_input, _center, i, cv::Scalar(color, color, color), 2);
    }
}
And the result:
Another approach not mentioned yet is to precompute a single circle-gradient image (with one of the mentioned approaches, such as the accepted solution) and then create other such circles (of different sizes) from it with affine warping and linear interpolation. This can be faster if warping and interpolation are optimized, and perhaps hardware-accelerated.
The result might be slightly worse than a perfect gradient.
I once used this to create an individual vignetting-mask circle for each frame in endoscopic imaging. It was faster than computing the distances "manually".
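A minimal sketch of this idea, assuming a precomputed square gradient template like the one produced by the accepted answer (the names here are illustrative):

#include <opencv2/opencv.hpp>

// Scale a precomputed radial-gradient template to an arbitrary diameter.
// For pure scaling, cv::resize with linear interpolation is equivalent to
// an affine warp and is typically well optimized.
cv::Mat scaledGradient(const cv::Mat& gradientTemplate, int diameter)
{
    cv::Mat out;
    cv::resize(gradientTemplate, out, cv::Size(diameter, diameter),
               0, 0, cv::INTER_LINEAR);
    return out;
}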
I want to get the new location of a cv::Rect (ROI) after rotating the image using the following code:
cv::Point2f center(image.cols/2.0, image.rows/2.0);
cv::Rect ROI = cv::Rect(100, 200, 50, 100);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
cv::Rect bbox = cv::RotatedRect(center, image.size(), angle).boundingRect();
rot.at<double>(0, 2) += bbox.width/2.0 - center.x;
rot.at<double>(1, 2) += bbox.height/2.0 - center.y;
cv::warpAffine(image, image, rot, bbox.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT,
               cv::Scalar(255, 255, 255));
How can I do it?
Since you have the rotation matrix, you can rotate the ROI rectangle using cv::transform function. First of all, you would need an array of points of that rectangle.
std::vector<cv::Point2f> roi_points = {
    cv::Point2f(ROI.x, ROI.y),
    cv::Point2f(ROI.x + ROI.width, ROI.y),
    cv::Point2f(ROI.x + ROI.width, ROI.y + ROI.height),
    cv::Point2f(ROI.x, ROI.y + ROI.height)
};
Then, you can use cv::transform:
std::vector<cv::Point2f> rot_roi_points;
cv::transform(roi_points, rot_roi_points, rot);
This way, rot_roi_points holds points of the transformed rectangle.
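If you need an axis-aligned cv::Rect again (for example, to crop from the rotated image), you can take the bounding box of the transformed corners; a small sketch:

// The transformed corners generally form a rotated quadrilateral;
// cv::boundingRect gives the smallest upright rectangle containing them.
cv::Rect new_roi = cv::boundingRect(rot_roi_points);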
To get the new location of a cv::Rect (ROI), you have to transform each of its corners using the following function:
cv::Point2f Convert(const cv::Point2f &p, const cv::Mat &t)
{
    // t is the 2x3 affine matrix from cv::getRotationMatrix2D
    float x = p.x * t.at<double>(0, 0) + p.y * t.at<double>(0, 1) + t.at<double>(0, 2);
    float y = p.x * t.at<double>(1, 0) + p.y * t.at<double>(1, 1) + t.at<double>(1, 2);
    return cv::Point2f(x, y);
}
The transformation matrix is the same as you used for image rotation.
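For example, a minimal usage sketch with the matrix from the question:

// Transform the top-left corner of the ROI with the same matrix
// that was passed to cv::warpAffine.
cv::Point2f newTopLeft = Convert(cv::Point2f(ROI.x, ROI.y), rot);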
I have the following image in a Mat:
The black rectangle is drawn using the class cv::RotatedRect. How can I achieve the following result?
EDIT:
I managed to do it with the following code:
cv::Mat src, dst;
float angle, x, y;
cv::Mat imgRotated = cv::getRotationMatrix2D(cv::Point(50, 50), angle, 1.0);
cv::warpAffine(src, dst, imgRotated, cv::Size(x, y));
cv::imshow("image", dst);
You can use cv::warpAffine() together with cv::getRotationMatrix2D().
There is an example here.
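Since the linked example is not reproduced here, a minimal self-contained sketch of that combination (the file name and angle are placeholders):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png"); // hypothetical input image
    if (src.empty()) return 1;

    double angle = 30.0; // degrees, counterclockwise
    cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);

    cv::Mat dst;
    cv::warpAffine(src, dst, rot, src.size());
    cv::imshow("rotated", dst);
    cv::waitKey();
    return 0;
}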
I am currently working on a 3D reconstruction from X-ray images, and therefore I need to stereo-rectify images of two views before I can match features with the help of the epilines. I am using OpenCV 2.4 with C++.
For this purpose I have a set of pairs of X-ray images (cone-beam X-ray images, not real cameras with distortion parameters or a real focal length): one from the anteroposterior view (looking directly at the chest), and one from the lateral view (looking at the chest from the side). I know some parameters, such as a virtual focal length (equal for both views), and the images have a resolution of 512x512 px, hence the principal point is at (255, 255) for both views. I also know that the cameras are perpendicular to each other. From this information I derived a rotation matrix R and a translation vector t (both verified with the help of a 3D plot in MATLAB).
Problem: R and t are actually enough for a stereo rectification in OpenCV, but the resulting images after rectification are black. Googling led me to reports of a bug in stereoRectify, but I doubt that is the cause, since I can run the OpenCV stereo rectification example and it works. When trying a stereo rectification in MATLAB, I at least see some distorted rectification results.
Here is my C++ code:
float camera_matrix_ap_data[] = {1207*2.0, 0.0, 255.0,
                                 0.0, 1207*2, 255.0,
                                 0.0, 0.0, 1.0};
cv::Mat camera_matrix_ap(3, 3, CV_64F, camera_matrix_ap_data);

float camera_matrix_lat_data[] = {1207*2, 0.0, 255.0,
                                  0.0, 1207*2, 255.0,
                                  0.0, 0.0, 1.0};
cv::Mat camera_matrix_lat(3, 3, CV_64F, camera_matrix_lat_data);

///
/// @brief the distortion matrices
///
cv::Mat distortion_ap(4, 1, CV_64F, 0.0);
cv::Mat distortion_lat(4, 1, CV_64F, 0.0);

///
/// @brief Translation and Rotation matrices
///
float R_data[] = {0.0, 0.0, 1.0,
                  0.0, 1.0, 0.0,
                  -1.0, 0.0, 0.0};
float T_data[] = {-(1207.0*2 + 255), 0.0, 1207.0*2 + 255};
cv::Mat R(3, 3, CV_64F, R_data);
cv::Mat T(3, 1, CV_64F, T_data);
for (int i = 1; i <= 20; i++) {
    std::stringstream filenameAP_tmp;
    std::stringstream filenameLAT_tmp;
    filenameAP_tmp << "imageAP" << i << ".jpg";
    filenameAP = filenameAP_tmp.str();
    filenameLAT_tmp << "imageLAT" << i << ".jpg";
    filenameLAT = filenameLAT_tmp.str();

    rectimg_ap = cv::imread(filenameAP);
    rectimg_lat = cv::imread(filenameLAT);
    // Yes, these images are grayscale

    /// Experimental
    /// Stereo rectify both images
    cv::Mat R1(3, 3, CV_64F);
    cv::Mat R2(3, 3, CV_64F);
    cv::Mat P1(3, 4, CV_64F);
    cv::Mat P2(3, 4, CV_64F);
    cv::Mat Q(4, 4, CV_64F);
    cv::Rect validRoi[2];

    // buggy?
    cv::stereoRectify(camera_matrix_ap, distortion_ap, camera_matrix_lat, distortion_lat,
                      rectimg_ap.size(), R, T, R1, R2, P1, P2, Q,
                      CALIB_ZERO_DISPARITY, 1, rectimg_ap.size(),
                      &validRoi[0], &validRoi[1]);

    // Maps for the AP view
    cv::Mat map1x(rectimg_ap.size(), CV_32FC1, 255.0);
    cv::Mat map1y(rectimg_ap.size(), CV_32FC1, 255.0);
    // Maps for the LAT view
    cv::Mat map2x(rectimg_ap.size(), CV_32FC1, 255.0);
    cv::Mat map2y(rectimg_ap.size(), CV_32FC1, 255.0);

    cv::initUndistortRectifyMap(camera_matrix_ap, distortion_ap, R1, P1, rectimg_ap.size(), CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(camera_matrix_lat, distortion_lat, R2, P2, rectimg_lat.size(), CV_32FC1, map2x, map2y);

    cv::Mat tmp1, tmp2;
    cv::remap(rectimg_ap, tmp1, map1x, map1y, INTER_LINEAR);
    cv::remap(rectimg_lat, tmp2, map2x, map2y, INTER_LINEAR);

    //findHomography(rectimg_ap, rectimg_lat, CV_RANSAC);
}
So I am wondering what is wrong with this code or my matrices, given that the rectified images after remap are completely black. Is there a difference in the coordinate-system axes between OpenCV and MATLAB? From what I have read, in OpenCV the z-axis points toward the image plane, and the same holds for MATLAB.
I'd be glad if someone could help; I have been stuck on this problem for weeks now. Thank you very much!
Try changing the "float" variable types to "double". CV_64F corresponds to a double, not a float, since it is 8 bytes (= 64 bits). I tried your code with my own matrix values, and that did the trick.
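For illustration, the first camera matrix from the question with that fix applied (only the element type changes):

// A cv::Mat declared with CV_64F must wrap a double array; a float array
// is reinterpreted incorrectly and fills the matrix with garbage.
double camera_matrix_ap_data[] = {1207*2.0, 0.0, 255.0,
                                  0.0, 1207*2.0, 255.0,
                                  0.0, 0.0, 1.0};
cv::Mat camera_matrix_ap(3, 3, CV_64F, camera_matrix_ap_data);

The same change applies to camera_matrix_lat_data, R_data, and T_data.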