OpenCV: algorithm for simple image rotation and reduction - C++

I have tried image rotation and reduction (JPEG) with getRotationMatrix2D(center, angle, scale);
and warpAffine(image1, image3, rotation, image3.size());
and I got the result I wanted (image as below):
// set up two transforms: a plain 90-degree rotation, and the same rotation at half scale
Point center = Point(image1.cols / 2, image1.rows / 2);
Point center1 = Point(image1.cols / 2, image1.rows / 2);
double angle = 90.0;
double scale = 1;
double angle1 = 90.0;
double scale1 = 0.5;
Mat rotation = getRotationMatrix2D(center, angle, scale);
Mat rotation1 = getRotationMatrix2D(center1, angle1, scale1);
but I want to learn a simple rotation and reduction algorithm (simple enough for a beginner like me), without using any library,
to get the same result.
After searching for various solutions, I ended up with this:
https://gamedev.stackexchange.com/questions/67613/how-can-i-rotate-a-bitmap-without-d3d-or-opengl
Can anyone break down, bit by bit, the simple linear algebra behind my pseudocode?
EDIT:
Reduction code
void reduction(Mat imgC)
{
// work on a copy: reading and writing the same Mat in place corrupts pixels you still need to read
Mat src = imgC.clone();
double scale = 125.0 / 256.0; // note: 125 / 256 in integer arithmetic is 0
for (int r = 0;r < imgC.rows;r++)
{
for (int c = 0;c < imgC.cols;c++)
{
int new_x = (int)(c * scale);
int new_y = (int)(r * scale);
imgC.at<uchar>(r, c) = src.at<uchar>(new_y, new_x);
}
}
}
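Instead of patching the in-place loop, you can also write the reduction destination-first: create a new, smaller Mat and, for each output pixel, look up the input pixel it comes from. A minimal sketch, assuming a single-channel 8-bit image (the function name and signature are illustrative):
// destination-driven nearest-neighbour reduction: every output pixel gets a value,
// and the source image is never overwritten while it is still being read
Mat reduce(const Mat& src, double scale) // scale < 1, e.g. 125.0 / 256.0
{
Mat dst((int)(src.rows * scale), (int)(src.cols * scale), src.type());
for (int r = 0; r < dst.rows; r++)
{
for (int c = 0; c < dst.cols; c++)
{
dst.at<uchar>(r, c) = src.at<uchar>((int)(r / scale), (int)(c / scale));
}
}
return dst;
}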

In this example I have loaded two images: one in grayscale and the other in color. Both are the same image, so you can easily see how to handle rotation with the mathematical equations. Please see this example, which is very easy to understand; in the same manner you can add scaling and reduction. Each point is converted according to the rotation equations and the color value is set at the new location. Here is the code:
#include <iostream>
#include <string>
#include "opencv/highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
using namespace std;
using namespace cv;
#define PIPI 3.14159265
int main()
{
Mat img = imread("C:/Users/dell2/Desktop/DSC00587.JPG",0);//loading gray scale image
Mat imgC = imread("C:/Users/dell2/Desktop/DSC00587.JPG",1);//loading color image
Mat rotC(imgC.cols, imgC.rows, imgC.type()); // Mat(rows, cols, ...): dimensions are swapped on purpose for a 90-degree rotation
rotC = Scalar(0,0,0);
Mat rotG(img.cols, img.rows, img.type());
rotG = Scalar(0,0,0);
float angle = 90.0 * PIPI / 180.0;
for(int r=0;r<imgC.rows;r++)
{
for(int c=0;c<imgC.cols;c++)
{
float new_px = c * cos(angle) - r * sin(angle); // standard 2D rotation of the point (c, r) about the origin
float new_py = c * sin(angle) + r * cos(angle);
Point pt((int)-new_px, (int)new_py); // for a 90-degree rotation new_px is never positive, so negate it to get a valid column index
//color image
rotC.at<Vec3b>(pt) = imgC.at<Vec3b>(r,c);//assign color value at new location from original image
//gray scale image
rotG.at<uchar>(pt) = img.at<uchar>(r,c);//assign color value at new location from original image
}
}
imshow("color",rotC);
imshow("gray",rotG);
waitKey(0);
return 0;
}
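A caveat on the forward mapping above: as soon as you add scaling, or rotate by an angle that is not a multiple of 90 degrees, some destination pixels are never written and show up as holes. The usual fix is to loop over the destination image instead and map each pixel back into the source with the inverse transform. Here is a minimal sketch of that idea for a 3-channel 8-bit image, combining rotation about the centre with a reduction factor (the function and variable names are illustrative, and it assumes the same includes as the code above):
// Inverse mapping: for every destination pixel, find the source pixel it came from,
// so no destination pixel is left unwritten. angle is in radians, scale < 1 shrinks.
void rotateAndScale(const Mat& src, Mat& dst, double angle, double scale)
{
dst = Mat::zeros(src.size(), src.type());
double cx = src.cols / 2.0, cy = src.rows / 2.0;
double ca = cos(angle), sa = sin(angle);
for (int r = 0; r < dst.rows; r++)
{
for (int c = 0; c < dst.cols; c++)
{
// undo the scaling, then apply the inverse rotation, all about the image centre
double dx = (c - cx) / scale, dy = (r - cy) / scale;
int sx = (int)( dx * ca + dy * sa + cx);
int sy = (int)(-dx * sa + dy * ca + cy);
if (sx >= 0 && sx < src.cols && sy >= 0 && sy < src.rows)
dst.at<Vec3b>(r, c) = src.at<Vec3b>(sy, sx); // nearest-neighbour sampling
}
}
}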

Related

Convert a fisheye image to an equirectangular image with opencv4

I want to transform a single round fisheye image into an equirectangular image with a C++ algorithm and OpenCV 4.
The idea is to go from an input image loaded on my computer, like this:
to an output image like this:
I'm using the method described on this blog:
http://paulbourke.net/dome/dualfish2sphere/
The method can be described by this picture:
Unfortunately, when I run my code, I obtain something like this:
I'm working on macOS with Xcode and I use the terminal "iTerm2" to build and execute my code.
The code is the following:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
const double PI = 3.141592653589793;
const string PATH_IMAGE = "/Users/Kenza/Desktop/Xcode_cpp_opencv/PaulBourke2/PaulBourke2/Images/img1.jpg";
const int ESC = 27;
Point2f findCorrespondingFisheyePoint(int Xe, int Ye, double He, double We, double Hf, double Wf, double FOV){
Point2f fisheyePoint;
double Xfn, Yfn; //Normalized Cartesian Coordinates
double longitude, latitude, Px, Py, Pz; //Spherical Coordinates
double r, theta; //Polar coordinates
double Xpn, Ypn; //Normalized Polar coordinates
//Normalize Coordinates
Xfn = ( ( 2.0 * (double)Xe ) - We) / Wf;//Between -1 and 1
Yfn = ( ( 2.0 * (double)Ye ) - He) / Hf;//Between -1 and 1
//Normalize Coordinates to Spherical Coordinates
longitude = Xfn*PI; //Between -PI and PI (2*PI interval)
latitude = Yfn*(PI/2.0); //Between -PI/2 and PI/2 (PI interval)
Px = cos(latitude)*cos(longitude);
Py = cos(latitude)*sin(longitude);
Pz = sin(latitude);
//Spherical Coordinates to Polar Coordinates
r = 2.0 * atan2(sqrt(pow(Px,2)+pow(Pz,2)),Py)/FOV;
theta = atan2(Pz,-Px);
Xpn = r * cos(theta);
Ypn = r * sin(theta);
//Normalize Coordinates to CartesianImage Coordinates
fisheyePoint.x = (int)(((Xpn+1.0)*Wf)/2.0);
fisheyePoint.y = (int)(((Ypn+1.0)*Hf)/2.0);
return fisheyePoint;
}
int main(int argc, char** argv){
Mat fisheyeImage, equirectangularImage;
fisheyeImage = imread(PATH_IMAGE, CV_32FC1);
namedWindow("Fisheye Image", WINDOW_AUTOSIZE);
imshow("Fisheye Image", fisheyeImage);
while(waitKey(0) != ESC) {
//wait until the key ESC is pressed
}
//destroyWindow("Fisheye Image");
int Hf, Wf; //Height, width and FOV for the input image (=fisheyeImage)
double FOV;
int He, We; //Height and width for the outpout image (=EquirectangularImage)
Hf = fisheyeImage.size().height;
Wf = fisheyeImage.size().width;
FOV = PI; //FOV in radian
//We keep the same ratio for the image input and the image output
We = Wf;
He = Hf;
equirectangularImage.create(Hf, Wf, fisheyeImage.type()); //We create the outpout image (=EquirectangularImage)
//For each pixels of the ouput equirectangular Image
for (int Xe = 0; Xe <equirectangularImage.size().width; Xe++){
for (int Ye = 0; Ye <equirectangularImage.size().height; Ye++){
equirectangularImage.at<Vec3b>(Point(Xe,Ye)) = fisheyeImage.at<Vec3b>(findCorrespondingFisheyePoint(Xe, Ye, He, We, Hf, Wf, FOV)); //We find the corresponding point in the fisheyeImage
}
}
namedWindow("Equirectangular Image", WINDOW_AUTOSIZE);
imshow("Equirectangular Image",equirectangularImage);
while(waitKey(0) != ESC) {
//wait until the key ESC is pressed
}
destroyWindow("Fisheye Image");
imwrite("equirectangularImage.jpg", equirectangularImage);
return 0;
}
With this code, I get the expected result:
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
const string PATH_IMAGE = "/Users/Kenza/Desktop/Xcode_cpp_opencv/Sos/Sos/Images/img1.jpg";
const int ESC = 27;
Point2f findCorrespondingFisheyePoint(int Xe, int Ye, int We, int He, float FOV){
Point2f fisheyePoint;
float theta, phi, r;
Point3f sphericalPoint;
theta = CV_PI * (Xe / ( (float) We ) - 0.5);
phi = CV_PI * (Ye / ( (float) He ) - 0.5);
sphericalPoint.x = cos(phi) * sin(theta);
sphericalPoint.y = cos(phi) * cos(theta);
sphericalPoint.z = sin(phi);
theta = atan2(sphericalPoint.z, sphericalPoint.x);
phi = atan2(sqrt(pow(sphericalPoint.x,2) + pow(sphericalPoint.z,2)), sphericalPoint.y);
r = ( (float) We ) * phi / FOV;
fisheyePoint.x = (int) ( 0.5 * ( (float) We ) + r * cos(theta) );
fisheyePoint.y = (int) ( 0.5 * ( (float) He ) + r * sin(theta) );
return fisheyePoint;
}
int main(int argc, char** argv){
Mat fisheyeImage, equirectangularImage;
int Wf, Hf;
float FOV;
int We, He;
fisheyeImage = imread(PATH_IMAGE, IMREAD_COLOR);
namedWindow("Fisheye Image");
imshow("fisheye Image", fisheyeImage);
Wf = fisheyeImage.size().width;
Hf = fisheyeImage.size().height;
FOV = (180 * CV_PI ) / 180;
We = Wf;
He = Hf;
while (waitKey(0) != ESC){
}
equirectangularImage.create(He, We, CV_8UC3);
for (int Xe = 0; Xe < We; Xe++){
for (int Ye = 0; Ye < He; Ye++){
Point2f fisheyePoint = findCorrespondingFisheyePoint(Xe, Ye, We, He, FOV);
if (fisheyePoint.x >= We || fisheyePoint.y >= He)
continue;
if (fisheyePoint.x < 0 || fisheyePoint.y < 0)
continue;
equirectangularImage.at<Vec3b>(Point(Xe, Ye)) = fisheyeImage.at<Vec3b>(fisheyePoint);
}
}
namedWindow("Equirectangular Image");
imshow("Equirectangular Image", equirectangularImage);
while (waitKey(0) != ESC){
}
imwrite("im2.jpg", equirectangularImage);
}
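A side note on this working version: the per-pixel double loop can be replaced by building two lookup maps once and letting cv::remap do the sampling, which gives you interpolation and border handling for free. A rough sketch reusing findCorrespondingFisheyePoint and the variables from the code above (an illustration of the idea, not a drop-in patch):
// needs #include <opencv2/imgproc.hpp> for remap
// one map per coordinate: for each output pixel, where to sample in the fisheye image
Mat mapX(He, We, CV_32FC1), mapY(He, We, CV_32FC1);
for (int Xe = 0; Xe < We; Xe++) {
for (int Ye = 0; Ye < He; Ye++) {
Point2f p = findCorrespondingFisheyePoint(Xe, Ye, We, He, FOV);
mapX.at<float>(Ye, Xe) = p.x;
mapY.at<float>(Ye, Xe) = p.y;
}
}
// remap interpolates, and out-of-range samples are filled with black instead of being skipped
remap(fisheyeImage, equirectangularImage, mapX, mapY, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));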

Slow motion in C++

I want to do slow motion. I've seen an implementation here: https://github.com/vaibhav06891/SlowMotion
I modified the code to generate only one frame.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <string>
using namespace cv;
using namespace std;
#define CLAMP(x,min,max) ( ((x) < (min)) ? (min) : ( ((x) > (max)) ? (max) : (x) ) )
int main(int argc, char** argv)
{
Mat frame,prevframe;
prevframe = imread("img1.png");
frame = imread("img2.png");
Mat prevgray, gray;
Mat fflow,bflow;
Mat flowf(frame.rows,frame.cols ,CV_8UC3); // the forward co-ordinates for interpolation
flowf.setTo(Scalar(255,255,255));
Mat flowb(frame.rows,frame.cols ,CV_8UC3); // the backward co-ordinates for interpolation
flowb.setTo(Scalar(255,255,255));
Mat final(frame.rows,frame.cols ,CV_8UC3);
int fx,fy,bx,by;
cvtColor(prevframe,prevgray,COLOR_BGR2GRAY); // Convert to gray space for optical flow calculation
cvtColor(frame, gray, COLOR_BGR2GRAY);
calcOpticalFlowFarneback(prevgray, gray, fflow, 0.5, 3, 15, 3, 3, 1.2, 0); // forward optical flow
calcOpticalFlowFarneback(gray, prevgray, bflow, 0.5, 3, 15, 3, 3, 1.2, 0); //backward optical flow
for (int y=0; y<frame.rows; y++)
{
for (int x=0; x<frame.cols; x++)
{
const Point2f fxy = fflow.at<Point2f>(y,x);
fy = CLAMP(y+fxy.y*0.5,0,frame.rows);
fx = CLAMP(x+fxy.x*0.5,0,frame.cols);
flowf.at<Vec3b>(fy,fx) = prevframe.at<Vec3b>(y,x);
const Point2f bxy = bflow.at<Point2f>(y,x);
by = CLAMP(y+bxy.y*(1-0.5),0,frame.rows);
bx = CLAMP(x+bxy.x*(1-0.5),0,frame.cols);
flowb.at<Vec3b>(by,bx) = frame.at<Vec3b>(y,x);
}
}
final = flowf*(1-0.5) + flowb*0.5; //combination of frwd and bckward martrix
cv::medianBlur(final,final,3);
imwrite( "output.png",final);
return 0;
}
But the result is not as expected.
For the images:
The result is:
Does anyone know what the problem is?
The optical flow algorithm won't work for your test images.
The first problem is that your test images have very little variation in neighbouring pixel values. Completely black lines and a single-colour square give the optical flow algorithm no clues about where the image areas moved, because the algorithm cannot process the whole image at once and computes the flow over a small 15x15-pixel window (the size you set in calcOpticalFlowFarneback).
The second problem is that your test images differ too much: the distance between the two positions of the brown square is too big for Farneback to detect.
Try the code with some real-life video frames, or edit your tests to be less monotonous (add some texture to the square, the background and the rectangle lines) and bring the squares closer together in the two images (try a 2-10 px distance). You can also play with the calcOpticalFlowFarneback arguments (read here) to suit your conditions.
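For instance, here is a hedged variant of the Farneback call from the question with more pyramid levels and a larger window, which tends to tolerate larger motion (these values are illustrative, not tuned):
// pyr_scale=0.5, levels=5 (more pyramid levels help with larger displacements),
// winsize=31 (a larger averaging window gives smoother, more robust flow),
// iterations=5, poly_n=7, poly_sigma=1.5, flags=0
calcOpticalFlowFarneback(prevgray, gray, fflow, 0.5, 5, 31, 5, 7, 1.5, 0);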
You can use this code to save the optical flow you get to an image for debugging:
Mat debugImage = Mat::zeros(fflow.size(), CV_8UC3);
float hsvHue, magnitude;
for (int x = 0; x < fflow.cols; x++)
{
for (int y = 0; y < fflow.rows; y++)
{
auto& item = fflow.at<Vec2f>(y, x);
magnitude = sqrtf(item[0] * item[0] + item[1] * item[1]);
hsvHue = atan2f(item[1], item[0]) / static_cast<float>(CV_PI)* 180.f;
// div 2 to fit 0..255 range
hsvHue = (hsvHue >= 0. ? hsvHue : (360.f + hsvHue)) / 2.f;
debugImage.at<Vec3b>(y, x)[0] = static_cast<uchar>(hsvHue);
debugImage.at<Vec3b>(y, x)[1] = 255;
debugImage.at<Vec3b>(y, x)[2] = static_cast<uchar>(255.f * magnitude);
}
}
cvtColor(debugImage, debugImage, COLOR_HSV2BGR);
imwrite("OpticalFlow.png", debugImage);
Here pixel flow direction will be represented with color (hue), and pixel move distance will be represented with brightness.
Try using the images I created:
Also note that
for (int y = 0; y < frame.rows; y++)
{
for (int x = 0; x < frame.cols; x++)
{
const Point2f fxy = fflow.at<Point2f>(y, x);
fy = CLAMP(y + fxy.y*0.5, 0, frame.rows);
fx = CLAMP(x + fxy.x*0.5, 0, frame.cols);
flowf.at<Vec3b>(fy, fx) = prevframe.at<Vec3b>(y, x);
...
this code will leave some flowf pixels uncoloured, because no source pixel maps onto them, and the optical flow algorithm can produce such situations. I would change it to:
for (int y = 0; y < frame.rows; y++)
{
for (int x = 0; x < frame.cols; x++)
{
const Point2f fxy = fflow.at<Point2f>(y, x);
fy = CLAMP(y - fxy.y*0.5, 0, frame.rows - 1); // clamp to a valid index, not to rows/cols
fx = CLAMP(x - fxy.x*0.5, 0, frame.cols - 1);
flowf.at<Vec3b>(y, x) = prevframe.at<Vec3b>(fy, fx);
const Point2f bxy = bflow.at<Point2f>(y, x);
by = CLAMP(y - bxy.y*(1 - 0.5), 0, frame.rows - 1);
bx = CLAMP(x - bxy.x*(1 - 0.5), 0, frame.cols - 1);
flowb.at<Vec3b>(y, x) = frame.at<Vec3b>(by, bx);
}
}
With this changed code and my tests I get this output:

Creating vignette filter in opencv?

How can we make a vignette filter in OpenCV? Do we need to implement an algorithm for it, or can we just play with the BGR values? How can we make this type of filter? I saw an implementation here but I didn't understand it clearly. Any complete algorithm and implementation guidance is highly appreciated.
After Abid Rahman K's answer I tried this in C++:
int main()
{
Mat v;
Mat img = imread ("D:\\2.jpg");
img.convertTo(v, CV_32F);
Mat a,b,c,d,e;
c.create(img.rows,img.cols,CV_32F);
d.create(img.rows,img.cols,CV_32F);
e.create(img.rows,img.cols,CV_32F);
a = getGaussianKernel(img.cols,300,CV_32F);
b = getGaussianKernel(img.rows,300,CV_32F);
c = b*a.t();
double minVal;
double maxVal;
cv::minMaxLoc(c, &minVal, &maxVal);
d = c/maxVal;
e = v*d ; // This line causing error
imshow ("venyiet" , e);
cvWaitKey();
}
d is displaying right but e=v*d line is causing runtime error of
OpenCV Error: Assertion failed (type == B.type() && (type == CV_32FC1 || type ==
CV_64FC1 || type == CV_32FC2 || type == CV_64FC2)) in unknown function, file ..
\..\..\src\opencv\modules\core\src\matmul.cpp, line 711
First of all, Abid Rahman K describes the easiest way to go about this filter. You should seriously study his answer with time and attention. Wikipedia's take on vignetting is also quite clarifying for those who have never heard of this filter.
Browny's implementation of this filter is considerably more complex. However, I ported his code to the C++ API and simplified it so you can follow the instructions yourself.
#include <math.h>
#include <vector>
#include <iostream>
#include <opencv2/opencv.hpp>
// Helper function to calculate the distance between 2 points.
double dist(CvPoint a, CvPoint b)
{
return sqrt(pow((double) (a.x - b.x), 2) + pow((double) (a.y - b.y), 2));
}
// Helper function that computes the longest distance from the edge to the center point.
double getMaxDisFromCorners(const cv::Size& imgSize, const cv::Point& center)
{
// given a rect and a line
// get which corner of rect is farthest from the line
std::vector<cv::Point> corners(4);
corners[0] = cv::Point(0, 0);
corners[1] = cv::Point(imgSize.width, 0);
corners[2] = cv::Point(0, imgSize.height);
corners[3] = cv::Point(imgSize.width, imgSize.height);
double maxDis = 0;
for (int i = 0; i < 4; ++i)
{
double dis = dist(corners[i], center);
if (maxDis < dis)
maxDis = dis;
}
return maxDis;
}
// Helper function that creates a gradient image.
// firstPt, radius and power, are variables that control the artistic effect of the filter.
void generateGradient(cv::Mat& mask)
{
cv::Point firstPt = cv::Point(mask.size().width/2, mask.size().height/2);
double radius = 1.0;
double power = 0.8;
double maxImageRad = radius * getMaxDisFromCorners(mask.size(), firstPt);
mask.setTo(cv::Scalar(1));
for (int i = 0; i < mask.rows; i++)
{
for (int j = 0; j < mask.cols; j++)
{
double temp = dist(firstPt, cv::Point(j, i)) / maxImageRad;
temp = temp * power;
double temp_s = pow(cos(temp), 4);
mask.at<double>(i, j) = temp_s;
}
}
}
// This is where the fun starts!
int main()
{
cv::Mat img = cv::imread("stack-exchange-chefs.jpg");
if (img.empty())
{
std::cout << "!!! Failed imread\n";
return -1;
}
/*
cv::namedWindow("Original", cv::WINDOW_NORMAL);
cv::resizeWindow("Original", img.size().width/2, img.size().height/2);
cv::imshow("Original", img);
*/
What img looks like:
cv::Mat maskImg(img.size(), CV_64F);
generateGradient(maskImg);
/*
cv::Mat gradient;
cv::normalize(maskImg, gradient, 0, 255, CV_MINMAX);
cv::imwrite("gradient.png", gradient);
*/
What maskImg looks like:
cv::Mat labImg(img.size(), CV_8UC3);
cv::cvtColor(img, labImg, CV_BGR2Lab);
for (int row = 0; row < labImg.size().height; row++)
{
for (int col = 0; col < labImg.size().width; col++)
{
cv::Vec3b value = labImg.at<cv::Vec3b>(row, col);
value.val[0] *= maskImg.at<double>(row, col);
labImg.at<cv::Vec3b>(row, col) = value;
}
}
cv::Mat output;
cv::cvtColor(labImg, output, CV_Lab2BGR);
//cv::imwrite("vignette.png", output);
cv::namedWindow("Vignette", cv::WINDOW_NORMAL);
cv::resizeWindow("Vignette", output.size().width/2, output.size().height/2);
cv::imshow("Vignette", output);
cv::waitKey();
return 0;
}
What output looks like:
As stated in the code above, by changing the values of firstPt, radius and power you can achieve stronger/weaker artistic effects.
Good luck!
You can do a simple implementation using Gaussian Kernels available in OpenCV.
Load the image and get its number of rows and columns.
Create two Gaussian kernels of size rows and columns, say A and B. Their variance depends upon your needs.
C = transpose(A)*B, i.e. multiply a column vector by a row vector so that the resulting array is the same size as the image.
D = C/C.max()
E = img*D
See the implementation below (for a grayscale image):
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('temp.jpg',0)
rows,cols = img.shape
a = cv2.getGaussianKernel(cols,300)
b = cv2.getGaussianKernel(rows,300)
c = b*a.T
d = c/c.max()
e = img*d
cv2.imwrite('vig2.png',e)
Below is my result:
Similarly for Color image:
NOTE : Of course, it is centered. You will need to make additional modifications to move focus to other places.
Here is a similar one, close to Abid's answer, but the code is for a colour image:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('turtle.jpg',1)
rows,cols = img.shape[:2]
zeros = np.copy(img)
zeros[:,:,:] = 0
a = cv2.getGaussianKernel(cols,900)
b = cv2.getGaussianKernel(rows,900)
c = b*a.T
d = c/c.max()
zeros[:,:,0] = img[:,:,0]*d
zeros[:,:,1] = img[:,:,1]*d
zeros[:,:,2] = img[:,:,2]*d
cv2.imwrite('vig2.png',zeros)
Original Image (Taken from Pexels under CC0 Licence)
After applying the vignette with a sigma of 900 (i.e. cv2.getGaussianKernel(cols,900)):
After applying the vignette with a sigma of 300 (i.e. cv2.getGaussianKernel(cols,300)):
Additionally, you can focus the vignette effect on the coordinates of your choice by simply shifting the mean of the Gaussian to your focus point, as follows.
import cv2
import numpy as np
img = cv2.imread('turtle.jpg',1)
fx,fy = 1465,180 # Add your focus coordinates here
#fx,fy = 145,1000 # for the second example below, use these coordinates instead
sigma = 300 # Standard Deviation of the Gaussian
rows,cols = img.shape[:2]
fxn = fx - cols//2 # Normalised temporary vars
fyn = fy - rows//2
zeros = np.copy(img)
zeros[:,:,:] = 0
a = cv2.getGaussianKernel(2*cols ,sigma)[cols-fx:2*cols-fx]
b = cv2.getGaussianKernel(2*rows ,sigma)[rows-fy:2*rows-fy]
c = b*a.T
d = c/c.max()
zeros[:,:,0] = img[:,:,0]*d
zeros[:,:,1] = img[:,:,1]*d
zeros[:,:,2] = img[:,:,2]*d
#zeros = add_alpha(zeros) # add_alpha is not defined in this snippet; remove this line or supply your own helper
cv2.imwrite('vig4.png',zeros)
The size of the turtle image is 1980x1200 (WxH). The following is an example focusing on the coordinate 1465,180 (i.e. fx,fy = 1465,180). (Note that I have reduced the variance to exemplify the change in focus.)
The following is an example focusing on the coordinate 145,1000 (i.e. fx,fy = 145,1000).
Here is my C++ implementation of a vignette filter on a colour image using OpenCV. It is faster than the accepted answer.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
double fastCos(double x){
x += 1.57079632;
if (x > 3.14159265)
x -= 6.28318531;
if (x < 0)
return 1.27323954 * x + 0.405284735 * x * x;
else
return 1.27323954 * x - 0.405284735 * x * x;
}
double dist(double ax, double ay,double bx, double by){
return sqrt((ax - bx)*(ax - bx) + (ay - by)*(ay - by));
}
int main(int argc, char** argv){
Mat src = cv::imread("filename_of_your_image.jpg");
Mat dst = Mat::zeros(src.size(), src.type());
double radius; //value greater than 0,
//greater the value lesser the visible vignette
//for a medium vignette use a value in range(0.5-1.5)
cin >> radius;
double cx = (double)src.cols/2, cy = (double)src.rows/2;
double maxDis = radius * dist(0,0,cx,cy);
double temp;
for (int y = 0; y < src.rows; y++) {
for (int x = 0; x < src.cols; x++) {
temp = fastCos(dist(cx, cy, x, y) / maxDis);
temp *= temp;
dst.at<Vec3b>(y, x)[0] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[0]) * temp);
dst.at<Vec3b>(y, x)[1] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[1]) * temp );
dst.at<Vec3b>(y, x)[2] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[2]) * temp);
}
}
imshow ("Vignetted Image", dst);
waitKey(0);
}
Here is a C++ implementation of vignetting for a grayscale image:
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat test = imread("test.jpg", IMREAD_GRAYSCALE);
Mat kernel_X = getGaussianKernel(test.cols, 100);
Mat kernel_Y = getGaussianKernel(test.rows, 100);
Mat kernel_X_transpose;
transpose(kernel_X, kernel_X_transpose);
Mat kernel = kernel_Y * kernel_X_transpose;
Mat mask_v, proc_img;
normalize(kernel, mask_v, 0, 1, NORM_MINMAX);
test.convertTo(proc_img, CV_64F);
multiply(mask_v, proc_img, proc_img);
convertScaleAbs(proc_img, proc_img);
imshow ("Vignette", proc_img);
waitKey(0);
return 0;
}

Video Stabilization

I'm researching the video stabilization field. I implemented an application using OpenCV.
My pipeline is:
Surf points extraction
Matching
estimateRigidTransform
warpAffine
But the resulting video is not stable. Can anyone help me with this problem or provide some source code links so I can improve it?
Sample video: Hippo video
Here is my code [EDIT]
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/opencv.hpp>
const double smooth_level = 0.7;
using namespace cv;
using namespace std;
struct TransformParam
{
TransformParam() {}
TransformParam(double _dx, double _dy, double _da) {
dx = _dx;
dy = _dy;
da = _da;
}
double dx; // translation x
double dy; // translation y
double da; // angle
};
int main( int argc, char** argv )
{
VideoCapture cap ("test12.avi");
Mat cur, cur_grey;
Mat prev, prev_grey;
cap >> prev;
cvtColor(prev, prev_grey, COLOR_BGR2GRAY);
// Step 1 - Get previous to current frame transformation (dx, dy, da) for all frames
vector <TransformParam> prev_to_cur_transform; // previous to current
int k=1;
int max_frames = cap.get(CV_CAP_PROP_FRAME_COUNT);
VideoWriter writeVideo ("stable.avi",0,30,cvSize(prev.cols,prev.rows),true);
Mat last_T;
double avg_dx = 0, avg_dy = 0, avg_da = 0;
Mat smooth_T(2,3,CV_64F);
while(true) {
cap >> cur;
if(cur.data == NULL) {
break;
}
cvtColor(cur, cur_grey, COLOR_BGR2GRAY);
// vector from prev to cur
vector <Point2f> prev_corner, cur_corner;
vector <Point2f> prev_corner2, cur_corner2;
vector <uchar> status;
vector <float> err;
goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err);
// weed out bad matches
for(size_t i=0; i < status.size(); i++) {
if(status[i]) {
prev_corner2.push_back(prev_corner[i]);
cur_corner2.push_back(cur_corner[i]);
}
}
// translation + rotation only
Mat T = estimateRigidTransform(prev_corner2, cur_corner2, false);
// in rare cases no transform is found. We'll just use the last known good transform.
if(T.data == NULL) {
last_T.copyTo(T);
}
T.copyTo(last_T);
// decompose T
double dx = T.at<double>(0,2);
double dy = T.at<double>(1,2);
double da = atan2(T.at<double>(1,0), T.at<double>(0,0));
prev_to_cur_transform.push_back(TransformParam(dx, dy, da));
avg_dx = (avg_dx * smooth_level) + (dx * (1- smooth_level));
avg_dy = (avg_dy * smooth_level) + (dy * (1- smooth_level));
avg_da = (avg_da * smooth_level) + (da * (1- smooth_level));
smooth_T.at<double>(0,0) = cos(avg_da);
smooth_T.at<double>(0,1) = -sin(avg_da);
smooth_T.at<double>(1,0) = sin(avg_da);
smooth_T.at<double>(1,1) = cos(avg_da);
smooth_T.at<double>(0,2) = avg_dx;
smooth_T.at<double>(1,2) = avg_dy;
Mat stable;
warpAffine(prev,stable,smooth_T,prev.size());
Mat canvas = Mat::zeros(cur.rows, cur.cols*2+10, cur.type());
prev.copyTo(canvas(Range::all(), Range(0, prev.cols)));
stable.copyTo(canvas(Range::all(), Range(prev.cols+10, prev.cols*2+10)));
imshow("before and after", canvas);
waitKey(20);
writeVideo.write(stable);
cur.copyTo(prev);
cur_grey.copyTo(prev_grey);
k++;
}
}
First, you can just blur your image; it will help a bit. Second, you can easily smooth your transform matrix with the simplest exponential smoothing, S(t+1) = a*S(t) + (1-a)*T(t+1), where T is the raw transform and S the smoothed one, and play with the a-value in the [0;1] range. Third, you can turn off some types of transformation, like rotation, shift, etc.
Here is code example:
t = estimateRigidTransform(cur, prev, 0); // 0 (false) = restricted transform (translation + rotation + uniform scale), not full affine
if(!t.empty()){
// t(Range(0,2), Range(0,2)) = Mat::eye(2, 2, CV_64FC1); // turning off rotation
// t.at<double>(0,2) = 0; t.at<double>(1,2) = 0; // turning off shift dx and dy
tAvrg = tAvrg*a + t*(1-a); // a - smooth level in [0;1] range, play with it
warpAffine(cur, stable, tAvrg, Size(cur.cols, cur.rows));
}
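Note that in newer OpenCV releases estimateRigidTransform is deprecated; estimateAffinePartial2D (available since OpenCV 3.2) is the usual replacement for the restricted translation + rotation + uniform scale case. A sketch, assuming the same point vectors as in the question's code:
// replacement for: Mat T = estimateRigidTransform(prev_corner2, cur_corner2, false);
Mat T = estimateAffinePartial2D(prev_corner2, cur_corner2); // 2x3 CV_64F matrix, empty if estimation fails
if (T.empty())
last_T.copyTo(T);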

Rotate an image without cropping in OpenCV in C++

I'd like to rotate an image, but I can't obtain the rotated image without cropping it.
My original image:
Now I use this code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Compile with g++ code.cpp -lopencv_core -lopencv_highgui -lopencv_imgproc
int main()
{
cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
cv::Mat dst;
cv::Point2f pc(src.cols/2., src.rows/2.);
cv::Mat r = cv::getRotationMatrix2D(pc, -45, 1.0);
cv::warpAffine(src, dst, r, src.size()); // what size I should use?
cv::imwrite("rotated_im.png", dst);
return 0;
}
And obtain the following image:
But I'd like to obtain this:
My answer is inspired by the following posts / blog entries:
Rotate cv::Mat using cv::warpAffine offsets destination image
http://john.freml.in/opencv-rotation
Main ideas:
Adjusting the rotation matrix by adding a translation to the new image center
Using cv::RotatedRect to rely on existing opencv functionality as much as possible
Code tested with opencv 3.4.1:
#include "opencv2/opencv.hpp"
int main()
{
cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
double angle = -45;
// get rotation matrix for rotating the image around its center in pixel coordinates
cv::Point2f center((src.cols-1)/2.0, (src.rows-1)/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle, center not relevant
cv::Rect2f bbox = cv::RotatedRect(cv::Point2f(), src.size(), angle).boundingRect2f();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - src.cols/2.0;
rot.at<double>(1,2) += bbox.height/2.0 - src.rows/2.0;
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
cv::imwrite("rotated_im.png", dst);
return 0;
}
Just try the code below; the idea is simple:
You need to create a blank image with the maximum size you're expecting while rotating at any angle. Here you should use Pythagoras' theorem, as mentioned in the comments above.
Now copy the source image to the newly created image and pass it to warpAffine. Here you should use the centre of the newly created image for the rotation.
After warpAffine, if you need to crop the exact image, translate the four corners of the source image into the enlarged image using the rotation matrix, as described here.
Find the minimum x and minimum y for the top corner, and the maximum x and maximum y for the bottom corner, from the above result, and use them to crop the image.
This is the code:
int theta = 0;
Mat src,frame, frameRotated;
src = imread("rotate.png",1);
cout<<endl<<endl<<"Press '+' to rotate anti-clockwise and '-' for clockwise 's' to save" <<endl<<endl;
int diagonal = (int)sqrt(src.cols*src.cols+src.rows*src.rows);
int newWidth = diagonal;
int newHeight =diagonal;
int offsetX = (newWidth - src.cols) / 2;
int offsetY = (newHeight - src.rows) / 2;
Mat targetMat(newWidth, newHeight, src.type());
Point2f src_center(targetMat.cols/2.0F, targetMat.rows/2.0F);
while(1){
src.copyTo(frame);
double radians = theta * M_PI / 180.0;
double sin = abs(std::sin(radians));
double cos = abs(std::cos(radians));
frame.copyTo(targetMat.rowRange(offsetY, offsetY + frame.rows).colRange(offsetX, offsetX + frame.cols));
Mat rot_mat = getRotationMatrix2D(src_center, theta, 1.0);
warpAffine(targetMat, frameRotated, rot_mat, targetMat.size());
//Calculate bounding rect and for exact image
//Reference:- https://stackoverflow.com/questions/19830477/find-the-bounding-rectangle-of-rotated-rectangle/19830964?noredirect=1#19830964
Rect bound_Rect(frame.cols,frame.rows,0,0);
int x1 = offsetX;
int x2 = offsetX+frame.cols;
int x3 = offsetX;
int x4 = offsetX+frame.cols;
int y1 = offsetY;
int y2 = offsetY;
int y3 = offsetY+frame.rows;
int y4 = offsetY+frame.rows;
Mat co_Ordinate = (Mat_<double>(3,4) << x1, x2, x3, x4,
y1, y2, y3, y4,
1, 1, 1, 1 );
Mat RotCo_Ordinate = rot_mat * co_Ordinate;
for(int i=0;i<4;i++){
if(RotCo_Ordinate.at<double>(0,i)<bound_Rect.x)
bound_Rect.x=(int)RotCo_Ordinate.at<double>(0,i); //access smallest
if(RotCo_Ordinate.at<double>(1,i)<bound_Rect.y)
bound_Rect.y=RotCo_Ordinate.at<double>(1,i); //access smallest y
}
for(int i=0;i<4;i++){
if(RotCo_Ordinate.at<double>(0,i)>bound_Rect.width)
bound_Rect.width=(int)RotCo_Ordinate.at<double>(0,i); //access largest x
if(RotCo_Ordinate.at<double>(1,i)>bound_Rect.height)
bound_Rect.height=RotCo_Ordinate.at<double>(1,i); //access largest y
}
bound_Rect.width=bound_Rect.width-bound_Rect.x;
bound_Rect.height=bound_Rect.height-bound_Rect.y;
Mat cropedResult;
Mat ROI = frameRotated(bound_Rect);
ROI.copyTo(cropedResult);
imshow("Result", cropedResult);
imshow("frame", frame);
imshow("rotated frame", frameRotated);
char k=waitKey();
if(k=='+') theta+=10;
if(k=='-') theta-=10;
if(k=='s') imwrite("rotated.jpg",cropedResult);
if(k==27) break;
}
Cropped Image
Thanks Robula!
Actually, you do not need to compute sine and cosine twice.
import cv2
def rotate_image(mat, angle):
# angle in degrees
height, width = mat.shape[:2]
image_center = (width/2, height/2)
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
Thanks #Haris! Here's the Python version:
import math
import cv2
import numpy as np
def rotate_image(image, angle):
'''Rotate image "angle" degrees.
How it works:
- Creates a blank image that fits any rotation of the image. To achieve
this, set the height and width to be the image's diagonal.
- Copy the original image to the center of this blank image
- Rotate using warpAffine, using the newly created image's center
(the enlarged blank image center)
- Translate the four corners of the source image in the enlarged image
using homogenous multiplication of the rotation matrix.
- Crop the image according to these transformed corners
'''
diagonal = int(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2)))
offset_x = (diagonal - image.shape[0])//2  # integer offsets so they can be used as slice indices
offset_y = (diagonal - image.shape[1])//2
dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
image_center = (diagonal/2, diagonal/2)
R = cv2.getRotationMatrix2D(image_center, angle, 1.0)
dst_image[offset_x:(offset_x + image.shape[0]), \
offset_y:(offset_y + image.shape[1]), \
:] = image
dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)
# Calculate the rotated bounding rect
x0 = offset_x
x1 = offset_x + image.shape[0]
x2 = offset_x
x3 = offset_x + image.shape[0]
y0 = offset_y
y1 = offset_y
y2 = offset_y + image.shape[1]
y3 = offset_y + image.shape[1]
corners = np.zeros((3,4))
corners[0,0] = x0
corners[0,1] = x1
corners[0,2] = x2
corners[0,3] = x3
corners[1,0] = y0
corners[1,1] = y1
corners[1,2] = y2
corners[1,3] = y3
corners[2:] = 1
c = np.dot(R, corners)
x = int(c[0,0])
y = int(c[1,0])
left = x
right = x
up = y
down = y
for i in range(4):
x = int(c[0,i])
y = int(c[1,i])
if (x < left): left = x
if (x > right): right = x
if (y < up): up = y
if (y > down): down = y
h = down - up
w = right - left
cropped = np.zeros((w, h, 3), dtype='uint8')
cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
return cropped
Increase the image canvas (equally from the center without changing the image size) so that it can fit the image after rotation, then apply warpAffine:
Mat img = imread ("/path/to/image", 1);
double offsetX, offsetY;
double angle = -45;
double width = img.size().width;
double height = img.size().height;
Point2d center = Point2d (width / 2, height / 2);
Rect bounds = RotatedRect (center, img.size(), angle).boundingRect();
Mat resized = Mat::zeros (bounds.size(), img.type());
offsetX = (bounds.width - width) / 2;
offsetY = (bounds.height - height) / 2;
Rect roi = Rect (offsetX, offsetY, width, height);
img.copyTo (resized (roi));
center += Point2d (offsetX, offsetY);
Mat M = getRotationMatrix2D (center, angle, 1.0);
warpAffine (resized, resized, M, resized.size());
After searching around for a clean and easy to understand solution and reading through the answers above trying to understand them, I eventually came up with a solution using trigonometry.
I hope this helps somebody :)
import cv2
import math
def rotate_image(mat, angle):
height, width = mat.shape[:2]
image_center = (width / 2, height / 2)
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
radians = math.radians(angle)
sin = math.sin(radians)
cos = math.cos(radians)
bound_w = int((height * abs(sin)) + (width * abs(cos)))
bound_h = int((height * abs(cos)) + (width * abs(sin)))
rotation_mat[0, 2] += ((bound_w / 2) - image_center[0])
rotation_mat[1, 2] += ((bound_h / 2) - image_center[1])
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
EDIT: Please refer to #Remi Cuingnet's answer below.
A Python version that rotates an image while giving you control of the padded black region: you can use scipy.ndimage.rotate. Here is an example:
import cv2
import matplotlib.pyplot as plt
from skimage import io
from scipy import ndimage
image = io.imread('https://www.pyimagesearch.com/wp-content/uploads/2019/12/tensorflow2_install_ubuntu_header.jpg')
io.imshow(image)
plt.show()
rotated = ndimage.rotate(image, angle=234, mode='nearest')
rotated = cv2.resize(rotated, (image.shape[1], image.shape[0]))  # dsize is (width, height)
# rotated = cv2.cvtColor(rotated, cv2.COLOR_BGR2RGB)
# cv2.imwrite('rotated.jpg', rotated)
io.imshow(rotated)
plt.show()
If you have a rotation and a scaling of the image:
#include "opencv2/opencv.hpp"
#include <functional>
#include <vector>
bool compareCoords(cv::Point2f p1, cv::Point2f p2, char coord)
{
assert(coord == 'x' || coord == 'y');
if (coord == 'x')
return p1.x < p2.x;
return p1.y < p2.y;
}
int main(int argc, char** argv)
{
cv::Mat image = cv::imread("lenna.png");
float angle = 45.0; // degrees
float scale = 0.5;
cv::Mat_<float> rot_mat = cv::getRotationMatrix2D( cv::Point2f( 0.0f, 0.0f ), angle, scale );
// Image corners
cv::Point2f pA = cv::Point2f(0.0f, 0.0f);
cv::Point2f pB = cv::Point2f(image.cols, 0.0f);
cv::Point2f pC = cv::Point2f(image.cols, image.rows);
cv::Point2f pD = cv::Point2f(0.0f, image.rows);
std::vector<cv::Point2f> pts = { pA, pB, pC, pD };
std::vector<cv::Point2f> ptsTransf;
cv::transform(pts, ptsTransf, rot_mat );
using namespace std::placeholders;
float minX = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
float maxX = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
float minY = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;
float maxY = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;
float newW = maxX - minX;
float newH = maxY - minY;
cv::Mat_<float> trans_mat = (cv::Mat_<float>(2,3) << 0, 0, -minX, 0, 0, -minY);
cv::Mat_<float> M = rot_mat + trans_mat;
cv::Mat warpedImage;
cv::warpAffine( image, warpedImage, M, cv::Size(newW, newH) );
cv::imshow("lenna", image);
cv::imshow("Warped lenna", warpedImage);
cv::waitKey();
cv::destroyAllWindows();
return 0;
}
Thanks to everyone for this post; it has been super useful. However, I found some black lines at the left and top (using Rose's Python version) when rotating 90º. The problem seemed to be some int() roundings. In addition to that, I changed the sign of the angle to make it grow clockwise.
import math
import cv2
import numpy as np
def rotate_image(image, angle):
'''Rotate image "angle" degrees.
How it works:
- Creates a blank image that fits any rotation of the image. To achieve
this, set the height and width to be the image's diagonal.
- Copy the original image to the center of this blank image
- Rotate using warpAffine, using the newly created image's center
(the enlarged blank image center)
- Translate the four corners of the source image in the enlarged image
using homogenous multiplication of the rotation matrix.
- Crop the image according to these transformed corners
'''
diagonal = int(math.ceil(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2))))
offset_x = (diagonal - image.shape[0])//2  # integer offsets so they can be used as slice indices
offset_y = (diagonal - image.shape[1])//2
dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
image_center = (float(diagonal-1)/2, float(diagonal-1)/2)
R = cv2.getRotationMatrix2D(image_center, -angle, 1.0)
dst_image[offset_x:(offset_x + image.shape[0]), offset_y:(offset_y + image.shape[1]), :] = image
dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)
# Calculate the rotated bounding rect
x0 = offset_x
x1 = offset_x + image.shape[0]
x2 = offset_x + image.shape[0]
x3 = offset_x
y0 = offset_y
y1 = offset_y
y2 = offset_y + image.shape[1]
y3 = offset_y + image.shape[1]
corners = np.zeros((3,4))
corners[0,0] = x0
corners[0,1] = x1
corners[0,2] = x2
corners[0,3] = x3
corners[1,0] = y0
corners[1,1] = y1
corners[1,2] = y2
corners[1,3] = y3
corners[2:] = 1
c = np.dot(R, corners)
x = int(round(c[0,0]))
y = int(round(c[1,0]))
left = x
right = x
up = y
down = y
for i in range(4):
x = c[0,i]
y = c[1,i]
if (x < left): left = x
if (x > right): right = x
if (y < up): up = y
if (y > down): down = y
h = int(round(down - up))
w = int(round(right - left))
left = int(round(left))
up = int(round(up))
cropped = np.zeros((w, h, 3), dtype='uint8')
cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
return cropped
Go version (using gocv) of #robula and #remi-cuingnet
func rotateImage(mat *gocv.Mat, angle float64) *gocv.Mat {
height := mat.Rows()
width := mat.Cols()
imgCenter := image.Point{X: width/2, Y: height/2}
rotationMat := gocv.GetRotationMatrix2D(imgCenter, -angle, 1.0)
absCos := math.Abs(rotationMat.GetDoubleAt(0, 0))
absSin := math.Abs(rotationMat.GetDoubleAt(0, 1))
boundW := float64(height) * absSin + float64(width) * absCos
boundH := float64(height) * absCos + float64(width) * absSin
rotationMat.SetDoubleAt(0, 2, rotationMat.GetDoubleAt(0, 2) + (boundW / 2) - float64(imgCenter.X))
rotationMat.SetDoubleAt(1, 2, rotationMat.GetDoubleAt(1, 2) + (boundH / 2) - float64(imgCenter.Y))
gocv.WarpAffine(*mat, mat, rotationMat, image.Point{ X: int(boundW), Y: int(boundH) })
return mat
}
I rotate in the same matrix in memory; make a new matrix if you don't want to alter it.
For anyone using the Emgu.CV or OpenCvSharp wrapper in .NET, there's a C# implementation of Lars Schillingmann's answer:
Emgu.CV:
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
public static class MatExtension
{
/// <summary>
/// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
/// </summary>
public static Mat Rotate(this Mat src, float degrees)
{
degrees = -degrees; // counter-clockwise to clockwise
var center = new PointF((src.Width - 1) / 2f, (src.Height - 1) / 2f);
var rotationMat = new Mat();
CvInvoke.GetRotationMatrix2D(center, degrees, 1, rotationMat);
var boundingRect = new RotatedRect(new(), src.Size, degrees).MinAreaRect();
rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
var rotatedSrc = new Mat();
CvInvoke.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
return rotatedSrc;
}
/// <summary>
/// <see>https://stackoverflow.com/questions/32255440/how-can-i-get-and-set-pixel-values-of-an-emgucv-mat-image/69537504#69537504</see>
/// </summary>
public static unsafe void Set<T>(this Mat mat, int row, int col, T value) where T : struct =>
_ = new Span<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
{
[(row * mat.Cols) + col] = value
};
public static unsafe T Get<T>(this Mat mat, int row, int col) where T : struct =>
new ReadOnlySpan<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
[(row * mat.Cols) + col];
}
OpenCvSharp:
OpenCvSharp already has Mat.Set<> method that functions same as mat.at<> in the original OpenCV, so we don't have to copy these methods from How can I get and set pixel values of an EmguCV Mat image?
using OpenCvSharp;
public static class MatExtension
{
/// <summary>
/// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
/// </summary>
public static Mat Rotate(this Mat src, float degrees)
{
degrees = -degrees; // counter-clockwise to clockwise
var center = new Point2f((src.Width - 1) / 2f, (src.Height - 1) / 2f);
var rotationMat = Cv2.GetRotationMatrix2D(center, degrees, 1);
var boundingRect = new RotatedRect(new(), new Size2f(src.Width, src.Height), degrees).BoundingRect();
rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
var rotatedSrc = new Mat();
Cv2.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
return rotatedSrc;
}
}
Also, you may want to mutate the src param instead of returning a new clone of it during rotation; for that you can just set the dst param of WarpAffine() to be the same as src: c++, opencv: Is it safe to use the same Mat for both source and destination images in filtering operation?
CvInvoke.WarpAffine(src, src, rotationMat, boundingRect.Size);
This is called in-place mode: https://answers.opencv.org/question/24/do-all-opencv-functions-support-in-place-mode-for-their-arguments/
Can the OpenCV function cvtColor be used to convert a matrix in place?
If it is just to rotate 90 degrees, maybe this code could be useful.
Mat img = imread("images.jpg");
Mat rt(img.rows, img.rows, CV_8U);
Point2f pc(img.cols / 2.0, img.rows / 2.0);
Mat r = getRotationMatrix2D(pc, 90, 1);
warpAffine(img, rt, r, rt.size());
imshow("rotated", rt);
Hope it's useful.
By the way, for 90º rotations only, here is a more efficient + accurate function:
import numpy as np
def rotate_image_90(image, angle):
# only multiples of 90 degrees are handled; any other angle returns the image unchanged
angle = -angle
rotated_image = image
if angle == 0:
pass
elif angle == 90:
rotated_image = np.rot90(rotated_image)
elif angle == 180 or angle == -180:
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
elif angle == -90:
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
return rotated_image