Rotate an image using OpenCV with C++ in an interface - c++

I'm developing a project that rotates an image using OpenCV in C++ behind an interface. I have some problems with this code. Is there a way to fix it?
IplImage* source_image = cvLoadImage(ip, 1);
IplImage* rotated_image = cvCloneImage(source_image);  // same size/depth/channels as the source
cvNamedWindow("rotate_image", CV_WINDOW_FREERATIO);
int angle = 180;
// The trackbar is attached to the window by name, not to an image.
cvCreateTrackbar("Angle", "rotate_image", &angle, 360, NULL);
CvPoint2D32f center = cvPoint2D32f(source_image->width / 2.f, source_image->height / 2.f);
// The rotation matrix is a 2x3 CvMat, not an image.
CvMat* rotation = cvCreateMat(2, 3, CV_32FC1);
cv2DRotationMatrix(center, angle - 180, 1.0, rotation);
cvWarpAffine(source_image, rotated_image, rotation, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvShowImage("rotate_image", rotated_image);

From https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html
Mat rot_mat( 2, 3, CV_32FC1 );
Mat warp_mat( 2, 3, CV_32FC1 );
Mat src, warp_dst, warp_rotate_dst;
/// Load the image
src = imread( argv[1], 1 );
/// Set the dst image the same type and size as src
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Compute a rotation matrix with respect to the center of the image
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
double angle = -50.0;
double scale = 0.6;
/// Get the rotation matrix with the specifications above
rot_mat = getRotationMatrix2D( center, angle, scale );
/// Rotate the warped image (in the full tutorial, warp_dst has already been
/// filled by warpAffine( src, warp_dst, warp_mat, warp_dst.size() ))
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
Don't use the obsolete C API, as @Miki said, and ask a clear question, not just "this code doesn't work".
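For reference, here is a minimal sketch of the same rotation using the C++ API; the file path and the 30-degree angle are placeholders:
#include <opencv2/opencv.hpp>

int main()
{
    // Load the image (path is a placeholder).
    cv::Mat src = cv::imread("input.jpg");
    if (src.empty()) return -1;

    // Rotation matrix about the image center: 30 degrees, unchanged scale.
    cv::Point2f center(src.cols / 2.f, src.rows / 2.f);
    cv::Mat rot = cv::getRotationMatrix2D(center, 30.0, 1.0);

    // Apply the affine warp and show the result.
    cv::Mat dst;
    cv::warpAffine(src, dst, rot, src.size());
    cv::imshow("rotated", dst);
    cv::waitKey(0);
    return 0;
}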

Try this; it worked for me.
IplImage* source_image;                       // already loaded elsewhere, e.g. with cvLoadImage
IplImage* dest = cvCloneImage(source_image);  // destination with the same size/depth/channels
CvPoint2D32f center;
center.x = dest->width / 2;
center.y = dest->height / 2;
CvMat* mapMatrix = cvCreateMat(2, 3, CV_32FC1);
// Read the angle from the numericUpDown1 UI control.
double angle = System::Convert::ToDouble(numericUpDown1->Value);
cv2DRotationMatrix(center, angle, 1.0, mapMatrix);
cvWarpAffine(source_image, dest, mapMatrix, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&mapMatrix);
cvShowImage("Rotated", dest);

Related

Why do I get bad results with calcBackProject in opencv?

I created a project to reduce noise in a target image. I want to work with histograms, so I use calcHist to get the histogram of the image. When I use calcBackProject to recover the image, the result is very different from the source image, which I can't understand. It also shows that the histograms of the two images differ significantly. Is there a problem with my histogram processing?
By the way, my OpenCV version is 4.5.0.
int main()
{
    Mat src = imread("./newpaperback.jpg");
    if (src.empty()) return -1;
    imshow("src", src);
    Mat gray;
    cvtColor(src, gray, COLOR_BGR2GRAY);
    imshow("gray", gray);

    // Compute a 256-bin histogram of the grayscale image.
    const int channels[] = {0};
    Mat hist;
    int dims = 1;
    const int histSize[] = {256};
    float granges[] = {0, 256};
    const float* ranges[] = { granges };
    calcHist(&gray, 1, channels, Mat(), hist, dims, histSize, ranges, true, false);

    // Draw the histogram, scaling each bin to the plot height.
    int hist_height = 256;
    Mat hist_image = Mat::zeros(hist_height, hist_height * 2, CV_8UC3);
    double max_val;
    minMaxLoc(hist, 0, &max_val, 0, 0);
    for (int i = 0; i < hist_height; i++)
    {
        float bin_val = hist.at<float>(i);
        int intensity = bin_val * hist_height / max_val;
        rectangle(hist_image, Point(i * 2, hist_height - 1), Point((i + 1) * 2 - 1, hist_height - intensity), Scalar(255, 255, 255));
    }
    imshow("hist", hist_image);

    // Back-project the histogram onto the grayscale image.
    Mat back_img;
    calcBackProject(&gray, 1, channels, hist, back_img, ranges, 1, true);
    imshow("back", back_img);
    waitKey(0);
    return 0;
}
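One thing to keep in mind: calcBackProject does not reconstruct the image; it replaces each pixel with the value of its histogram bin, so with raw bin counts from a large image the CV_8U output saturates. Normalizing the histogram to [0, 255] first usually makes the back projection viewable; a small sketch reusing the names from the code above:
// Scale the bins to [0, 255] so the back projection fits in 8 bits.
Mat hist_norm;
normalize(hist, hist_norm, 0, 255, NORM_MINMAX, -1, Mat());
Mat back_norm;
calcBackProject(&gray, 1, channels, hist_norm, back_norm, ranges, 1, true);
imshow("back (normalized hist)", back_norm);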

Deskewing Image OpenCV

I have followed this article on how to calculate and deskew an image for better Tesseract OCR results: http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
The correct angle is calculated, but the text is never actually rotated.
These are the methods I am using:
+ (UIImage *)prepareImage:(UIImage *)image {
    return deskew(image, computeSkew(image));
}

// Organization -> Deskewing

// Estimate the skew angle (in radians) as the mean angle of the long
// line segments found by the probabilistic Hough transform.
double computeSkew(UIImage *image)
{
    Mat src;
    UIImageToMat(image, src);
    cv::Size size = src.size();
    bitwise_not(src, src);
    vector<Vec4i> lines;
    HoughLinesP(src, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);
    Mat disp_lines(size, CV_8UC1, Scalar(0, 0, 0));
    double angle = 0.;
    unsigned nb_lines = lines.size();
    if (nb_lines == 0) return 0.;  // no lines found: nothing to average
    for (unsigned i = 0; i < nb_lines; ++i)
    {
        line(disp_lines, cv::Point(lines[i][0], lines[i][1]),
             cv::Point(lines[i][2], lines[i][3]), Scalar(255, 0, 0));
        angle += atan2((double)lines[i][3] - lines[i][1],
                       (double)lines[i][2] - lines[i][0]);
    }
    angle /= nb_lines; // mean angle, in radians
    cout << angle << endl;
    return angle;
}

// Rotate the image by the given angle around the center of the
// minimum-area box of all foreground pixels.
UIImage* deskew(UIImage *image, double angle)
{
    Mat img;
    UIImageToMat(image, img);
    bitwise_not(img, img);
    vector<cv::Point> points;
    Mat_<uchar>::iterator it = img.begin<uchar>();
    Mat_<uchar>::iterator end = img.end<uchar>();
    for (; it != end; ++it)
        if (*it)
            points.push_back(it.pos());
    RotatedRect box = minAreaRect(Mat(points));
    Mat rot_mat = getRotationMatrix2D(box.center, angle, 1);
    Mat rotated;
    warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);
    return MatToUIImage(rotated);
}
UIImageToMat and MatToUIImage are reliable methods that convert back and forth. I have also tried returning the angle in both radians and degrees. Both times the image returned from prepareImage is still tilted at the same angle as the original image.
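One detail worth double-checking, since getRotationMatrix2D takes its angle in degrees while atan2 (and therefore computeSkew) produces radians: a small sketch of an explicit conversion inside deskew, reusing the names from the question:
// getRotationMatrix2D expects degrees; computeSkew returns radians.
double angleDegrees = angle * 180.0 / CV_PI;
Mat rot_mat = getRotationMatrix2D(box.center, angleDegrees, 1);
warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);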

Smooth or blur image edges using opencv

I want to smooth or blur image edges using OpenCV. I followed some links on Stack Overflow but did not get an accurate result.
This is what I tried:
int lowThreshold = 100;
int ratio = 3;
int kernelSize = 3;
Mat srcGray, cannyEdges, blurred, output;

// Build an edge mask: grayscale -> pre-blur -> Canny.
cvtColor(input, srcGray, CV_BGR2GRAY);
blur(srcGray, cannyEdges, Size(3, 3));
Canny(cannyEdges, cannyEdges, lowThreshold, lowThreshold * ratio, kernelSize);

// Dilate the edges so the mask covers a band around each edge.
int dilation_size = 5;
Mat element = getStructuringElement(MORPH_CROSS,
    Size(2 * dilation_size + 1, 2 * dilation_size + 1),
    Point(dilation_size, dilation_size));
dilate(cannyEdges, cannyEdges, element);

// Blur a copy of the input, then paste the blurred pixels back only
// where the edge mask is set. output must start as a copy of the
// input, otherwise only the masked pixels get written.
input.copyTo(output);
input.copyTo(blurred);
blur(blurred, blurred, Size(3, 3));
blurred.copyTo(output, cannyEdges);
Images of my input, my current output, and the desired result were attached to the original question.
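For a smoother transition than a hard masked copy, one option is to feather the mask and alpha-blend the blurred and original images. A sketch under the assumption that input is CV_8UC3 and cannyEdges is the dilated 8-bit edge mask from above:
#include <opencv2/opencv.hpp>
using namespace cv;

Mat blurEdgesSoft(const Mat& input, const Mat& cannyEdges)
{
    // Heavier blur than the 3x3 box above, for a visible effect.
    Mat blurred;
    GaussianBlur(input, blurred, Size(9, 9), 0);

    // Turn the binary edge mask into a soft [0, 1] weight map.
    Mat mask;
    cannyEdges.convertTo(mask, CV_32F, 1.0 / 255.0);
    GaussianBlur(mask, mask, Size(21, 21), 0);

    // Replicate the single-channel weights across the three color channels.
    Mat mask3;
    Mat channels[] = { mask, mask, mask };
    merge(channels, 3, mask3);

    // Per-pixel blend: output = input * (1 - w) + blurred * w.
    Mat inputF, blurredF, invMask3;
    input.convertTo(inputF, CV_32FC3);
    blurred.convertTo(blurredF, CV_32FC3);
    subtract(Scalar::all(1.0), mask3, invMask3);
    Mat outF = inputF.mul(invMask3) + blurredF.mul(mask3);

    Mat output;
    outF.convertTo(output, CV_8UC3);
    return output;
}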

Know the number of corners using cornerHarris

How can I know the number of corners that are calculated by cornerHarris? The function I wrote is as follows:
...
Mat gray;
cvtColor( img, gray, CV_BGR2GRAY );
int thresh = 160;
Mat dst, dst_norm, dst_norm_scaled;
dst = Mat::zeros( img.size(), CV_32FC1 );
// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
// Detecting corners
cornerHarris( gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
cornerHarris does not compute a specific number of corners. It produces a response image dst of the same size as your input gray, where each pixel holds the Harris response at that location. You then pick a threshold: pixels whose response lies above it are treated as corners, so a smaller threshold yields more corners.
In your case you can visit (and count) the corners for a predefined value of thresh like this:
int corner_count = 0;
for( int j = 0; j < dst_norm.rows; j++ ){
    for( int i = 0; i < dst_norm.cols; i++ ){
        if( (int) dst_norm.at<float>(j,i) > thresh ){
            corner_count++; /* or whatever else you would like to do with that corner */
        }
    }
}
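A compact alternative, reusing dst_norm and thresh from above, is to threshold the response and count the nonzero pixels; note that a single physical corner usually spans several neighboring pixels, so this overcounts unless you add non-maximum suppression (or use goodFeaturesToTrack, which returns a point list directly):
Mat corner_mask;
threshold(dst_norm, corner_mask, thresh, 255, THRESH_BINARY);
corner_mask.convertTo(corner_mask, CV_8U);
int corner_pixels = countNonZero(corner_mask);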
See this Harris Corner Detector Tutorial and cornerHarris OpenCV documentation for more information.

How to do an inverse DFT using the magnitude and phase of an image in OpenCV?

I want to show that the phase of an image carries more information than its magnitude, so I want to exchange the magnitudes of two images and then do the inverse DFT.
Here is the code:
int main()
{
    Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat I2 = imread("peppers.png", CV_LOAD_IMAGE_GRAYSCALE);
    Mat padded1, padded2;
    // expand the input images to optimal size
    int m1 = getOptimalDFTSize( I1.rows );
    int n1 = getOptimalDFTSize( I1.cols );
    int m2 = getOptimalDFTSize( I2.rows );
    int n2 = getOptimalDFTSize( I2.cols );
    // on the border add zero values
    copyMakeBorder(I1, padded1, 0, m1 - I1.rows, 0, n1 - I1.cols, BORDER_CONSTANT, Scalar::all(0));
    copyMakeBorder(I2, padded2, 0, m2 - I2.rows, 0, n2 - I2.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes1[] = {Mat_<float>(padded1), Mat::zeros(padded1.size(), CV_32F)};
    Mat planes2[] = {Mat_<float>(padded2), Mat::zeros(padded2.size(), CV_32F)};
    Mat complexI, complexII;
    // add to the expanded images another plane with zeros
    merge(planes1, 2, complexI);
    merge(planes2, 2, complexII);
    dft(complexI, complexI);
    dft(complexII, complexII);
    // compute the magnitude and phase
    // => magnitude: sqrt(Re(DFT(I))^2 + Im(DFT(I))^2), phase: atan2(Im(DFT(I)), Re(DFT(I)))
    split(complexI, planes1);   // planes1[0] = Re(DFT(I)), planes1[1] = Im(DFT(I))
    Mat ph1, magI1;
    phase(planes1[0], planes1[1], ph1);       // ph1 = phase
    magnitude(planes1[0], planes1[1], magI1); // magI1 = magnitude
    magI1 = magI1(Rect(0, 0, magI1.cols & -2, magI1.rows & -2));
    ph1 = ph1(Rect(0, 0, ph1.cols & -2, ph1.rows & -2));
    split(complexII, planes2);  // planes2[0] = Re(DFT(I)), planes2[1] = Im(DFT(I))
    Mat ph2, magI2;
    phase(planes2[0], planes2[1], ph2);       // ph2 = phase
    magnitude(planes2[0], planes2[1], magI2); // magI2 = magnitude
    magI2 = magI2(Rect(0, 0, magI2.cols & -2, magI2.rows & -2));
    ph2 = ph2(Rect(0, 0, ph2.cols & -2, ph2.rows & -2));
    // swap the magnitudes, keeping each image's phase
    planes1[1] = ph1; planes1[0] = magI2;
    planes2[1] = ph2; planes2[0] = magI1;
    dft(complexI, complexI, DFT_INVERSE);
    dft(complexII, complexII, DFT_INVERSE);
    imshow("image", complexI);
    waitKey();
    return 0;
}
I simply put the magnitude and phase back into the planes and then did the IDFT, which seems totally wrong.
I guess from your question that something in your dft round trip is not working. Try the code below (after adding your split-planes logic) and see if it works. The images have to be exactly the same size.
If something else is wrong, please show your images and results; maybe your code is correct and you are just expecting the wrong thing.
Here is the working example:
// Load an image
Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat I2 = imread("peppers.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat fI1;
Mat fI2;
I1.convertTo(fI1, CV_32F);
I2.convertTo(fI2, CV_32F);
//Perform DFT
Mat fourierTransform1;
Mat fourierTransform2;
dft(fI1, fourierTransform1, DFT_SCALE|DFT_COMPLEX_OUTPUT);
dft(fI2, fourierTransform2, DFT_SCALE|DFT_COMPLEX_OUTPUT);
//your split plane and everything else
//Perform IDFT
Mat inverseTransform1;
Mat inverseTransform2;
dft(fourierTransform1, inverseTransform1, DFT_INVERSE|DFT_REAL_OUTPUT);
dft(fourierTransform2, inverseTransform2, DFT_INVERSE|DFT_REAL_OUTPUT);
Mat result1;
Mat result2;
inverseTransform1.convertTo(result1, CV_8U);
inverseTransform2.convertTo(result2, CV_8U);
You can't recombine the magnitude and phase with the cv::merge() function; merge() only interleaves channels, while the real and imaginary planes have to be recomputed from magnitude and phase with cv::polarToCart(). Here's my code to do what you want:
#include <opencv2/opencv.hpp>

using namespace cv;

// Rearrange the quadrants of a Fourier image so that the origin is at
// the image center
void shiftDFT(Mat &fImage)
{
    Mat tmp, q0, q1, q2, q3;
    // first crop the image, if it has an odd number of rows or columns
    fImage = fImage(Rect(0, 0, fImage.cols & -2, fImage.rows & -2));
    int cx = fImage.cols / 2;
    int cy = fImage.rows / 2;
    // rearrange the quadrants of the Fourier image
    // so that the origin is at the image center
    q0 = fImage(Rect(0, 0, cx, cy));
    q1 = fImage(Rect(cx, 0, cx, cy));
    q2 = fImage(Rect(0, cy, cx, cy));
    q3 = fImage(Rect(cx, cy, cx, cy));
    // swap the quadrants diagonally
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
}

int main()
{
    // Load the images (they are assumed to have the same size)
    Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat I2 = imread("pepper.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat fI1;
    Mat fI2;
    I1.convertTo(fI1, CV_32F);
    I2.convertTo(fI2, CV_32F);
    // expand the input images to optimal size
    int m = getOptimalDFTSize( I1.rows );
    int n = getOptimalDFTSize( I1.cols );
    Mat padded1, padded2;
    // on the border add zero values
    copyMakeBorder(fI1, padded1, 0, m - I1.rows, 0, n - I1.cols, BORDER_CONSTANT, Scalar::all(0));
    copyMakeBorder(fI2, padded2, 0, m - I2.rows, 0, n - I2.cols, BORDER_CONSTANT, Scalar::all(0));
    // Perform the DFT on the padded images
    Mat fourierTransform1;
    Mat fourierTransform2;
    Mat planes1[2], planes2[2];
    dft(padded1, fourierTransform1, DFT_SCALE|DFT_COMPLEX_OUTPUT);
    dft(padded2, fourierTransform2, DFT_SCALE|DFT_COMPLEX_OUTPUT);
    shiftDFT(fourierTransform1);
    shiftDFT(fourierTransform2);
    split(fourierTransform1, planes1); // planes1[0] = Re(DFT(I)), planes1[1] = Im(DFT(I))
    split(fourierTransform2, planes2); // planes2[0] = Re(DFT(I)), planes2[1] = Im(DFT(I))
    // Convert to polar form; cartToPolar allocates the outputs itself
    Mat ph1, mag1;
    cartToPolar(planes1[0], planes1[1], mag1, ph1);
    Mat ph2, mag2;
    cartToPolar(planes2[0], planes2[1], mag2, ph2);
    // Exchange magnitude and phase, then convert back to real/imaginary planes
    polarToCart(mag1, ph2, planes1[0], planes1[1]);
    polarToCart(mag2, ph1, planes2[0], planes2[1]);
    merge(planes1, 2, fourierTransform1);
    merge(planes2, 2, fourierTransform2);
    shiftDFT(fourierTransform1);
    shiftDFT(fourierTransform2);
    // Perform the IDFT
    Mat inverseTransform1, inverseTransform2;
    dft(fourierTransform1, inverseTransform1, DFT_INVERSE|DFT_REAL_OUTPUT);
    dft(fourierTransform2, inverseTransform2, DFT_INVERSE|DFT_REAL_OUTPUT);
    namedWindow("original image 1");
    imshow("original image 1", I1);
    namedWindow("original image 2");
    imshow("original image 2", I2);
    waitKey(0);
    cv::Mat out1, out2;
    inverseTransform1.convertTo(out1, CV_8U);
    inverseTransform2.convertTo(out2, CV_8U);
    namedWindow("result image 1");
    imshow("result image 1", out1);
    namedWindow("result image 2");
    imshow("result image 2", out2);
    waitKey(0);
    return 0;
}
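As a quick sanity check on the cartToPolar/polarToCart round trip, here is a minimal self-contained sketch (independent of the images above): converting a pair of real/imaginary planes to magnitude and phase and straight back should reproduce the originals up to floating-point error.
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // Small random "spectrum": a real and an imaginary plane.
    cv::Mat re(4, 4, CV_32F), im(4, 4, CV_32F);
    cv::randu(re, -1.0f, 1.0f);
    cv::randu(im, -1.0f, 1.0f);

    // Forward: (re, im) -> (mag, ph); back: (mag, ph) -> (re2, im2).
    cv::Mat mag, ph, re2, im2;
    cv::cartToPolar(re, im, mag, ph);
    cv::polarToCart(mag, ph, re2, im2);

    // Both differences should be close to zero.
    std::cout << "max |re - re2| = " << cv::norm(re, re2, cv::NORM_INF) << std::endl;
    std::cout << "max |im - im2| = " << cv::norm(im, im2, cv::NORM_INF) << std::endl;
    return 0;
}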