How to imrotate with OpenCV, C++, and get the exact same result?

I tried everything available here related to this problem, but none of it gave me exactly the same results, so I'd like to know whether an exact match is possible, and if so, how to achieve it.
Let's talk about a simple example and hopefully find a way to get the same result in both Matlab and OpenCV:
Matlab:
test = [ 10 20 10 ; 20 10 10 ; 30 30 30]
im_rot = imrotate(double(test), -45, 'bilinear', 'crop');
Result:
im_rot =
    11.7157   14.1421   11.7157
    26.2132   10.0000   12.0711
    17.5736   24.1421    5.8579
OpenCV:
double data[9]{ 10, 20, 10, 20, 10, 10, 30, 30, 30 };
Mat test = Mat(Size(3, 3), CV_64F, data);
What lines of code can get the exact same result as the one above?
Edit:
Tried the following:
void matlabImrotate(Mat& src, double angle, int interpolationMethod, Mat& dst)
{
    // src: https://stackoverflow.com/questions/38715363/how-to-implement-imrotate-of-matlab-in-opencv
    try
    {
        // Special cases
        if (fmod(angle, 360.0) == 0.0)
            dst = src;
        else {
            Point2f center(src.cols / 2.0F, src.rows / 2.0F);
            Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
            cout << "center: " << center << endl;
            // determine bounding rectangle
            Rect bbox = RotatedRect(center, src.size(), angle).boundingRect();
            // adjust transformation matrix
            // cout << "bbox.size: " << bbox.size() << endl;
            rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
            rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
            warpAffine(src, dst, rot, bbox.size(), interpolationMethod);
        }
    }
    catch (const cv::Exception& e) // the original snippet had a try without a catch; added so it compiles
    {
        cerr << e.what() << endl;
    }
}
matlabImrotate(test, -45, INTER_LINEAR, res);
Result:
[0, 0, 0, 1.40625, 0, 0;
0, 0, 6.6796875, 11.69921875, 6.6796875, 0;
0, 8.7890625, 24.53125, 13.41796875, 14.53125, 2.9296875;
0, 2.8125, 23.4375, 20, 7.8125, 0.9375;
0, 0, 2.8125, 18.310546875, 1.875, 0;
0, 0, 0, 0.263671875, 0, 0]
Also tried:
void matlabImrotate2(Mat& src, double angle, int interpolationMethod, Mat& dst, int border = 0)
{
    // src: https://stackoverflow.com/questions/14870089/how-to-imrotate-with-opencv-2-4-3
    Mat bordered_source;
    int top, bottom, left, right;
    top = bottom = left = right = border;
    copyMakeBorder(src, bordered_source, top, bottom, left, right, BORDER_CONSTANT, cv::Scalar());
    Point2f src_center(bordered_source.cols / 2.0F, bordered_source.rows / 2.0F);
    Mat rot_mat = getRotationMatrix2D(src_center, angle, 1.0);
    // pass the interpolation flag; the original also had a stray 'return true;' in a void function
    warpAffine(bordered_source, dst, rot_mat, bordered_source.size(), interpolationMethod);
}
matlabImrotate2(test, -45, INTER_LINEAR, res);
Result:
[9.375, 17.28515625, 17.28515625;
23.4375, 21.09375, 11.09375;
2.8125, 23.4375, 15.625]
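The mismatch in both attempts comes down to two things. The bounding-box version reproduces imrotate's 'loose' behaviour (hence the 6x6 output), not 'crop', and both versions rotate about (cols / 2.0, rows / 2.0), whereas imrotate with 'crop' rotates about the centre pixel, which in 0-based OpenCV coordinates is ((cols - 1) / 2.0, (rows - 1) / 2.0). A minimal sketch that reproduces the imrotate output for the example above (checked for this odd-sized case only; even sizes and other border settings are untested assumptions):

    // rotate about the centre pixel, keep the input size ('crop'),
    // and fill uncovered pixels with 0, as imrotate does
    Point2f center((test.cols - 1) / 2.0F, (test.rows - 1) / 2.0F); // (1, 1) for a 3x3 image
    Mat rot = getRotationMatrix2D(center, -45, 1.0);
    Mat res;
    warpAffine(test, res, rot, test.size(), INTER_LINEAR, BORDER_CONSTANT, Scalar(0));
    // res should match the imrotate result above:
    // [11.7157..., 14.1421..., 11.7157...; 26.2132..., 10, 12.0711...; 17.5736..., 24.1421..., 5.8579...]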


corrupted size vs. prev_size in C++ [closed]

I am new to C++ (I started with it today) and I got an error that I can't solve myself.
When I run my code it sometimes works fine, and sometimes I get an error after a few seconds.
The errors that I have gotten are:
"corrupted size vs. prev_size Aborted (core dumped)"
"segmentation fault (core dumped)"
This is my code:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <random>
#include <chrono> // needed for the chrono using-declarations below

using namespace cv;
using namespace std;
using chrono::duration;
using chrono::duration_cast;
using chrono::high_resolution_clock;
using chrono::milliseconds;

class Product
{
public:
    Point location = Point(-80, 0);
    Scalar color;

    Product(int given_y, Scalar given_color)
    {
        location.y = given_y;
        color = given_color;
        cout << "Product created" << endl;
    }

    Mat update(double speed, Mat img, double dt)
    {
        location.x += speed * dt;
        circle(img, location, 25, Scalar(255, 255, 255), FILLED, LINE_8);
        circle(img, location, 20, color, FILLED, LINE_8);
        putText(img, "12", location + Point(-17, +7), FONT_HERSHEY_DUPLEX, .8, Scalar(255, 255, 255), 1, LINE_AA);
        return (img);
    }
};

int main(int argc, char **argv)
{
    // Constant variables
    const int kScreenWidth = 1920;
    const int kScreenHeight = 1080;
    const int kUpdateFpsMilliseconds = 600;
    const String kWindowName = "Moving dots";
    const Mat kBlackImg(kScreenHeight, kScreenWidth, CV_8UC3, Scalar(0, 0, 0));
    // Variables
    Scalar red = Scalar(255, 0, 0);
    Scalar green = Scalar(0, 255, 0);
    Scalar blue = Scalar(0, 0, 255);
    Scalar cyan = Scalar(0, 255, 255);
    Scalar magenta = Scalar(255, 0, 255);
    Scalar yellow = Scalar(255, 255, 0);
    Scalar black = Scalar(0, 0, 0);
    Scalar white = Scalar(255, 255, 255);
    vector<Product> products;
    Mat img;
    double speed = .2;
    double fps;
    int frame_counter = 0;
    int loop_dt;
    int time_since_fps_update = 0;
    int counter = 0;
    bool remove_product = false;
    bool full_screen = false;
    // Random number generators
    random_device rd_seed;
    mt19937 rng(rd_seed());
    uniform_int_distribution<mt19937::result_type> rd_screen_height(50, kScreenHeight - 50);
    uniform_int_distribution<mt19937::result_type> rd_new_product(0, 100);
    namedWindow(kWindowName, WINDOW_NORMAL);
    setWindowProperty(kWindowName, WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
    auto loop_start = high_resolution_clock::now();
    while (true)
    {
        // Black image
        Mat img = kBlackImg.clone();
        // Add new products
        if (rd_new_product(rng) < .1 * loop_dt)
        {
            products.push_back(Product(rd_screen_height(rng), red));
        }
        // Update products
        if (products.size() > 0)
        {
            for (int i = 0; i <= products.size(); i++)
            {
                img = products[i].update(speed, img, loop_dt);
            }
        }
        // Show text and border
        stringstream temp;
        temp << "FPS: " << fps << " Products: " << products.size();
        putText(img, temp.str(), Point(18, 38), FONT_HERSHEY_DUPLEX, .8, white, 1, LINE_AA);
        rectangle(img, Point(6, 6), Point(kScreenWidth - 6, kScreenHeight - 6), white, 12);
        rectangle(img, Point(6, 6), Point(kScreenWidth - 6, 60 - 6), white, 12);
        rectangle(img, Point(6, 6), Point(kScreenWidth - 6, kScreenHeight - 6), cyan, 3);
        rectangle(img, Point(6, 6), Point(kScreenWidth - 6, 60 - 6), cyan, 3);
        // Update screen
        cvtColor(img, img, COLOR_RGB2BGR);
        imshow(kWindowName, img);
        // Exit script
        if (waitKey(1) >= 0)
        {
            break;
        }
        // Handle FPS counter
        frame_counter++;
        loop_dt = duration_cast<milliseconds>(high_resolution_clock::now() - loop_start).count();
        loop_start = high_resolution_clock::now();
        time_since_fps_update += loop_dt;
        if (time_since_fps_update > kUpdateFpsMilliseconds)
        {
            fps = (double)frame_counter / (double)time_since_fps_update * 1000;
            time_since_fps_update -= kUpdateFpsMilliseconds;
            frame_counter = 0;
        }
    }
    destroyWindow(kWindowName);
    return 0;
}
I think that it might have something to do with the color scalars or with the products vector.
Any help is appreciated!
The main culprit is likely:
for (int i = 0; i <= products.size(); i++)
{
    img = products[i].update(speed, img, loop_dt);
}
That should be i < products.size(). Vector indices run from 0 to size() - 1, like pretty much all other standard containers; products[products.size()] reads past the end of the vector's buffer, which is exactly the kind of heap corruption that produces intermittent "corrupted size vs. prev_size" and segmentation-fault aborts.
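A range-based for loop avoids this class of bug entirely; a minimal sketch of the same update loop (names taken from the question):

    // reference needed because update() mutates the product's location
    for (Product& product : products)
    {
        img = product.update(speed, img, loop_dt);
    }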

How to rotate a point cloud given 3 points?

I have a 3D depth camera placed above three moving belt lanes and I'm trying to rotate the depth image (or the point cloud) so that the three lanes match the camera's angle. I'm not experienced at all with point clouds, but after some research I've tried to do the following:
1. Acquire an XYZ cartesian image from the sensor, which I turn into a point cloud vector.
2. Define three points on the point cloud, one on each of the three lanes.
3. Fit a plane through them by finding the plane coefficients.
4. Find the cross product between the plane normal and the z normal, and then find the angle of rotation.
5. Use the Eigen library to transform the PCL cloud and turn it back into an OpenCV Mat (a sketch of steps 4 and 5 follows below).
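For what it's worth, the rotation in steps 4 and 5 can be built without computing an explicit angle, which sidesteps sign mistakes. A minimal sketch, assuming the plane normal from step 4 is already available (Eigen::Quaternionf::FromTwoVectors returns the rotation that maps its first argument onto its second):

    // 'cross' is the plane normal from step 4; the other names are hypothetical
    Eigen::Vector3f floor_normal(cross.x, cross.y, cross.z);
    Eigen::Vector3f z_normal(0.0f, 0.0f, 1.0f);
    // rotation that maps the fitted plane's normal onto the z axis
    Eigen::Quaternionf q = Eigen::Quaternionf::FromTwoVectors(floor_normal, z_normal);
    Eigen::Affine3f transform = Eigen::Affine3f::Identity();
    transform.rotate(q);
    pcl::transformPointCloud(*floor_plane, *xyzCentered, transform);

Two details in the code below are also worth checking: rotation_vector.normalized() returns a normalized copy and discards it (the in-place call is normalize()), and xyzCentered is dereferenced without ever being allocated with new pcl::PointCloud<pcl::PointXYZ>.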
For whatever reason, I always end up with a bad image with max-int values on one side and zeros on the other. I'm not certain anymore if there's something wrong with the code or if the method above is incorrect to start with.
My code so far:
// helper functions
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat);
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr);
void colorize(cv::Mat& src, cv::Mat& dst);
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound);

while (1)
{
    // camera framegrabber object to capture an image
    fg->SWTrigger();
    if (!fg->WaitForFrame(im.get(), 2000))
    {
        throw;
    }
    // openCV Mat declarations
    cv::Mat zDepth, zDepthColor;
    cv::Mat xyz = im->XYZImage();
    vector<cv::Mat> channels(3);
    cv::split(xyz, channels);
    zDepth = channels[0];
    cv::imwrite("xyzMat.png", xyz);
    cv::imwrite("depthImage.png", zDepth);
    clip(zDepth, 1250, 1400);
    colorize(zDepth, zDepthColor);
    cv::imwrite("depthColored.png", zDepthColor);
    // specify a 3D point on each lane
    // (w is the image width, defined during the skipped camera initialization;
    // note that cv::Mat::at takes (row, col), i.e. (y, x))
    cv::Point3i p1, p2, p3;
    p1.x = w / 4;
    p1.y = 24;
    p1.z = zDepth.at<uint16_t>(p1.x, p1.y);
    p2.x = w / 2;
    p2.y = 70;
    p2.z = zDepth.at<uint16_t>(p2.x, p2.y);
    p3.x = int(w * 0.75);
    p3.y = 114;
    p3.z = zDepth.at<uint16_t>(p3.x, p3.y);
    auto cross = (p2 - p1).cross(p3 - p1);
    // transform Mats to point clouds
    pcl::PointCloud<pcl::PointXYZ>::Ptr floor_plane, xyzCentered;
    floor_plane = MatToPcl(zDepth);
    Eigen::Matrix<float, 1, 3> floor_plane_normal_vector, xy_plane_normal_vector, rotation_vector;
    floor_plane_normal_vector[0] = cross.x;
    floor_plane_normal_vector[1] = cross.y;
    floor_plane_normal_vector[2] = cross.z;
    // specify the z normal from the xy-plane
    xy_plane_normal_vector[0] = 0.0;
    xy_plane_normal_vector[1] = 0.0;
    xy_plane_normal_vector[2] = 1.0;
    // cross product and normalize vector
    rotation_vector = xy_plane_normal_vector.cross(floor_plane_normal_vector);
    rotation_vector.normalized();
    // angle of rotation
    float theta = -atan2(rotation_vector.norm(), xy_plane_normal_vector.dot(floor_plane_normal_vector));
    // transform plane according to angle
    Eigen::Affine3f transform_2 = Eigen::Affine3f::Identity();
    transform_2.translation() << 0, 0, 30;
    transform_2.rotate(Eigen::AngleAxisf(theta, rotation_vector));
    pcl::transformPointCloud(*floor_plane, *xyzCentered, transform_2);
    // Pointcloud to Mat again
    cv::Mat xyzRot = PclToMat(xyzCentered);
    // clipLow and clipHigh values obtained from trackbars
    clip(xyzRot, clipLow, clipHigh);
    cv::Mat xyzRotColor;
    colorize(xyzRot, xyzRotColor);
    cv::imshow("result", xyzRotColor);
    cv::waitKey(1);
}
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In: cv::Mat
     * Out: pcl::PointCloud
     */
    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);
    vector<cv::Mat> channels(3);
    cv::split(xyzMat, channels);
    for (int i = 0; i < xyzMat.rows; i++)
    {
        for (int j = 0; j < xyzMat.cols; j++)
        {
            pcl::PointXYZ point;
            point.x = channels[0].at<short>(i, j);
            point.y = channels[1].at<short>(i, j);
            point.z = channels[2].at<short>(i, j);
            // when color needs to be added:
            //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
            //point.rgb = *reinterpret_cast<float*>(&rgb);
            point_cloud_ptr->points.push_back(point);
        }
    }
    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    /*point_cloud_ptr->height = 1;*/
    return point_cloud_ptr;
}
// convert PCL to cv::Mat, taking only the depth values at z.
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
    cv::Mat depth_image;
    if (!depth_image.empty())
        depth_image.release();
    depth_image.create(132, 176, CV_32F);
    int count = 0;
    for (int i = 0; i < 132; i++)
    {
        for (int j = 0; j < 176; j++)
        {
            depth_image.at<float>(i, j) = point_cloud_ptr->points.at(count++).z;
        }
    }
    depth_image.convertTo(depth_image, CV_16UC1);
    return depth_image;
}
/*
* For display purposes with cv::imshow, will convert a 16bit depth image to 8bit 3 channel colored image
* thanks to fmw42 for the function at https://stackoverflow.com/a/67678634/13184944
*/
void colorize(cv::Mat& src, cv::Mat& dst)
{
    // stretch the image by rescaling intensity within the output 8-bit range
    double oldMin;
    double oldMax;
    cv::Point minLoc;
    cv::Point maxLoc;
    cv::minMaxLoc(src, &oldMin, &oldMax, &minLoc, &maxLoc);
    double oldRange = oldMax - oldMin;
    double newMin = 0.0;
    double newMax = 255.0;
    double newRange = newMax - newMin;
    //cout << oldMin << ' ' << oldMax << ' ' << oldRange << '\n';
    // clip the values of the image to the required range
    clip(src, oldMin, oldMax);
    //TODO: Look at difference between OpenCV normalization and skimage
    normalize(src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
    //img = (img - cv::Scalar(oldMin)) / (cv::Scalar(oldRange));
    //img = (img * cv::Scalar(newRange)) + cv::Scalar(newMin);
    cv::Mat channels[3] = { dst, dst, dst };
    cv::merge(channels, 3, dst);
    cv::Mat C(1, 6, CV_8UC(3));
    cv::Vec3b color1 = { 0, 0, 255 };
    cv::Vec3b color2 = { 0, 165, 255 };
    cv::Vec3b color3 = { 0, 255, 255 };
    cv::Vec3b color4 = { 255, 255, 0 };
    cv::Vec3b color5 = { 255, 0, 0 };
    cv::Vec3b color6 = { 128, 64, 64 };
    C.at<cv::Vec3b>(0, 0) = color1;
    C.at<cv::Vec3b>(0, 1) = color2;
    C.at<cv::Vec3b>(0, 2) = color3;
    C.at<cv::Vec3b>(0, 3) = color4;
    C.at<cv::Vec3b>(0, 4) = color5;
    C.at<cv::Vec3b>(0, 5) = color6;
    cv::Mat lut;
    cv::resize(C, lut, cv::Size(256, 1), 0.0, 0.0, cv::INTER_LINEAR);
    //cout << lut.size << '\n';
    cv::LUT(dst, lut, dst);
    return;
}

void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound)
{
    m.setTo(lowerBound, m < lowerBound);
    m.setTo(upperBound, m > upperBound);
    return;
}
Apologies if this is really basic or something is obviously wrong, but I feel stuck here. I also tried segmentation with RANSAC, but it never aligns the plane the way I wanted.
Thanks!
Edit: Updated the code to include additional steps and functions. Only the camera initialization is skipped.
The clip and colorize functions aid in displaying the 16-bit depth image. My end goal here is to be able to use trackbars with clip(zImg, low, high) where the three lanes are always vertically aligned (as in, they change color at the same rate) as I change the clip values.
download link with image files: link
Colorized depth image:

Draw vertical HoughLines till certain intersection points

My idea is to draw the vertical lines (detected after running Canny) only between their intersection points with the diagonal lines, i.e. from the intersection with one diagonal line to the intersection with the other (each such point is an intersection between a vertical and a diagonal line). As a reference, here is an image; the red vertical (Hough) lines are the ones that should be drawn:
So far I just detect all vertical lines with this implementation:
int main(int argc, char *argv[]) {
    // src: input image; loading omitted in the question
    std::vector<cv::Point> diagonalLine = DiagonalLines::diagonalLines(src);
    Mat wdst, cwdst, contRegion;
    vector<Vec4i> vericalLines;
    double maxLineGap = 200.0;
    double threshold = 100;
    std::vector<cv::Vec4i> elemLinesCur;
    cv::Scalar mu, sigma;
    meanStdDev(src, mu, sigma);
    Canny(src, wdst, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0], 3, false);
    cvtColor(wdst, cwdst, CV_GRAY2BGR);
    HoughLinesP(wdst, vericalLines, 1, CV_PI / 2, threshold, 50, 200);
    cv::Vec4i current, previous;
    cv::Point pt1, pt2, ppt1, ppt2;
    for (size_t i = 1; i < vericalLines.size(); i++) {
        current = vericalLines[i];
        pt1 = cv::Point(current[0], current[1]);
        pt2 = cv::Point(current[2], current[3]);
        previous = vericalLines[i - 1];
        ppt1 = cv::Point(previous[0], previous[1]);
        ppt2 = cv::Point(previous[2], previous[3]);
        if (diagonalLine[i - 1].y > pt2.y && diagonalLine[i].y < pt1.y) {
            std::cout << "Intersection: " << pt2.x << "\n";
        }
        double distanceBetweenPointsX = abs(pt1.x - ppt1.x) * sqrt(2);
        if (distanceBetweenPointsX >= 12) {
            elemLinesCur.push_back(current);
            double angle = atan2(ppt2.y - ppt1.y, ppt2.x - ppt1.x) * 180.0 / CV_PI; // draw only vertical lines (90 degrees)
            if (angle) {
                line(cwdst, pt1, pt2, cv::Scalar(0, 0, 255), 2, CV_AA);
            }
            // do some stuff
        }
    }
    return 0; // closing braces added; the original snippet was cut off here
}
...and here is a method which detects only diagonal lines (it looks similar to the one above):
std::vector<cv::Point> diagonalLines(cv::Mat src) {
    std::vector<cv::Point> hitPoint;
    cv::Mat ddst, cddst;               // presumably globals in the original; declared here so the snippet compiles
    std::vector<cv::Vec4i> vertlines;
    Scalar mu, sigma;
    meanStdDev(src, mu, sigma);
    Canny(src, ddst, mu.val[0] - sigma.val[0], mu.val[0] + sigma.val[0], 3, false);
    cvtColor(ddst, cddst, CV_GRAY2BGR);
    HoughLinesP(ddst, vertlines, 1, CV_PI / 180, 100, 50, 10);
    cv::Point pt1, pt2;
    for (size_t i = 1; i < vertlines.size(); i++) {
        cv::Vec4i current = vertlines[i];
        pt1 = cv::Point(current[0], current[1]);
        pt2 = cv::Point(current[2], current[3]);
        double angle = atan2(pt2.y - pt1.y, pt2.x - pt1.x) * 180.0 / CV_PI;
        if (angle != -90 && angle != 90) {
            //line(cddst, pt1, pt2, Scalar(0, 0, 255), 2, CV_AA);
            hitPoint.push_back(pt1);
            hitPoint.push_back(pt2);
        }
    }
    return hitPoint;
}
What I know:
I should calculate all those intersection points. I tried it in if (diagonalLine[i - 1].y > pt2.y && diagonalLine[i].y < pt1.y), but I don't get the further steps. Could someone help me? Thank you in advance!
The OpenCV function line() accepts endpoints as the arguments, so all you need to do is calculate the intersections and use those intersection points for the endpoints of the vertical lines. You can calculate the intersection directly from the endpoints you have as the result of HoughLinesP() using determinants.
In Python, a function to compute the intersection points might look like
def find_intersection(line1, line2):
    # extract points
    x1, y1 = line1[0]
    x2, y2 = line1[1]
    x3, y3 = line2[0]
    x4, y4 = line2[1]
    # compute determinant
    Px = ((x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4)) / \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    Py = ((x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4 - y3*x4)) / \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    return (int(Px), int(Py))
Let's show how you might use this. Suppose your image looked like this:
import cv2
import numpy as np

# draw image and lines
img = np.ones((500, 500, 3)) * 255
diag1 = [(0, 0), (499, 100)]
diag2 = [(0, 499), (499, 399)]
vert1 = [(100, 0), (100, 499)]
vert2 = [(400, 0), (400, 499)]
cv2.line(img, diag1[0], diag1[1], color=[0, 0, 255])
cv2.line(img, diag2[0], diag2[1], color=[0, 0, 255])
cv2.line(img, vert1[0], vert1[1], color=[0, 255, 0])
cv2.line(img, vert2[0], vert2[1], color=[0, 255, 0])
To cut them off at the intersections, simply use the function to find those points and draw each vertical line only between its intersection points with the two diagonal lines.
# get intersection points
vert1_intersect = [find_intersection(diag1, vert1), find_intersection(diag2, vert1)]
vert2_intersect = [find_intersection(diag1, vert2), find_intersection(diag2, vert2)]
# draw vertical lines from intersection points
img = np.ones((500, 500, 3)) * 255
diag1 = [(0, 0), (499, 100)]
diag2 = [(0, 499), (499, 399)]
vert1 = [(100, 0), (100, 499)]
vert2 = [(400, 0), (400, 499)]
cv2.line(img, diag1[0], diag1[1], color=[0, 0, 255])
cv2.line(img, diag2[0], diag2[1], color=[0, 0, 255])
cv2.line(img, vert1_intersect[0], vert1_intersect[1], color=[0, 255, 0])
cv2.line(img, vert2_intersect[0], vert2_intersect[1], color=[0, 255, 0])
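Since the question's code is C++, here is an untested C++ translation of find_intersection using the same determinant formula (the cv::Point-pair signature is an assumption; adapt it to your Vec4i endpoints):

    // determinant-based intersection of two infinite lines, each given by two endpoints
    cv::Point findIntersection(cv::Point a1, cv::Point a2, cv::Point b1, cv::Point b2)
    {
        double x1 = a1.x, y1 = a1.y, x2 = a2.x, y2 = a2.y;
        double x3 = b1.x, y3 = b1.y, x4 = b2.x, y4 = b2.y;
        double denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
        // parallel lines have a zero denominator; the caller must handle that case
        double px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / denom;
        double py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / denom;
        return cv::Point(cvRound(px), cvRound(py));
    }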

Detect a target circle from other circles in an image or webcam frame

I'm trying to detect this circle
within an image with several other circles.
How would you go about doing something like this? I tried combining both colour ranges, but this didn't work.
Here's my current code:
// Threshold for yellow colour on the Drop-off point
int bLowH = 25;
int bHighH = 79;
int bLowS = 0;
int bHighS = 121;
int bLowV = 87;
int bHighV = 196;
// Threshold values for red colour on the Drop-off point
int gLowH = 148;
int gHighH = 180;
int gLowS = 54;
int gHighS = 255;
int gLowV = 96;
int gHighV = 247;
Mat imgHSV;
Mat yellowRange;
Mat redRange;
cvtColor(frame, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
//Threshold the images.. Only Keep The threshold values for the dropoff point
inRange(imgHSV, Scalar(bLowH, bLowS, bLowV), Scalar(bHighH, bHighS, bHighV), yellowRange);
inRange(imgHSV, Scalar(gLowH, gLowS, gLowV), Scalar(gHighH, gHighS, gHighV), redRange);
// combine both images and slightly blur...
Mat dropoff_image;
addWeighted(yellowRange, 1.0, redRange, 1.0, 0.0, dropoff_image);
GaussianBlur(dropoff_image, dropoff_image, Size(9, 9), 2, 2);
// Hough Transform to detect circle
vector<Vec3f> dropoff;
HoughCircles(dropoff_image, dropoff, CV_HOUGH_GRADIENT, 1, dropoff_image.rows / 8, 100, 20, 0, 0);
if (dropoff.size() == 0)
{
    cout << "No dropoff circle found" << endl;
    exit(-1);
}
for (size_t current_circle = 0; current_circle < dropoff.size(); ++current_circle)
{
    cout << "circle found" << endl;
    Point center(round(dropoff[current_circle][0]), round(dropoff[current_circle][1]));
    int radius = round(dropoff[current_circle][2]);
    circle(frame, center, radius, Scalar(0, 255, 0), 5);
    imshow("Gaussian", dropoff_image);
}
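One note on the combining step above: yellowRange and redRange are binary masks from inRange (values 0 or 255), so addWeighted with weights of 1.0 merely saturates to the same result a bitwise OR would give; OR states the intent directly. A drop-in replacement for the addWeighted line:

    // a pixel passes if it is in either colour range
    Mat dropoff_image;
    bitwise_or(yellowRange, redRange, dropoff_image);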

OpenCV: Histogram not displaying correctly

I am trying to plot the histogram of Lenna here, the 8-bit single-channel grayscale image.
But it is not displaying the output correctly, as can be seen in the following output:
void show_histogram_image(Mat img1)
{
    int sbins = 256;
    int histSize[] = { sbins };
    float sranges[] = { 0, 256 };
    const float* ranges[] = { sranges };
    cv::MatND hist;
    int channels[] = { 0 };
    cv::calcHist(&img1, 1, channels, cv::Mat(), // do not use mask
                 hist, 1, histSize, ranges,
                 true,  // the histogram is uniform
                 false);
    double maxVal = 0;
    minMaxLoc(hist, 0, &maxVal, 0, 0);
    int xscale = 10;
    int yscale = 10;
    cv::Mat hist_image;
    hist_image = cv::Mat::zeros(256, sbins * xscale, CV_8UC1);
    for (int s = 0; s < sbins; s++)
    {
        float binVal = hist.at<float>(s, 0);
        int intensity = cvRound(binVal * 255 / maxVal);
        rectangle(hist_image, cv::Point(s * xscale, 0),
                  cv::Point((s + 1) * xscale - 1, intensity),
                  cv::Scalar::all(255),
                  CV_FILLED);
    }
    imshow("Image1", hist_image);
    waitKey(0);
}
Here is my main():
int main()
{
    // note: imread's second argument is a flag; CV_8UC1 happens to equal IMREAD_GRAYSCALE (0)
    Mat img1 = imread("lena512.bmp", CV_8UC1);
    if (img1.empty()) // check whether the image is valid or not
    {
        cout << "Error : Image cannot be read..!!" << endl;
        system("pause"); // wait for a key press
        return -1;
    }
    show_histogram_image(img1);
}
And here is the output histogram image:
I tried changing xscale, but it still does not come out correctly.
Update
I made the following changes:
rectangle(hist_image, cv::Point(s * xscale, hist_image.rows),
          cv::Point((s + 1) * xscale - 1, hist_image.rows - intensity),
          cv::Scalar::all(255), CV_FILLED);
And now the output is:
It is much better, but I need lines and clearly visible bins. And it looks like some part is hidden on the right side.
Update 2
I changed CV_FILLED to '1' and now I have:
Since the image origin in OpenCV is (0, 0) at the top-left corner, the y-axis points downwards, so you will have to invert the y-values for your histogram drawing:

rectangle(hist_image, cv::Point(s * xscale, hist_image.rows),
          cv::Point((s + 1) * xscale - 1, hist_image.rows - intensity),
          cv::Scalar::all(255),
          CV_FILLED);
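On the follow-up points in Update 2: the part hidden on the right side is simply because the histogram image is sbins * xscale = 2560 pixels wide, so it does not fit the screen; reduce xscale or resize the image for display. For clearly visible bins, one option is to scale the counts with cv::normalize and draw a thin outline over each filled bar; a rough sketch under those assumptions, reusing the question's variable names:

    // scale bin heights directly to the drawing area's height
    cv::normalize(hist, hist, 0, hist_image.rows, cv::NORM_MINMAX);
    for (int s = 0; s < sbins; s++)
    {
        int intensity = cvRound(hist.at<float>(s, 0));
        // filled bar growing upwards from the bottom row
        rectangle(hist_image, cv::Point(s * xscale, hist_image.rows),
                  cv::Point((s + 1) * xscale - 1, hist_image.rows - intensity),
                  cv::Scalar::all(255), CV_FILLED);
        // 1-pixel mid-gray outline so adjacent bins stay distinguishable
        rectangle(hist_image, cv::Point(s * xscale, hist_image.rows),
                  cv::Point((s + 1) * xscale - 1, hist_image.rows - intensity),
                  cv::Scalar::all(128), 1);
    }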