Not able to display the histogram properly from a function in C++

I want to write a function that displays a histogram in C++ (Visual Studio). The code below calculates a histogram and displays it, but I get the same histogram for all the images I use. Kindly advise where I might need improvement. Thank you.
#include <opencv2/opencv.hpp>

using namespace cv;

class Histogram {
public:
    Mat calc_histogram(Mat src) {
        Mat hist;
        hist = Mat::zeros(256, 1, CV_32F);
        src.convertTo(src, CV_32F);
        double value{ 0 };
        for (int i = 0; i < src.rows; i++)
        {
            for (int j = 0; j < src.cols; j++)
            {
                value = src.at<float>(i, j);
                // Add 1 to the bin for this pixel intensity, giving the total pixel count at each intensity
                hist.at<float>(value) = hist.at<float>(value) + 1;
            }
        }
        return hist;
    }

    void plot_histogram(Mat histogram) {
        Mat histogram_image(400, 512, CV_8UC3, Scalar(0, 0, 0)); // Background image to draw the histogram on,
                                                                 // 512 pixels wide for a clearer image
        Mat normalized_histogram;
        normalize(histogram, normalized_histogram, 0, 400, NORM_MINMAX, -1, Mat());

        for (int i{ 0 }; i < 256; i++)
        {
            rectangle(histogram_image, Point(2 * i, histogram_image.rows - normalized_histogram.at<float>(i)),
                Point(2 * (i + 1), histogram_image.rows), Scalar(255, 0, 0));
        }
        //namedWindow("Histogram", WINDOW_NORMAL);
        imshow("Histogram", histogram_image);
    }
};

int main() {
    Mat img;
    img = imread("Shiv_Mahadev.png");

    Histogram H1;
    Mat hist = H1.calc_histogram(img);
    H1.plot_histogram(hist);

    waitKey(0);
    destroyAllWindows();
    return 0;
}
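
A likely culprit: imread() with its default flags loads a three-channel BGR image, so after convertTo the matrix is CV_32FC3 and src.at<float>(i, j) no longer addresses one value per pixel; the bin index is also a double rather than an int. A minimal sketch of a grayscale-safe calc_histogram, assuming a single-channel histogram is the goal:

    Mat calc_histogram(Mat src) {
        Mat hist = Mat::zeros(256, 1, CV_32F);
        if (src.channels() > 1)
            cvtColor(src, src, COLOR_BGR2GRAY); // collapse BGR to one channel first
        src.convertTo(src, CV_32F);
        for (int i = 0; i < src.rows; i++) {
            for (int j = 0; j < src.cols; j++) {
                int bin = static_cast<int>(src.at<float>(i, j)); // 0..255 for 8-bit input
                hist.at<float>(bin) += 1.0f;
            }
        }
        return hist;
    }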

Debug assertion failure, "__acrt_first_block == header" when using openCV for live webcam? [duplicate]

This question already has answers here:
why opencv imshow() create a new window has the same name as namedWindow() does in Debug Mode? (1 answer)
Debug Assertion Failed! Expression: __acrt_first_block == header (6 answers)
Closed 4 years ago.
I'm trying to do finger recognition using OpenCV. It worked correctly when processing a single image from the capture, but after adding a while loop to go from single-image capture to live processing, a heap error appears. I'm running Visual Studio 2017 with OpenCV 3.4.1.
The error from the Microsoft Visual C++ Runtime Library is
Debug Assertion Failed!
File: minkernel\crts\ucrt\src\appcrt\heap\debug_heap.cpp
Line: 996
Expression: __acrt_first_block == header
The code I'm using is:
#include "stdafx.h"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int myMax(int a, int b, int c);
int myMin(int a, int b, int c);
void mySkinDetect(Mat& src, Mat& dst);
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
void thresh_callback(int, void*);
int main(){
VideoCapture cap(0);
while (1) {
cap >> src;
Mat frameDest;
frameDest = Mat::zeros(src.rows, src.cols, CV_8UC1);
mySkinDetect(src, frameDest);
int erosion_size = 1;
Mat element = getStructuringElement(MORPH_RECT,
Size(2 * erosion_size + 1, 2 * erosion_size + 1),
Point(erosion_size, erosion_size));
erode(frameDest, frameDest, element);
erode(frameDest, frameDest, element);
namedWindow("Skin", WINDOW_AUTOSIZE);
imshow("Skin", frameDest);
blur(frameDest, src, Size(3, 3));
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27) { break; }
}
return(0);
}
void thresh_callback(int, void*)
{
    Mat threshold_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    threshold(src, threshold_output, thresh, 255, THRESH_BINARY);
    findContours(threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
    vector<vector<Point> > hull(contours.size());
    vector<vector<int> > hullsI(contours.size());
    vector<vector<Vec4i> > defects(contours.size());
    int index = 0;
    int area = 0;
    for (int i = 0; i < contours.size(); i++)
    {
        double a = contourArea(contours[i]);
        if (a > area)
        {
            area = a;
            index = i;
        }
    }
    for (int i = 0; i < contours.size(); i++)
    {
        convexHull(contours[i], hull[i], false);
        convexHull(contours[i], hullsI[i], false);
        if (hullsI[i].size() > 3)
        {
            convexityDefects(contours[i], hullsI[i], defects[i]);
        }
    }
    Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
    for (size_t i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        drawContours(drawing, contours, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point());
        drawContours(drawing, hull, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point());
    }
    int fingers = 0;
    if (area > 50)
    {
        for (int j = 0; j < defects[index].size(); ++j)
        {
            const Vec4i& def = defects[index][j];
            float depth = def[3] / 256; // defect depth is stored fixed-point, scaled by 256
            if (depth > 5) // filter defects by depth
            {
                int start = def[0];
                Point ptStart(contours[index][start]);
                int end = def[1];
                Point ptEnd(contours[index][end]);
                int min = def[2];
                Point ptFar(contours[index][min]);
                line(drawing, ptStart, ptEnd, Scalar(0, 255, 0), 1);
                line(drawing, ptStart, ptFar, Scalar(0, 255, 0), 1);
                line(drawing, ptEnd, ptFar, Scalar(0, 255, 0), 1);
                circle(drawing, ptFar, 4, Scalar(0, 255, 0), 2);
                fingers += 1;
            }
        }
    }
    std::string s = std::to_string(fingers - 1);
    namedWindow("Hull demo", WINDOW_AUTOSIZE);
    putText(drawing, "Number Fingers = " + s, Point(drawing.cols / 1.5, drawing.rows / 10), FONT_HERSHEY_PLAIN, 1.2f, Scalar(200, 0, 0), 2);
    imshow("Hull demo", drawing);
}
// Function that returns the maximum of 3 integers
int myMax(int a, int b, int c) {
    int m = a;
    (void)((m < b) && (m = b));
    (void)((m < c) && (m = c));
    return m;
}

// Function that returns the minimum of 3 integers
int myMin(int a, int b, int c) {
    int m = a;
    (void)((m > b) && (m = b));
    (void)((m > c) && (m = c));
    return m;
}

// Function that detects whether a pixel belongs to the skin based on RGB values
void mySkinDetect(Mat& src, Mat& dst) {
    // Surveys of skin color modeling and detection techniques:
    // Vezhnevets, Vladimir, Vassili Sazonov, and Alla Andreeva. "A survey on pixel-based skin color detection techniques." Proc. Graphicon. Vol. 3. 2003.
    // Kakumanu, Praveen, Sokratis Makrogiannis, and Nikolaos Bourbakis. "A survey of skin-color modeling and detection methods." Pattern Recognition 40.3 (2007): 1106-1122.
    for (int i = 0; i < src.rows; i++) {
        for (int j = 0; j < src.cols; j++) {
            // For each pixel, read the 3 color channels and apply the skin-color rule below
            Vec3b intensity = src.at<Vec3b>(i, j); // Vec3b is a vector of 3 uchar (unsigned char)
            int B = intensity[0]; int G = intensity[1]; int R = intensity[2];
            if ((R > 95 && G > 40 && B > 20) && (myMax(R, G, B) - myMin(R, G, B) > 15) && (abs(R - G) > 15) && (R > G) && (R > B)) {
                dst.at<uchar>(i, j) = 255;
            }
        }
    }
}
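
Per the linked duplicates, this assertion typically fires when memory allocated by one CRT is freed by another, most often when a Debug build links against the Release OpenCV binaries so that std::string window names cross a DLL boundary between mismatched heaps. Note also that createTrackbar refers to a window named "Source" that is never created. A sketch of the usual remedies (the library names are assumptions based on OpenCV 3.4.1's prebuilt binaries):

    // Sketch: the fix is usually environmental rather than algorithmic.
    // 1) In the Debug configuration, link against OpenCV's debug libraries
    //    (e.g. opencv_world341d.lib instead of opencv_world341.lib) so both
    //    sides of every std::string allocation use the same CRT heap.
    // 2) Create windows once, before the loop, so the trackbar attaches to
    //    a window that actually exists:
    int main() {
        VideoCapture cap(0);
        if (!cap.isOpened()) return -1;         // fail early if no camera
        namedWindow("Skin", WINDOW_AUTOSIZE);
        namedWindow("Source", WINDOW_AUTOSIZE); // the window the trackbar expects
        createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
        while (true) {
            cap >> src;
            if (src.empty()) break;
            // ... per-frame skin detection and thresholding as above ...
            if (waitKey(30) == 27) break;
        }
        return 0;
    }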

How to implement a connected-component algorithm on a part of an image with OpenCV?

I want to use a connected-component algorithm for object detection. I can run this algorithm on the full image, but I want to run connected components on only a part of the image. For example, my image is 760*520 and I want to run the algorithm on a square given by (x, y, w, h) = (350, 270, 60, 60). This is a part of my code:

Mat image;
Mat stat, centroid;
int threshval = 100;

static void on_trackbar(int, void*) {
    Mat bw = threshval < 128 ? (image < threshval) : (image > threshval);
    Mat labelImage(image.size(), CV_32S);
    int nLabels = connectedComponentsWithStats(bw, labelImage, stat, centroid, 8);
    std::vector<Vec3b> colors(nLabels);
    colors[0] = Vec3b(0, 0, 0); // Background
    for (int label = 1; label < nLabels; ++label) {
        colors[label] = Vec3b((rand() & 255), (rand() & 255), (rand() & 255));
    }
    Mat dst(image.size(), CV_8UC3);
    for (int r = 0; r < dst.rows; ++r) {
        for (int c = 0; c < dst.cols; ++c) {
            int label = labelImage.at<int>(r, c);
            Vec3b &pixel = dst.at<Vec3b>(r, c);
            pixel = colors[label];
        }
    }
    imshow("Connected Components", dst);
}
Other than using image(cv::Rect(350, 270, 60, 60)) instead of image, do you have any idea to help me? I'm a beginner in OpenCV and C++. Thanks a lot.
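
Since the asker wants an option other than image(cv::Rect(...)), one alternative, sketched here under the assumption that labels and stats should stay in full-image coordinates: mask the thresholded image so that only pixels inside the square can form components:

    // Zero out everything outside the square before labelling, so
    // connectedComponentsWithStats only finds components inside it.
    Mat bw = threshval < 128 ? (image < threshval) : (image > threshval);
    Mat mask = Mat::zeros(bw.size(), CV_8UC1);
    mask(Rect(350, 270, 60, 60)) = 255;  // keep only the region of interest
    bitwise_and(bw, mask, bw);           // suppress pixels outside the square
    Mat labelImage(image.size(), CV_32S);
    int nLabels = connectedComponentsWithStats(bw, labelImage, stat, centroid, 8);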

From float array to Mat, concatenate blocks of an image

I have an 800x800 image which is broken down into 16 blocks of 200x200 (you can see the previous post here).
These blocks are: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now I want to get the same images/blocks back, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        imshow( "Display window", mf );
        waitKey(0);
    }
}
So the problem is that I am receiving an OpenCV Error: Assertion failed in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800, 800, CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        Rect roi(j, i, 200, 200);
        mf.copyTo( big(roi) );
    }
}
imwrite( "testing", big );
This gives me an OpenCV Error: Assertion failed (!fixedSize()) in release, at mf.copyTo( big(roi) );.
First, you need to know where your subimages are located inside the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can use pointers (keep in mind that subimages are not continuous), or simply use copyTo to the correct place. Have a look:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    Mat3b img = imread("path_to_image");
    resize(img, img, Size(800, 800));

    Mat grayImg;
    cvtColor(img, grayImg, COLOR_BGR2GRAY);
    grayImg.convertTo(grayImg, CV_32F);

    int N = 4;
    if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
    {
        // Error
        return -1;
    }

    Size graySize = grayImg.size();
    Size smallSize(grayImg.cols / N, grayImg.rows / N);

    vector<Mat> smallImages;
    vector<Rect> smallImageRois;
    for (int i = 0; i < graySize.height; i += smallSize.height)
    {
        for (int j = 0; j < graySize.width; j += smallSize.width)
        {
            Rect rect = Rect(j, i, smallSize.width, smallSize.height);
            smallImages.push_back(grayImg(rect));
            smallImageRois.push_back(rect);
        }
    }

    // Option 1. Using a pointer to the subimage data.
    Mat big1(800, 800, CV_32F);
    int big1step = big1.step1();
    float* pbig1 = big1.ptr<float>(0);
    for (size_t idx = 0; idx < smallImages.size(); ++idx)
    {
        float* pdata = (float*)smallImages[idx].data;
        int step = smallImages[idx].step1();
        Rect roi = smallImageRois[idx];
        for (int i = 0; i < smallSize.height; ++i)
        {
            for (int j = 0; j < smallSize.width; ++j)
            {
                pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
            }
        }
    }

    // Option 2. Using copyTo
    Mat big2(800, 800, CV_32F);
    for (size_t idx = 0; idx < smallImages.size(); ++idx)
    {
        smallImages[idx].copyTo(big2(smallImageRois[idx]));
    }

    return 0;
}
For concatenating the sub-images into a single square image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
    assert(patches.size() > 0);

    // make it square
    const int patch_width = patches[0].cols;
    const int patch_height = patches[0].rows;
    const int patch_stride = (int)ceil(sqrt((double)patches.size()));
    Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());

    for (size_t i = 0, iend = patches.size(); i < iend; i++) {
        Mat &patch = patches[i];
        const int offset_x = (i % patch_stride) * patch_width;
        const int offset_y = (i / patch_stride) * patch_height;
        // copy the patch to the output image
        patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
    }
    return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = { Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255) };
// fill the vector with circles of different colours
for (int i = 0; i < 16; i++) {
    Mat patch = Mat::zeros(100, 100, CV_32FC3);
    circle(patch, Point(50, 50), 40, colours[i % 3], -1);
    patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce a 4x4 grid of coloured circles.
Print the values of i and j before creating Mat mf and I believe you will soon be able to find the error.
Hint 1: i and j will be 0 the first time.
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0, 0, 200, 200);
src.copyTo(dst(roi));
Edit:
Hint 3: Try not to do such pointer fiddling; you will get into trouble, especially if you ignore the step (as you seem to do).
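
For completeness, a sketch of what a step-respecting reconstruction of one block might look like (assuming each block is 200x200 and CV_32F, as in the question):

    // Rebuild a Mat header over an existing block without copying.
    // Passing the source step keeps this correct even when the block is a
    // non-continuous view into a larger image (as Hint 3 warns).
    int Idx = 0;
    Mat mf(200, 200, CV_32F,
           (float*)subImages[Idx].data,    // start of this block's data
           subImages[Idx].step);           // bytes per row of the parent image
    imshow("Display window", mf / 255.0f); // CV_32F images display in [0, 1]
    waitKey(0);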

Pass the pointer of an image buffer to OpenCV and change the saturation?

I want to pass the pointer to my image buffer, change the saturation, and see the result immediately. But the change is not being applied to my buffer; it is not changing.
void changeSaturation(void* buffer, int width, int height)
{
    Mat matObject(width, height, CV_8UC4, buffer);
    m_matSource = matObject;
    Mat newMat = m_matSource.clone();

    // BGR to HSV
    cvtColor(matSource, matSource, CV_BGR2HSV);
    for(int i = 0; i < newMat.rows; ++i)
    {
        for(int j = 0; j < newMat.cols; ++j)
        {
            newMat.at<cv::Vec3b>(i, j)[1] = 255; //saturationValue;
        }
    }
    // HSV back to BGR
    cvtColor(newMat, m_matSource, CV_HSV2BGR); // here m_matSource->data changes
}
How can I apply the change to my buffer?
I refactored your code while trying to reproduce your problem, and in the process I fixed it. You cloned your source into newMat, then changed the color space of your original image, and then proceeded to completely ignore your new modified image. Try this out:
void changeSaturation(Mat& image)
{
    Mat result(image.rows, image.cols, image.type());

    // BGR to HSV
    cvtColor(image, result, CV_BGR2HSV);
    for(int i = 0; i < result.rows; ++i)
    {
        for(int j = 0; j < result.cols; ++j)
            result.at<cv::Vec3b>(i, j)[1] = 255; //saturationValue;
    }
    // HSV back to BGR
    cvtColor(result, result, CV_HSV2BGR);

    namedWindow("Original");
    imshow("Original", image);
    namedWindow("Duplicate");
    imshow("Duplicate", result);
}
int main()
{
    Mat image;
    image = imread("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg");
    changeSaturation(image);
    waitKey(0);
    return 0;
}
Edit
To modify the input image:

void changeSaturation(Mat& image)
{
    // BGR to HSV
    cvtColor(image, image, CV_BGR2HSV);
    for(int i = 0; i < image.rows; ++i)
    {
        for(int j = 0; j < image.cols; ++j)
            image.at<cv::Vec3b>(i, j)[1] = 255; //saturationValue;
    }
    // HSV back to BGR
    cvtColor(image, image, CV_HSV2BGR);
}
Next Edit
This now has (almost) the original function signature:

void changeSaturation(uchar* buffer, int rows, int cols, int type)
{
    Mat image(rows, cols, type, buffer);
    Mat result;

    // BGR to HSV
    cvtColor(image, result, CV_BGR2HSV);
    for(int i = 0; i < result.rows; ++i)
    {
        for(int j = 0; j < result.cols; ++j)
            result.at<cv::Vec3b>(i, j)[1] = 255;
    }
    // HSV back to BGR
    cvtColor(result, image, CV_HSV2BGR);
}

int main()
{
    Mat image;
    image = imread("C:/Users/Public/Pictures/Sample Pictures/Desert.jpg");
    changeSaturation(image.data, image.rows, image.cols, image.type());
    imshow("Original", image);
    waitKey(0);
    return 0;
}
Your constructor Mat matObject(width, height, CV_8UC4, buffer); creates matObject over the memory pointed to by buffer (no pixel data is copied).
In your function you are making changes to newMat, which is cloned from matObject. However, buffer doesn't point to newMat; it points to matObject, and matObject is never changed by your function.
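
A minimal sketch of a version that writes back through the wrapping Mat, so the caller's buffer actually changes (this assumes a 3-channel BGR buffer; the CV_8UC4 in the question would need a CV_BGRA2BGR conversion first, and note that the Mat constructor takes rows before cols):

    void changeSaturation(void* buffer, int width, int height)
    {
        // Wrap the caller's memory; no copy is made, so writing through
        // 'wrapped' writes into 'buffer'. Constructor order is (rows, cols, ...).
        Mat wrapped(height, width, CV_8UC3, buffer);
        Mat hsv;
        cvtColor(wrapped, hsv, CV_BGR2HSV);
        for (int i = 0; i < hsv.rows; ++i)
            for (int j = 0; j < hsv.cols; ++j)
                hsv.at<Vec3b>(i, j)[1] = 255;   // max out saturation
        cvtColor(hsv, wrapped, CV_HSV2BGR);     // same size/type: no reallocation,
                                                // so the result lands in 'buffer'
    }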

Convolution using DFT

I'm using the following code to calculate the convolution of an image with a specified kernel (in my case Gaussian). Every time I get a different result, and the result image is not even close to the one I obtained by convolution in the spatial domain. First I thought the problem was the datatype of the images; I changed them to 32- and 64-bit but still got the same results. Can anyone tell me what could be wrong?
http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html#dft
The function at the link above is giving me a black image. My input is in grayscale.
void convol_fft(const Mat& A, const vector<vector<float>>& kernel2d, Mat& result)
{
    Mat B = Mat(3, 3, CV_64F);
    for (int row = 0; row < kernel2d.size(); row++)
        for (int col = 0; col < kernel2d[row].size(); col++){
            B.at<uchar>(row, col) = (uchar)kernel2d[row][col];
        }

    int dft_M = getOptimalDFTSize( A.rows + B.rows - 1 );
    int dft_N = getOptimalDFTSize( A.cols + B.cols - 1 );

    Mat dft_A = Mat::zeros(dft_M, dft_N, CV_64F);
    Mat dft_B = Mat::zeros(dft_M, dft_N, CV_64F);

    Mat dft_A_part = dft_A(Rect(0, 0, A.cols, A.rows));
    A.convertTo(dft_A_part, dft_A_part.type(), 1, -mean(A)[0]);
    Mat dft_B_part = dft_B(Rect(0, 0, B.cols, B.rows));
    B.convertTo(dft_B_part, dft_B_part.type(), 1, -mean(B)[0]);

    dft(dft_A, dft_A, 0, A.rows);
    dft(dft_B, dft_B, 0, B.rows);

    // set the last parameter to false to compute convolution instead of correlation
    mulSpectrums( dft_A, dft_B, dft_A, 0, false );
    idft(dft_A, dft_A, DFT_SCALE, A.rows + B.rows - 1 );

    result = dft_A(Rect(0, 0, A.cols + B.cols - 1, A.rows + B.rows - 1));
    normalize(result, result, 0, 1, NORM_MINMAX, result.type());
    pow(result, 3., result);
    // B ^= Scalar::all(255);
}
The following code, based on OpenCV's phaseCorrelateRes(), will do correlation in two dimensions.
static void fftShift(InputOutputArray _out)
{
    Mat out = _out.getMat();
    if(out.rows == 1 && out.cols == 1)
    {
        // trivially shifted.
        return;
    }
    vector<Mat> planes;
    split(out, planes);

    int xMid = out.cols >> 1;
    int yMid = out.rows >> 1;
    bool is_1d = xMid == 0 || yMid == 0;

    if(is_1d)
    {
        xMid = xMid + yMid;
        for(size_t i = 0; i < planes.size(); i++)
        {
            Mat tmp;
            Mat half0(planes[i], Rect(0, 0, xMid, 1));
            Mat half1(planes[i], Rect(xMid, 0, xMid, 1));
            half0.copyTo(tmp);
            half1.copyTo(half0);
            tmp.copyTo(half1);
        }
    }
    else
    {
        for(size_t i = 0; i < planes.size(); i++)
        {
            // perform quadrant swaps...
            Mat tmp;
            Mat q0(planes[i], Rect(0, 0, xMid, yMid));
            Mat q1(planes[i], Rect(xMid, 0, xMid, yMid));
            Mat q2(planes[i], Rect(0, yMid, xMid, yMid));
            Mat q3(planes[i], Rect(xMid, yMid, xMid, yMid));
            q0.copyTo(tmp);
            q3.copyTo(q0);
            tmp.copyTo(q3);
            q1.copyTo(tmp);
            q2.copyTo(q1);
            tmp.copyTo(q2);
        }
    }
    merge(planes, out);
}
void Correlate2d(
    const cv::Mat& src1,
    const cv::Mat& src2,
    cv::Mat& dst,
    double* response)
{
    CV_Assert( src1.type() == src2.type());
    CV_Assert( src1.type() == CV_32FC1 || src1.type() == CV_64FC1 );
    CV_Assert( src1.size == src2.size);

    int M = getOptimalDFTSize(src1.rows);
    int N = getOptimalDFTSize(src1.cols);

    Mat padded1, padded2, paddedWin;
    if(M != src1.rows || N != src1.cols)
    {
        copyMakeBorder(src1, padded1, 0, M - src1.rows, 0, N - src1.cols, BORDER_CONSTANT, Scalar::all(0));
        copyMakeBorder(src2, padded2, 0, M - src2.rows, 0, N - src2.cols, BORDER_CONSTANT, Scalar::all(0));
    }
    else
    {
        padded1 = src1;
        padded2 = src2;
    }

    Mat FFT1, FFT2, P, Pm, C;

    // correlation equation
    // Reference: http://en.wikipedia.org/wiki/Phase_correlation
    dft(padded1, FFT1, DFT_REAL_OUTPUT);
    dft(padded2, FFT2, DFT_REAL_OUTPUT);
    mulSpectrums(FFT1, FFT2, dst, 0, true);
    idft(dst, dst, DFT_SCALE); // gives us the correlation result...
    fftShift(dst); // shift the energy to the center of the frame.

    // locate the highest peak
    Point peakLoc;
    minMaxLoc(dst, NULL, NULL, NULL, &peakLoc);

    // max response is scaled
    if( response )
        *response = dst.at<float>(peakLoc);
}
You can find the code in \opencv\sources\modules\imgproc\src\phasecorr.cpp.
In order to change the code to convolution, simply change this line:
mulSpectrums(FFT1, FFT2, dst, 0, true);
to
mulSpectrums(FFT1, FFT2, dst, 0, false);
The correlation above is equivalent to doing the following in MATLAB:
dst = fftshift(ifft2(fft2(src1).*conj(fft2(src2))))
I am not sure about OpenCV... but this looks suspicious:
for (int row = 0; row < kernel2d.size(); row++)
    for (int col = 0; col < kernel2d[row].size(); col++){
        B.at<uchar>(row, col) = (uchar)kernel2d[row][col];
    }
If you are filling up the B kernel, then the row bound should be kernel2d[col].size(). It looks like you are overrunning the B kernel. What is the value of kernel2d.size()?
Why not just load the values directly and save all the function calls? For a Gaussian kernel it should look something like {1, 2, 1, 2, 3, 2, 1, 2, 1}.
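
A sketch of that suggestion, assuming the 3x3 Gaussian above. Loading the kernel this way also sidesteps the type mismatch in the question, where a CV_64F matrix is written through at<uchar>:

    // Build the 3x3 kernel directly as CV_64F, matching the type it is used as.
    Mat B = (Mat_<double>(3, 3) << 1, 2, 1,
                                   2, 3, 2,
                                   1, 2, 1);
    B /= 15.0; // 15 = sum of the coefficients, so overall brightness is preserved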