I am a beginner in OpenCV.
I want to plot the intensity profile for R, G and B for the image given below.
I would like to plot the R, G and B values w.r.t. pixel location in three different graphs.
So far I have learnt how to read an image and display it, for example using imread():
Mat img = imread("Apple.bmp");
and then show it on the screen using imshow("Window", img);.
Now I would like to put all the R, G and B values into 3 separate buffers (buf1, buf2, buf3) and plot these values.
Kindly provide a hint or a sample code snippet to help me understand this.
You can separate R, G and B into separate Mats using cv::split()
std::vector<Mat> planes(3);
cv::split(img, planes);
cv::Mat R = planes[2];
cv::Mat G = planes[1];
cv::Mat B = planes[0];
But you only need to separate them like this if you have code that expects a Mat with a single color channel.
Don't use at<>() as the supposed duplicate suggests - it is really slow if you are sequentially scanning an image (but it is good for random access).
You can scan the image efficiently like this:
for(int i = 0; i < img.rows; ++i)
{
    // get a pointer to the current row
    cv::Vec3b* row = img.ptr<cv::Vec3b>(i);
    // now scan the row
    for(int j = 0; j < img.cols; ++j)
    {
        cv::Vec3b pixel = row[j];
        uchar r = pixel[2];
        uchar g = pixel[1];
        uchar b = pixel[0];
        process(r, g, b); // e.g. push the values into your three buffers
    }
}
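If you want the three buffers named in the question, a minimal sketch along these lines should work (the helper name fill_channel_buffers and the use of std::vector for buf1/buf2/buf3 are my assumptions, not part of the original question):
#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical helper: copy the R, G and B values of a CV_8UC3 image
// into three separate buffers (named after the question's buf1/buf2/buf3).
void fill_channel_buffers(const cv::Mat& img,
                          std::vector<uchar>& buf1,   // R
                          std::vector<uchar>& buf2,   // G
                          std::vector<uchar>& buf3)   // B
{
    buf1.reserve(img.rows * img.cols);
    buf2.reserve(img.rows * img.cols);
    buf3.reserve(img.rows * img.cols);
    for (int i = 0; i < img.rows; ++i)
    {
        const cv::Vec3b* row = img.ptr<cv::Vec3b>(i); // pointer to row i
        for (int j = 0; j < img.cols; ++j)
        {
            buf1.push_back(row[j][2]); // R
            buf2.push_back(row[j][1]); // G
            buf3.push_back(row[j][0]); // B
        }
    }
}
The three vectors can then be handed to whatever plotting code you use.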
Lastly, if you do want to make a histogram, you can use this code. It is fairly old, but I believe it still works.
void show_histogram_image(cv::Mat src, cv::Mat &hist_image)
{   // based on http://docs.opencv.org/2.4.4/modules/imgproc/doc/histograms.html?highlight=histogram#calchist
    int sbins = 256;
    int histSize[] = {sbins};
    float sranges[] = { 0, 256 };
    const float* ranges[] = { sranges };
    cv::MatND hist;
    int channels[] = {0};
    cv::calcHist( &src, 1, channels, cv::Mat(), // do not use mask
                  hist, 1, histSize, ranges,
                  true, // the histogram is uniform
                  false );
    double maxVal = 0;
    minMaxLoc(hist, 0, &maxVal, 0, 0);
    int xscale = 10;
    int yscale = 10;
    hist_image = cv::Mat::zeros(256, sbins*xscale, CV_8UC3);
    for( int s = 0; s < sbins; s++ )
    {
        float binVal = hist.at<float>(s, 0);
        int intensity = cvRound(binVal*255/maxVal);
        // draw from the bottom of the image upwards, since the image
        // origin (0,0) is at the top-left corner
        rectangle( hist_image, cv::Point(s*xscale, hist_image.rows),
                   cv::Point( (s+1)*xscale - 1, hist_image.rows - intensity),
                   cv::Scalar::all(255),
                   CV_FILLED );
    }
}
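To get one histogram per colour channel, as the original question asks, a possible usage is sketched below; it assumes img is the CV_8UC3 image loaded with imread() earlier and combines the function above with the cv::split() example:
// Possible usage: compute and show one histogram image per colour channel.
std::vector<cv::Mat> planes(3);
cv::split(img, planes);                  // planes[0]=B, planes[1]=G, planes[2]=R
cv::Mat hist_r, hist_g, hist_b;
show_histogram_image(planes[2], hist_r); // R
show_histogram_image(planes[1], hist_g); // G
show_histogram_image(planes[0], hist_b); // B
cv::imshow("R histogram", hist_r);
cv::imshow("G histogram", hist_g);
cv::imshow("B histogram", hist_b);
cv::waitKey(0);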
Related
I use a FLIR camera (Grasshopper3) and the SDK (Spinnaker) to take an image (Mono8). After converting the image, I would like to compute the histogram and display it in my GUI in a PictureBox (C++ CLR/CLI .NET environment). For this, I need to convert it, but I guess there is a mistake in the color conversion or the Bitmap creation.
Here is the code:
Spinnaker::ImagePtr convertedImage_MONO = Grasshopper3.pResultImage_MONO->Convert(Spinnaker::PixelFormat_Mono8, Spinnaker::NO_COLOR_PROCESSING); // Raw image is converted to Mono8
unsigned int XPadding = convertedImage_MONO->GetXPadding();
unsigned int YPadding = convertedImage_MONO->GetYPadding();
unsigned int rowsize = convertedImage_MONO->GetWidth();
unsigned int colsize = convertedImage_MONO->GetHeight();
//image data contains padding. When allocating Mat container size, you need to account for the X,Y image data padding.
cv::Mat cvimg_Mono = cv::Mat(colsize + YPadding, rowsize + XPadding, CV_8UC1, convertedImage_MONO->GetData(), convertedImage_MONO->GetStride());
cvtColor(cvimg_Mono, cvimg_Mono, cv::COLOR_BGR2BGRA);
// Histogram
int bins = 256;
int histSize[] = { bins };
// Set ranges for histogram bins
float lranges[] = { 0, 256 };
const float* ranges[] = { lranges };
// create matrix for histogram
cv::Mat hist;
int channels[] = { 0 };
// create matrix for histogram visualization
int const hist_height = 256;
cv::Mat3b hist_image = cv::Mat3b::zeros(hist_height, bins);
cv::calcHist(&cvimg_Mono, 1, channels, cv::Mat(), hist, 1, histSize, ranges, true, false);
double max_val = 0;
minMaxLoc(hist, 0, &max_val);
// visualize each bin
for (int b = 0; b < bins; b++)
{
float const binVal = hist.at<float>(b);
int const height = cvRound(binVal*hist_height / max_val);
cv::line(hist_image, cv::Point(b, hist_height - height), cv::Point(b, hist_height), cv::Scalar::all(255));
}
cv::Mat Histogram_Mono = hist_image;
cv::resize(Histogram_Mono, Histogram_Mono, cv::Size(pictureBox_Mono->Width, pictureBox_Mono->Height), cv::INTER_AREA);
hBit_Mono = CreateBitmap(Histogram_Mono.cols, Histogram_Mono.rows, 1, 32, Histogram_Mono.data); // hBit_Mono was created global
bmp_Mono = Bitmap::FromHbitmap((IntPtr)hBit_Mono); // bmp_Mono was created as a global Bitmap
pictureBox_Mono->Image = bmp_Mono;
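One possible source of the problem (not verified against the original setup): cv::COLOR_BGR2BGRA cannot be applied to a single-channel Mono8 Mat, and CreateBitmap() is told the data is 32 bpp while Histogram_Mono is a 3-channel (24 bpp) image. A hedged sketch of an adjusted tail of the code, keeping the variable names from the snippet above:
// Sketch of a possible fix: keep cvimg_Mono single-channel for calcHist,
// and hand CreateBitmap() a true 32-bpp buffer by converting the histogram
// image to BGRA first.
cv::Mat Histogram_Mono;
cv::resize(hist_image, Histogram_Mono,
           cv::Size(pictureBox_Mono->Width, pictureBox_Mono->Height),
           0, 0, cv::INTER_AREA);
cv::Mat Histogram_Mono_BGRA;
cv::cvtColor(Histogram_Mono, Histogram_Mono_BGRA, cv::COLOR_BGR2BGRA); // 3 -> 4 channels
hBit_Mono = CreateBitmap(Histogram_Mono_BGRA.cols, Histogram_Mono_BGRA.rows,
                         1, 32, Histogram_Mono_BGRA.data);             // 32 bpp now matches
bmp_Mono = Bitmap::FromHbitmap((IntPtr)hBit_Mono);
pictureBox_Mono->Image = bmp_Mono;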
My aim is to generate a histogram for a grayscale image. The code I used is:
Mat img = imread("leeds-castle.jpg",IMREAD_GRAYSCALE);
Mat hst;
int hstsize = 256;
float ranges[] = { 0,256 };
float *hstrange = { ranges };
calcHist( img, 1,0, Mat(), hst, 1, &hstsize,&hstrange,true,false);
int hst_w = 512, hst_h = 400;
int bin_w = cvRound((double)hst_w / 256);
Mat histimg(hst_w, hst_h, CV_8U);
normalize(hst, hst, 0, histimg.rows, NORM_MINMAX, -1, Mat());
for (int i = 1; i < 256; i++)
{
    line(histimg,
         Point(bin_w*(i - 1), hst_h - cvRound(hst.at<float>(i - 1))),
         Point(bin_w*i, hst_h - cvRound(hst.at<float>(i))),
         2, 8, 0);
}
imshow("Histogram", histimg);
The only error is in the usage of the calcHist() function. Is there anything wrong with it?
See the comment above calcHist to identify the correct usage:
// original image
Mat img = imread("leeds-castle.jpg",IMREAD_GRAYSCALE);
// NOTE: check if img.channels is equal to 1
// histogram
Mat hst;
// number of bins
int hstsize = 256;
float ranges[] = { 0,256 };
const float* hstrange = ranges; // must be const float* so that &hstrange matches calcHist's const float** parameter
// parameters for histogram calculation
bool uniform = true;
bool accumulate = false;
// calculate histogram
// the '&' was missing here
calcHist( &img, 1,0, Mat(), hst, 1, &hstsize,&hstrange,true,false);
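For completeness, here is a sketch of the drawing loop that follows; it is not part of the original answer and it assumes a zero-initialized canvas and passes the line colour explicitly as a cv::Scalar, which the question's call leaves out:
// Sketch only: draw the normalized histogram with an explicit colour.
int hst_w = 512, hst_h = 400;
int bin_w = cvRound((double)hst_w / 256);
Mat histimg = Mat::zeros(hst_h, hst_w, CV_8U);      // rows = height, cols = width
normalize(hst, hst, 0, histimg.rows, NORM_MINMAX, -1, Mat());
for (int i = 1; i < 256; i++)
{
    line(histimg,
         Point(bin_w * (i - 1), hst_h - cvRound(hst.at<float>(i - 1))),
         Point(bin_w * i,       hst_h - cvRound(hst.at<float>(i))),
         Scalar::all(255), 2, 8, 0);                // colour, thickness, lineType, shift
}
imshow("Histogram", histimg);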
I have float x, y, z values of an image. I want to construct a 16-bit PNG depth image by copying the z values. The image I am getting as a result has some invalid points. Below is my code.
uint16_t* depthValues = new uint16_t[size];
auto sampleVector(DepthPoints);
for (unsigned int i = 0; i < sampleVector.size(); i++)
{
depthValues[i] = (sampleVector.at(i).z) * 65536;
}
Mat newDepthImage = cv::Mat(var.height, var.width, CV_16UC1,depthValues);
imwrite(Location, CImage);
Can someone tell me if I can copy the float values into an unsigned char array to create the image?
Is that why my image has invalid points?
auto sampleVector(DepthPoints);
const int size = sampleVector.size();
float* depthValues = new float[size];
for (unsigned int i = 0; i < sampleVector.size(); i++)
{
depthValues[i] = (sampleVector.at(i).z);
}
Mat depthImageOne, depthImageTwo;
Mat depthImageNew = cv::Mat(var.height, var.width, CV_32FC1,depthValues);
normalize(depthImageNew, depthImageOne, 1, 0, NORM_MINMAX, CV_32FC1);
depthImageOne.convertTo(depthImageTwo, CV_16UC1, 65536.0,0.0);
imwrite("path", depthImageTwo);
Normalization might cause loss of depth information. I used normalization only for visualization of the images. To preserve the depth information, I used the code below.
Mat depthImageNew = cv::Mat(var.height, var.width, CV_32FC1,depthValues);
depthImageNew.convertTo(depthImageTwo, CV_16UC1, 1000.0, 0.0);
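A consolidated sketch of this depth-preserving path is shown below; variable names follow the snippets above, and the 1000.0 factor assumes z is in metres and the output should be in millimetres:
// Sketch: scale the float depth to the 16-bit range and save it as PNG.
// imwrite() keeps the full 16-bit values because the Mat type is CV_16UC1
// and the file extension is .png.
cv::Mat depthImageNew(var.height, var.width, CV_32FC1, depthValues);
cv::Mat depthImageTwo;
depthImageNew.convertTo(depthImageTwo, CV_16UC1, 1000.0, 0.0); // 1 mm per unit
cv::imwrite("depth16.png", depthImageTwo);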
I am trying to plot the histogram of Lena, the 8-bit single-channel grayscale image.
But it is not displaying the output correctly, as can be seen in the following output:
void show_histogram_image(Mat img1)
{
    int sbins = 256;
    int histSize[] = {sbins};
    float sranges[] = { 0, 256 };
    const float* ranges[] = { sranges };
    cv::MatND hist;
    int channels[] = {0};
    cv::calcHist( &img1, 1, channels, cv::Mat(), // do not use mask
                  hist, 1, histSize, ranges,
                  true, // the histogram is uniform
                  false );
    double maxVal=0;
    minMaxLoc(hist, 0, &maxVal, 0, 0);
    int xscale = 10;
    int yscale = 10;
    cv::Mat hist_image;
    hist_image = cv::Mat::zeros(256, sbins*xscale, CV_8UC1);
    for( int s = 0; s < sbins; s++ )
    {
        float binVal = hist.at<float>(s, 0);
        int intensity = cvRound(binVal*255/maxVal);
        rectangle( hist_image, cv::Point(s*xscale, 0),
                   cv::Point( (s+1)*xscale - 1, intensity),
                   cv::Scalar::all(255),
                   CV_FILLED );
    }
    imshow("Image1",hist_image);
    waitKey(0);
}
Here is my main():
int main()
{
    Mat img1 = imread("lena512.bmp", CV_8UC1);
    if (img1.empty()) //check whether the image is valid or not
    {
        cout << "Error : Image cannot be read..!!" << endl;
        system("pause"); //wait for a key press
        return -1;
    }
    show_histogram_image(img1);
}
And here is the output histogram image:
I tried changing the xscale, but even then it does not come out correctly.
Update
I made the following changes:
rectangle( hist_image, cv::Point(s*xscale, hist_image.rows),
           cv::Point( (s+1)*xscale - 1, hist_image.rows - intensity),
           cv::Scalar::all(255), CV_FILLED );
And now the output is:
It is much better, but I need lines and clearly visible bins. And it looks like some part is hidden on the right side.
Update 2
I changed CV_FILLED to '1' and now I have:
Since the image origin in OpenCV is (0,0) at the top-left corner, the y-axis points downwards,
so you will have to invert the y-values for your histogram drawing:
rectangle( hist_image, cv::Point(s*xscale, hist_image.rows),
           cv::Point( (s+1)*xscale - 1, hist_image.rows - intensity),
           cv::Scalar::all(255),
           CV_FILLED );
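For the follow-up points (clearly visible bins and the image running off the right edge), one option is sketched below; it is not part of the original answer. The idea is to draw each bar filled in a mid gray and outline it in white, and to show a scaled-down copy, since the canvas is sbins*xscale = 2560 pixels wide:
// Inside the bin loop: filled bar plus a 1-px white outline so each bin stays visible.
rectangle( hist_image, cv::Point(s*xscale, hist_image.rows),
           cv::Point( (s+1)*xscale - 1, hist_image.rows - intensity),
           cv::Scalar::all(180), CV_FILLED );
rectangle( hist_image, cv::Point(s*xscale, hist_image.rows),
           cv::Point( (s+1)*xscale - 1, hist_image.rows - intensity),
           cv::Scalar::all(255), 1 );

// After the loop: the canvas is wider than most screens, so show a
// scaled-down copy (or simply lower xscale).
cv::Mat hist_small;
cv::resize(hist_image, hist_small, cv::Size(), 0.25, 0.25, cv::INTER_AREA);
cv::imshow("Image1", hist_small);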
I am trying to use the OpenCV EM algorithm to do color extraction. I am using the following code, based on the example in the OpenCV documentation:
cv::Mat capturedFrame ( height, width, CV_8UC3 );
int i, j;
int nsamples = 1000;
cv::Mat samples ( nsamples, 2, CV_32FC1 );
cv::Mat labels;
cv::Mat img = cv::Mat::zeros ( height, height, CV_8UC3 );
img = capturedFrame;
cv::Mat sample ( 1, 2, CV_32FC1 );
CvEM em_model;
CvEMParams params;
samples = samples.reshape ( 2, 0 );
for ( i = 0; i < N; i++ )
{
    //from the training samples
    cv::Mat samples_part = samples.rowRange ( i*nsamples/N, (i+1)*nsamples/N);
    cv::Scalar mean (((i%N)+1)*img.rows/(N1+1),((i/N1)+1)*img.rows/(N1+1));
    cv::Scalar sigma (30,30);
    cv::randn(samples_part,mean,sigma);
}
samples = samples.reshape ( 1, 0 );
//initialize model parameters
params.covs = NULL;
params.means = NULL;
params.weights = NULL;
params.probs = NULL;
params.nclusters = N;
params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
params.start_step = CvEM::START_AUTO_STEP;
params.term_crit.max_iter = 300;
params.term_crit.epsilon = 0.1;
params.term_crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS;
//cluster the data
em_model.train ( samples, Mat(), params, &labels );
cv::Mat probs;
probs = em_model.getProbs();
cv::Mat weights;
weights = em_model.getWeights();
cv::Mat modelIndex = cv::Mat::zeros ( img.rows, img.cols, CV_8UC3 );
for ( i = 0; i < img.rows; i ++ )
{
    for ( j = 0; j < img.cols; j ++ )
    {
        sample.at<float>(0) = (float)j;
        sample.at<float>(1) = (float)i;
        int response = cvRound ( em_model.predict ( sample ) );
        modelIndex.data [ modelIndex.cols*i + j] = response;
    }
}
My question here is:
Firstly, I want to extract each model, five in total here, and store the corresponding pixel values in five different matrices. That way I would have the five different colors separately. So far I have only obtained their indexes; is there any way to obtain their corresponding colors? To make it easy, I could start by finding the dominant color based on these five Gaussian components.
Secondly, my sample data points here are "100", and it takes nearly 3 seconds for them. But I want to do all of this in no more than 30 milliseconds. I know OpenCV background subtraction, which uses a GMM, performs really fast, below 20 ms; that means there must be a way for me to do all of this within 30 ms for all 600x800 = 480000 pixels. I found the predict function to be the most time-consuming one.
First Question:
In order to do color extraction you first need to train the EM with your input pixels. After that you simply loop over all the input pixels again and use predict() to classify each of them. I've attached a small example that utilizes EM for foreground/background separation based on colors. It shows you how to extract the dominant color (mean) of each Gaussian and how to access the original pixel color.
#include <opencv2/opencv.hpp>

int main(int argc, char** argv) {
    cv::Mat source = cv::imread("test.jpg");
    //output images
    cv::Mat meanImg(source.rows, source.cols, CV_32FC3);
    cv::Mat fgImg(source.rows, source.cols, CV_8UC3);
    cv::Mat bgImg(source.rows, source.cols, CV_8UC3);
    //convert the input image to float
    cv::Mat floatSource;
    source.convertTo(floatSource, CV_32F);
    //now convert the float image to column vector
    cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
    int idx = 0;
    for (int y = 0; y < source.rows; y++) {
        cv::Vec3f* row = floatSource.ptr<cv::Vec3f>(y);
        for (int x = 0; x < source.cols; x++) {
            samples.at<cv::Vec3f>(idx++, 0) = row[x];
        }
    }
    //we need just 2 clusters
    cv::EMParams params(2);
    cv::ExpectationMaximization em(samples, cv::Mat(), params);
    //the two dominating colors
    cv::Mat means = em.getMeans();
    //the weights of the two dominant colors
    cv::Mat weights = em.getWeights();
    //we define the foreground as the dominant color with the largest weight
    const int fgId = weights.at<float>(0) > weights.at<float>(1) ? 0 : 1;
    //now classify each of the source pixels
    idx = 0;
    for (int y = 0; y < source.rows; y++) {
        for (int x = 0; x < source.cols; x++) {
            //classify
            const int result = cvRound(em.predict(samples.row(idx++), NULL));
            //get the according mean (dominant color)
            const double* ps = means.ptr<double>(result, 0);
            //set the according mean value to the mean image
            float* pd = meanImg.ptr<float>(y, x);
            //float images need to be in [0..1] range
            pd[0] = ps[0] / 255.0;
            pd[1] = ps[1] / 255.0;
            pd[2] = ps[2] / 255.0;
            //set either foreground or background
            if (result == fgId) {
                fgImg.at<cv::Point3_<uchar> >(y, x) = source.at<cv::Point3_<uchar> >(y, x);
            } else {
                bgImg.at<cv::Point3_<uchar> >(y, x) = source.at<cv::Point3_<uchar> >(y, x);
            }
        }
    }
    cv::imshow("Means", meanImg);
    cv::imshow("Foreground", fgImg);
    cv::imshow("Background", bgImg);
    cv::waitKey(0);
    return 0;
}
I've tested the code with the following image and it performs quite well.
Second Question:
I've noticed that the maximum number of clusters has a huge impact on the performance, so it's better to set it to a very conservative value instead of leaving it empty or setting it to the number of samples as in your example. Furthermore, the documentation mentions an iterative procedure for repeatedly optimizing the model with less-constrained parameters; maybe this gives you some speed-up. To read more, please have a look at the docs and the sample code that is provided for train() here.
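If you are on a current OpenCV release (3.x/4.x), the same idea can be expressed with the cv::ml::EM class instead of the old API used above; this is only a minimal sketch of the training/prediction part, assuming the Nx3 CV_32FC1 samples matrix built earlier:
#include <opencv2/opencv.hpp>

// Minimal sketch with the modern cv::ml::EM API: train on the float samples,
// read the per-cluster means/weights, and classify a single sample.
cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
em->setClustersNumber(2);                               // keep the cluster count conservative
em->setCovarianceMatrixType(cv::ml::EM::COV_MAT_SPHERICAL);
em->setTermCriteria(cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 300, 0.1));

cv::Mat labels;
em->trainEM(samples, cv::noArray(), labels, cv::noArray());

cv::Mat means   = em->getMeans();                       // one row per cluster (dominant colours)
cv::Mat weights = em->getWeights();

// predict2() returns (log-likelihood, index of the most probable component)
cv::Vec2d res = em->predict2(samples.row(0), cv::noArray());
int cluster = cvRound(res[1]);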