Negative image is completely black - C++

Here is my code, which uses OpenCV 2.4.5
Histogram1D.h
#ifndef HISTOGRAM1D_H
#define HISTOGRAM1D_H
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;

class Histogram1D
{
public:
    Histogram1D();
    //Histogram generators
    MatND getHistogram(Mat);
    Mat getHistogramImage(Mat);
    //Generate Negative Image
    Mat applyLookup(Mat, Mat);
    //Generate improved image with equalized histogram
    Mat equalize(Mat image);
private:
    int histSize[1];        //Number of bins
    float hRanges[2];       //Max and Min pixel values
    const float *ranges[1];
    int channels[1];        //Only one channel will be used
};
#endif // HISTOGRAM1D_H
Histogram1D.cpp
#include "Histogram1D.h"
Histogram1D::Histogram1D()
{
    histSize[0] = 256;
    hRanges[0] = 0.0;
    hRanges[1] = 255.0;
    ranges[0] = hRanges;
    channels[0] = 0;
}

MatND Histogram1D::getHistogram(Mat image)
{
    MatND hist;
    cv::calcHist(&image, 1, channels, Mat(), hist, 1, histSize, ranges);
    return hist;
}

Mat Histogram1D::getHistogramImage(Mat image)
{
    MatND histo = getHistogram(image);
    //Get minimum and maximum value bins
    double minVal = 0;
    double maxVal = 0;
    minMaxLoc(histo, &minVal, &maxVal, 0, 0);
    //Image on which to display histogram
    Mat histImage(histSize[0], histSize[0], CV_8U, Scalar(255));
    //Set highest point at 90% of nbins
    int hpt = static_cast<int>(0.9,histSize[0]);
    //Draw a vertical line for each bin
    for (int i = 0; i < histSize[0]; i++)
    {
        float binVal = histo.at<float>(i);
        int intensity = static_cast<int>(binVal * hpt / maxVal);
        line(histImage, Point(i, histSize[0]), Point(i, histSize[0] - intensity), Scalar::all(0));
    }
    return histImage;
}

Mat Histogram1D::applyLookup(Mat image, Mat lookup)
{
    Mat result;
    cv::LUT(image, lookup, result);
    return result;
}

Mat Histogram1D::equalize(Mat image)
{
    Mat result;
    cv::equalizeHist(image, result);
    return result;
}
HistogramMain.cpp
#include "Histogram1D.h"
int main()
{
    Histogram1D h;
    Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Penguins.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    cout << "Number of Channels: " << image.channels() << endl;
    namedWindow("Image");
    imshow("Image", image);

    Mat histogramImage = h.getHistogramImage(image);
    namedWindow("Histogram");
    imshow("Histogram", histogramImage);

    Mat thresholded;
    threshold(image, thresholded, 60, 255, THRESH_BINARY);
    namedWindow("Binary Image");
    imshow("Binary Image", thresholded);

    Mat negativeImage;
    int dim(256);
    negativeImage = h.applyLookup(image, Mat(1, &dim, CV_8U));
    namedWindow("Negative Image");
    imshow("Negative Image", negativeImage);

    Mat equalizedImage;
    equalizedImage = h.equalize(image);
    namedWindow("Equalized Image");
    imshow("Equalized Image", equalizedImage);

    waitKey(0);
    return 0;
}
When you run this code, the negative image is 100% black! The most amazing thing is that if you remove all the other code from HistogramMain.cpp but keep the negative-image code below, you get the correct negative image! Why is this?
I am using the latest version of Qt, which uses the VS 2010 compiler.
Mat negativeImage;
int dim(256);
negativeImage = h.applyLookup(image, Mat(1, &dim, CV_8U));
namedWindow("Negative Image");
imshow("Negative Image", negativeImage);

Your primary difficulty is that the expression Mat(1,&dim,CV_8U) allocates memory for a cv::Mat, but does not initialize any values. It is possible that your environment may fill uninitialized memory with zeros, which would explain the black image after calling applyLookup(). In any case, you should initialize the values in your lookup table in order to achieve correct results. For inverting the image, it is easy:
int dim(256);
cv::Mat tab(1, &dim, CV_8U);
uchar* ptr = tab.ptr();
for (size_t i = 0; i < tab.total(); ++i)
{
    ptr[i] = 255 - i;
}
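Then pass the initialized table through your existing helper, using the names from your code:
negativeImage = h.applyLookup(image, tab);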
There are a few other issues with your code:
The line
int hpt = static_cast<int>(0.9,histSize[0]);
should be
int hpt = static_cast<int>(0.9*histSize[0]);
to do what your comment indicates. Pay attention to your compiler warnings!
You also have a problem with your histogram ranges: in calcHist the upper boundary of a uniform range is exclusive, so with hRanges[1] = 255.0 pixels with the value 255 are never counted. The upper bound should be 256.0 to cover the full 0 to 255 range.
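A corrected constructor would look like this (a sketch; only the upper bound changes):
Histogram1D::Histogram1D()
{
    histSize[0] = 256;
    hRanges[0] = 0.0;
    hRanges[1] = 256.0; //upper boundary is exclusive in calcHist
    ranges[0] = hRanges;
    channels[0] = 0;
}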

By the way, with OpenCV's Python bindings (cv2) images are now NumPy arrays, so taking the negative of an 8-bit grayscale image in Python is simply:
img = 255 - img

Related

OpenCV (Version 3.2.0) - Extraction of ROI

I have been working on extracting the ROI from the image given below.
Fluoroscopic Image
This code is supposed to work for images of all resolutions.
My approach is to:
Find the largest contour and store its ID.
Use the minEnclosingCircle() function to find the minimum bounding circle.
But the resulting circle is offset toward the top-left corner.
Here is my code:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

int largestContourId(vector<vector<Point>> contourVec); //Function to find ID of largest contour

int main(int argc, char **argv)
{
    if (argc >= 3) //Both input and output file names must be supplied
    {
        String path = argv[1];          //Path of image to be given here
        Mat img = imread(path);         //Initialize and read image
        Mat img_copy = img.clone();     //Make a copy of the original image
        Mat img_gray;                   //Initialize grey image matrix
        Mat thresh;                     //Initialize threshold mask
        Mat result;                     //Initialize resulting image matrix
        int maxAreaContourId;           //This stores the ID of the largest contour
        Point2f center;                 //MEC center
        float radius;                   //MEC radius
        vector<vector<Point>> contours; //Defining contour vector
        vector<Vec4i> hierarchy;        //Defining contour hierarchy vector
        cout << "Resolution of image: " << img.size() << endl;
        cvtColor(img, img_gray, COLOR_BGR2GRAY); //Convert from colour to grayscale image
        threshold(img_gray, thresh, 5, 255, THRESH_BINARY); //Apply threshold mask and convert to a binary image
        imshow("Binary img", thresh); //Display binary image in B&W
        findContours(thresh, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE); //Find contours and their hierarchy, storing every contour point
        maxAreaContourId = largestContourId(contours); //Store largest contour ID
        minEnclosingCircle(contours[maxAreaContourId], center, radius);
        circle(img_copy, center, radius - 20.0, Scalar(255, 255, 255), -1);
        bitwise_not(img_copy, img_copy);
        bitwise_xor(img, img_copy, result);
        imshow("Original Image", img); //Show original image
        imshow("Result", result);      //Show the result of the plot
        waitKey(0);                    //Wait for input
        imwrite(argv[2], result);      //Write the result to a file
        destroyAllWindows();
        return 0;
    }
    else
    {
        cout << "Please enter CLA in the format: *input_file* *output_file*" << endl;
        return 0;
    }
}

int largestContourId(vector<vector<Point>> contourVec) //Function to find ID of largest contour
{
    double maxArea = 0;        //Initialize max contour area
    int maxAreaContourId = -1; //Initialize largest contour ID
    for (int j = 0; j < contourVec.size(); j++) //Cycle through contour vector
    {
        double newArea = contourArea(contourVec.at(j)); //Area of the current contour
        if (newArea > maxArea) //Comparison of areas
        {
            maxArea = newArea;
            maxAreaContourId = j;
        }
    }
    return maxAreaContourId; //Largest contour ID to be returned
}
This is my output:
Output Fluoroscopic Image
I would like this code to work for all sorts of images, even if text is written on the ROI itself.
Please let me know if my current approach is feasible or not.
Suggestions are always welcome.
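Since suggestions are welcome: instead of the bitwise_not/bitwise_xor pair, a plain mask is usually simpler to reason about. Here is a minimal sketch reusing the center and radius already computed above (the radius - 20.0 margin is carried over from your version as an assumption):
Mat mask = Mat::zeros(img.size(), CV_8UC1);
circle(mask, center, radius - 20.0, Scalar(255), -1); //filled white disc marks the ROI
Mat roi;
img.copyTo(roi, mask); //copies only pixels where the mask is non-zero; the rest stays black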

How to relate an image with another image in OpenCV

I'm doing a project in OpenCV C++ where I make a reflection of a given image, just like the flip function but using the coordinates of each pixel. The problem is that the output image I get is all blue with a horizontal line; I believe my code is only affecting the first channel.
I tried to do imageReflectionFinal.at<Vec3b>(r,c) = image.at<Vec3b>(r,c); in order to solve it, but nothing changed. I'll leave the code below; thanks in advance.
Mat image = imread("image_dir/image.jpg");
Mat imageReflectionFinal = Mat::zeros(image.size(), image.type());
for (unsigned int r = 0; r < image.rows; r++) {
    for (unsigned int c = 0; c < image.cols; c++) {
        imageReflectionFinal.at<Vec3b>(r, c) = image.at<Vec3b>(r, c);
        Vec3b sourcePixel = image.at<Vec3b>(r, c);
        imageReflectionFinal.at<Vec3b>(r, c) = (uchar)(c, -r + (220)/2);
    }
}
If you don't want to use the flip function, you can mirror the x-coordinates (columns) of each row. Here is the code:
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;

int main() {
    //You can change this to "Mat3b" for 3-channel images
    Mat1b image = imread("/ur/image/directory/image.jpg", IMREAD_GRAYSCALE);
    Mat1b imageReflectionFinal = Mat::zeros(image.size(), image.type());
    for (int r = 0; r < image.rows; r++) {
        for (int c = 0; c < image.cols; c++) {
            imageReflectionFinal(r, c) = image(r, image.cols - 1 - c);
            //y-axis (r) doesn't change; only the x-axis (cols) is mirrored
        }
    }
    imshow("Result", imageReflectionFinal);
    waitKey(0);
    return 0;
}
This answer is also my reference.
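For reference, the mirror the question describes is also a one-liner with the built-in function (flipCode 1 flips around the y-axis, i.e. horizontally):
Mat flipped;
flip(image, flipped, 1);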

OpenCV: can't get segmentation of image using k-means

Before going deep into my question, I want you to know that I've read other posts on this forum, but none addresses my problem.
In particular, the post here answers the question "how to do this?" with k-means, while I already know that I have to use it; I'd like to know why my implementation doesn't work.
I want to use the k-means algorithm to divide the pixels of an input image into clusters according to their color. Then, after completing that task, I want each pixel to have the color of the center of the cluster it has been assigned to.
Taking as reference the OpenCV examples and other stuff retrieved on the web, I've designed the following code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat src = imread( argv[1], 1 );
// reshape matrix
Mat resized(src.rows*src.cols, 3, CV_8U);
int row_counter = 0;
for(int i = 0; i<src.rows; i++)
{
for(int j = 0; j<src.cols; j++)
{
Vec3b channels = src.at<Vec3b>(i,j);
resized.at<char>(row_counter,0) = channels(0);
resized.at<char>(row_counter,1) = channels(1);
resized.at<char>(row_counter,2) = channels(2);
row_counter++;
}
}
//cout << src << endl;
// change data type
resized.convertTo(resized, CV_32F);
// determine termination criteria and number of clusters
TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 1.0);
int K = 8;
// apply k-means
Mat labels, centers;
double compactness = kmeans(resized, K, labels, criteria, 10, KMEANS_RANDOM_CENTERS, centers);
// change data type in centers
centers.convertTo(centers, CV_8U);
// create output matrix
Mat result = Mat::zeros(src.rows, src.cols, CV_8UC3);
row_counter = 0;
int matrix_row_counter = 0;
while(row_counter < result.rows)
{
for(int z = 0; z<result.cols; z++)
{
int index = labels.at<char>(row_counter+z, 0);
//cout << index << endl;
Vec3b center_channels(centers.at<char>(index,0),centers.at<char>(index,1), centers.at<char>(index,2));
result.at<Vec3b>(matrix_row_counter, z) = center_channels;
}
row_counter += result.cols;
matrix_row_counter++;
}
cout << "Labels " << labels.rows << " " << labels.cols << endl;
//cvtColor( src, gray, CV_BGR2GRAY );
//gray.convertTo(gray, CV_32F);
imshow("Result", result);
waitKey(0);
return 0;
}
Anyway, at the end of the computation, I simply get a black image.
Do you know why?
Strangely, if I initialize the result matrix as
Mat result(src.size(), src.type())
then at the end of the algorithm it displays exactly the input image, without any segmentation.
In particular, I have two doubts:
1) Is it correct to lay out the RGB values of each pixel on a row of the resized matrix the way I've done it? Is there a way to do it without a loop?
2) What exactly does centers contain after the kmeans function finishes? It is a 3-column matrix; does it contain the RGB values of the clusters' centres?
Thanks for the support.
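Regarding the two doubts, here is a minimal sketch (assuming a standard 3-channel BGR input). The per-pixel copy loop can be replaced by reshape(); kmeans returns labels as CV_32S, so it must be read with at<int> rather than at<char>, and centers is a K x 3 CV_32F matrix whose rows are the cluster-centre BGR values:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main(int argc, char** argv)
{
    Mat src = imread(argv[1], 1);
    // flatten to an N x 3 single-channel matrix without a loop
    Mat samples = src.reshape(1, src.rows * src.cols);
    samples.convertTo(samples, CV_32F); // kmeans requires floating-point samples
    Mat labels, centers;
    TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 1.0);
    kmeans(samples, 8, labels, criteria, 10, KMEANS_RANDOM_CENTERS, centers);
    centers.convertTo(centers, CV_8U); // K x 3; each row holds a cluster centre's BGR values
    Mat result(src.rows, src.cols, CV_8UC3);
    for (int i = 0; i < src.rows * src.cols; i++)
    {
        int idx = labels.at<int>(i, 0); // labels is CV_32S
        result.at<Vec3b>(i / src.cols, i % src.cols) =
            Vec3b(centers.at<uchar>(idx, 0), centers.at<uchar>(idx, 1), centers.at<uchar>(idx, 2));
    }
    imshow("Result", result);
    waitKey(0);
    return 0;
}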
- The OpenCV program posted below assigns a user-preferred colour to a particular pixel of an image.
- ScanImageAndReduceC() is a method from the OpenCV tutorials for scanning through all the pixels of an image.
- I.at<uchar>(10, 10) = 255; is used to access a particular pixel value of an image.
Here is the code:
Mat& ScanImageAndReduceC(Mat& I)
{
    // accept only char type matrices
    CV_Assert(I.depth() == CV_8U);
    int channels = I.channels();
    int nRows = I.rows;
    int nCols = I.cols * channels;
    if (I.isContinuous())
    {
        nCols *= nRows;
        nRows = 1;
    }
    int i, j;
    uchar* p;
    for (i = 0; i < nRows; ++i)
    {
        p = I.ptr<uchar>(i);
        for (j = 0; j < nCols; ++j)
        {
            I.at<uchar>(10, 10) = 255;
        }
    }
    return I;
}
-------Main Program-------
Calling the above method in our main program:
diff = ScanImageAndReduceC(diff);
namedWindow("Difference", WINDOW_AUTOSIZE);// Create a window for display.
imshow("Difference", diff); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
return 0;
}

Weird pixel values when printing out?

I know this question has been answered, but when I tried the solutions, they did not get me anywhere.
Below is the code I have written to get the left-most and right-most boundaries of an image that was thresholded using the Canny edge detector in OpenCV.
#include <iostream>
#include <vector>
#include <math.h>
#include <string>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;

int main(int argc, char **argv)
{
    int thresh = 100, rows = 0, cols = 0;
    Mat src, src_gray, canny_output;
    src = imread(argv[1]);
    cvtColor(src, src_gray, CV_BGR2GRAY);
    blur(src_gray, src_gray, Size(3,3));
    Canny(src_gray, canny_output, thresh, thresh*3, 3);
    Mat boundary_image = Mat::zeros(canny_output.size(), CV_8UC1);
    rows = canny_output.rows;
    cols = canny_output.cols;
    cout << rows << endl << cols << endl;
    for (int i = 0; i < rows; i++)
    {
        //left-most edge pixel in this row
        for (int j = 0; j < cols; j++)
        {
            cout << canny_output.at<uchar>(i,j) << endl;
            if (canny_output.at<uchar>(i,j) == 255)
            {
                boundary_image.at<uchar>(i,j) = 255;
                break;
            }
        }
        //right-most edge pixel in this row (cols - 1 is the last valid column)
        for (int k = cols - 1; k >= 0; k--)
        {
            if (canny_output.at<uchar>(i,k) == 255)
            {
                boundary_image.at<uchar>(i,k) = 255;
                break;
            }
        }
    }
    imshow("boundary_image", boundary_image);
    waitKey(0);
    return 0;
}
Whether the algorithm works or not is secondary, but I am not able to view the values of the Canny edge-detected image; it shows some symbols or empty values. Can you please tell me where I am going wrong?
Note: the question is about the print statement
cout<<canny_output.at<uchar>(i,j)<<endl;
which does not give me any reasonable values to view the pixel values at the output. Similar questions were posted, and the answer was to use uchar as the data type, but in my case it is not working. It may sound rudimentary, but your help is greatly appreciated.
To correctly cout an unsigned char / uchar, you should cast it first, e.g.,
cout << (int)canny_output.at<uchar>(i,j) << endl;
To read on, check out Why "cout" works weird for "unsigned char"?
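An equivalent trick is the unary plus operator, which promotes the uchar to int before it reaches the stream:
cout << +canny_output.at<uchar>(i,j) << endl;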

Creating bigger image out of small image Mat [opencv, background subtraction]

I have an image i1. I am supposed to create another Mat m1 of size (image.rows*3, image.cols*3).
In m1, I'm supposed to fill the pixel values in the following way (please do see the image):
Here is my code:
#include <highgui.h>
#include "opencv2/opencv.hpp"
#include <fstream>
using namespace cv;

static Mat NeurMap1, NeurMap2, NeurMap3, frame, hsv_Frame;
std::ofstream myfile;
static const int n = 3; //scale factor: not defined in the original post, taken as 3 since the target size is (rows*3, cols*3)

void InitializeNeurMap(cv::Mat Channel[3])
{
    int i = 0, j = 0, m_i = 0, m_j = 0, t1 = 0, t2 = 0;
    for (i = 0; i < frame.rows; i++)
    {
        for (j = 0; j < frame.cols; j++)
        {
            t1 = i*n + 1; t2 = j*n + 1;
            for (m_i = t1 - 1; m_i <= t1 + 1; m_i++)
            {
                for (m_j = t2 - 1; m_j <= t2 + 1; m_j++)
                {
                    NeurMap1.at<uchar>(m_i, m_j) = frame.at<uchar>(i, j);
                }
            }
        }
    }
    std::cout << m_j;
    myfile << frame;
}

int main()
{
    myfile.open("NeurMaptext.txt");
    String filename = "BootStrap/b%05d.bmp"; //sequence of frames to be read
    VideoCapture cap(filename);
    if (!cap.isOpened()) //check if we succeeded
        return -1;
    namedWindow("edges", 1);
    //namedWindow("frames",1);
    Mat Channel[3];
    cap >> frame;
    NeurMap1 = Mat::zeros(frame.rows*n, frame.cols*n, frame.type());
    InitializeNeurMap(Channel);
    imshow("edges", NeurMap1);
    waitKey(33);
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;
    }
    system("pause");
    return 0;
}
The input image is an RGB [160*120] image. Why am I not getting the columns filled in the output image shown in the link above?
You can simply call resize() by passing the INTER_NEAREST parameter, i.e. using the nearest-neighbor interpolation.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    unsigned char data[] = { 1, 2, 3, 4, 5, 6 };
    Mat img(2, 3, CV_8UC1, data);
    cout << img << endl;
    Mat res(6, 9, CV_8UC1);
    resize(img, res, res.size(), 0, 0, INTER_NEAREST);
    cout << res << endl;
    return 0;
}
You will get:
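For this deterministic example, the printed matrices should be the 2x3 input followed by the 6x9 nearest-neighbour upscale, with every pixel replicated into a 3x3 block:
[  1,   2,   3;
   4,   5,   6]
[  1,   1,   1,   2,   2,   2,   3,   3,   3;
   1,   1,   1,   2,   2,   2,   3,   3,   3;
   1,   1,   1,   2,   2,   2,   3,   3,   3;
   4,   4,   4,   5,   5,   5,   6,   6,   6;
   4,   4,   4,   5,   5,   5,   6,   6,   6;
   4,   4,   4,   5,   5,   5,   6,   6,   6]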
If you are getting only one-third of the image filled, it is probably because you are passing a 3-channel (colour) image to the function and treating it as a single-channel image. So change the above code to:
void InitializeNeurMap(cv::Mat Channel[3])
{
    for (int i = 0; i < frame.rows; i++) {
        for (int j = 0; j < frame.cols; j++) {
            for (int k = 0; k < n; k++) {
                for (int l = 0; l < n; l++) {
                    NeurMap1.at<Vec3b>(i*n + k, j*n + l)[0] = frame.at<Vec3b>(i, j)[0]; //Access blue channel
                    NeurMap1.at<Vec3b>(i*n + k, j*n + l)[1] = frame.at<Vec3b>(i, j)[1]; //Access green channel
                    NeurMap1.at<Vec3b>(i*n + k, j*n + l)[2] = frame.at<Vec3b>(i, j)[2]; //Access red channel
                }
            }
        }
    }
    myfile << frame;
}
See the result.