I have been working on extracting the ROI from the image given below.
Fluoroscopic Image
This code is supposed to work for images of all resolutions.
My approach is to:
Find the largest contour and store its ID.
Use minEnclosingCircle() function to find the minimum bounding circle.
But this circle is being offset toward the top-left corner.
Here is my code:
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int largestContourId(vector<vector<Point>> contourVec); //Function to find ID of largest contour
int main(int argc, char **argv )
{
if (argc >= 3) //Check that both input and output paths were supplied
{
String path = argv[1]; //Path of image to be given here
Mat img = imread(path); // Initialize and read image
Mat img_copy = img.clone(); //Make a copy of the original image
Mat img_gray; //Initialize grey image matrix
Mat thresh; //Initialize threshold mask
Mat result; //Initialize resulting image matrix
int maxAreaContourId; //This stores ID of the largest contour
Point2f center; //MEC center
float radius; //MEC radius
vector<vector<Point>> contours; // Defining contour vector
vector<Vec4i> hierarchy; //Defining contour hierarchy vector
cout << "Resolution of image: " << img.size() << endl;
cvtColor(img,img_gray,COLOR_BGR2GRAY); // Convert from colored to grayscale image
threshold(img_gray,thresh,5,255,THRESH_BINARY); //Apply threshold mask and convert to a binary image
imshow("Binary img",thresh); //Display binary image in B&W
findContours(thresh,contours,hierarchy,RETR_TREE,CHAIN_APPROX_NONE); //Find contours and their hierarchy, keeping every contour point (CHAIN_APPROX_NONE)
maxAreaContourId = largestContourId(contours); //Store largest contour ID
minEnclosingCircle(contours[maxAreaContourId],center,radius);
circle(img_copy,center,radius-20.0,Scalar(255,255,255),-1);
bitwise_not(img_copy,img_copy);
bitwise_xor(img,img_copy,result);
imshow("Orignal Image",img); //Show orignal image
imshow("Result",result); //Show the result of the plot
waitKey(0); //Wait for input
imwrite(argv[2],result); //Write the result to a file
destroyAllWindows();
return 0;
}
else
{
cout << "Please enter CLA in the format: *input_file* *output_file*" << endl;
return 0;
}
}
int largestContourId(vector<vector<Point>> contourVec) //Function to find ID of largest contour
{
double maxArea = 0; //Initialize largest contour area
int maxAreaContourId = -1; //Initialize largest contour ID
for(int j = 0; j < contourVec.size(); j++) //Cycle through contour vector
{
double newArea = contourArea(contourVec.at(j)); //Variable for comparing area
if (newArea > maxArea) //Comparison of areas
{
maxArea = newArea;
maxAreaContourId = j;
}
}
return maxAreaContourId; //Largest contour ID to be returned
}
This is my output:
Output Fluoroscopic Image
I would like this code to work for all sorts of images, even if text is written on the ROI itself.
Please let me know if my current approach is feasible or not.
Suggestions are always welcome.
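One alternative I have been considering, sketched below and untested, is to draw the filled circle on a black single-channel mask of the same size and apply it with bitwise_and, instead of the not/xor trick (reusing img, center, and radius from the code above):
Mat mask = Mat::zeros(img.size(), CV_8UC1); //Black single-channel mask
circle(mask, center, (int)radius, Scalar(255), -1); //Filled white circle over the ROI
Mat masked;
bitwise_and(img, img, masked, mask); //Keep only the pixels inside the circle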
I've written code to create bounding boxes and draw the Farneback optical flow inside them. The optical flow is calculated normally beforehand and then drawn separately for each ROI box.
The problem comes when I draw the flow. The flow comes out looking normal, but shifted down and right. Here's the output; notice the bottom right has the flow of the moving person.
Here is the frame with the flow drawn everywhere, showing where the flow should be drawn.
The code attached is stripped down for simplicity, so excuse me if there are a few undeclared Matrices or something.
#include ...
using namespace cv;
using namespace std;
Mat currentImage, img, printr, gray ,prevgray, flow;
void getRectanglesandROI(Mat &Mask, Mat &imgTmp, Mat &imgOut, vector<Rect> &outBoxes);
void DrawFlowMap(Mat Image, Mat ROI, Rect Box);
int main (int argc, char *argv[]) {
VideoCapture inVid("input.avi");
if (!inVid.isOpened()) {
cout << "Failed to open the input video" << endl;
exit(5);}
int loop=0, count =0, MaxTargets=0;
bool test=true;
namedWindow("Detected");
int ex = inVid.get(CV_CAP_PROP_FOURCC);
double fps = inVid.get(CV_CAP_PROP_FPS);
int wait=1000/fps;
Size S = Size( (int) inVid.get(CV_CAP_PROP_FRAME_WIDTH), (int) inVid.get(CV_CAP_PROP_FRAME_HEIGHT));
int fr =inVid.get(CV_CAP_PROP_FRAME_COUNT);
VideoWriter output; // Open the output
output.open("output.avi", ex, fps, S, true);
if (!output.isOpened())
{
cout << "Could not open the output video for write: " << endl;
return -1;
}
//=============4EVR=================
while(test){
inVid>>currentImage;
if (currentImage.empty())
{
count++;
//if (count==1){if (waitKey(0)==27){waitKey(2);}}
if (count==1){fs.release(); break;}
cout <<"Max Targets=" <<MaxTargets<< endl<< "End of video, looping" << endl<<endl;
inVid.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
loop=0;
}
cvtColor(currentImage, gray,CV_RGB2GRAY);
if (prevgray.empty()){gray.copyTo(prevgray);}
currentImage.copyTo(img);
calcOpticalFlowFarneback(prevgray,gray,flow,0.5,3,21,20,5,1.2,0);
vector<Rect> outputBoxes;
getRectanglesandROI(fgMaskMOG2, img, currentImage, outputBoxes);
gray.copyTo(prevgray);
imshow("Detected", currentImage);
waitKey(wait);
}
return 0;
}
//============END===========================================================
void getRectanglesandROI(Mat &Mask, Mat &imgTmp, Mat &imgOut, vector<Rect> &outBoxes){
vector<vector<Point> > v;
vector<int> targets;
int tarArea=1;
findContours(Mask, v, CV_RETR_EXTERNAL/*CV_RETR_LIST*/, CV_CHAIN_APPROX_SIMPLE);
for (int j = 0; j < v.size(); j++) {
if (tarArea < v[j].size()) { // excluding tiny contours
targets.push_back(j);
}
}
for (int j = 0; j < targets.size(); j++) {
drawContours(imgTmp, v, targets[j], Scalar(255, 0, 255), 1, 8);
Rect rect = boundingRect(v[targets[j]]);
Mat roi = currentImage(rect);
DrawFlowMap(currentImage, roi, rect);
}
}
void DrawFlowMap(Mat Image, Mat ROI, Rect Box){
Point pt1 = Point(Box.x, Box.y);
for( int y=0; y<ROI.rows; y+=5){ //this is the issue area, probably.
for (int x=0;x<ROI.cols;x+=5){
const Point2f& flowatxy=flow.at<Point2f>(y,x);
line(Image, Point(cvRound(pt1.x+x), cvRound(pt1.y+y)),
Point(cvRound(pt1.x+x+flowatxy.x), cvRound(pt1.y+y+flowatxy.y)), Scalar(0,255,0)); ///FLOW LINES
}
}
}
Easy peasy, after looking at the images for a while (crying) I noticed that it was drawing the flow in the right places, but the flowatxy in that place was wrong. So I changed the flowatxy declaration to the following:
const Point2f& flowatxy=flow.at<Point2f>( pt1.y+y , pt1.x+x );
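For reference, the corrected function then looks roughly like this (same assumptions as the stripped-down code above: flow is the full-frame Farneback field and Box is the bounding box in frame coordinates):
void DrawFlowMap(Mat Image, Mat ROI, Rect Box){
    Point pt1 = Point(Box.x, Box.y);
    for (int y = 0; y < ROI.rows; y += 5){
        for (int x = 0; x < ROI.cols; x += 5){
            //Sample the full-frame flow field at frame coordinates, not ROI-local coordinates
            const Point2f& flowatxy = flow.at<Point2f>(pt1.y + y, pt1.x + x);
            line(Image, Point(cvRound(pt1.x + x), cvRound(pt1.y + y)),
                 Point(cvRound(pt1.x + x + flowatxy.x), cvRound(pt1.y + y + flowatxy.y)),
                 Scalar(0, 255, 0)); //flow lines
        }
    }
}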
Before going deep into my question, I want you to know that I've read other posts on this forum, but none addresses my problem.
In particular, the post here answers the question "how to do this?" with k-means, while I already know that I have to use k-means; what I'd like to know is why my implementation doesn't work.
I want to use k-means algorithm to divide pixels of an input image into clusters, according to their color. Then, after completing such task, I want each pixel to have the color of the center of the cluster it's been assigned to.
Taking as reference the OpenCV examples and other stuff retrieved on the web, I've designed the following code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat src = imread( argv[1], 1 );
// reshape matrix
Mat resized(src.rows*src.cols, 3, CV_8U);
int row_counter = 0;
for(int i = 0; i<src.rows; i++)
{
for(int j = 0; j<src.cols; j++)
{
Vec3b channels = src.at<Vec3b>(i,j);
resized.at<char>(row_counter,0) = channels(0);
resized.at<char>(row_counter,1) = channels(1);
resized.at<char>(row_counter,2) = channels(2);
row_counter++;
}
}
//cout << src << endl;
// change data type
resized.convertTo(resized, CV_32F);
// determine termination criteria and number of clusters
TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 1.0);
int K = 8;
// apply k-means
Mat labels, centers;
double compactness = kmeans(resized, K, labels, criteria, 10, KMEANS_RANDOM_CENTERS, centers);
// change data type in centers
centers.convertTo(centers, CV_8U);
// create output matrix
Mat result = Mat::zeros(src.rows, src.cols, CV_8UC3);
row_counter = 0;
int matrix_row_counter = 0;
while(row_counter < result.rows)
{
for(int z = 0; z<result.cols; z++)
{
int index = labels.at<char>(row_counter+z, 0);
//cout << index << endl;
Vec3b center_channels(centers.at<char>(index,0),centers.at<char>(index,1), centers.at<char>(index,2));
result.at<Vec3b>(matrix_row_counter, z) = center_channels;
}
row_counter += result.cols;
matrix_row_counter++;
}
cout << "Labels " << labels.rows << " " << labels.cols << endl;
//cvtColor( src, gray, CV_BGR2GRAY );
//gray.convertTo(gray, CV_32F);
imshow("Result", result);
waitKey(0);
return 0;
}
Anyway, at the end of computation, I simply get a black image.
Do you know why?
Strangely, if I initialize result matrix as
Mat result(src.size(), src.type())
at the end of algorithm it will display exactly the input image, without any segmentation.
In particular, I have two doubts:
1) Is it correct to lay out the RGB values of a pixel on each row of the resized matrix the way I've done it? Is there a way to do it without a loop?
2) What exactly is the content of centers after the k-means function finishes? It's a 3-column matrix; does it contain the RGB values of the clusters' centers?
Thanks for the support.
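For reference, a minimal sketch of a loop-free version (assuming src is the CV_8UC3 BGR image loaded above; labels come back as CV_32S and centers as a K x 3 CV_32F matrix):
// Sketch: reshape the image to an N x 3 float matrix without a per-pixel loop
Mat samples;
src.reshape(1, src.rows * src.cols).convertTo(samples, CV_32F);

Mat labels, centers;
TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 1.0);
kmeans(samples, 8, labels, criteria, 10, KMEANS_RANDOM_CENTERS, centers);

// labels is N x 1 CV_32S (read with at<int>), centers is K x 3 CV_32F (one BGR triplet per cluster)
Mat result(src.size(), src.type());
for (int i = 0; i < src.rows * src.cols; i++)
{
    int idx = labels.at<int>(i, 0);
    result.at<Vec3b>(i / src.cols, i % src.cols) =
        Vec3b(saturate_cast<uchar>(centers.at<float>(idx, 0)),
              saturate_cast<uchar>(centers.at<float>(idx, 1)),
              saturate_cast<uchar>(centers.at<float>(idx, 2)));
}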
- The OpenCV program posted below assigns a user-preferred color to a particular pixel in an image.
- ScanImageAndReduceC() is the function from the OpenCV tutorial for scanning through all the pixels of an image.
- I.at<uchar>(10, 10) = 255; is used to access (and set) a particular pixel value of an image.
Here is the code:
Mat& ScanImageAndReduceC(Mat& I)
{
// accept only char type matrices
CV_Assert(I.depth() == CV_8U);
int channels = I.channels();
int nRows = I.rows;
int nCols = I.cols * channels;
if (I.isContinuous())
{
nCols *= nRows;
nRows = 1;
}
int i, j;
uchar* p;
for (i = 0; i < nRows; ++i)
{
p = I.ptr<uchar>(i);
for (j = 0; j < nCols; ++j)
{
I.at<uchar>(10, 10) = 255; //Set the single pixel at (10, 10) to 255 (white) on every iteration
}
}
return I;
}
-------Main Program-------
Calling the above method in our main program
diff = ScanImageAndReduceC(diff);
namedWindow("Difference", WINDOW_AUTOSIZE);// Create a window for display.
imshow("Difference", diff); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
return 0;
}
So I have two thermal images (potato quality, I know, but it is what I have to work with), the first two images in this album. I am using some code from a tutorial that is super common, but I have edited a lot of it.
http://imgur.com/a/Zch7C
So what I am doing in my code is
1. Detecting KeyPoints
2. Describe the KeyPoints
3. Match the KeyPoints
4. Keep only good points
5. Gather both Query and Train points
6. Find Homography
7. Warp one of the images
8. Repeat the above steps for the warped image and the other original image
Now my question is: Should the change in the (x,y) distance between two of the same points on the two different images be the same for every set of points?
The whole frame is moving in the same direction so no matter what matching points we look at the change should be the same should it not?
What I am finding is that the points all differ in distance: some are 5 pixels apart and some are 700 pixels. The only thing I can think is happening is that the match is not actually good and it is comparing two points that are nowhere near the same point in the separate frames.
I need to know what the offset is so that I can overlay one frame on top of the other, then average out the pixel values that are overlapping and build a new image from the composite/average of the two originals.
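One idea I have for estimating that offset, sketched below and untested, is to take the median displacement over the good matches (reusing obj and scene from the code that follows; needs <algorithm> for nth_element):
vector<float> dxs, dys;
for (size_t i = 0; i < obj.size(); i++)
{
    dxs.push_back(scene[i].x - obj[i].x); // per-match displacement in x
    dys.push_back(scene[i].y - obj[i].y); // per-match displacement in y
}
nth_element(dxs.begin(), dxs.begin() + dxs.size() / 2, dxs.end());
nth_element(dys.begin(), dys.begin() + dys.size() / 2, dys.end());
// The median is robust to the wildly wrong matches described above
Point2f offset(dxs[dxs.size() / 2], dys[dys.size() / 2]);
cout << "Estimated frame offset: " << offset << endl;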
My code I am using is below:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "stitch.cpp"
#include "opencv2\stitching\stitcher.hpp"
#include "opencv2\nonfree\features2d.hpp"
using namespace cv;
void readme();
Mat describe(Mat img, vector<KeyPoint> key);
vector<KeyPoint> detect(Mat img);
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo);
/** #function main */
int main(int argc, char** argv)
{
VideoCapture cap("vid.mp4");
vector<Mat> Vimg;
cout << "Grabbing Images" << endl;
for (int i = 0; i < 2; i++)
{
cout << "Grabbing Frame" << i << endl;
Mat temp;
cap.read(temp);
Vimg.push_back(temp);
imwrite("image" + to_string(i) + ".jpg", temp);
for (int j = 0; j < 80; j++)
cap.grab();
}
//Mat cimg1 = Vimg[0];
//Mat cimg2 = Vimg[1];
Mat cimg1 = imread("cap1.png");
Mat cimg2 = imread("cap2.png");
cout << "Starting Stitching" << endl;
//Converting the original images to grayscale
Mat img1, img2;
cvtColor(cimg1, img1, CV_BGR2GRAY);
cvtColor(cimg2, img2, CV_BGR2GRAY);
//Detecting Keypoints for original two images
vector<KeyPoint> keypointOne = detect(img1), keypointTwo = detect(img2);
Mat mkeypointOne, mkeypointTwo;
drawKeypoints(cimg1, keypointOne, mkeypointOne, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(cimg2, keypointTwo, mkeypointTwo, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointOne.jpg", mkeypointOne);
imwrite("keypointTwo.jpg", mkeypointTwo);
//Computing descriptors
Mat descriptionOne = describe(img1, keypointOne), descriptionTwo = describe(img2, keypointTwo);
//Matching descriptors
vector<DMatch> matches = match(descriptionOne, descriptionTwo);
double max = 0;
double min = 100;
//Calculation of max and min distances
for (int i = 0; i < matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min) min = dist;
if (dist > max) max = dist;
}
vector<DMatch> goodMatches;
//Keep only good matches
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 2*min)
goodMatches.push_back(matches[i]);
}
//Localize
vector<Point2f> obj;
vector<Point2f> scene;
for (int i = 0; i < goodMatches.size(); i++)
{
obj.push_back(keypointOne[goodMatches[i].queryIdx].pt);
scene.push_back(keypointTwo[goodMatches[i].trainIdx].pt);
}
/*
for (int k = 0; k < obj.size(); k++)
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj[k] << endl;
cout << "\tImage 2 Point: " << scene[k] << endl;
}*/
Mat H = findHomography(obj, scene, CV_RANSAC);
//Warping the image to fit on first image
Mat cwarpImage, warpImage;
//TODO: figure out the right size for this image that is created
warpPerspective(cimg2, cwarpImage, H, Size(img2.cols + img1.cols, img2.rows + img1.rows));
/*
Mat result;
Mat half(warpImage, Rect(0, 0, img2.cols, img2.rows));
cimg2.copyTo(half);
*/
imwrite("warp.jpg", warpImage);
//Processing Image
cvtColor(cwarpImage, warpImage, CV_BGR2GRAY);
vector<KeyPoint> keypointWarp = detect(warpImage);
Mat descriptionWarp = describe(warpImage, keypointWarp);
vector<DMatch> warpMatches = match(descriptionOne, descriptionWarp);
Mat mkeypointWarp;
drawKeypoints(cwarpImage, keypointWarp, mkeypointWarp, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointWarp.jpg", mkeypointWarp);
Mat match;
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//imshow("match", match);
imwrite("matches.jpg", match);
//Localize
vector<Point2f> obj2;
vector<Point2f> scene2;
for (int i = 0; i < warpMatches.size(); i++)
{
obj2.push_back(keypointOne[warpMatches[i].queryIdx].pt);
scene2.push_back(keypointWarp[warpMatches[i].trainIdx].pt);
}
for (int k = 0; k < obj2.size(); k++)
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj2[k] << endl;
cout << "\tImage 2 Point: " << scene2[k] << endl;
}
vector<unsigned char> inliersMask;
Mat H2 = findHomography(obj, scene, CV_RANSAC, 3, inliersMask);
vector<DMatch> inliers;
for (size_t i = 0; i < inliersMask.size(); i++)
{
if (inliersMask[i])
inliers.push_back(warpMatches[i]);
}
warpMatches.swap(inliers);
Mat match2;
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match2, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("homorgraphyOutlierMatch.jpg", match2);
cout << "Writing Warp Image" << endl;
imwrite("warpimage.jpg", warpImage);
cout << H << endl;
waitKey(0);
}
Mat describe(Mat img, vector<KeyPoint> key)
{
Mat temp;
SurfDescriptorExtractor extractor;
extractor.compute(img, key, temp);
return temp;
}
vector<KeyPoint> detect(Mat img)
{
vector<KeyPoint> temp;
SurfFeatureDetector detector(400);
detector.detect(img, temp);
return temp;
}
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo)
{
vector<DMatch> temp;
BFMatcher matcher(NORM_L2, true);
matcher.match(descriptionOne, descriptionTwo, temp);
return temp;
}
EDIT:
I set cross-check to true in the BFMatcher and implemented homography outlier detection from Mastering OpenCV. Here are the two new results. I was not sure if I was supposed to implement both cross-check and knnMatch, so I only did cross-check.
http://imgur.com/a/1P7Xt
As you can see they are a lot better, but there are still some matches that should not be there. I ran it with both full-color and thermal images.
New code is above as well.
While the change in distance between point correspondences won't be the same for all points in the general case, you wouldn't expect to have deltas of the order of 700 pixels with an image size of 1300ish.
By inspection of the images you've posted, it's clear that you have point correspondences which are not correct (simply, you have lots of crossed lines in your matches between images).
This suggests that your step 4 isn't doing a great job. You might want to try setting the second parameter of your Brute Force matcher to true to enable the cross-check test:
BFMatcher matcher(NORM_L2, true);
You might also want to consider the ratio test for outlier removal, as described here: How to apply Ratio Test in order to remove outliers in a multiple object detection matcher?
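A minimal sketch of that ratio test, assuming the same SURF descriptors used in your match() function (the 0.75 threshold is just the usual starting point, not a fixed rule):
BFMatcher matcher(NORM_L2); // cross-check must be off when using knnMatch
vector<vector<DMatch> > knnMatches;
matcher.knnMatch(descriptionOne, descriptionTwo, knnMatches, 2);

vector<DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); i++)
{
    // Keep a match only if it is clearly better than the second-best candidate
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}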
I have the following contour:
https://drive.google.com/file/d/0B45BJEUVEkjseFd3X3RITzM5S3c/edit?usp=sharing
containing the following points (printed in order):
https://drive.google.com/file/d/0B45BJEUVEkjsN3NIRU5lOFBDb00/edit?usp=sharing
However, when I calculate the area of this contour (using the function contourArea), I get 157, which is too low for the size of that contour. I expect it to be in the thousands. Why is the contour area being calculated incorrectly, and how may I fix it?
The following is the code I'm using to calculate the areas of all contours of the image. The contour of interest is the last one. The original image I'm using is here:
https://drive.google.com/file/d/0B45BJEUVEkjsbGhXM3E3UW1lZWs/edit?usp=sharing
int main(int argc, char* argv[])
{
Mat imgOriginal = imread(argv[1], 0);
if(imgOriginal.empty())
return -1;
Mat img;
resize(imgOriginal, img, Size(640, 480));
medianBlur(img, img, 11);
Canny(img, img, 25, 100);
vector< vector<Point> > contours;
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (int i = 0; i < contours.size(); i++)
cout << "Area " << i << ": " << contourArea(contours[i]) << endl;
return 0;
}
Also, I noticed that several points in the contour are duplicates (I'm not sure why), which may be causing the area to be calculated incorrectly.
Maybe because your contour isn't closed?
Update: I see you're feeding it the result of a Canny operation. I would try the code on an image with a very well-defined closed shape to test the code and rule out a problem in the specific contour, something like the one used in the moments tutorial.
Maybe this discussion will be helpful.
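A quick synthetic sanity check along those lines might look like this (sketch only, with an arbitrary 640x480 canvas):
Mat test = Mat::zeros(480, 640, CV_8UC1);
rectangle(test, Point(100, 100), Point(300, 200), Scalar(255), -1); // filled 200 x 100 rectangle
vector< vector<Point> > testContours;
findContours(test, testContours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
cout << "Area: " << contourArea(testContours[0]) << endl; // should print roughly 200 * 100 = 20000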
Because your image has no closed contours, only a series of green pixels on a background, you need to close the contour to get the contour area.
Correction:
int main(int argc, char* argv[])
{
Mat imgOriginal = imread(argv[1], 0);
if(imgOriginal.empty())
return -1;
Mat img;
resize(imgOriginal, img, Size(640, 480));
medianBlur(img, img, 11);
Canny(img, img, 25, 100);
vector< vector<Point> > contours;
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
cout << "Area " << ": " << contourArea(contours[1]) << endl;
return 0;
}
Here is my code, which uses OpenCV 2.4.5
Histogram1D.h
#ifndef HISTOGRAM1D_H
#define HISTOGRAM1D_H
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
class Histogram1D
{
public:
Histogram1D();
//Histogram generators
MatND getHistogram(Mat );
Mat getHistogramImage(Mat );
//Generate Negative Image
Mat applyLookup(Mat ,Mat );
//Generate improved image with equalized histogram
Mat equalize(Mat image);
private:
int histSize[1];//Number of bins
float hRanges[2];//Max and Min pixel values
const float *ranges[1];
int channels[1];//Only one channel will be used
};
#endif // HISTOGRAM1D_H
Histogram1D.cpp
#include "Histogram1D.h"
Histogram1D::Histogram1D()
{
histSize[0] = 256;
hRanges[0] = 0.0;
hRanges[1] = 255.0;
ranges[0] = hRanges;
channels[0] = 0;
}
MatND Histogram1D::getHistogram(Mat image)
{
MatND hist;
cv::calcHist(&image,1,channels,Mat(),hist,1,histSize,ranges);
return hist;
}
Mat Histogram1D::getHistogramImage(Mat image)
{
MatND histo = getHistogram(image);
//Get minimum and maximum value bins
double minVal = 0;
double maxVal = 0;
minMaxLoc(histo,&minVal,&maxVal,0,0);
//Image on which to display histogram
Mat histImage(histSize[0],histSize[0],CV_8U,Scalar(255));
//Set highest point at 90% of nbins
int hpt = static_cast<int>(0.9,histSize[0]);
//Draw a vertical line for each bin
for(int i=0;i<histSize[0];i++)
{
float binVal = histo.at<float>(i);
int intensity = static_cast<int>(binVal*hpt/maxVal);
line(histImage,Point(i,histSize[0]),Point(i,histSize[0]-intensity),Scalar::all(0));
}
return histImage;
}
Mat Histogram1D::applyLookup(Mat image,Mat lookup)
{
Mat result;
cv::LUT(image,lookup,result);
return result;
}
Mat Histogram1D::equalize(Mat image)
{
Mat result;
cv::equalizeHist(image,result);
return result;
}
HistogramMain.cpp
#include "Histogram1D.h"
int main()
{
Histogram1D h;
Mat image = imread("C:/Users/Public/Pictures/Sample Pictures/Penguins.jpg",CV_LOAD_IMAGE_GRAYSCALE);
cout << "Number of Channels: " << image.channels() << endl;
namedWindow("Image");
imshow("Image",image);
Mat histogramImage = h.getHistogramImage(image);
namedWindow("Histogram");
imshow("Histogram",histogramImage);
Mat thresholded;
threshold(image,thresholded,60,255,THRESH_BINARY);
namedWindow("Binary Image");
imshow("Binary Image",thresholded);
Mat negativeImage;
int dim(256);
negativeImage = h.applyLookup(image,Mat(1,&dim,CV_8U));
namedWindow("Negative Image");
imshow("Negative Image",negativeImage);
Mat equalizedImage;
equalizedImage = h.equalize(image);
namedWindow("Equalized Image");
imshow("Equalized Image",equalizedImage);
waitKey(0);
return 0;
}
When you run this code, the negative image is 100% black! The most amazing thing is, if you remove all other code from HistogramMain.cpp but keep the code below, which is related to the negative image, you will get the correct negative image! Why is this?
I am using the latest version of Qt, which uses the VS 2010 compiler.
Mat negativeImage;
int dim(256);
negativeImage = h.applyLookup(image,Mat(1,&dim,CV_8U));
namedWindow("Negative Image");
imshow("Negative Image",negativeImage);
Your primary difficulty is that the expression Mat(1,&dim,CV_8U) allocates memory for a cv::Mat, but does not initialize any values. It is possible that your environment may fill uninitialized memory with zeros, which would explain the black image after calling applyLookup(). In any case, you should initialize the values in your lookup table in order to achieve correct results. For inverting the image, it is easy:
int dim(256);
cv::Mat tab(1,&dim,CV_8U);
uchar* ptr = tab.ptr();
for (size_t i = 0; i < tab.total(); ++i)
{
ptr[i] = 255 - i;
}
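You would then pass tab to applyLookup() in place of the uninitialized matrix, along the lines of:
negativeImage = h.applyLookup(image, tab);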
There are a few other issues with your code:
The line
int hpt = static_cast<int>(0.9,histSize[0]);
should be
int hpt = static_cast<int>(0.9*histSize[0]);
to do what your comment indicates. Pay attention to your compiler warnings!
You also have problems with your histogram ranges.
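In particular, calcHist treats the upper boundary of a uniform range as exclusive, so for 256 bins covering 8-bit values the constructor would normally set (sketch, matching the member names above):
hRanges[0] = 0.0;
hRanges[1] = 256.0; // upper boundary is exclusive, so 256 covers intensity value 255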
By the way, with OpenCV's Python bindings images are now NumPy arrays, so to take the negative of an 8-bit grayscale image in Python, it's simply:
img = 255 - img