I have got the corner points and I'm trying to fit the lines using cv::fitLine,
but I get lines that pass through the origin (0, 0), as shown in the picture.
I also have the projection matrix, the view matrix and the camera intrinsic parameters, if that would help.
I'm trying to compute the volume of the box shown in the figure.
int main( int argc, char** argv )
{
Mat src, src_copy, edges, dst;
src = imread( "freezeFrame__1508152029892.png", 0 );
src_copy = src.clone();
GaussianBlur( src, edges, Size( 5, 5 ), 1.5, 1.5 );
erode( edges, edges, Mat() );// these lines may need to be optimized
dilate( edges, edges, Mat() );
dilate( edges, edges, Mat() );
erode( edges, edges, Mat() );
Canny( edges, dst, 1, 10, 3 ); // canny parameters may need to be optimized
imshow( "canny", dst );
std::vector< cv::Point2f > corners;
// maxCorners – The maximum number of corners to return. If there are more
// corners than that, the strongest of them will be returned.
int maxCorners = 10;
// qualityLevel – Characterizes the minimal accepted quality of image corners;
// the value of the parameter is multiplied by the best corner quality
// measure (which is the min eigenvalue, see cornerMinEigenVal(),
// or the Harris function response, see cornerHarris()).
// Corners whose quality measure is less than the product are rejected.
// For example, if the best corner has the quality measure = 1500,
// and qualityLevel = 0.01, then all the corners whose quality measure is
// less than 15 are rejected.
double qualityLevel = 0.01;
// minDistance – The minimum possible Euclidean distance between the returned corners
double minDistance = 20.;
// mask – The optional region of interest. If the image is not empty (it
// needs to have the type CV_8UC1 and the same size as image), it specifies
// the region in which the corners are detected.
cv::Mat mask;
// blockSize – Size of the averaging block for computing derivative covariation
// matrix over each pixel neighborhood, see cornerEigenValsAndVecs()
int blockSize = 3;
// useHarrisDetector – Indicates whether to use the Harris operator or cornerMinEigenVal()
bool useHarrisDetector = false;
// k – Free parameter of Harris detector
double k = 0.04;
cv::goodFeaturesToTrack( src, corners, maxCorners, qualityLevel, minDistance, mask, blockSize, useHarrisDetector, k );
std::vector<Vec4f> lines;
for ( size_t i = 0; i < corners.size(); i++ )
{
cv::Point2f pt = corners[i];
for ( size_t j = i + 1; j < corners.size(); j++ )
{
cv::Point2f endpt = corners[j];
std::vector<cv::Point2f> points;
points.push_back( pt );
points.push_back( endpt );
Vec4f line;
cv::fitLine( points, line, CV_DIST_L2, 0, 0.01, 0.01 );
lines.push_back( line );
}
}
for ( size_t i = 0; i < lines.size(); i++ )
{
cv::Vec4i v = lines[i];
line( src, Point( v[0], v[1] ), Point( v[2], v[3] ), Scalar( 0, 0, 255 ), 3, 4 );
}
for ( size_t i = 0; i < corners.size(); i++ )
{
cv::circle( src, corners[i], 10, cv::Scalar( 255. ), -1 );
}
imshow( "line src", src );
imshow("line dest", edges );
cv::waitKey( 0 );
return 0;
}
Read the doc:
line – Output line parameters. In case of 2D fitting, it should be a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on the line.
So you have to draw your line by:
Point2f linePoint = Point2f( v[2], v[3] );
Point2f lineDirection = Point2f( v[0], v[1] );
float factor = 50; // lineDirection is normalized, so factor is the desired line length
line( src, linePoint, linePoint + factor * lineDirection, Scalar( 0, 0, 255 ), 3, 4 );
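Putting it together, a minimal sketch of the corrected drawing loop (assuming lines holds the Vec4f results from the fitLine calls in your code above):
for ( size_t i = 0; i < lines.size(); i++ )
{
    Vec4f v = lines[i];
    Point2f linePoint( v[2], v[3] );      // (x0, y0): a point on the line
    Point2f lineDirection( v[0], v[1] );  // (vx, vy): normalized direction
    float factor = 50;                    // line length to draw, in pixels
    line( src, linePoint, linePoint + factor * lineDirection, Scalar( 0, 0, 255 ), 3, 4 );
}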
Related
I need some help detecting the dominant intensity area of an image. Suppose I have the following images; I'd like to automatically detect the dominant intensity area and find the mean/average intensity value of that area.
Here, in Image-1 the dominant intensity area is the light gray area, and in Image-2 it is the dark gray area. How can I detect the dominant areas in those images and find the mean intensity value of each?
Image-1:
Image-2:
Any suggestion will be helpful!
Update: I used the following code to get the histogram for Image-2. Figure-3 shows the histogram. Now I need to find out which bin holds most of the values, i.e. the mode of the histogram, but I couldn't figure out how to compute the bin with the most values.
Figure-3:
int main(int, char**)
{
Mat gray=imread("Depth_frames_27/Image23.png",0);
namedWindow( "Gray", 1 ); imshow( "Gray", gray );
// Initialize parameters
int histSize = 256; // bin size
float range[] = { 0, 256 }; // upper bound is exclusive
const float *ranges[] = { range };
// Calculate histogram
MatND hist;
calcHist( &gray, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false );
double minVal=0, maxVal=0;
minMaxLoc(hist, &minVal, &maxVal, 0, 0);
// cout<<"Max:"<<maxVal<<endl;
// cout<<"Min:"<<minVal<<endl;
// Show the calculated histogram in command window
double total = gray.rows * gray.cols; // total number of pixels (not used below)
for( int h = 0; h < histSize; h++ )
{
float binVal = hist.at<float>(h);
cout<<" "<<binVal;
}
// Plot the histogram
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC1, Scalar( 0,0,0) );
normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ),
Scalar( 255, 0, 0), 2, 8, 0 );
}
namedWindow( "Result", 1 ); imshow( "Result", histImage );
waitKey();
return 0;
}
Update-2: Worked it out finally! I did the following to get the location and value of the maximum bin of the histogram.
double minVal = 0, maxVal = 0;
int minIdx[2], maxIdx[2]; // hist from calcHist is 2D (histSize x 1), so pass 2-element index arrays
minMaxIdx( hist, &minVal, &maxVal, minIdx, maxIdx );
cout << "Max:" << maxVal << endl;
cout << "MaxIdx:" << maxIdx[0] << endl;
maxIdx[0] gives the location of the highest bin of the histogram, and that is the dominant intensity value for the image!
What you are after is the mode of the histogram of intensities (the bin with the highest frequency). It directly gives you the dominant intensity.
For the given images, the histogram is made of two perfectly sharp peaks.
In some bad cases, the main peak can be spread over several secondary peaks. In such cases, you need to apply smoothing to the histogram before taking the mode.
It can be interesting to look at the relative heights of the first and second maxima, to check how dominant the color is.
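A minimal sketch of that smoothing step (assuming hist is the 256-bin CV_32F histogram from calcHist, as in the code above): blur along the bin axis, then take the mode with minMaxIdx.
Mat smoothed;
blur( hist, smoothed, Size( 1, 5 ) ); // 5-bin box filter along the bins (hist is histSize x 1)
double minVal = 0, maxVal = 0;
int minIdx[2], maxIdx[2]; // the histogram Mat is 2D, so pass 2-element index arrays
minMaxIdx( smoothed, &minVal, &maxVal, minIdx, maxIdx );
int dominantIntensity = maxIdx[0]; // mode of the smoothed histogram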
How can I know the number of corners calculated by cornerHarris? The function I wrote is as follows:
...
Mat gray;
cvtColor( img, gray, CV_BGR2GRAY );
int thresh = 160;
Mat dst, dst_norm, dst_norm_scaled;
dst = Mat::zeros( img.size(), CV_32FC1 );
// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
// Detecting corners
cornerHarris( gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
cornerHarris does not calculate a specific number of corners. It creates a new image dst that has the same size as your original image gray. You define a threshold value; every pixel whose response is above it is considered a corner. The smaller you make the threshold, the more corners you will find.
In your case you can find the corners for a predefined value of thresh like this:
for( int j = 0; j < dst_norm.rows ; j++ ){
for( int i = 0; i < dst_norm.cols; i++ ){
if( (int) dst_norm.at<float>(j,i) > thresh ){
/* Whatever you would like to do with that corner */
}
}
}
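For instance, to answer the original question and simply count the corners above thresh, a minimal sketch:
int cornerCount = 0;
for( int j = 0; j < dst_norm.rows; j++ )
    for( int i = 0; i < dst_norm.cols; i++ )
        if( (int) dst_norm.at<float>(j,i) > thresh )
            cornerCount++;
cout << "corners found: " << cornerCount << endl;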
See this Harris Corner Detector Tutorial and cornerHarris OpenCV documentation for more information.
I have a binary image:
I want to remove the bottom two crescent shapes (their size and area may change between images) from the image, or at least differentiate them from the rest.
I tried the Hough circle transform to detect the curves, since each resembles a portion of a circle, but that code was not working:
int main(int argc, char** argv)
{
Mat src, gray;
src = imread("446.bmp", 1);
namedWindow("src", 1);
imshow("src", src);
waitKey(0);
cvtColor(src, gray, CV_BGR2GRAY);
// Reduce the noise so we avoid false circle detection
GaussianBlur(gray, gray, Size(9, 9), 2, 2);
vector<Vec3f> circles;
// Apply the Hough Transform to find the circles
HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 1, 30, 100, 100, 0, 0);
// Draw the circles detected
for (size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);// circle center
circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0);// circle outline
cout << "center : " << center << "\nradius : " << radius << endl;
}
// Show your results
namedWindow("Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE);
imshow("Hough Circle Transform Demo", src);
waitKey(0);
return 0;
}
But no circles are drawn and the crescent shapes are not detected at all. Any idea where I went wrong?
EDIT 1: I have added some other images too:
EDIT 2: a new image to try:
I made some modifications to the code posted for another question; you could try it:
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
//! Compute the distance between two points
/*! Compute the Euclidean distance between two points
 *
 * @param a Point a
 * @param b Point b
 */
static double distanceBtwPoints(const cv::Point2f &a, const cv::Point2f &b)
{
double xDiff = a.x - b.x;
double yDiff = a.y - b.y;
return std::sqrt((xDiff * xDiff) + (yDiff * yDiff));
}
int main( int argc, char** argv )
{
Mat src,gray;
src = imread(argv[1]);
if(src.empty())
return -1;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray < 200;
vector<vector<Point> > contours;
findContours(gray.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
RotatedRect _minAreaRect;
for (size_t i = 0; i < contours.size(); ++i)
{
double contour_area = contourArea(contours[i]);
_minAreaRect = minAreaRect( Mat(contours[i]) );
Point2f pts[4];
_minAreaRect.points(pts);
double dist0 = distanceBtwPoints(pts[0], pts[1]);
double dist1 = distanceBtwPoints(pts[1], pts[2]);
double angle = 0;
// note: as posted, the second assignment below always overwrites the first;
// uncomment the two conditions to take the angle of the longer rect side only
//if( dist0 > dist1 * 1.2 )
angle = atan2( pts[0].y - pts[1].y, pts[0].x - pts[1].x ) * 180.0 / CV_PI;
//if( dist1 > dist0 * 1.2 )
angle = atan2( pts[1].y - pts[2].y, pts[1].x - pts[2].x ) * 180.0 / CV_PI;
if( fabs(angle) > 91 ) // you can try different values
{
if( contour_area < dist0 * dist1 /2 ) // you can try different values
{
//drawContours(src,contours,i,Scalar(0,0,0),-1); // try to uncomment this line
for( int j = 0; j < 4; j++ )
line(src, pts[j], pts[(j+1)%4], Scalar(0, 0, 255), 1, LINE_AA);
}
}
}
imshow("result", src);
waitKey(0);
return 0;
}
I don't think there is an easy solution here, unfortunately.
What you might want to try is to detect and label each image component. From there you need to decide which sets of pixels look like a crescent and which do not: since a crescent can be described by a polynomial equation, you only need to fit a mathematical equation to each component (i.e. a set of points), using a regression method such as RANSAC, and check whether it matches a crescent equation.
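A minimal sketch of that labeling step, using connectedComponentsWithStats (available since OpenCV 3.0; here binary is assumed to be your thresholded CV_8UC1 image):
Mat labels, stats, centroids;
int nLabels = connectedComponentsWithStats( binary, labels, stats, centroids );
for( int l = 1; l < nLabels; ++l ) // label 0 is the background
{
    int area = stats.at<int>( l, CC_STAT_AREA );
    Mat component = ( labels == l ); // mask of this component's pixels
    // fit your candidate crescent equation (e.g. via RANSAC) to the
    // points of this component and keep or discard it accordingly
}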
I am trying to detect adjacent circles in an image; there can be either 4 or 5 of them. Is there any way to detect them in OpenCV? I tried many approaches, including the Hough circles method, but I detect extra circles too, and when I do manage to detect the circles, the same parameters won't work with other images.
Please let me know of anything possible to achieve this.
My code using Hough Circles is:
Mat img, gray;
img = imread("/Users/Development/Desktop/Images/IMG_0297.jpg");
cvtColor(img, gray, CV_BGR2GRAY);
// smooth it, otherwise a lot of false circles may be detected
GaussianBlur( gray, gray, Size(9, 9), 2, 2 );
vector<Vec3f> circles;
HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, gray.rows/16,80,100,30,50 );
for( size_t i = 0; i < circles.size(); i++ )
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// draw the circle center
circle( img, center, 3, Scalar(0,255,0), -1, 8, 0 );
// draw the circle outline
circle( img, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
namedWindow( "circles", 1 );
imshow( "circles", img );
waitKey(0);
return 0;
A sample image is:
and I want to detect the dials in it that are adjacent to each other.
You can use partition to cluster adjacent circles, i.e. circles whose center distance is close to the sum of their radii. You just need to define the appropriate equivalence predicate, here implemented in CirclesOnSameLine. You could eventually improve this predicate to consider as equal only circles that have a similar radius.
The result of this clustering is something like (same color means same cluster):
With this approach you can detect circles more safely, since you can then remove circles that don't belong to clusters of at least 4-5 circles.
Code:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
struct CirclesOnSameLine
{
float _tolerance;
CirclesOnSameLine(float tolerance) : _tolerance(tolerance) {};
bool operator()(const Vec3f& lhs, const Vec3f& rhs)
{
// [0] = x
// [1] = y
// [2] = radius
float center_distance = sqrt((lhs[0] - rhs[0])*(lhs[0] - rhs[0]) + (lhs[1] - rhs[1])*(lhs[1] - rhs[1]));
float sum_radii = lhs[2] + rhs[2];
if (sum_radii > center_distance)
{
return (sum_radii / center_distance) < _tolerance;
}
return (center_distance / sum_radii) < _tolerance;
}
};
int main()
{
Mat3b img = imread("path_to_image");
Mat1b gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, gray, Size(9, 9), 2, 2);
vector<Vec3f> circles;
HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, gray.rows / 16, 80, 100, 10, 100);
// Cluster circles near each other
vector<int> labels;
int n_labels = partition(circles, labels, CirclesOnSameLine(1.1f));
vector<Scalar> colors;
for (int i = 0; i < n_labels; ++i)
{
Scalar color(rand() & 255, rand() & 255, rand() & 255);
colors.push_back(color);
}
Mat3b adjacent = img.clone();
for (size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// draw the circle outline
circle(adjacent, center, radius, colors[labels[i]], 3, 8, 0);
}
// Remove small clusters
vector<int> count(labels.size(), 0);
for (size_t i = 0; i < labels.size(); ++i)
{
count[labels[i]]++;
}
Mat3b big_clusters = img.clone();
for (size_t i = 0; i < circles.size(); i++)
{
if (count[labels[i]] < 4) continue;
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// draw the circle outline
circle(big_clusters, center, radius, Scalar(0, 0, 255), 3, 8, 0);
}
imshow("Adjacent circles", adjacent);
imshow("Adjacent circles", big_clusters);
waitKey();
return 0;
}
You could chamfer match the circles, for example, and then check whether a circle shares an edge with, or is close to, another circle, using edge detection and a scan of the image to see whether the circles are close enough to be adjacent.
With this specific image you could probably use k-means and connected components, then chamfer match circles and see whether a connected component is made up of multiple circles.
I have got a vector of Mat files and I want to calculate the correlation between them, so as to keep the two Mats which are theoretically similar. Actually this vector stores eyes detected from images, so I am trying to delete outliers. How is it possible to calculate the correlation between two Mat files?
EDIT:
Mat Detection::hist_calculation(Mat image){
// Establish the number of bins
int histSize = 256;
// Set the ranges
float range[] = { 0, 256 } ;
const float* histRange = { range };
bool uniform = true; bool accumulate = false;
Mat hist;
// Compute the histograms:
calcHist( &image, 1, 0, Mat(), hist, 1, &histSize, &histRange, uniform, accumulate );
// Draw the histograms for B, G and R
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ) ,
Scalar( 255, 0, 0), 2, 8, 0 );
}
//// Display
//namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
//imshow("calcHist Demo", histImage );
//waitKey(0);
return hist;
}
double Detection::cvMatHistCorrelation(Mat file1, Mat file2) {
cvtColor(file1, file1, CV_BGR2GRAY); cvtColor(file2, file2, CV_BGR2GRAY);
Mat hist1 = hist_calculation(file1);
Mat hist2 = hist_calculation(file2);
double autoCorrelation1 = compareHist( hist1, hist1, CV_COMP_BHATTACHARYYA );
double autoCorrelation2 = compareHist( hist2, hist2, CV_COMP_BHATTACHARYYA );
// note: Bhattacharyya is a distance, so 0 means identical histograms
double correlation = compareHist( hist1, hist2, CV_COMP_BHATTACHARYYA );
cout << "autocorrelation of hist1: " << autoCorrelation1 << endl;
cout << "autocorrelation of hist2: " << autoCorrelation2 << endl;
cout << "correlation between hist1 and hist2: " << correlation << endl;
return correlation;
}
I think it works fine.
It's better to compute the correlation of feature vectors of the two Mat files instead of working on the Mat data directly.
For example, you can first compute an RGB/HSV color histogram (a 24-d vector if you use 8 bins per channel) for each Mat and then compute the correlation of the two histogram vectors.
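A minimal sketch of that idea (the helper name colorHistFeature and the choice of CV_COMP_CORREL are mine, not from the original post): build an 8-bins-per-channel BGR histogram, stack it into a 24-d feature, and compare with compareHist.
#include <opencv2/opencv.hpp>
using namespace cv;

static Mat colorHistFeature( const Mat& bgr )
{
    int histSize = 8;                 // 8 bins per channel -> 24-d feature
    float range[] = { 0, 256 };
    const float* ranges[] = { range };
    Mat feature;
    for( int c = 0; c < 3; ++c )      // B, G, R
    {
        Mat hist;
        calcHist( &bgr, 1, &c, Mat(), hist, 1, &histSize, ranges );
        feature.push_back( hist );    // stack the per-channel histograms
    }
    normalize( feature, feature, 1, 0, NORM_L1 ); // make it scale-invariant
    return feature;
}

// usage: values near 1 mean the two Mats are similar
// double corr = compareHist( colorHistFeature( eye1 ), colorHistFeature( eye2 ), CV_COMP_CORREL );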