How to "undistort" a crop of an image in OpenCV? - C++

I am working with an IDS camera with a wide fish-eye lens. It is a 2D camera, but I need only the line in the middle of the frame. I have worked out how to calibrate the camera: I can get undistorted images.
My problem is that I need fast computation, so I am trying to find a way to compute only a crop instead of undistorting the whole picture and then cropping.
I have tried this, but it did not help me.
Is there any way to compute just a crop (1280x6) instead of the whole picture (1280x960)?
int main ()
{
    timespec time1, time2, timeAfterS, timeStart, execTime;
    Mat intrinsic = Mat (3, 3, CV_32FC1);
    Mat newCamMat = Mat (3, 3, CV_32FC1);
    Mat distCoeffs;
    FileStorage fs ("cameraData.xml", FileStorage::READ);
    fs["intrinsic"] >> intrinsic;
    fs["distCoeffs"] >> distCoeffs;
    fs.release ();
    VideoCapture capture = VideoCapture (0);
    Mat image;
    Mat imageUndistorted;
    int l = 0;
    clock_gettime (CLOCK_REALTIME, &timeStart);
    // For example, I need to compute just the following crop of the image
    // |
    // V
    Rect recT (10, 10, 20, 20);
    while (l != 27)
    {
        capture >> image;
        newCamMat = getOptimalNewCameraMatrix (intrinsic, distCoeffs,
                                               image.size (), 0.5, image.size (), &recT, false);
        newCamMat.at<double> (0, 2) = 0;
        newCamMat.at<double> (1, 2) = 20;
        undistort (image, imageUndistorted, intrinsic,
                   distCoeffs, newCamMat);
        imshow ("win1", image);
        imshow ("win2", imageUndistorted);
        l = waitKey (1);
    }
    capture.release ();
    return 0;
}
Thank you in advance,
Anton
Update:
I think I have found a solution, but I have not yet had a chance to work it out properly. It seems to be working. The idea: in my camera matrix, cx and cy are the coordinates of the center of the lens. For example, it lies in the middle of my ROI (let it be 200,200), and my ROI is (0,180,400,220). I cut the ROI out and first shift the camera center to the new center: since the ROI is now (0,0,400,40), cx stays 200 but cy has to be shifted to 20. Then I change the size in initUndistortRectifyMap according to my crop size, and after that I can pass the cropped picture directly to remap. Code follows below.
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>
#include <ctime>
#include <time.h>
#include <cmath>
using namespace cv;
using namespace std;

timespec diff (timespec start, timespec end)
{
    timespec temp;
    if ((end.tv_nsec - start.tv_nsec) < 0)
    {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
    }
    else
    {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp;
}

int main ()
{
    timespec time1, time2, timeAfterS, timeStart, execTime;
    Mat intrinsic = Mat (3, 3, CV_32FC1);
    Mat newCamMat = Mat (3, 3, CV_32FC1);
    Mat distCoeffs;
    FileStorage fs ("cameraData.xml", FileStorage::READ);
    fs["intrinsic"] >> intrinsic;
    fs["distCoeffs"] >> distCoeffs;
    fs.release ();
    intrinsic.copyTo (newCamMat);
    VideoCapture capture = VideoCapture (0);
    capture.set (CAP_PROP_FPS, 90);
    Mat image;
    Mat imageUndistorted;
    Mat imageUndistorted1;
    int l = 0;
    capture >> image;
    clock_gettime (CLOCK_REALTIME, &timeStart);
    Rect recT (10, 10, 20, 20);
    Rect roi (0, 400, 640, 30);  // region of interest
    Rect roi1 (0, 360, 640, 60); // to show the borders of the ROI
    Mat view, rview, map1, map2, map3, map4, crop2;
    Mat crop = Mat (30, 640, CV_8UC3); // here I define the size of the crop
    intrinsic.at<double> (1, 2) = intrinsic.at<double> (1, 2) - 400; // here I shift cy to the new middle of the freshly cut ROI
    initUndistortRectifyMap (intrinsic, distCoeffs, Mat (), intrinsic, crop.size (), CV_16SC2, map1, map2); // here I build the undistortion maps for the crop size
    while (l != 27)
    {
        capture >> image;
        rectangle (image, recT, Scalar (255, 0, 0), 2, 8, 0);
        rectangle (image, roi, Scalar (255, 0, 0), 2, 8, 0);
        clock_gettime (CLOCK_REALTIME, &time1);
        remap (image (roi), imageUndistorted, map1, map2, INTER_LINEAR);
        initUndistortRectifyMap (newCamMat, distCoeffs, Mat (),
                                 newCamMat, image.size (), CV_16SC2, map3, map4); // to compare with the uncropped picture
        remap (image, imageUndistorted1, map3, map4, INTER_LINEAR);
        clock_gettime (CLOCK_REALTIME, &time2);
        execTime = diff (time1, time2);
        cout << " FPS: " << 1000000000. / execTime.tv_nsec << " per second, " << endl;
        imshow ("image(roi)", image (roi1));
        imshow ("win1", image);
        imshow ("win2", imageUndistorted);
        imshow ("win3", imageUndistorted1);
        l = waitKey (1);
    }
    capture.release ();
    return 0;
}

Related

Basic Shape recognition (openCV C++)

I have a project that I need to make for classes, and I chose a task that is a bit beyond my skills.
The target is to count the result of dice rolls.
For now, I'm trying to make it work on a sample picture of dice, and my current code is added below:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int getMaxAreaContourId(vector <vector<cv::Point>> contours);
vector<Point> contoursConvexHull(vector<vector<Point> > contours, int index);
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int main(int argc, char** argv)
{
Mat image;
image = imread("kostki.jpg", CV_LOAD_IMAGE_COLOR);
if (!image.data)
{
cout << "Could not open or find the image" << std::endl;
return -1;
}
Mat imgHSV;
Mat workimage = image;
cvtColor(workimage, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
//red dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(146, 0, 31), Scalar(179, 255, 255));
//green dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(25, 147, 0), Scalar(98, 255, 154));
//yellow dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(22, 45, 161), Scalar(91, 255, 255));
//black dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(98, 0, 0), Scalar(179, 232, 107));
//white symbols
workimage = ZnakiFunkcja(workimage, imgHSV, Scalar(58, 0, 183), Scalar(179, 145, 255));
namedWindow("Kostki_kontur", CV_WINDOW_AUTOSIZE);
imshow("Kostki_kontur", workimage);
waitKey(0);
return 0;
}
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
    Mat temp;
    inRange(in, low, high, temp);
    erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    Mat srcBlur, srcCanny;
    blur(temp, srcBlur, Size(3, 3));
    Canny(srcBlur, srcCanny, 0, 100, 3, true);
    vector<vector<Point>> contours;
    findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    int largest_contour_index = getMaxAreaContourId(contours);
    Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
    for (int i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(255, 255, 255);
        drawContours(drawing, contours, i, color, 2);
    }
    vector<Point> ConvexHullPoints = contoursConvexHull(contours, largest_contour_index);
    polylines(image, ConvexHullPoints, true, Scalar(0, 0, 255), 2);
    return image;
}

vector<Point> contoursConvexHull(vector<vector<Point>> contours, int index)
{
    vector<Point> result;
    vector<Point> pts;
    for (size_t j = 0; j < contours[index].size(); j++)
        pts.push_back(contours[index][j]);
    convexHull(pts, result);
    return result;
}
int getMaxAreaContourId(vector<vector<cv::Point>> contours)
{
    double maxArea = 0;
    int maxAreaContourId = -1;
    for (int j = 0; j < contours.size(); j++) {
        double newArea = cv::contourArea(contours.at(j));
        if (newArea > maxArea) {
            maxArea = newArea;
            maxAreaContourId = j;
        }
    }
    // return only after all contours have been examined
    return maxAreaContourId;
}
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
    Mat temp;
    inRange(in, low, high, temp);
    erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
    Mat srcBlur, srcCanny;
    blur(temp, srcBlur, Size(3, 3));
    Canny(srcBlur, srcCanny, 0, 100, 3, true);
    vector<vector<Point>> contours;
    findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
    for (int i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(255, 255, 255);
        drawContours(drawing, contours, i, color, 2);
        polylines(image, contours, true, Scalar(0, 0, 255), 2);
    }
    // return only after all contours have been drawn, not inside the loop
    return image;
}
Yet I have no idea how to count the different shapes (hearts, lightnings, shields, numbers).
I will be grateful if anybody could give me a tip or a solution for how to do the job.
1) sorry for my bad English
2) we had no OpenCV in classes [only basic C++]
3) I tried to find anything useful on the internet, but even when I found something, I couldn't understand what was happening in the code
Your project can be split into three steps:
find the dice.
extract the shapes from the visible face of the dice.
count the faces.
For the first step, among all the possible approaches I think saliency map approaches can help.
Saliency maps are a family of segmentation algorithms that aim to detect the parts of the image which are most likely to attract visual attention.
OpenCV has a saliency API that already implements several saliency algorithms, and for each of them you can get a segmentation map.
Considering the example image you gave, it is highly likely that the saliency will focus on the dice.
From this you can extract the dice as ROIs from the original image.
For step 2) saliency algorithms may also fit... or not; that depends a lot on the statistical criteria used by the algorithm.
However, the previously extracted ROIs should only contain the face of the die with the shapes you want to count in step 3), so approaches based on contour detection may give quite good results.
Once you have the shapes, among the ways to count each shape you can use templateMatching (which is also already implemented in OpenCV), a clustering approach based on a shape-sensitive metric (Hausdorff, Dice, ...), or many others; a minimal matchTemplate sketch for step 3) follows after the code below.
Here is a code that can help you deal with the first two steps.
#ifndef _DEBUG
#define _DEBUG
#endif
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/saliency.hpp>
#include <opencv2/highgui.hpp>
#include <list>

CV_EXPORTS_W void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays mv, cv::OutputArrayOfArrays mv2 = cv::noArray());

int main()
{
    cv::Mat tmp = cv::imread("C:\\Desktop\\dices.jpg"); // backslashes must be escaped in string literals
    if(!tmp.empty())
    {
        cv::imshow("source",tmp);
        cv::waitKey(-1);
    }
    std::vector<cv::Mat> rois;
    get_regions_of_interest(tmp,rois);
    std::cout << "Hello World!" << std::endl;
    return 0;
}

void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays _rois, cv::OutputArrayOfArrays _contours)
{
    // Check that the first argument is an image and the second a vector of images.
    CV_Assert(_src.isMat() && !_src.depth() && (_src.channels() == 1 || _src.channels() == 3) && _rois.isMatVector() && (!_contours.needed() || (_contours.needed() && _contours.isMatVector()) ) );

    static cv::Ptr<cv::saliency::StaticSaliencySpectralResidual> saliency;
    if(!saliency)
        saliency = cv::saliency::StaticSaliencySpectralResidual::create();

    cv::Mat src = _src.getMat();
    cv::Mat gray;
    if(src.depth() == src.type()) // a single-channel input is already gray
        gray = src;
    else
        cv::cvtColor(src,gray,cv::COLOR_BGR2GRAY);

    bool is_ctr_needed = _contours.needed();
    std::list<cv::Mat> final_ctrs;

    // Step 1) Process the saliency in order to segment the dice.
    cv::Mat saliency_map;
    cv::Mat binary_map;
    saliency->computeSaliency(src,saliency_map);
    saliency->computeBinaryMap(saliency_map,binary_map);
    saliency_map.release();

    // Step 2) From the binary map get the regions of interest.
    cv::Mat1i stats;
    std::vector<cv::Mat> rois;
    cv::Mat labels;
    cv::Mat centroids;
    cv::connectedComponentsWithStats(binary_map, labels, stats, centroids);
    labels.release();
    centroids.release();
    // Prepare the memory.
    rois.reserve(stats.rows-1);
    // Drop the area column so each row is exactly an (x, y, width, height) rectangle.
    stats = stats.colRange(0,stats.cols-1);
    // Extract the rois.
    for(int i=0;i<stats.rows;i++)
    {
        cv::Rect roi = *reinterpret_cast<cv::Rect*>(stats.ptr<int>(i));
        // Skip the background component, which spans the whole image.
        if(static_cast<std::size_t>(roi.area()) == gray.total())
            continue;
        rois.push_back(gray(roi));
#ifdef _DEBUG
        cv::imshow("roi_"+std::to_string(i),gray(roi));
#endif
    }

    // Step 3) Refine.
    // Because the final number of shapes cannot be determined in advance, it is better to use a linked list than a vector.
    // In practice, unless there is a huge number of elements to work with, the performance will be almost the same.
    std::list<cv::Mat> shapes;
    int cnt=0;
    for(const cv::Mat& roi : rois)
    {
        cv::Mat tmp = roi.clone();
        // Slightly sharpen the region contours.
        cv::morphologyEx(tmp,tmp, cv::MORPH_CLOSE, cv::noArray());
        // Reduce the influence of locally inhomogeneous illumination.
        cv::GaussianBlur(tmp,tmp,cv::Size(31,31), 5);
        cv::Mat thresh;
        // Binarize the image.
        cv::threshold(roi,thresh,0.,255.,cv::THRESH_BINARY | cv::THRESH_OTSU);
#ifdef _DEBUG
        cv::imshow("thresh"+std::to_string(cnt++),thresh);
#endif
        // Find the contours of each sub-region of interest.
        std::vector<cv::Mat> contours;
        cv::findContours(thresh, contours, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
        cv::Mat dc;
        cv::merge(std::vector<cv::Mat>(3,thresh),dc);
        // cv::drawContours(dc, contours,-1,cv::Scalar(0.,0.,255),2);
        // cv::imshow("ctrs"+std::to_string(cnt),dc);
        // Extract the sub-regions.
        if(is_ctr_needed)
        {
            for(const cv::Mat& ctrs: contours)
            {
                cv::Rect croi = cv::boundingRect(ctrs);
                // If the sub-region is too big or too small it is discarded.
                if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
                    continue;
                final_ctrs.push_back(ctrs);
                shapes.push_back(roi(croi));
#ifdef _DEBUG
                cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
                cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
            }
        }
        else
        {
            for(const cv::Mat& ctrs: contours)
            {
                cv::Rect croi = cv::boundingRect(ctrs);
                // If the sub-region is too big or too small it is discarded.
                if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
                    continue;
                shapes.push_back(roi(croi));
#ifdef _DEBUG
                cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
                cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
            }
        }
    }
#ifdef _DEBUG
    cv::waitKey(-1);
#endif
    // Final step: set the output.
    _rois.create(shapes.size(),1,CV_8U);
    _rois.assign(std::vector<cv::Mat>(shapes.begin(),shapes.end()));
    if(is_ctr_needed)
    {
        _contours.create(final_ctrs.size(),1,CV_32SC2);
        _contours.assign(std::vector<cv::Mat>(final_ctrs.begin(), final_ctrs.end()));
    }
}
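For step 3), here is a minimal matchTemplate counting sketch, separate from the code above. The file names are hypothetical (one saved ROI and a cropped template of a single shape), and the 0.8 threshold is an assumption you would tune per template.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main()
{
    // Hypothetical inputs: one extracted dice-face ROI and a cropped template
    // of the shape to count (e.g. a heart).
    cv::Mat roi = cv::imread("roi.png", cv::IMREAD_GRAYSCALE);
    cv::Mat shape = cv::imread("heart_template.png", cv::IMREAD_GRAYSCALE);
    if (roi.empty() || shape.empty())
        return -1;
    // Score every placement of the template inside the ROI.
    cv::Mat scores;
    cv::matchTemplate(roi, shape, scores, cv::TM_CCOEFF_NORMED);
    // Greedily pick peaks above the threshold, suppressing each match once counted.
    const double thresh = 0.8; // assumption: tune per template
    int count = 0;
    for (;;)
    {
        double maxVal;
        cv::Point maxLoc;
        cv::minMaxLoc(scores, nullptr, &maxVal, nullptr, &maxLoc);
        if (maxVal < thresh)
            break;
        ++count;
        // Zero out the neighbourhood of the match so it is not counted twice.
        cv::Rect sup(maxLoc.x - shape.cols / 2, maxLoc.y - shape.rows / 2,
                     shape.cols, shape.rows);
        sup &= cv::Rect(0, 0, scores.cols, scores.rows);
        scores(sup).setTo(0);
    }
    std::cout << count << " match(es) for this template" << std::endl;
    return 0;
}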

OpenCV - warpPerspective

I'm trying to use the function "warpPerspective" with OpenCV 3.0. I'm using this example:
http://answers.opencv.org/question/98110/how-do-i-stitch-images-with-two-different-angles/
I have to create an ROI on the right side of the first image and another one on the left side of the second image, use ORB to extract and compute descriptors, and match them. I didn't change much of the original code, just the ROI.
The problem is that every image whose perspective I try to warp comes out like this:
I already tried with multiple pairs of images and the problem persists.
#include "opencv2/opencv.hpp"
#include <iostream>
#include <fstream>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
Mat img1 = imread("image2.jpg");
Mat img2 = imread("image1.jpg");
namedWindow("I2", WINDOW_NORMAL); namedWindow("I1", WINDOW_NORMAL);
Ptr<ORB> o1 = ORB::create();
Ptr<ORB> o2 = ORB::create();
vector<KeyPoint> pts1, pts2;
Mat desc1, desc2;
vector<DMatch> matches;
Size s = img1.size();
Size s2 = img2.size();
Rect r1(s.width - 200, 0, 200, s.height);
//rectangle(img1, r1, Scalar(255, 0, 0), 5);
Rect r2(0, 0, 200, s2.height);
//rectangle(img2, r2, Scalar(255, 0, 0), 5);
Mat mask1 = Mat::zeros(img1.size(), CV_8UC1);
Mat mask2 = Mat::zeros(img1.size(), CV_8UC1);
mask1(r1) = 1;
mask2(r2) = 1;
o1->detectAndCompute(img1, mask1, pts1, desc1);
o2->detectAndCompute(img2, mask2, pts2, desc2);
BFMatcher descriptorMatcher(NORM_HAMMING, true);
descriptorMatcher.match(desc1, desc2, matches, Mat());
// Keep best matches only to have a nice drawing.
// We sort distance between descriptor matches
Mat index;
int nbMatch = int(matches.size());
Mat tab(nbMatch, 1, CV_32F);
for (int i = 0; i<nbMatch / 2; i++)
{
tab.at<float>(i, 0) = matches[i].distance;
}
sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
vector<DMatch> bestMatches;
vector<Point2f> src, dst;
for (int i = 0; i < nbMatch / 2; i++)
{
int j = index.at<int>(i, 0);
cout << pts1[matches[j].queryIdx].pt << "\t" << pts2[matches[j].trainIdx].pt << "\n";
src.push_back(pts1[matches[j].queryIdx].pt + Point2f(0, img1.rows)); // necessary offset
dst.push_back(pts2[matches[j].trainIdx].pt);
}
cout << "\n";
Mat h = findHomography(src, dst, RANSAC);
Mat result;
cout << h << endl;
warpPerspective(img2, result, h.inv(), Size(3 * img2.cols + img1.cols, 2 * img2.rows + img1.rows));
imshow("I1", img1);
imshow("I2", img2);
Mat roi1(result, Rect(0, img1.rows, img1.cols, img1.rows));
img1.copyTo(roi1);
namedWindow("I3", WINDOW_NORMAL);
imshow("I3", result);
imwrite("result.jpg", result);
waitKey();
return 0;
Does that come from bad matches? Am I missing something? Since I'm kind of new to this topic, any help or ideas would be really appreciated.
Here are the quick things you need to check when your warp perspective is not working:
Did you select the right points in both images?
Reason: you need to choose exactly the same points that correspond to each other when finding a perspective transform. Unrelated points ruin it.
Are your points in the right order in the array?
R: you need to put them in the right corresponding order in both the source and destination before passing them to findHomography.
Are you passing them in the right order to findHomography? Try switching in case you are not sure, so that it doesn't reverse-warp it (see the sketch after this list).
Those are the mistakes I made when I first used it. Now if you look at your images, there's a little part overlapping in both of them. You need to be more careful there. Your rect mask might be at fault.
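To make the third check concrete, here is a minimal sketch of the ordering convention (the function name and output canvas size are illustrative, not from the question):
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

// findHomography(src, dst) estimates H such that dst ~ H * src, and
// warpPerspective pushes the *source* image through H. So to bring img2
// into img1's frame, the points detected in img2 must be passed first;
// swapping them is exactly the "reverse warp" symptom from the third check.
cv::Mat warpSecondOntoFirst(const cv::Mat& img1, const cv::Mat& img2,
                            const std::vector<cv::Point2f>& ptsInImg2,
                            const std::vector<cv::Point2f>& ptsInImg1)
{
    cv::Mat H = cv::findHomography(ptsInImg2, ptsInImg1, cv::RANSAC);
    cv::Mat result;
    cv::warpPerspective(img2, result, H,
                        cv::Size(img1.cols + img2.cols, img1.rows + img2.rows));
    return result;
}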

opencv detects multiple small circles, but not the larger one

I am very new to OpenCV and I am trying to detect just the penny in the image, but I am getting a bunch of smaller circles. Can someone tell me what I'm doing wrong?
Code from here: https://github.com/opencv/opencv/blob/master/samples/cpp/houghcircles.cpp
The only things I changed were to make the min circle radius 400 and the max 0, because I know the image will be 600x480, so the penny circle must be at least 400.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void help()
{
cout << "\nThis program demonstrates circle finding with the Hough transform.\n"
"Usage:\n"
"./houghcircles <image_name>, Default is ../data/board.jpg\n" << endl;
}
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{help h ||}{#image|../data/board.jpg|}"
);
if (parser.has("help"))
{
help();
return 0;
}
//![load]
string filename = parser.get<string>("#image");
Mat img = imread(filename, IMREAD_COLOR);
if(img.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
//![load]
//![convert_to_gray]
Mat gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
//![convert_to_gray]
//![reduce_noise]
medianBlur(gray, gray, 5);
//![reduce_noise]
//![houghcircles]
vector<Vec3f> circles;
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
gray.rows/16, // change this value to detect circles with different distances to each other
100, 30, 400,0 // change the last two parameters
// (min_radius & max_radius) to detect larger circles
);
//![houghcircles]
//![draw]
for( size_t i = 0; i < circles.size(); i++ )
{
Vec3i c = circles[i];
circle( img, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
circle( img, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
}
//![draw]
//![display]
imshow("detected circles", img);
waitKey();
//![display]
return 0;
}
You've got radius and diameter mixed up. Your minimum radius cannot be 400 if your image is only 600x480. Set your min_radius to 200.
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
             max(gray.cols, gray.rows), // minDist large enough to find only the biggest circle
             100, 100, 0, 0 // leave the other params as default
);
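Alternatively, keeping the sample's minDist, here is a sketch of the min_radius suggestion above (a ~400 px-wide penny means a ~200 px radius; the 260 upper bound is an assumption):
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
             gray.rows/16,
             100, 30, 200, 260 // min_radius = 200 (radius, not diameter); max_radius is a loose assumed bound
);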

OpenCV--how to get a better hand contour from a low-quality gray image?

I need to get the contour from a hand image. Usually I process the image in 4 steps:
take the raw RGB image from 3 channels down to 1 gray channel:
cvtColor(sourceGrayImage, sourceGrayImage, COLOR_BGR2GRAY);
use Gaussian blur to filter the gray image:
GaussianBlur(sourceGrayImage, sourceGrayImage, Size(3,3), 0);
binarize the gray image; I split the image by height, normally into 6 pieces, and threshold each one:
// We split the source picture into binaryImageSectionCount (here it's 8) pieces by its height,
// then we threshold every piece,
// and at last we combine them again into binaryImage.
const int binaryImageSectionCount = 8; // the type was missing in the original declaration
void GetBinaryImage(Mat &grayImage, Mat &binaryImage)
{
    // get every partial gray image's height
    int partImageHeight = grayImage.rows / binaryImageSectionCount;
    for (int i = 0; i < binaryImageSectionCount; i++)
    {
        Mat partialGrayImage;
        Mat partialBinaryImage;
        Rect partialRect;
        if (i != binaryImageSectionCount - 1)
        {
            // if it's not the last piece, the Rect's height should be partImageHeight
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, partImageHeight);
        }
        else
        {
            // if it's the last piece, the Rect's height should be (grayImage.rows - i * partImageHeight)
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, grayImage.rows - i * partImageHeight);
        }
        Mat partialResource = grayImage(partialRect);
        partialResource.copyTo(partialGrayImage);
        threshold( partialGrayImage, partialBinaryImage, 0, 255, THRESH_OTSU);
        // combine the partial binary images into one piece
        partialBinaryImage.copyTo(binaryImage(partialRect));
        ///*stringstream resultStrm;
        //resultStrm << "partial_" << (i + 1);
        //string string = resultStrm.str();
        //imshow(string, partialBinaryImage);
        //waitKey(0);*/
    }
    imshow("result binary image.", binaryImage);
    waitKey(0);
    return;
}
use findContours to get the biggest-area contour:
vector<vector<Point> > contours;
findContours(binaryImage, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Normally it works well, but for some low-quality gray images it doesn't, like the one below:
The complete code is here:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;

// We split the source picture into binaryImageSectionCount (here it's 8) pieces by its height,
// then we threshold every piece,
// and at last we combine them again into binaryImage.
const int binaryImageSectionCount = 8; // the type was missing in the original declaration
void GetBinaryImage(Mat &grayImage, Mat &binaryImage)
{
    // get every partial gray image's height
    int partImageHeight = grayImage.rows / binaryImageSectionCount;
    for (int i = 0; i < binaryImageSectionCount; i++)
    {
        Mat partialGrayImage;
        Mat partialBinaryImage;
        Rect partialRect;
        if (i != binaryImageSectionCount - 1)
        {
            // if it's not the last piece, the Rect's height should be partImageHeight
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, partImageHeight);
        }
        else
        {
            // if it's the last piece, the Rect's height should be (grayImage.rows - i * partImageHeight)
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, grayImage.rows - i * partImageHeight);
        }
        Mat partialResource = grayImage(partialRect);
        partialResource.copyTo(partialGrayImage);
        threshold( partialGrayImage, partialBinaryImage, 0, 255, THRESH_OTSU);
        // combine the partial binary images into one piece
        partialBinaryImage.copyTo(binaryImage(partialRect));
        ///*stringstream resultStrm;
        //resultStrm << "partial_" << (i + 1);
        //string string = resultStrm.str();
        //imshow(string, partialBinaryImage);
        //waitKey(0);*/
    }
    imshow("result binary image.", binaryImage);
    waitKey(0);
    return;
}
int main(int argc, _TCHAR* argv[])
{
    // get the image path
    string imgPath("C:\\Users\\Alfred\\Desktop\\gray.bmp");
    // read the image
    Mat src = imread(imgPath);
    imshow("Source", src);
    //medianBlur(src, src, 7);
    cvtColor(src, src, COLOR_BGR2GRAY);
    imshow("gray", src);
    // do filter
    GaussianBlur(src, src, Size(3,3), 0);
    // binary image
    Mat threshold_output(src.rows, src.cols, CV_8UC1, Scalar(0, 0, 0));
    GetBinaryImage(src, threshold_output);
    imshow("binaryImage", threshold_output);
    // get the biggest contour
    vector<vector<Point> > contours;
    findContours(threshold_output, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    int biggestContourIndex = 0;
    double maxContourArea = -1000; // contourArea returns a double
    for (int i = 0; i < contours.size(); i++)
    {
        if (contourArea(contours[i]) > maxContourArea)
        {
            maxContourArea = contourArea(contours[i]);
            biggestContourIndex = i;
        }
    }
    // show the biggest contour
    Mat biggestContour(threshold_output.rows, threshold_output.cols, CV_8UC1, Scalar(0, 0, 0));
    drawContours(biggestContour, contours, biggestContourIndex, cv::Scalar(255,255,255), 2, 8, vector<Vec4i>(), 0, Point());
    imshow("maxContour", biggestContour);
    waitKey(0);
}
Could anybody please help me get a better hand contour result?
Thanks!!!
I have a code snippet in Python; you can follow the same approach in C++:
import cv2

img = cv2.imread(x, 1)  # x is the path to the image
cv2.imshow("img", img)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray", imgray)
# Code for histogram equalization
equ = cv2.equalizeHist(imgray)
cv2.imshow('equ', equ)
# Code for contrast limited adaptive histogram equalization
#clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
#cl2 = clahe.apply(imgray)
#cv2.imshow('clahe2', cl2)
This is the result I obtained:
If your image is horribly bad, you could try the code that I commented out, involving contrast limited adaptive histogram equalization.
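For reference, here is a minimal C++ equivalent of the snippet above (the input path is hypothetical; the CLAHE parameters mirror the commented Python values):
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::Mat img = cv::imread("hand_gray.bmp"); // hypothetical input path
    if (img.empty())
        return -1;
    cv::Mat imgray;
    cv::cvtColor(img, imgray, cv::COLOR_BGR2GRAY);
    // Global histogram equalization.
    cv::Mat equ;
    cv::equalizeHist(imgray, equ);
    cv::imshow("equ", equ);
    // Contrast limited adaptive histogram equalization.
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
    cv::Mat cl2;
    clahe->apply(imgray, cl2);
    cv::imshow("clahe2", cl2);
    cv::waitKey(0);
    return 0;
}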

detect and count faces in an image using OpenCV and C++

I am using OpenCV and C++, although I'm a beginner. I am trying to detect and count faces in a set of images using a Haar cascade.
I only want to get the number of faces in each image.
How can I edit this code to get the number of faces in an image?
// Function detectAndDisplay
void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;
    Mat crop;
    Mat res;
    Mat gray;
    string text;
    stringstream sstm;
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    // Set Region of Interest
    cv::Rect roi_b;
    cv::Rect roi_c;
    size_t ic = 0; // ic is the index of the current element
    int ac = 0;    // ac is the area of the current element
    size_t ib = 0; // ib is the index of the biggest element
    int ab = 0;    // ab is the area of the biggest element
    for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
    {
        roi_c.x = faces[ic].x;
        roi_c.y = faces[ic].y;
        roi_c.width = (faces[ic].width);
        roi_c.height = (faces[ic].height);
        ac = roi_c.width * roi_c.height; // Get the area of the current element (detected face)
        roi_b.x = faces[ib].x;
        roi_b.y = faces[ib].y;
        roi_b.width = (faces[ib].width);
        roi_b.height = (faces[ib].height);
        ab = roi_b.width * roi_b.height; // Get the area of the biggest element; at the beginning it is the same as the "current" element
        if (ac > ab)
        {
            ib = ic;
            roi_b.x = faces[ib].x;
            roi_b.y = faces[ib].y;
            roi_b.width = (faces[ib].width);
            roi_b.height = (faces[ib].height);
        }
        crop = frame(roi_b);
        resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
        cvtColor(crop, gray, CV_BGR2GRAY); // Convert the cropped image to grayscale
        // Form a filename
        filename = "";
        stringstream ssfn;
        ssfn << filenumber << ".png";
        filename = ssfn.str();
        filenumber++;
        imwrite(filename, gray);
        printf("%s\n", filename.c_str()); // print the filename's value, not the literal string
        Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on the main window - live stream from camera
        Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height)); // width goes with x, height with y
        rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
    }
    // Show image
    /*sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
    text = sstm.str();
    putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
    imshow("original", frame);
    if (!crop.empty())
    {
        imshow("detected", crop);
    }
    else
        destroyWindow("detected");*/
}
I modified your posted code sample to just return the number of detected faces in the image...
// Function to count the detected faces in your image
int countFacesInImage(Mat frame) // must return int, not void, to return the count
{
    std::vector<Rect> faces;
    Mat frame_gray;
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    return faces.size();
}
If you want to get an impression of which faces were detected and which weren't, you can add this code before the return:
cv::Mat tmpImage = frame.clone();
for(unsigned int i = 0; i < faces.size(); ++i)
{
    cv::rectangle(tmpImage, faces[i], cv::Scalar(0,255,0), 2);
}
cv::imshow("faces", tmpImage);
cv::waitKey(0);
After each image you have to press a key while the "faces" window is active. You can change it to cv::waitKey(n) to wait n milliseconds instead of needing to press a key.
I had to do something similar and used the example of a CascadeClassifier on the OpenCV website.
The rough steps to follow are:
Load all the images you want to process.
For each image, apply the CascadeClassifier as in the example; you will need to pass a std::vector<cv::Rect> as a parameter. After detection, this vector will contain the locations of all detected objects (in your case, faces).
For each image, return the size of the vector to know the number of faces that were detected, as sketched below.
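A minimal sketch of those steps (the cascade file name is the stock OpenCV one, and image paths are assumed to come from the command line):
#include <opencv2/objdetect.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
    cv::CascadeClassifier face_cascade;
    if (!face_cascade.load("haarcascade_frontalface_alt.xml")) // assumed cascade path
        return -1;
    // For each image given on the command line, detect and count the faces.
    for (int i = 1; i < argc; ++i)
    {
        cv::Mat img = cv::imread(argv[i]);
        if (img.empty())
            continue;
        cv::Mat gray;
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(gray, gray);
        std::vector<cv::Rect> faces;
        face_cascade.detectMultiScale(gray, faces, 1.1, 2,
                                      0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
        std::cout << argv[i] << ": " << faces.size() << " face(s)" << std::endl;
    }
    return 0;
}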
To be honest, the example I linked is something you could have found on your own without much effort.