Extract object by its centroid - C++

I have developed an application using OpenCV that detects circular shapes in a binary image and gets their centroids. I now want to delete the objects that have not been detected from my original image. So I want to know whether there is a way to extract an object from an image by its centroid.
Here is what I have got so far:
The result of the segmentation:
// Read image
cv::Mat im = cv::imread( "11002847.bmp", cv::IMREAD_GRAYSCALE );
bitwise_not(im, im);
cv::Mat im2;
im.copyTo(im2);
// Detect the contours to get all the object centroids
std::vector<std::vector<cv::Point> > contours;
vector<cv::Vec4i> hierarchy;
findContours( im2, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
vector<cv::Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{
mu[i] = moments( contours[i], false );
}
ofstream file;
file.open("log.txt");
/// Get the mass centers:
vector<cv::Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{
mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
file << mc[i].x << " "<< mc[i].y << endl;
}
file.close();
// Detect the circular objects and get their centroids
// Setup SimpleBlobDetector parameters.
cv::SimpleBlobDetector::Params params;
// Change thresholds
//params.minThreshold = 10;
//params.maxThreshold = 200;
// Filter by Area.
params.filterByArea = true;
params.minArea = 1;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = 0.5;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = 0.1;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = 0.0;
// Storage for blobs
vector<cv::KeyPoint> keypoints;
// Set up detector with params
cv::SimpleBlobDetector detector(params);
// Detect blobs
detector.detect( im, keypoints);
ofstream file2;
file2.open("log2.txt");
for(vector<cv::KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
{
cv::KeyPoint k = *it;
file2 << k.pt.x << " "<< k.pt.y << endl;
}
file2.close();
// Draw detected blobs as red circles.
// DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures
// the size of the circle corresponds to the size of blob
cv::Mat im_with_keypoints;
drawKeypoints( im, keypoints, im_with_keypoints, cv::Scalar(0,0,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
// Show blobs
cv::imwrite("keypoints.bmp", im_with_keypoints );

I hope I understood your problem, but just in case, I will rephrase it. You want to remove the contours from your top image that have not been detected as keypoints (i.e. don't have red circles in/around them) in the bottom image.
In order to do this, iterate over all your contours and check each contour for the presence of a KeyPoint using cv::pointPolygonTest:
auto end = contours.end();
for (auto contour_itr = contours.begin(); contour_itr != end; ++contour_itr) {
auto contour = *contour_itr;
auto keypoint_end = keypoints.end();
for (auto keypoint_itr = keypoints.begin(); keypoint_itr != keypoint_end; ++keypoint_itr) {
auto keypoint = *keypoint_itr;
if (cv::pointPolygonTest(contour, keypoint.pt, false) > 0) {
// do your thing here (e.g. store the contour into an array or paint it into a new image)
}
}
}
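For the "do your thing here" step, one option is to paint every matched contour into a mask and keep only those pixels. A minimal sketch, assuming the contours, keypoints and im variables from the code above:

cv::Mat mask = cv::Mat::zeros(im.size(), CV_8UC1);
for (size_t i = 0; i < contours.size(); ++i) {
    for (const cv::KeyPoint& kp : keypoints) {
        if (cv::pointPolygonTest(contours[i], kp.pt, false) > 0) {
            // thickness -1 fills the contour interior
            cv::drawContours(mask, contours, (int)i, cv::Scalar(255), -1);
            break; // one keypoint hit is enough for this contour
        }
    }
}
cv::Mat result = cv::Mat::zeros(im.size(), im.type());
im.copyTo(result, mask); // keep only the detected objects
cv::imwrite("filtered.bmp", result);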
Hope that helps

Related

Object tracking delay color tracking OpenCV

I am trying to detect colored balls, like PS3 Move controller balls, from a 2 m distance. I have 10 cameras in the same room hanging from the ceiling. The room is dark and the balls have LEDs inside. I have 4-5 balls (red, blue, green, yellow, pink). I want to track their positions with OpenCV. What's the right method for doing this in OpenCV? Can you give a link or example for this?
I use this code but I have a delay problem. When I comment out my trackFilteredObject line there is no lag, but when using this code I get a lot of latency. I can't understand why this happens, because my normal CPU usage is ~15% and RAM usage 6.3 GB/15 GB (~40%); when running this code, CPU usage is ~20-23% and RAM usage 6.4 GB. I don't think it's a CPU/RAM performance issue. What am I doing wrong?
Video: https://www.youtube.com/watch?v=_BKtJpPrkO4 (You can see the lag in the first 10 seconds. After 10 seconds I comment out the tracking code.)
Note: kameraSayisi means camera count. My track function:
void trackFilteredObject(Object theObject,Mat threshold,Mat HSV, Mat &cameraFeed){
//max number of objects to be detected in frame
const int FRAME_WIDTH = 5120;
const int FRAME_HEIGHT = 480;
const int MAX_NUM_OBJECTS=50;
//minimum and maximum object area
const int MIN_OBJECT_AREA = 10*10;
const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
vector <Object> objects;
Mat temp;
threshold.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
bool objectFound = false;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
if(numObjects<MAX_NUM_OBJECTS){
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
//if the area is less than 20 px by 20px then it is probably just noise
//if the area is the same as the 3/2 of the image size, probably just a bad filter
//we only want the object with the largest area so we save a reference area each
//iteration and compare it to the area in the next iteration.
if(area>MIN_OBJECT_AREA){
Object object;
object.setXPos(moment.m10/area);
object.setYPos(moment.m01/area);
object.setType(theObject.getType());
object.setColor(theObject.getColor());
objects.push_back(object);
objectFound = true;
}else objectFound = false;
}
//let user know you found an object
if(objectFound ==true){
//draw object location on screen
drawObject(objects,cameraFeed,temp,contours,hierarchy);}
}else putText(cameraFeed,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
}
}
Main Code:
void Run()
{
int w, h;
_fps = 30;
IplImage *pCapImage[kameraSayisi];
IplImage *pDisplayImage;
PBYTE pCapBuffer = NULL;
// Create camera instance
for(int i = 0; i < kameraSayisi; i++)
{
_cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
if(_cam[i] == NULL) return;
// Get camera frame dimensions
CLEyeCameraGetFrameDimensions(_cam[i], w, h);
// Create the OpenCV images
pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
// Set some camera parameters
CLEyeSetCameraParameter(_cam[i], CLEYE_GAIN, 0);
CLEyeSetCameraParameter(_cam[i], CLEYE_EXPOSURE, 511);
// Start capturing
CLEyeCameraStart(_cam[i]);
}
pDisplayImage = cvCreateImage(cvSize(w*kameraSayisi / 2, h * kameraSayisi/4 ), IPL_DEPTH_8U ,1);
if(_cam == NULL) return;
int iLastX = -1;
int iLastY = -1;
//Capture a temporary image from the camera
//program
bool trackObjects = true;
bool useMorphOps = true;
Mat HSV;
//Create a black image with the size as the camera output
Mat imgLines;
// imgLines = Mat::zeros( cvarrToMat(image).size(), CV_8UC3 );;
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
bool calibrationMode = false;
if(calibrationMode){
//create slider bars for HSV filtering
createTrackbars();
}
// image capturing loop
while(_running)
{
PBYTE pCapBuffer;
// Capture camera images
for(int i = 0; i < kameraSayisi; i++)
{
cvGetImageRawData(pCapImage[i], &pCapBuffer);
CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0)?2000:0);
}
// Display stereo image
for(int i = 0; i < kameraSayisi; i++)
{
cvSetImageROI(pDisplayImage, cvRect(w * (i%4) ,i/4 * h, w, h));
cvCopy(pCapImage[i], pDisplayImage);
}
cvResetImageROI(pDisplayImage);
Mat imgOriginal;
Mat imgConverted = cvarrToMat(pDisplayImage);
if(calibrationMode==true)
{
//need to find the appropriate color range values
// calibrationMode must be false
//if in calibration mode, we track objects based on the HSV slider values.
//cvtColor(imgOriginal,imgOriginal,CV_BayerRG2RGB);
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
cvtColor(imgOriginal,HSV,CV_BGR2HSV);
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(_windowName + 'T',threshold);
//the following is for Canny edge detection
/// Create a matrix of the same type and size as src (for dst)
dst.create( imgOriginal.size(), src.type() );
/// Convert the image to grayscale
cvtColor( imgOriginal, src_gray, CV_BGR2GRAY );
/// Create a window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Create a Trackbar for user to enter threshold
// createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
/// Show the image
Object a = Object(H_MIN,S_MIN,V_MIN,H_MAX,S_MAX,V_MAX);
trackFilteredObject(a,threshold,HSV,imgOriginal);
}
else{
//we can use their member functions/information
Object blue("blue"), yellow("yellow"), red("red"), orange("orange"),white("white");
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
//first find blue objects
cvtColor(imgOriginal,HSV,CV_RGB2HSV);
inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
morphOps(threshold);
//then yellows
inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
//then reds
inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
//then white
inRange(HSV,white.getHSVmin(),white.getHSVmax(),threshold);
//then orange
inRange(HSV,orange.getHSVmin(),orange.getHSVmax(),threshold);
trackFilteredObject(yellow,threshold,HSV,imgOriginal);
trackFilteredObject(white,threshold,HSV,imgOriginal);
trackFilteredObject(red,threshold,HSV,imgOriginal);
trackFilteredObject(blue,threshold,HSV,imgOriginal);
trackFilteredObject(orange,threshold,HSV,imgOriginal);
}
//delay 30 ms so that the screen can refresh.
//image will not appear without this waitKey() command
if (cvWaitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
// cvShowImage(_windowName, image);
imshow(_windowName,imgOriginal);
}
for(int i = 0; i < kameraSayisi; i++)
{
// Stop camera capture
CLEyeCameraStop(_cam[i]);
// Destroy camera object
CLEyeDestroyCamera(_cam[i]);
// Destroy the allocated OpenCV image
cvReleaseImage(&pCapImage[i]);
_cam[i] = NULL;
}
}

OpenCV get coordinates of red-only rectangle area

I have the following output from red-only filtration done by the following algorithm:
cv::Mat findColor(const cv::Mat & inputBGRimage, int rng=20)
{
// Make sure that your input image uses the channel order B, G, R (check not implemented).
cv::Mat mt1, mt2;
cv::Mat input = inputBGRimage.clone();
cv::Mat imageHSV; //(input.rows, input.cols, CV_8UC3);
cv::Mat imgThreshold, imgThreshold0, imgThreshold1; //(input.rows, input.cols, CV_8UC1);
assert( ! input.empty() );
// blur image
cv::blur( input, input, Size(11, 11) );
// convert input-image to HSV-image
cv::cvtColor( input, imageHSV, cv::COLOR_BGR2HSV );
// In the HSV-color space the color 'red' is located around the H-value 0 and also around the
// H-value 180. That is why you need to threshold your image twice and then combine the results.
cv::inRange( imageHSV, cv::Scalar( H_MIN, S_MIN, V_MIN ), cv::Scalar( H_MAX, S_MAX, V_MAX ), imgThreshold0 );
if ( rng > 0 )
{
// cv::inRange(imageHSV, cv::Scalar(180-rng, 53, 185, 0), cv::Scalar(180, 255, 255, 0), imgThreshold1);
// cv::bitwise_or( imgThreshold0, imgThreshold1, imgThreshold );
}
else
{
imgThreshold = imgThreshold0;
}
// cv::dilate( imgThreshold0, mt1, Mat() );
// cv::erode( mt1, mt2, Mat() );
return imgThreshold0;
}
And here is the output:
And I want to detect the four coordinates of the rectangle. As you can see, the output is not perfect; I used cv::findContours in conjunction with cv::approxPolyDP before, but it isn't working well anymore.
Is there any filter that I can apply to the input image (besides blur, dilate, erode) to make the image better for processing?
Any suggestions?
Updated:
When I am using findContours like this:
findContours( src, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
double largest_area = 0;
for( int i = 0; i < contours.size(); i++) { // get the largest contour
double area = fabs( contourArea( contours[i] ) );
if( area >= largest_area ) {
largest_area = area;
largestContours.clear();
largestContours.push_back( contours[i] );
}
}
if( largest_area > 5000 ) {
cv::approxPolyDP( cv::Mat(largestContours[0]), approx, 100, true );
cout << approx.size() << endl; /* ALWAYS RETURN 2 ?!? */
}
The approxPolyDP is not working as expected.
I think your result is quite good; maybe you can select the contour with the greatest area using image moments, and then find the minimal rotated rectangle of that bigger contour.
vector<cv::RotatedRect> minRect( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
minRect[i] = minAreaRect( cv::Mat(contours[i]) );
}
The RotatedRect class already has a points() method that retrieves the four Point2f vertices:
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for(int i = 0; i < 4; i++){
std::cout << vertices[i] << " ";
}
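Putting the two together, a minimal sketch (assuming contours was already filled by cv::findContours) that picks the largest contour by area and prints the four corners of its minimal rotated rectangle:

int largestIdx = -1;
double largestArea = 0.0;
for (size_t i = 0; i < contours.size(); i++) {
    double area = fabs(cv::contourArea(contours[i]));
    if (area > largestArea) {
        largestArea = area;
        largestIdx = (int)i;
    }
}
if (largestIdx >= 0) {
    cv::RotatedRect box = cv::minAreaRect(cv::Mat(contours[largestIdx]));
    cv::Point2f corners[4];
    box.points(corners); // the four coordinates you are after
    for (int i = 0; i < 4; i++)
        std::cout << corners[i] << std::endl;
}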

How to detect squares in video with OpenCV?

So I combined squares.cpp with cvBoundingRect.cpp code to detect squares in video. I therefore had to convert from IplImage to Mat type so that the findSquares and drawSquares methods could run (by using the cvarrToMat function). But unfortunately, after successful compilation I get this error when running:
OpenCV Error: Assertion failed (j < nsrcs && src[j].depth() == depth) in mixChannels, file /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp, line 1205
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp:1205: error: (-215) j < nsrcs && src[j].depth() == depth in function mixChannels
Abort trap: 6
Here's the code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write the quadrangle
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(255,0,0), 3, LINE_AA);
}
imshow(wndname, image);
}
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
//Create a new movie capture object.
cam = cvCaptureFromCAM(0);
//create storage for contours
storage = cvCreateMemStorage(0);
//capture current frame from webcam
currentFrame = cvQueryFrame(cam);
//Size of the image.
CvSize imgSize;
imgSize.width = currentFrame->width;
imgSize.height = currentFrame->height;
//Images to use in the program.
currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
while(1)
{
currentFrame = cvQueryFrame( cam );
if( !currentFrame ) break;
//Convert the image to grayscale.
cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);
if(first) //Capturing Background for the first time
{
differenceImg = cvCloneImage(currentFrame_grey);
oldFrame_grey = cvCloneImage(currentFrame_grey);
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
first = false;
continue;
}
//Minus the current frame from the moving average.
cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);
//blurring the difference image
cvSmooth(differenceImg, differenceImg, CV_BLUR);
//apply threshold to discard small unwanted movements
cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
//find contours
cv::Mat diffImg = cv::cvarrToMat(differenceImg);
cv::Mat currFrame = cv::cvarrToMat(currentFrame);
findSquares(diffImg, squares);
//draw bounding box around each contour
drawSquares(currFrame, squares);
//display colour image with bounding box
cvShowImage("Output Image", currentFrame);
//display threshold image
cvShowImage("Difference image", differenceImg);
//New Background
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
//clear memory and contours
cvClearMemStorage( storage );
contours = 0;
//press Esc to exit
char c = cvWaitKey(33);
if( c == 27 ) break;
}
// Destroy the image & movies objects
cvReleaseImage(&oldFrame_grey);
cvReleaseImage(&differenceImg);
cvReleaseImage(&currentFrame);
cvReleaseImage(&currentFrame_grey);
return 0;
}
As the error message says, your problem is in cv::mixChannels(). See the documentation. Your difference image is single-channel, but findSquares() loops over three color planes and asks mixChannels() to extract plane c from it; for c = 1 or 2 that source channel does not exist, which trips the assertion.
Or you could simply do something like
cv::Mat channels[3];
cv::split(multiChannelImage, channels);
and then access each channel using
cv::Mat currChannel = channels[channelNumber];
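Applied to the code above, the color-plane loop in findSquares() could be written with cv::split so that it also copes with single-channel input like the difference image; a sketch under that assumption:

std::vector<cv::Mat> planes;
cv::split(timg, planes); // one Mat per channel (1 for grayscale, 3 for BGR)
for (size_t c = 0; c < planes.size(); c++)
{
    cv::Mat gray0 = planes[c];
    // ... run the existing Canny / threshold / findContours code on gray0 ...
}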

Detect and count faces on an image using OpenCV and C++

I am using OpenCV and C++, although I'm a beginner. I am trying to detect and count faces from a set of images using a Haar cascade.
I only want to get the number of faces in each image.
How can I edit this code to get the number of faces in an image?
// Function detectAndDisplay
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element
if (ac > ab)
{
ib = ic;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
}
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
// Form a filename
filename = "";
stringstream ssfn;
ssfn << filenumber << ".png";
filename = ssfn.str();
filenumber++;
imwrite(filename, gray);
printf("%s\n", filename.c_str());
Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
// Show image
/*sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
imshow("original", frame);
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");*/
}
I modified your posted code sample to just return the number of detected faces in the image:
// Function to count the detected faces in your image
int countFacesInImage(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
return faces.size();
}
If you want to get an impression of which faces were detected and which weren't, you can add this code before the return:
cv::Mat tmpImage = frame.clone();
for(unsigned int i=0; i<faces.size(); ++i)
{
cv::rectangle(tmpImage, faces[i], cv::Scalar(0,255,0), 2);
}
cv::imshow("faces", tmpImage);
cv::waitKey(0);
After each image you have to press a key while the "faces" window is active. You can change it to cv::waitKey(n) to wait n milliseconds instead of having to press a key.
I had to do something similar and used the example of a CascadeClassifier on the OpenCV website.
The rough steps to follow are (a short driver sketch follows the list):
Load all images you want to process.
For each image, apply the CascadeClassifier as in the example; you will need to pass a std::vector<cv::Rect> as a parameter. After detection, this vector will contain the locations of all detected objects (in your case, faces).
For each image, return the size of the vector to know the number of faces that were detected.
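A minimal driver sketch along those lines (the cascade file name and the image list are placeholders for your own setup):

#include <opencv2/objdetect.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    cv::CascadeClassifier face_cascade;
    if (!face_cascade.load("haarcascade_frontalface_alt.xml")) // placeholder path
        return 1;
    std::vector<std::string> files = { "img1.png", "img2.png" }; // placeholder list
    for (const std::string& f : files)
    {
        cv::Mat img = cv::imread(f, cv::IMREAD_GRAYSCALE);
        if (img.empty()) continue;
        std::vector<cv::Rect> faces;
        face_cascade.detectMultiScale(img, faces, 1.1, 2,
                                      0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
        std::cout << f << ": " << faces.size() << " face(s)" << std::endl;
    }
    return 0;
}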
To be honest, the example I linked is something you could have found on your own without much effort.

How can I merge blobs / contours

I use findContours for blob detection. Now I would like to merge close and similar blobs together.
Here are some sample images:
Is that possible with plain OpenCV?
The input images you gave us are pretty easy to work with:
The first step is to isolate the yellow blobs from everything else, and a simple color segmentation technique can accomplish this task. You can take a look at Segmentation & Object Detection by color or Tracking colored objects in OpenCV to get an idea of how to do it.
Then, it's time to merge the blobs. One technique in particular that can be useful is the bounding box, which puts all the blobs inside a rectangle. Notice in the images below that there is a green rectangle surrounding the blobs:
After that, all you need to do is fill the rectangle with the color of your choice, thus connecting all the blobs. I'm leaving this last step as homework for you (a short sketch of it follows the code below).
This is the quickest and simplest approach I could think of. The following code demonstrates how to achieve what I just described:
#include <cv.h>
#include <highgui.h>
#include <iostream>
#include <vector>
int main(int argc, char* argv[])
{
cv::Mat img = cv::imread(argv[1]);
if (!img.data)
{
std::cout << "!!! Failed to open file: " << argv[1] << std::endl;
return 0;
}
// Convert BGR Mat into HSV color space
cv::Mat hsv;
cv::cvtColor(img, hsv, CV_BGR2HSV);
// Split HSV Mat into HSV components
std::vector<cv::Mat> v;
cv::split(hsv,v);
// Erase pixels with low saturation
int min_sat = 70;
cv::threshold(v[1], v[1], min_sat, 255, cv::THRESH_BINARY);
/* Work with the saturated image from now on */
// Erode could provide some enhancement, but I'm not sure.
// cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
// cv::erode(v[1], v[1], element);
// Store the set of points in the image before assembling the bounding box
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = v[1].begin<uchar>();
cv::Mat_<uchar>::iterator end = v[1].end<uchar>();
for (; it != end; ++it)
{
if (*it) points.push_back(it.pos());
}
// Compute minimal bounding box
cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
// Display bounding box on the original image
cv::Point2f vertices[4];
box.points(vertices);
for (int i = 0; i < 4; ++i)
{
cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, CV_AA);
}
cv::imshow("box", img);
//cv::imwrite(argv[2], img);
cvWaitKey(0);
return 0;
}
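For completeness, a minimal sketch of the fill step left as homework, assuming the box and img variables from the code above (the fill color is an arbitrary choice):

cv::Point2f verticesF[4];
box.points(verticesF);
cv::Point vertices[4];
for (int i = 0; i < 4; ++i)
    vertices[i] = verticesF[i]; // cv::Point2f converts to cv::Point
cv::fillConvexPoly(img, vertices, 4, cv::Scalar(0, 255, 255)); // paint the box solid
cv::imshow("merged", img);
cv::waitKey(0);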
I think I did it. Thanks to your detailed answer, I found this solution (comments are welcome):
vector<vector<Point> > contours;
vector<vector<Point> > tmp_contours;
findContours(detectedImg, tmp_contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
vector<vector<Point> >::iterator it1;
it1 = tmp_contours.begin();
Mat test;
test = Mat(FImage.size(), CV_32FC3);
while (it1 != tmp_contours.end()) {
vector<Point> approx1;
approxPolyDP(Mat(*it1), approx1, 3, true);
Rect box1 = boundingRect(approx1);
float area1 = contourArea(approx1);
if ((area1 > 50) && (area1 < 13000) && (box1.width < 100) && (box1.height < 120)) {
vector<vector<Point> >::iterator it2;
it2 = tmp_contours.begin();
while (it2 != tmp_contours.end()) {
vector<Point> approx2;
approxPolyDP(Mat(*it2), approx2, 3, true);
Moments m1 = moments(Mat(approx1), false);
Moments m2 = moments(Mat(approx2), false);
float x1 = m1.m10 / m1.m00;
float y1 = m1.m01 / m1.m00;
float x2 = m2.m10 / m2.m00;
float y2 = m2.m01 / m2.m00;
vector<Point> dist;
dist.push_back(Point(x1, y1));
dist.push_back(Point(x2, y2));
float d = arcLength(dist, false);
Rect box2 = boundingRect(approx2);
if (box1 != box2) {
if (d < 25) {
//Method to merge the vectors
approx1 = mergePoints(approx1, approx2);
}
}
++it2;
}
Rect b = boundingRect(approx1);
rectangle(test, b, CV_RGB(125, 255, 0), 2);
contours.push_back(approx1);
}
++it1;
}