I have written a program to detect an object in the water (a swimming pool), but I am getting a lot of other contours, as shown in this screenshot, whereas this is the actual image. Please help me get rid of the unwanted contours detected in the video. Below is the code I have written.
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
string window_name = "Captured rectangle block";
RNG rng(12345);
double fps;
int thresh = 100;
int main( int argc, const char** argv )
{
VideoCapture cap("IMGP2850.MOV"); // open the video file for reading
if ( !cap.isOpened() ) // if not success, exit program
{
cout << "Cannot open the video file" << endl;
return -1;
}
fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
Size S(dWidth,dHeight);
while(1)
{
Mat frame;
Mat threshold_output;
int skip_frame = 4;
while(skip_frame)
{
printf("inside while loop\n");
bool bSuccess = cap.read(frame); // read a new frame from video
skip_frame--;
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read the frame from video file" << endl;
break;
}
}
//-- 3. Apply the classifier to the frame
if( frame.empty() )
{ printf(" --(!) No captured frame -- Break!"); break; }
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray);
blur( frame_gray, frame_gray, Size(3,3) );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
printf("before finding countrs\n");
threshold( frame_gray, threshold_output, thresh, 255, THRESH_BINARY );
findContours( threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<Point2f>center( contours.size() );
vector<float>radius( contours.size() );
// contours.resize(contours.size());
printf("after finding countrs\n");
for( unsigned int i = 0; i < contours.size(); i++ )
{
printf("inside for loop\n");
approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
printf("after poly\n");
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
printf("after bondrec\n");
minEnclosingCircle( (Mat)contours_poly[i], center[i], radius[i] );
}
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( unsigned int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
return 0;
}
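A common first step for suppressing spurious contours like these is to filter by contour area before drawing, so only blobs of a plausible size survive. Below is a minimal sketch that reuses the contours, drawing and rng variables from the code above; the minimum-area value of 500 is a placeholder that would need to be tuned for the actual video.
// Sketch: keep only contours whose area exceeds a tuned minimum.
double minArea = 500.0; // hypothetical value, tune for the real footage
vector<vector<Point> > filtered;
for( unsigned int i = 0; i < contours.size(); i++ )
{
    if( contourArea(contours[i]) > minArea )
        filtered.push_back(contours[i]);
}
// Draw only the surviving contours and their bounding boxes.
for( unsigned int i = 0; i < filtered.size(); i++ )
{
    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
    drawContours( drawing, filtered, (int)i, color, 1, 8 );
    rectangle( drawing, boundingRect(filtered[i]), color, 2, 8, 0 );
}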
I copied the code of the Harris corner detector from the OpenCV documentation site, but I wanted to make the trackbar and its output appear in the same window, so I updated the code to be:
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int thresh = 100;
int max_thresh = 255;
char* corners_window = "Corners detected";
void cornerHarris_demo( int, void* )
{
Mat dst, dst_norm, dst_norm_scaled;
dst = Mat::zeros( src.size(), CV_32FC1 );
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
/// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
/// Drawing a circle around corners
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
circle( src, Point( i, j ), 5, Scalar(255,0,0), 2, 8, 0 );
}
}
}
/// Showing the result
imshow( corners_window, src);
}
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
char* filename = "myimage.jpg";
src_gray = imread( filename, 0);
cvtColor(src_gray,src,CV_GRAY2RGB);
/// Create a window and a trackbar
namedWindow( corners_window, CV_WINDOW_AUTOSIZE);
createTrackbar( "Threshold: ", corners_window, &thresh, max_thresh, cornerHarris_demo );
cornerHarris_demo( 0, 0 );
waitKey(0);
return(0);
}
The problem is that the output window is inactive: I can't open it and play with the trackbar, although I can see it in the taskbar.
This updated version works:
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int thresh = 200;
int max_thresh = 255;
/// Function header
void cornerHarris_demo( int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread("chessboard.jpg");
cvtColor( src, src_gray, CV_BGR2GRAY );
/// Create a window and a trackbar
namedWindow( "Source image", CV_WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", "Source image", &thresh, max_thresh, cornerHarris_demo );
cornerHarris_demo( 0, 0 );
waitKey(0);
return(0);
}
/** @function cornerHarris_demo */
void cornerHarris_demo( int, void* )
{
Mat dst, dst_norm, dst_norm_scaled, temp;
src.copyTo(temp);
dst = Mat::zeros( src.size(), CV_32FC1 );
/// Detector parameters
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
/// Detecting corners
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
/// Normalizing
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
/// Drawing a circle around corners
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
circle( temp, Point( i, j ), 5, Scalar(255,0,0), 2, 8, 0 );
}
}
}
/// Showing the result
imshow( "Source image", temp );
}
I still don't know why the first one didn't work.
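I can't say for certain why the first window was unresponsive, but one concrete difference between the two listings is that the first version draws the circles straight into the global src and shows src itself, so every trackbar change piles new circles on top of the old ones, while the working version copies src into temp before drawing. In isolation, the copy-then-draw pattern looks like this (the point is a purely illustrative example):
// Render the overlay into a scratch copy on every callback,
// so repeated trackbar callbacks never accumulate drawings in src.
Mat display;
src.copyTo(display);
circle( display, Point(100, 100), 5, Scalar(255, 0, 0), 2, 8, 0 ); // hypothetical point, for illustration only
imshow( "Source image", display );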
Here I wrote code for finding the edges and all of their coordinate points in the image, but I need only two or three coordinate points in each quadrant of the image.
using namespace cv;
using namespace std;
Mat src;
Mat src_gray;
int thresh = 172;
int max_thresh = 255;
RNG rng(12345);
void thresh_callback(int, void* );
int main( int argc, char** argv ){
src = imread("Led50.jpg",1);
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);}
void thresh_callback(int, void* ){
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for(unsigned int i=0;i<contours.size();i++){
for(unsigned int j=0;j<contours[i].size();j++)
{
cout << "Point(x,y)=" << contours[i][j].x << "," << contours[i][j].y << endl;
}}}
Source file:
Result, where I get all the coordinate points:
I need only the marked coordinate points (not necessarily in those exact positions), with at least two points in each quadrant:
The above code is based on Canny and findContours; I need only a few coordinates from the image.
#include "highgui.hpp"
#include "imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, char** argv ){
Mat src_gray;
src_gray = imread("EXnc1.jpg",0);
blur( src_gray, src_gray, Size(3,3) );
Mat bwimg = src_gray > 127;
vector<vector<Point> > contours;
findContours( bwimg, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE );
for(unsigned int i=0;i<contours.size();i++){
approxPolyDP(Mat(contours[i]), contours[i], 10, true);
if(i > 0)
{
cout << "Outer contour points \n";
}
else cout << "Inner contour points \n";
for(unsigned int j=0;j<contours[i].size();j++)
{
cout << "Point(x,y)=" << contours[i][j].x << "," << contours[i][j].y << endl;
circle( src_gray, contours[i][j], 3, Scalar(0, 0, 255), FILLED, LINE_AA );
}
imshow( "Result", src_gray );
waitKey(0);
}
return(0);}
Output:
Inner contour points
Point(x,y)=343,148
Point(x,y)=419,160
Point(x,y)=461,208
Point(x,y)=457,276
Point(x,y)=403,322
Point(x,y)=322,322
Point(x,y)=269,262
Point(x,y)=279,190
Outer contour points
Point(x,y)=371,133
Point(x,y)=289,159
Point(x,y)=251,224
Point(x,y)=271,298
Point(x,y)=351,341
Point(x,y)=436,320
Point(x,y)=481,247
Point(x,y)=456,172
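If only two or three of these approximated points are needed per quadrant, one option is to bin each point by its position relative to the contour's centroid and keep at most a couple per bin. This is just a sketch, assuming the quadrants are measured around the centroid; it reuses contours[i] from the loop above, and the limit of 2 is a placeholder.
// Sketch: keep at most maxPerQuadrant approximated points in each quadrant.
Moments m = moments( contours[i] );
Point2f c( m.m10 / m.m00, m.m01 / m.m00 ); // centroid of the contour
const int maxPerQuadrant = 2;              // hypothetical limit
int kept[4] = { 0, 0, 0, 0 };
for( unsigned int j = 0; j < contours[i].size(); j++ )
{
    Point p = contours[i][j];
    int q = ( p.x >= c.x ? 0 : 1 ) + ( p.y >= c.y ? 0 : 2 ); // quadrant index 0..3
    if( kept[q] < maxPerQuadrant )
    {
        kept[q]++;
        cout << "Quadrant " << q << " point: " << p.x << "," << p.y << endl;
    }
}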
I am trying to count the number of hearts in the following image by using a Canny edge detection algorithm and contours.
But after finding the contours I got an image like this, and it counted 4 contours instead of 3. What kind of method do I have to follow to count any number of objects of the different shapes in a deck of cards? I only need the number of symbols in the middle.
Here is my C++ code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
Mat src;
Mat dst;
Mat canny_output;
// this function loads the image
void loadImage(char* source){
Mat tmp;
/// Load source image and convert it to gray
src = imread( source, 1 );
/// Convert image to gray and blur it
cvtColor( src, tmp, CV_RGB2GRAY );
//blur( src_gray, src_gray, Size(3,3) );
bitwise_not( tmp, src);
}
void clearImage(){
int i,j;
int r = 10;
Mat clone;
src.copyTo(clone);
for(i = 0;i < src.rows;++i){
j = 0;
clone.at<Vec3b>(i,j) = Vec3b(0,0,0);
for(j = 0;j < src.cols;++j){
if(src.at<cv::Vec3b>(i,j) == cv::Vec3b(255,255,255)){
rectangle(
clone,
cv::Point(i-r, j),
cv::Point(i+r, j+r),
cv::Scalar(255, 255, 255)
);
}
}
}
}
void detectImages(){
int thresh = 100;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Canny( src, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
cout<<(hierarchy.size())<<endl;
}
// cropping and resizing the image
void corpResizeImage(){
int i,j;
Vec3b intensity;
intensity.val[0] = 0;
intensity.val[1] = 0;
intensity.val[2] = 0;
cv::Rect myROI(src.cols/6,0, 2*src.cols/3, src.rows);
Mat croppedImage = src(myROI);
Size size(300,600);
resize(croppedImage,src,size);//resize image
for(i = 0;i < src.rows;++i){
j = 0;
if((i < src.rows/25)||(i < (src.rows/25))){
for(j = 0;j < src.cols;++j){
src.at<Vec3b>(i,j)= intensity;
}
}
}
}
/** @function main */
int main( int argc, char ** argv )
{
loadImage("img/3h.png");
corpResizeImage();
detectImages();
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, canny_output );
waitKey(0);
return(0);
}
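One likely source of the extra contour is that Canny produces edge curves rather than filled regions, so the card outline (or a doubled edge) gets counted along with the three hearts. A different approach is to threshold the image so each symbol becomes one solid blob, then count the external contours above a minimum area. The sketch below is self-contained; "card.png", the threshold of 127 and the minimum area of 200 are all placeholder values to be tuned, and the image would still need to be cropped to the middle region as corpResizeImage() does.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    Mat gray = imread( "card.png", 0 );                   // hypothetical file name
    if( gray.empty() ) return -1;
    Mat bin;
    threshold( gray, bin, 127, 255, THRESH_BINARY_INV );  // symbols become white blobs
    vector<vector<Point> > contours;
    findContours( bin, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE );
    int count = 0;
    for( size_t i = 0; i < contours.size(); i++ )
        if( contourArea(contours[i]) > 200 )              // ignore specks
            count++;
    cout << "Symbols found: " << count << endl;
    return 0;
}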
I am trying to recover the points of the convexity defects, but the function only returns integers. Can you give me a hint on how to find these points?
vector<vector<Point> >hull2( contours.size() );
vector<vector<int>> hull(contours.size());
std::vector<cv::Vec4i> convexityDefectsSet;
for( int i = 0; i < contours.size(); i++ ) {
convexHull( Mat(contours[i]), hull[i], false );
convexHull(Mat(contours[i]), hull2[i], false);
if (contours[i].size() > 3) {
cv::convexityDefects(Mat(contours[i]), hull[i], convexityDefectsSet);
for (int cDefIt = 0; cDefIt < convexityDefectsSet.size(); cDefIt++) {
int startIdx = convexityDefectsSet[cDefIt].val[0];
int endIdx = convexityDefectsSet[cDefIt].val[1];
int defectPtIdx = convexityDefectsSet[cDefIt].val[2];
double depth = static_cast<double>(convexityDefectsSet[cDefIt].val[3]) / 256.0;
std::cout << startIdx << ' ' << endIdx << ' ' << defectPtIdx << ' ' << depth << '\n' << '\n' << std::endl;
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
Point2f p(defectPtIdx, defectPtIdx);
circle(frame, p , 10, color, 2, 8, 0 );
}
}}
I think this piece of my code (it should detect a hand, though the color detector needs to be tuned, and search for convexity defects) can serve as a base for your code:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <ctype.h>
#include <time.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\video\tracking.hpp>
#include <opencv2\highgui\highgui.hpp>
using namespace cv;
using namespace std;
// Detect Skin from YCrCb
Mat DetectYCrCb(Mat img, Scalar min, Scalar max) {
Mat skin;
cvtColor(img, skin, cv::COLOR_BGR2YCrCb);
inRange(skin, min, max, skin);
Mat rect_12 = getStructuringElement(cv::MORPH_RECT, Size(12,12) , Point(6,6));
erode(skin, skin, rect_12,Point(),1);
Mat rect_6 = getStructuringElement(cv::MORPH_RECT, Size(6,6) , Point(3,3));
dilate(skin,skin,rect_6,Point(),2);
return skin;
}
void DetectContour(Mat img){
Mat drawing = Mat::zeros( img.size(), CV_8UC3 );
vector<vector<Point> > contours;
vector<vector<Point> > bigContours;
vector<Vec4i> hierarchy;
findContours(img,contours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE, Point());
if(contours.size()>0)
{
vector<std::vector<int> >hull( contours.size() );
vector<vector<Vec4i>> convDef(contours.size() );
vector<vector<Point>> hull_points(contours.size());
vector<vector<Point>> defect_points(contours.size());
for( int i = 0; i < contours.size(); i++ )
{
if(contourArea(contours[i])>5000)
{
convexHull( contours[i], hull[i], false );
convexityDefects( contours[i],hull[i], convDef[i]);
for(int k=0;k<hull[i].size();k++)
{
int ind=hull[i][k];
hull_points[i].push_back(contours[i][ind]);
}
for(int k=0;k<convDef[i].size();k++)
{
if(convDef[i][k][3]>20*256) // filter defects by depth
{
int ind_0=convDef[i][k][0];
int ind_1=convDef[i][k][1];
int ind_2=convDef[i][k][2];
defect_points[i].push_back(contours[i][ind_2]);
cv::circle(drawing,contours[i][ind_0],5,Scalar(0,255,0),-1);
cv::circle(drawing,contours[i][ind_1],5,Scalar(0,255,0),-1);
cv::circle(drawing,contours[i][ind_2],5,Scalar(0,0,255),-1);
cv::line(drawing,contours[i][ind_2],contours[i][ind_0],Scalar(0,0,255),1);
cv::line(drawing,contours[i][ind_2],contours[i][ind_1],Scalar(0,0,255),1);
}
}
drawContours( drawing, contours, i, Scalar(0,255,0), 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull_points, i, Scalar(255,0,0), 1, 8, vector<Vec4i>(), 0, Point() );
}
}
}
imshow( "Hull demo", drawing );
}
int main( int argc, char** argv )
{
Mat frame,copyFrame;
VideoCapture capture(0);
namedWindow( "Hull demo", cv::WINDOW_AUTOSIZE );
namedWindow( "Video", cv::WINDOW_AUTOSIZE );
if (capture.isOpened()){
while(true)
{
capture >> frame;
imshow( "Video", frame);
Mat skinYCrCb = DetectYCrCb(frame,Scalar(0, 100, 80), Scalar(255, 185, 135));
DetectContour(skinYCrCb);
int c = waitKey(10);
if( (char)c == 27 )
{
break;
}
}
}
cv::destroyAllWindows();
return 0;
}
According to the documentation:
In C++ and the new Python/Java interface each convexity defect is represented
as 4-element integer vector [...]: (start_index, end_index, farthest_pt_index,
fixpt_depth), where indices are 0-based indices in the original contour of
the convexity defect...
They correspond to indices in the original contour that was used to generate the convex hull, i.e., your contours[i] variable.
For example, the coordinates of the first point are obtained with:
cv::Point start = contours[i].at(startIdx);
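Applied to the loop in the question, the other points are looked up the same way; a short sketch using the variables from the code above:
// Convert the defect indices into actual contour points.
cv::Point startPt = contours[i].at(startIdx);
cv::Point endPt   = contours[i].at(endIdx);
cv::Point farPt   = contours[i].at(defectPtIdx); // use this instead of Point2f(defectPtIdx, defectPtIdx)
circle( frame, farPt, 10, color, 2, 8, 0 );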
Well, I am trying to create a small example of blob tracking using the Kalman filter. I am using OpenCV to accomplish this task, but it does not seem to work as it is supposed to: when I hide the object being tracked, the Kalman filter does not try to estimate where the object should be. I am attaching the code below; I hope someone can give me a hint on what I am doing wrong.
Thanks in advance.... :-)
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
using namespace std;
using namespace cv;
#define drawCross( img, center, color, d )\
line(img, Point(center.x - d, center.y - d), Point(center.x + d, center.y + d), color, 2, CV_AA, 0);\
line(img, Point(center.x + d, center.y - d), Point(center.x - d, center.y + d), color, 2, CV_AA, 0 )
int main()
{
Mat frame, thresh_frame;
vector<Mat> channels;
VideoCapture capture;
vector<Vec4i> hierarchy;
vector<vector<Point> > contours;
capture.open("capture.avi");
if(!capture.isOpened())
cerr << "Problem opening video source" << endl;
KalmanFilter KF(4, 2, 0);
Mat_<float> state(4, 1);
Mat_<float> processNoise(4, 1, CV_32F);
Mat_<float> measurement(2,1); measurement.setTo(Scalar(0));
KF.statePre.at<float>(0) = 0;
KF.statePre.at<float>(1) = 0;
KF.statePre.at<float>(2) = 0;
KF.statePre.at<float>(3) = 0;
KF.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1); // Including velocity
KF.processNoiseCov = *(cv::Mat_<float>(4,4) << 0.2,0,0.2,0, 0,0.2,0,0.2, 0,0,0.3,0, 0,0,0,0.3);
setIdentity(KF.measurementMatrix);
setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
setIdentity(KF.errorCovPost, Scalar::all(.1));
while((char)waitKey(1) != 'q' && capture.grab())
{
capture.retrieve(frame);
split(frame, channels);
add(channels[0], channels[1], channels[1]);
subtract(channels[2], channels[1], channels[2]);
threshold(channels[2], thresh_frame, 50, 255, CV_THRESH_BINARY);
medianBlur(thresh_frame, thresh_frame, 5);
findContours(thresh_frame, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
Mat drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
for(size_t i = 0; i < contours.size(); i++)
{
// cout << contourArea(contours[i]) << endl;
if(contourArea(contours[i]) > 500)
drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
}
thresh_frame = drawing;
findContours(thresh_frame, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
for(size_t i = 0; i < contours.size(); i++)
{
// cout << contourArea(contours[i]) << endl;
if(contourArea(contours[i]) > 500)
drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
}
thresh_frame = drawing;
// Get the moments
vector<Moments> mu(contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
Mat prediction = KF.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
for(size_t i = 0; i < mc.size(); i++)
{
drawCross(frame, mc[i], Scalar(255, 0, 0), 5);
measurement(0) = mc[i].x;
measurement(1) = mc[i].y;
}
Point measPt(measurement(0),measurement(1));
Mat estimated = KF.correct(measurement);
Point statePt(estimated.at<float>(0),estimated.at<float>(1));
drawCross(frame, statePt, Scalar(255, 255, 255), 5);
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
}
for( size_t i = 0; i < contours.size(); i++ )
{
rectangle( frame, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 2, 8, 0 );
}
imshow("Video", frame);
imshow("Red", channels[2]);
imshow("Binary", thresh_frame);
}
return 0;
}
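Regarding the occlusion behaviour described above: one thing worth noting (a sketch against the variables in this code, not necessarily the only issue) is that correct() is called on every frame even when no blob was found, which feeds the filter a stale measurement. Calling correct() only when a mass centre exists, and otherwise drawing the prediction alone, lets the filter keep estimating from its motion model while the object is hidden:
// Inside the capture loop, after computing the mass centres (mc):
Mat prediction = KF.predict();
Point predictPt( prediction.at<float>(0), prediction.at<float>(1) );
if( !mc.empty() )
{
    // A blob was detected: feed its centre to the filter and draw the corrected state.
    measurement(0) = mc[0].x;
    measurement(1) = mc[0].y;
    Mat estimated = KF.correct(measurement);
    Point statePt( estimated.at<float>(0), estimated.at<float>(1) );
    drawCross( frame, statePt, Scalar(255, 255, 255), 5 );
}
else
{
    // Object hidden: skip the correction and show the predicted position instead.
    drawCross( frame, predictPt, Scalar(0, 0, 255), 5 );
}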