I've been trying to set up a program that uses HSV to detect a specific colour and place a rectangle around the biggest area of that colour it finds. I run into problems when I get to the vectors and the findContours function, which stops the program with an error stating "xxx.exe has triggered a breakpoint".
This only happens when I insert the line
findContours(dilatedBlue,contours,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
I'm a rookie when it comes to OpenCV and C++, but I'm hoping someone will be able to help. I'm using Windows 7 x64, Visual Studio 2012 and OpenCV 2.4.8.
My full code follows:
#include "stdafx.h"
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
// Declare Local Variables
int Quit = 0;
Mat imageDiff;
Mat HSVMatrix[3];
Mat erodedBlue;
Mat dilatedBlue;
//Start of search/shoot routine
while( Quit != 'q' ) // For as long as User wishes to continue...
{
Mat image;
cap >> image; // get a new frame from camera
imshow("Original", image);
//GaussianBlur(image, image, Size(3,3), 1.5, 1.5); // Setting up a Gaussian blur using 5x5 grid
cvtColor(image, imageDiff, CV_BGR2HSV);
split(imageDiff,HSVMatrix);
//Setting up saturation values for colour blue
threshold(HSVMatrix[0],HSVMatrix[0],130,255,THRESH_TOZERO_INV); // High value of colour - Dynamic changing of variable to select mode 130
threshold(HSVMatrix[0],HSVMatrix[0],75,255,THRESH_BINARY); // Low value of colour 75
threshold(HSVMatrix[1],HSVMatrix[1],255,255,THRESH_TOZERO_INV); // High saturation value - Modify for varying levels of light
threshold(HSVMatrix[1],HSVMatrix[1],100,255,THRESH_BINARY); // Low saturation value
HSVMatrix[2]=HSVMatrix[1]&HSVMatrix[0];
imshow("HSVblue", HSVMatrix[2]); // Displays binarised image
erode(HSVMatrix[2], HSVMatrix[2], Mat(), Point(-1, -1), 2, 1, 1);
erode(HSVMatrix[2], erodedBlue, Mat(), Point(-1, -1), 2, 1, 1);
imshow("Eroded-Blue", erodedBlue);
dilate(erodedBlue, dilatedBlue, Mat(), Point(-1, -1), 2, 1, 1);
imshow("Dilated-Blue", dilatedBlue);
vector<vector<Point> > contours; // Temporary variables used to hold contour information
vector<Point> approx;
vector<Vec4i> hierarchy;
vector<Rect> boundRect(contours.size());
findContours(dilatedBlue,contours,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true); // Calculation of contours and their approximate size and sides
}
for (int i = 0; i < contours.size(); i++)
{
if ((approx.size() == 4) && (fabs(contourArea(Mat(approx))) > 100)) // The properties that have to be fulfilled in order for the object to be recognized by the computer
{
Rect outline; // If the properties have been fulfilled and the object has been recognized, display an outline around the object
outline = boundingRect(approx);
rectangle(image,Rect(outline),Scalar(0, 255, 255),5,8,0); // The properties of the outline: the colour[bgr] and thickness
}
}
imshow("Target", image);
Quit = cvWaitKey(1); // The program waits for the key "q" to be pressed to exit
}
cap.release();
return 0;
}
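Not an answer to the colour detection itself, but a note on the breakpoint: with Visual Studio this symptom is very often caused by mixing Debug and Release binaries, for example linking the release OpenCV libraries (opencv_core248.lib and friends) into a Debug build instead of the debug ones (opencv_core248d.lib), which corrupts the std::vector that findContours fills. The minimal sketch below can help confirm that: if even this tiny program triggers the same breakpoint, the project configuration is the likely culprit rather than the code.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;

int main()
{
    // Build a synthetic binary image with a single white square on it.
    Mat test = Mat::zeros(100, 100, CV_8UC1);
    rectangle(test, Rect(20, 20, 40, 40), Scalar(255), -1); // filled white square
    std::vector<std::vector<Point> > contours;
    std::vector<Vec4i> hierarchy;
    findContours(test, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
    return (int)contours.size(); // expected: 1 if the libraries are linked consistently
}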
I have a project that I need to make for classes, and I chose a task that is a bit beyond my skills.
The goal is to count the result of dice rolls.
For now, I'm trying to make it work on a sample picture:
sample pic of dice
and my current code is added below:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int getMaxAreaContourId(vector <vector<cv::Point>> contours);
vector<Point> contoursConvexHull(vector<vector<Point> > contours, int index);
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int main(int argc, char** argv)
{
Mat image;
image = imread("kostki.jpg", CV_LOAD_IMAGE_COLOR);
if (!image.data)
{
cout << "Could not open or find the image" << std::endl;
return -1;
}
Mat imgHSV;
Mat workimage = image;
cvtColor(workimage, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
//red dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(146, 0, 31), Scalar(179, 255, 255));
//green dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(25, 147, 0), Scalar(98, 255, 154));
//yellow dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(22, 45, 161), Scalar(91, 255, 255));
//black dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(98, 0, 0), Scalar(179, 232, 107));
//white symbols
workimage = ZnakiFunkcja(workimage, imgHSV, Scalar(58, 0, 183), Scalar(179, 145, 255));
namedWindow("Kostki_kontur", CV_WINDOW_AUTOSIZE);
imshow("Kostki_kontur", workimage);
waitKey(0);
return 0;
}
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
Mat temp;
inRange(in, low, high, temp);
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
Mat srcBlur, srcCanny;
blur(temp, srcBlur, Size(3, 3));
Canny(srcBlur, srcCanny, 0, 100, 3, true);
vector<vector<Point> > contours;
findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int largest_contour_index = getMaxAreaContourId(contours);
Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(255, 255, 255);
drawContours(drawing, contours, i, color, 2);
}
vector<Point> ConvexHullPoints = contoursConvexHull(contours, largest_contour_index);
polylines(image, ConvexHullPoints, true, Scalar(0, 0, 255), 2);
return image;
}
vector<Point> contoursConvexHull(vector<vector<Point> > contours, int index)
{
vector<Point> result;
vector<Point> pts;
for (size_t j = 0; j< contours[index].size(); j++)
pts.push_back(contours[index][j]);
convexHull(pts, result);
return result;
}
int getMaxAreaContourId(vector <vector<cv::Point>> contours)
{
double maxArea = 0;
int maxAreaContourId = -1;
for (int j = 0; j < contours.size(); j++) {
double newArea = cv::contourArea(contours.at(j));
if (newArea > maxArea) {
maxArea = newArea;
maxAreaContourId = j;
}
}
return maxAreaContourId; // return only after every contour has been checked
}
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
Mat temp;
inRange(in, low, high, temp);
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
Mat srcBlur, srcCanny;
blur(temp, srcBlur, Size(3, 3));
Canny(srcBlur, srcCanny, 0, 100, 3, true);
vector<vector<Point> > contours;
findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(255, 255, 255);
drawContours(drawing, contours, i, color, 2);
}
polylines(image, contours, true, Scalar(0, 0, 255), 2);
return image; // return once, after all contours have been drawn
}
Yet I have no idea how to count the different shapes (hearts, lightnings, shields, numbers).
I would be grateful if anybody could give me a tip or a solution for how to do the job.
1) Sorry for my bad English.
2) We had no OpenCV in classes [only basic C++].
3) I tried to find anything useful on the internet, but even when I found something, I couldn't understand what was happening in the code.
Your project can be split into three steps:
1) find the dice;
2) extract the shapes from the visible face of each die;
3) count the shapes on that face.
For the first step, among all the possible approaches, I think saliency-map approaches can help.
Saliency maps are a family of segmentation algorithms that aim to detect the parts of an image that are most likely to attract visual attention.
OpenCV has a saliency API that already implements several saliency algorithms, and for each of them you can get a segmentation map.
Considering the example image you gave, it is highly likely that the saliency will focus on the dice.
From this you can extract the dice as ROIs from the original image.
For step 2, saliency algorithms may also fit... or not; that depends a lot on the statistical criteria used by the algorithm.
However, the previously extracted ROIs should contain only the face of the die that holds the shapes you want to count in step 3, so approaches based on contour detection may give quite good results.
Once you have the shapes, among the ways to count each kind of shape you can use template matching (also already implemented in OpenCV), a clustering approach based on a shape-sensitive metric (Hausdorff, Dice, ...), or many others.
Here is some code that should help you with the first two steps; a short template-matching sketch for step 3 follows after it.
#ifndef _DEBUG
#define _DEBUG
#endif
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/saliency.hpp>
#include <opencv2/highgui.hpp>
#include <list>
CV_EXPORTS_W void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays mv, cv::OutputArrayOfArrays mv2 = cv::noArray());
int main()
{
cv::Mat tmp = cv::imread("C:\\Desktop\\dices.jpg");
if(!tmp.empty())
{
cv::imshow("source",tmp);
cv::waitKey(-1);
}
std::vector<cv::Mat> rois;
get_regions_of_interest(tmp,rois);
std::cout << "Hello World!" << std::endl;
return 0;
}
void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays _rois, cv::OutputArrayOfArrays _contours)
{
// Check that the first argument is an image and the second a vector of images.
CV_Assert(_src.isMat() && !_src.depth() && (_src.channels() == 1 || _src.channels() == 3) && _rois.isMatVector() && (!_contours.needed() || (_contours.needed() && _contours.isMatVector()) ) );
static cv::Ptr<cv::saliency::StaticSaliencySpectralResidual> saliency;
if(!saliency)
saliency = cv::saliency::StaticSaliencySpectralResidual::create();
cv::Mat src = _src.getMat();
cv::Mat gray;
if(src.depth() == src.type())
gray = src;
else
cv::cvtColor(src,gray,cv::COLOR_BGR2GRAY);
bool is_ctr_needed = _contours.needed();
std::list<cv::Mat> final_ctrs;
// Step 1) Process the saliency in order to segment the dice.
cv::Mat saliency_map;
cv::Mat binary_map;
saliency->computeSaliency(src,saliency_map);
saliency->computeBinaryMap(saliency_map,binary_map);
saliency_map.release();
// Step 2) From the binary map get the regions of interest.
cv::Mat1i stats;
std::vector<cv::Mat> rois;
cv::Mat labels;
cv::Mat centroids;
cv::connectedComponentsWithStats(binary_map, labels, stats, centroids);
labels.release();
centroids.release();
// prepare the memory
rois.reserve(stats.rows-1);
// Drop the last column (area); each remaining row holds left, top, width and height, i.e. a cv::Rect.
stats = stats.colRange(0,stats.cols-1);
// Extract the rois.
for(int i=0;i<stats.rows;i++)
{
cv::Rect roi = *reinterpret_cast<cv::Rect*>(stats.ptr<int>(i));
if(static_cast<std::size_t>(roi.area()) == gray.total())
continue;
rois.push_back(gray(roi));
#ifdef _DEBUG
cv::imshow("roi_"+std::to_string(i),gray(roi));
#endif
}
// Step 3) Refine.
// Because the final number of shapes cannot be determined in advance, it is better to use a linked list than a vector.
// In practice, unless there is a huge number of elements to work with, the performance will be almost the same.
std::list<cv::Mat> shapes;
int cnt=0;
for(const cv::Mat& roi : rois)
{
cv::Mat tmp = roi.clone();
// Slightly sharpen the region contours
cv::morphologyEx(tmp,tmp, cv::MORPH_CLOSE, cv::noArray());
// Reduce the influence of local inhomogeneous illumination.
cv::GaussianBlur(tmp,tmp,cv::Size(31,31), 5);
cv::Mat thresh;
// Binarize the image.
cv::threshold(roi,thresh,0.,255.,cv::THRESH_BINARY | cv::THRESH_OTSU);
#ifdef _DEBUG
cv::imshow("thresh"+std::to_string(cnt++),thresh);
#endif
// Find the contours of each sub-region of interest
std::vector<cv::Mat> contours;
cv::findContours(thresh, contours, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
cv::Mat dc;
cv::merge(std::vector<cv::Mat>(3,thresh),dc);
// cv::drawContours(dc, contours,-1,cv::Scalar(0.,0.,255),2);
// cv::imshow("ctrs"+std::to_string(cnt),dc);
// Extract the sub-regions
if(is_ctr_needed)
{
for(const cv::Mat& ctrs: contours)
{
cv::Rect croi = cv::boundingRect(ctrs);
// If the sub-region is too big or too small, it is discarded
if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
continue;
final_ctrs.push_back(ctrs);
shapes.push_back(roi(croi));
#ifdef _DEBUG
cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
}
}
else
{
for(const cv::Mat& ctrs: contours)
{
cv::Rect croi = cv::boundingRect(ctrs);
// If the sub-region is too big or too small, it is discarded
if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
continue;
shapes.push_back(roi(croi));
#ifdef _DEBUG
cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
}
}
}
#ifdef _DEBUG
cv::waitKey(-1);
#endif
// Final Step: set the output
_rois.create(shapes.size(),1,CV_8U);
_rois.assign(std::vector<cv::Mat>(shapes.begin(),shapes.end()));
if(is_ctr_needed)
{
_contours.create(final_ctrs.size(),1,CV_32SC2);
_contours.assign(std::vector<cv::Mat>(final_ctrs.begin(), final_ctrs.end()));
}
}
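For step 3, a minimal sketch of the template-matching idea mentioned above: it assumes you have cropped one grayscale reference image per symbol (heart, lightning, shield, number) beforehand, and that each die-face ROI is larger than the template. The 0.8 score threshold and the function name are assumptions to tune, not part of the code above.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Counts how many times one symbol template appears in a die-face ROI by
// repeatedly taking the best match and suppressing it.
int count_symbol(const cv::Mat& face_gray, const cv::Mat& symbol_template)
{
    cv::Mat response;
    cv::matchTemplate(face_gray, symbol_template, response, cv::TM_CCOEFF_NORMED);
    int count = 0;
    for (;;)
    {
        double maxVal;
        cv::Point maxLoc;
        cv::minMaxLoc(response, nullptr, &maxVal, nullptr, &maxLoc);
        if (maxVal < 0.8) // no sufficiently good match left (threshold is an assumption)
            break;
        count++;
        // Suppress this match so the next best one can be found.
        cv::rectangle(response,
                      cv::Rect(maxLoc.x - symbol_template.cols / 2,
                               maxLoc.y - symbol_template.rows / 2,
                               symbol_template.cols, symbol_template.rows),
                      cv::Scalar(0), cv::FILLED);
    }
    return count;
}
You would call this once per symbol type on each face ROI returned by get_regions_of_interest, keeping whichever template gives the most confident matches.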
My goal is to find the biggest contour in a captured webcam frame and then, once it is found, measure its size and decide whether the object should be rejected or accepted.
Just to explain the objective of this project: I am currently working for a hygiene products manufacturer. There we have, in total, 6 workers who are responsible for sorting the defective soap bars out of the production line. So, in order to free this workforce for other activities, I am trying to write an algorithm to "replace" their eyes.
I've tried several methods along the way (findContours, SimpleBlobDetector, Canny, object tracking), but the problem I've been facing is that I can't seem to find a way to effectively find the biggest object in a webcam image, measure its size and then decide whether to discard or accept it.
Below is my newest code to find the biggest contour in a webcam stream:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv/cv.h"
#include "opencv2\imgproc\imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, const char** argv)
{
Mat src;
Mat imgGrayScale;
Mat imgCanny;
Mat imgBlurred;
/// Load source image
VideoCapture capWebcam(0);
if (capWebcam.isOpened() == false)
{
cout << "Não foi possível abrir webcam!" << endl;
return(0);
}
while (capWebcam.isOpened())
{
bool blnframe = capWebcam.read(src);
if (!blnframe || src.empty())
{
cout << "Erro! Frame não lido!\n";
break;
}
int largest_area = 0;
int largest_contour_index = 0;
Rect bounding_rect;
Mat thr(src.rows, src.cols, CV_8UC1);
Mat dst(src.rows, src.cols, CV_8UC1, Scalar::all(0));
cvtColor(src, imgGrayScale, CV_BGR2GRAY); //Convert to gray
GaussianBlur(imgGrayScale, imgBlurred, Size(5, 5), 1.8);
Canny(imgBlurred, imgCanny, 45, 90); //Threshold the gray
vector<vector<Point>> contours; // Vector for storing contour
vector<Vec4i> hierarchy;
findContours(imgCanny, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); // Find the contours in the image
for (int i = 0; i < contours.size(); i++) // iterate through each contour.
{
double a = contourArea(contours[i], false); // Find the area of contour
if (a > largest_area)
{
largest_area = a;
largest_contour_index = i; //Store the index of largest contour
bounding_rect = boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
}
}
Scalar color(255, 255, 255);
drawContours(dst, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy); // Draw the largest contour using previously stored index.
rectangle(src, bounding_rect, Scalar(0, 255, 0), 1, 8, 0);
imshow("src", src);
imshow("largest Contour", dst);
waitKey(27);
}
return(0);
}
And here are the result windows that the program generates, and the image of the object that I want to detect and sort.
Thank you all in advance for any clues on how to achieve my goal.
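Not a full answer, but one simple way to turn the bounding box into the accept/reject decision is to compare its pixel area against calibrated limits. The helper below is only a sketch under that assumption; the limits are placeholders that depend on camera distance and soap-bar size, and the function name is mine, not part of the original code.
#include <opencv2/core/core.hpp>

// A minimal sketch, assuming a fixed camera so that pixel area maps
// consistently to physical size; minAreaPx/maxAreaPx must be calibrated
// against known-good soap bars.
bool isAcceptable(const cv::Rect& boundingBox, int minAreaPx = 5000, int maxAreaPx = 20000)
{
    int area = boundingBox.width * boundingBox.height;
    return area >= minAreaPx && area <= maxAreaPx;
}
Inside the capture loop this could be called right after the contour loop, for example drawing the rectangle in green when isAcceptable(bounding_rect) returns true and in red otherwise.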
I'm trying to count objects in an image. I'm using a photo of logs, and I apply several processing steps to get a binary image.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
//load image
Mat img = imread("kayu.jpg", CV_LOAD_IMAGE_COLOR);
if(img.empty())
return -1;
//namedWindow( "kayu", CV_WINDOW_AUTOSIZE );
imshow("kayu", img);
//convert to b/w
Mat bw;
cvtColor(img, bw, CV_BGR2GRAY);
imshow("bw1", bw);
threshold(bw, bw, 40, 255, CV_THRESH_BINARY);
imshow("bw", bw);
//distance transform & normalization
Mat dist;
distanceTransform(bw, dist, CV_DIST_L2, 3);
normalize(dist, dist, 0, 2., NORM_MINMAX);
imshow("dist", dist);
//threshold to draw line
threshold(dist, dist, .5, 1., CV_THRESH_BINARY);
imshow("dist2", dist);
//dist = bw;
//dilation
Mat dilation, erotion, element;
int dilation_type = MORPH_ELLIPSE;
int dilation_size = 17;
element = getStructuringElement(dilation_type, Size(2*dilation_size + 1, 2*dilation_size+1), Point(dilation_size, dilation_size ));
erode(dist, erotion, element);
int erotionCount = 0;
for(int i=0; i<erotionCount; i++){
erode(erotion, erotion, element);
}
imshow("erotion", erotion);
dilate(erotion, dilation, element);
imshow("dilation", dilation);
waitKey(0);
return 0;
}
As you can see, I use erosion and dilation to get a better circular shape for each log. My problem is that I'm stuck at counting the objects. I tried SimpleBlobDetector but got nothing, because when I try to convert the result of the "dilation" step to CV_8U, the white objects disappear. I also got an error when I used findContours(); it said something about the number of channels of the image. I can't show the error here, because there were too many steps and I have already deleted them from my code.
By the way, at the end I have a 1-channel image.
Can I just use it for counting, or do I have to convert it first, and what is the best method to do that?
Two simple steps:
Find contours for the binarized image.
Get the count of the contours.
Code:
int count_trees(const cv::Mat& bin_image){
cv::Mat img;
if(bin_image.channels()>1){
cv::cvtColor(bin_image,img,cv::COLOR_BGR2GRAY);
}
else{
img=bin_image.clone();
}
if(img.type()!=CV_8UC1){
img*=255.f; //This could be stupid, but I do not have an environment to try it
img.convertTo(img,CV_8UC1);
}
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours( img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
return contours.size();
}
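For reference, a possible way to call this on the dilation image from the question (this usage is my assumption): after distanceTransform, normalize and threshold, that image is single-channel float in the [0, 1] range, which is exactly the case the img *= 255.f branch above converts to CV_8UC1 before findContours.
// In main(), after imshow("dilation", dilation):
int n = count_trees(dilation); // the function clones the input and converts it to CV_8UC1 internally
cout << "Number of logs: " << n << endl;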
I have the same problem, here's an idea I'm about to implement.
1) Represent your image as an array of integers; 0 = black, 1 = white.
2) set N = 2;
3) Scan your image, pixel-by-pixel. Whenever you find a white pixel, activate a flood-fill algorithm, starting at the pixel just found; paint the region with the value of N++;
4) Iterate 3 until you reach the last pixel. (N-2) is the number of regions found.
This method depends on the shape of the objects; mine are more chaotic than yours (wish me luck..). I'll make use of a recursive flood-fill recipe found somewhere (maybe Rosetta Code).
This solution also makes it easy to compute the size of each region.
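A minimal OpenCV sketch of that idea, using cv::floodFill instead of a hand-written recursive fill; it assumes an 8-bit binary image where objects are 255 and the background is 0, and the function name is mine.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Counts connected white regions by flood-filling each one with a new label.
// Labels are stored in the 8-bit image itself, so this simple version only
// works for fewer than ~250 objects; for more, use a CV_32S label image.
int countRegions(cv::Mat& bin)              // bin is modified in place
{
    int label = 2;                          // 0 = background, 255 = unvisited object
    for (int y = 0; y < bin.rows; y++)
        for (int x = 0; x < bin.cols; x++)
            if (bin.at<uchar>(y, x) == 255) // found an unvisited white pixel
                cv::floodFill(bin, cv::Point(x, y), cv::Scalar(label++));
    return label - 2;                       // number of regions found
}
This also keeps the per-region sizes available: after the loop, counting the pixels that carry each label gives the area of that region.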
Try applying this to the image you deleted from your code:
// count
int count = 0;
for (int i = 0; i < contours.size(); i = hierarchy[i][0]) // iterate over each top-level contour
{
Rect r = boundingRect(contours[i]);
if (hierarchy[i][2] < 0) { // the contour has no child, so it is an innermost blob
rectangle(canny_output, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(20, 50, 255), 3, 8, 0);
count++;
}
}
cout << "Number of contours = " << count << endl;
imshow("src", src);
imshow("contour", dst);
waitKey(0);
I have a problem and I am not quite sure how to get past it. I want to create an animation that smoothly changes the colour of the contour which I have extracted. I assume that all I have to do is use a while loop (for the animation) and change the value of one of the r, g, b variables (with a for loop), but I'm not sure HOW to do that properly.
Thanks in advance!
using namespace cv;
using namespace std;
int main()
{
Mat OurImage, img, bin, anim, gray;
string Destination = "rot.jpg";
OurImage = imread(Destination, CV_LOAD_IMAGE_COLOR);
if(! OurImage.data)
{
printf("No image!");
getchar();
return -1;
}
int r = 0, g = 255, b = 255;
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
cvtColor(OurImage,gray,CV_RGB2GRAY);
Canny( gray, img, 100, 200,3);
findContours(img,contours,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
Mat drawing = Mat::zeros(img.size(),CV_8UC3);
for(int i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(r, g, b);
drawContours(drawing, contours,i,color,2,8,hierarchy, 0,Point());
}
namedWindow("WINDOW", CV_WINDOW_AUTOSIZE);
while(true)
{
// NO IDEA WHAT THE FOR LOOP SHOULD LOOK LIKE HERE
printf(".");
anim = drawing.clone();
r = r+5;
imshow("WINDOW",anim);
if(waitKey(20) == 27)
break;
}
waitKey(0);
}
EDIT
I have managed to make it work from yellow to white (from 0 to 255), but now I don't know how to make it go back again and force the program to keep doing that until I click. Do I use a flag/if? Another for loop?
while(true)
{
for(int i = 0; i < contours.size(); i++)
{
r=r+5;
printf(".");
Scalar color = Scalar(r, g, b);
drawing = drawing.clone();
drawContours(drawing, contours,i,color,2,8,hierarchy, 0,Point());
}
imshow("WINDOW",drawing);
if(waitKey(20) == 27)
break;
}
This code will continuously change the color of the edges from yellow to white and back.
You can change the animation speed with delay.
Note that you can change only the edge points using setTo with the edge mask. If you need to draw thicker edges, you can call drawContours with index -1 to draw all contours, without the for loop.
Keep also in mind that OpenCV highgui is mainly for debugging purposes. Anything more than a toy program like this one should be done with an appropriate GUI library (like, among others, Qt).
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
// Load the input image
Mat3b img = imread("path_to_image");
int step = 5; // Color step
int delay = 30; // Animation speed
bool forward = true;
Scalar color(0,255,255);
Mat1b gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
Mat1b edges;
Canny(gray, edges, 400, 200);
vector<vector<Point>> contours;
findContours(edges, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
Mat3b canvas(img.rows, img.cols, Vec3b(0,0,0));
while (true)
{
imshow("Draw", canvas);
if (waitKey(delay) == 27 /*ESC*/) break;
// Update color
color[0] = color[0] + ((forward) ? step : -step);
// Deal with direction
if (forward && color[0] > 255) {
forward = false;
color[0] = 255;
}
if (!forward && color[0] < 0) {
forward = true;
color[0] = 0;
}
// Update only edge points
//canvas.setTo(color, edges);
// Draw a thick contour
drawContours(canvas, contours, -1, color, 2);
}
return 0;
}
I'm trying to convert the bwareaopen function to OpenCV C++ ...
I found this code, but it is not working correctly.
So if anyone has solved this problem and can help me, I would be really delighted.
void removeSmallBlobs(cv::Mat& im, double size)
{
// Only accept CV_8UC1
if (im.channels() != 1 || im.type() != CV_8U)
return;
// Find all contours
std::vector<std::vector<cv::Point> > contours;
cv::findContours(im.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++)
{
// Calculate contour area
double area = cv::contourArea(contours[i]);
// Remove small objects by drawing the contour with black color
if (area > 0 && area <= size)
cv::drawContours(im, contours, i, CV_RGB(0, 0, 0), -1);
}
}
I think you need an opening morphological operation (a minimal sketch is shown below).
Or take a look here: How to filter small segments from image in OpenCV?
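A minimal sketch of that opening, applied to the same binary image im that removeSmallBlobs receives; the 5x5 elliptical kernel is an assumption, and its size controls the smallest blob that survives.
cv::Mat opened;
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(im, opened, cv::MORPH_OPEN, kernel); // erosion followed by dilation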
I am using cvBlobsLib to implement such a function in OpenCV. You should first compile and include cvBlobsLib in your project. The library link is here: cvBlobsLib.
Because the MATLAB canny function does a Gaussian blur by default but OpenCV's doesn't, you should first Gaussian-blur the image to reduce noise. Then you detect the Canny edges, and then you delete the edges that are shorter or longer than a given length, measured in pixels.
Here is my code.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "BlobResult.h"
using namespace std;
using namespace cv;
void bwareaopen( Mat& img, int size);
int main()
{
Mat img;
img = imread("1.jpg");
Mat gray;
cvtColor(img,gray,CV_BGR2GRAY);
GaussianBlur( gray, gray, Size(7, 7), 2, 2);
Mat edges;
Canny(gray,edges,50,500,5,true);
imshow("raw edge",edges);
bwareaopen( edges, 800);
imshow("edge",edges);
waitKey(0);
}
void bwareaopen( Mat& img, int size)
{
CBlobResult blobs;
blobs = CBlobResult( img ,Mat(),4);
blobs.Filter( blobs, B_INCLUDE, CBlobGetLength(), B_GREATER, size );
Mat newimg(img.size(),img.type());
newimg.setTo(0);
for(int i=0;i<blobs.GetNumBlobs();i++)
{
blobs.GetBlob(i)->FillBlob(newimg,CV_RGB(255,255,255),0,0,true);
}
img = newimg;
}
I had the same problem. I changed the line
if (area > 0 && area <= size) to
if (area <= size)
This is after I found that many small blobs had area 0. That worked for me.
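For completeness, a short usage sketch of removeSmallBlobs with that change applied; the file name and the 50-pixel threshold below are placeholders, not values from the original post.
cv::Mat bin = cv::imread("binary.png", CV_LOAD_IMAGE_GRAYSCALE); // hypothetical input, must be CV_8UC1
removeSmallBlobs(bin, 50.0); // blobs whose contour area is <= 50 px are filled with black
cv::imshow("cleaned", bin);
cv::waitKey(0);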