OpenCV Dilation and Skeleton (C++)

I have this image:
And I applied dilation with this code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;

Mat src, dilation_dst;   // global images used by the trackbar callback
int dilation_elem = 0;
int dilation_size = 0;
int const max_elem = 2;
int const max_kernel_size = 21;
void Dilation( int, void* );
int main( int argc, char** argv )
{
src = imread("a18.png");
if( !src.data )
{ return -1; }
namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
cvMoveWindow( "Dilation Demo", src.cols, 0 );
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation );
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation );
//int dilation_size =7;
/// Default start
Dilation( 0, 0 );
waitKey(0);
return 0;
}
void Dilation( int, void* )
{
int dilation_type;
if( dilation_elem == 0 ){ dilation_type = MORPH_RECT; }
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( dilation_type,
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
Point( dilation_size, dilation_size ) );
dilate( src, dilation_dst, element );
imshow( "Dilation Demo", dilation_dst );
imwrite("a18d.png",dilation_dst);
}
And after this step I get this result:
And the final step is the skeleton:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
int main()
{
cv::Mat img = cv::imread("a18d.png", 0);
cv::threshold(img, img, 127, 255, cv::THRESH_BINARY);
cv::Mat skel(img.size(), CV_8UC1, cv::Scalar(0));
cv::Mat temp;
cv::Mat eroded;
cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3,3));
bool done;
do
{
cv::erode(img, eroded, element);
cv::dilate(eroded, temp, element); // temp = open(img)
cv::subtract(img, temp, temp);
cv::bitwise_or(skel, temp, skel);
eroded.copyTo(img);
done = (cv::countNonZero(img) == 0);
} while (!done);
cv::imshow("Skeleton", skel);
cv::imwrite("18s.png",skel);
cv::waitKey(0);
return 0;
}
And I have this image:
But I want an image like this:
What can I do to get that? What is the problem?
These steps were also done for image 4, and the result was good.

OpenCV doesn't seem to have a shrink (as opposed to an erode) operation, or at least I can't find one. Try mine:
https://github.com/MalcolmMcLean/binaryimagelibrary/blob/master/medialaxistransform.c
Try just using the function "thin" instead of eroding your image.
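If the opencv_contrib modules are available, cv::ximgproc::thinning gives a one-pixel-wide skeleton directly; here is a minimal sketch, assuming the dilated image from the first step was saved as a18d.png:
#include <opencv2/opencv.hpp>
#include <opencv2/ximgproc.hpp>  // requires the opencv_contrib ximgproc module

int main()
{
    // Load the dilated image as grayscale and binarize it, as in the question.
    cv::Mat img = cv::imread("a18d.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return -1;
    cv::threshold(img, img, 127, 255, cv::THRESH_BINARY);

    // Zhang-Suen thinning reduces each blob to a one-pixel-wide line.
    cv::Mat thinned;
    cv::ximgproc::thinning(img, thinned, cv::ximgproc::THINNING_ZHANGSUEN);

    cv::imshow("Thinned", thinned);
    cv::imwrite("a18t.png", thinned);
    cv::waitKey(0);
    return 0;
}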

Related

Segmentation fault error in OpenCV C++

I am trying to write a program with OpenCV that decides whether a human has approached the camera. After I run the executable, I get the captured video for a few seconds and then hit a segmentation fault.
The code is as follows.
Here are the headers:
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
//define static variable
static int cApp = 0;//number of approached frame
static double last = 0;
//define functions
void detectAndDisplay( Mat frame );
bool computeArea( double width, double height, double lastArea);
double runningAverage(int M);
//define opencv function and classifier
String upperbody_cascade_name = "home/pi/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml";
CascadeClassifier upper_cascade;
String window_name = "Capture - upper body detection";
Here is the main function:
int main( void )
{
//define variable
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
upper_cascade.load("/home/pi/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml");
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
char c = (char)waitKey(10);
if( c == 27 ) { break; } // escape
}
capture.release();
return 0;
}
Here is the detectAndDisplay function:
void detectAndDisplay( Mat frame )
{
std::vector<Rect> upperbodys;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect upperbodys
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
Point center( upperbodys[0].x + upperbodys[0].width/2, upperbodys[0].y + upperbodys[0].height/2 );
ellipse( frame, center, Size( upperbodys[0].width/2, upperbodys[0].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
bool ifApproached = computeArea(upperbodys[0].width/2, upperbodys[0].height/2, last);
if (ifApproached == true) {
cApp++;
}
if (cApp == 3) {
cout << "have approached" << endl;
cApp = cApp - 3;
}
//-- Show what you got
imshow( window_name, frame );
}
Here is the computeArea function:
bool computeArea( double width, double height, double lastArea) {
double newArea = width * height;
bool ifApproached = false;
//double presentArea = newArea;
double presentArea = runningAverage(newArea);
double DifferenceBewteenAreas = presentArea - lastArea;
if (DifferenceBewteenAreas > 1) {//threshold
ifApproached = true;
}
last = presentArea;
return ifApproached;
}
Here is the runningAverage function:
double runningAverage(int M) {
//M is measurement
//#define LM_SIZE 5
static int LM[5];
static int index =0;
static long sum = 0;
static int count =0;
//keep sum updated to improve speed
sum = sum - LM[index];
LM[index] = M;
sum = sum + LM[index];
index++;
index = index % 5;
if (count < 5) {
count++;
}
return (double)(sum / (double)count);
}
I have searched many OpenCV segmentation fault questions. Some said this kind of segmentation fault is caused by incorrect array use, but my code makes little use of arrays. Others said misuse of function arguments could also cause this kind of error; I agree with that, and some of my arguments could be wrong here.
Actually, I found that I should not use upperbodys[0] in the code, because sometimes no object is detected at all, so an out-of-bounds read can happen. I used upperbodys[i] inside a loop instead, and it works well now.
void detectAndDisplay( Mat frame )
{
std::vector<Rect> upperbodys;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect upperbodys
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
int size = upperbodys.size();
double newArea = -1;
for (int i = 0 ; i < size; i++) {
Point center( upperbodys[i].x + upperbodys[i].width/2, upperbodys[i].y + upperbodys[i].height/2 );
ellipse( frame, center, Size( upperbodys[i].width/2, upperbodys[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
//bool ifApproached = computeArea(upperbodys[i].width/2, upperbodys[i].height/2, last);
//////////////////////////////////////////
newArea = upperbodys[i].width/2 * upperbodys[i].height/2;
if (newArea != -1) {
cout << "UpperBodys has value, index = " << i << endl;
break;
}
}
bool ifApproached = false;
//double presentArea = runningAverage(newArea);
double presentArea = newArea;
double DifferenceBewteenAreas = presentArea - last;
if (DifferenceBewteenAreas > 1) {//threshold
ifApproached = true;
}
last = presentArea;
//////////////////////////////////////////
if (ifApproached == true) {
cApp++;
}
if (cApp == 3) {
cout << "have approached" << endl;
cApp = cApp - 3;
}
//-- Show what you got
imshow( window_name, frame );
}
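If only the first detection matters, the minimal guard is simply to check that the vector is non-empty before indexing it; a sketch of just that change (the rest of the function stays as in the question):
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
if ( upperbodys.empty() )
{
    // Nothing detected in this frame: just show it and return instead of
    // reading upperbodys[0], which would be out of bounds.
    imshow( window_name, frame );
    return;
}
Point center( upperbodys[0].x + upperbodys[0].width/2, upperbodys[0].y + upperbodys[0].height/2 );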

No output from OpenCV in Visual Studio C++

I am new to OpenCV. This code runs in Visual Studio 2017 but does not display any result, even after entering the command-line argument. The program builds successfully and runs without any error or failure notification. I have tried several times, but I have not found a solution to this problem. Can someone please help out?
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
Mat src, erosion_dst, dilation_dst;
int erosion_elem = 0;
int erosion_size = 0;
int dilation_elem = 0;
int dilation_size = 0;
int const max_elem = 2;
int const max_kernel_size = 21;
void Erosion(int, void*);
void Dilation(int, void*);
int main(int, char** argv)
{
src = imread(argv[1], IMREAD_COLOR);
if (src.empty())
{
return -1;
}
namedWindow("Erosion Demo", WINDOW_AUTOSIZE);
namedWindow("Dilation Demo", WINDOW_AUTOSIZE);
moveWindow("Dilation Demo", src.cols, 0);
createTrackbar("Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
Erosion);
createTrackbar("Kernel size:\n 2n +1", "Erosion Demo",
&erosion_size, max_kernel_size,
Erosion);
createTrackbar("Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation);
createTrackbar("Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation);
Erosion(0, 0);
Dilation(0, 0);
waitKey(0);
return 0;
}
void Erosion(int, void*)
{
int erosion_type = 0;
if (erosion_elem == 0) { erosion_type = MORPH_RECT; }
else if (erosion_elem == 1) { erosion_type = MORPH_CROSS; }
else if (erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement(erosion_type,
Size(2 * erosion_size + 1, 2 * erosion_size + 1),
Point(erosion_size, erosion_size));
erode(src, erosion_dst, element);
imshow("Erosion Demo", erosion_dst);
}
void Dilation(int, void*)
{
int dilation_type = 0;
if (dilation_elem == 0) { dilation_type = MORPH_RECT; }
else if (dilation_elem == 1) { dilation_type = MORPH_CROSS; }
else if (dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement(dilation_type,
Size(2 * dilation_size + 1, 2 * dilation_size + 1),
Point(dilation_size, dilation_size));
dilate(src, dilation_dst, element);
imshow("Dilation Demo", dilation_dst);
}
Thank you!
You may have a problem with the arguments...
Try replacing src = imread(argv[1], IMREAD_COLOR); with
src = imread("myPathTo/lena.jpg", IMREAD_COLOR);
I've tried it with lena and it works fine for me:
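To keep the command-line version, it may also help to check that an argument was actually passed and that the file could be read, so the program reports a failure instead of silently returning -1; a minimal sketch of that check:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

int main(int argc, char** argv)
{
    // Print a usage message instead of reading argv[1] when it does not exist.
    if (argc < 2)
    {
        std::cout << "Usage: " << argv[0] << " <image_path>" << std::endl;
        return -1;
    }
    cv::Mat src = cv::imread(argv[1], cv::IMREAD_COLOR);
    // imread returns an empty Mat if the path is wrong or the file is unreadable.
    if (src.empty())
    {
        std::cout << "Could not read image: " << argv[1] << std::endl;
        return -1;
    }
    cv::imshow("Loaded", src);
    cv::waitKey(0);
    return 0;
}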

OpenCV circle detection bounding box

I'm working on software that uses OpenCV for circle detection. I think the main problem is the image itself. I first tried to detect the circles with HoughCircles, with bad results. Then I tried to follow the instructions in this post, but it doesn't work either. Maybe I need some help pre-processing the image. Does anyone have any other ideas for detecting the edges?
Original image:
Other similar images:
http://imgur.com/a/eSKFr
Below I have posted the code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

//Global variables
Mat src; Mat src_gray, threshold_output, element,dilated,eroded1, eroded2;
int thresh = 125;
int const max_value = 255;
int const max_BINARY_value = 255;
RNG rng(12345);
int s_ero1 =1;
int s_dil = 2;
int s_ero2 = 1;
int max_s = 50;
string source_window = "Thresh";
string TrackbarName = "Dilated";
string TrackbarName1 = "Eroded1";
string TrackbarName2 = "Eroded2";
/// Function header
void thresh_callback(int, void* );
void dilate_trackbar(int, void*);
void erode_trackbar1(int,void*);
void erode_trackbar2(int,void*);
int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( "/media/Dati/image01.tif", 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
namedWindow( "source", WINDOW_NORMAL );
imshow( "source", src );
waitKey();
namedWindow( source_window, WINDOW_NORMAL );
//Create trackbar threshold
createTrackbar( " Threshold:", source_window, &thresh, max_value, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
namedWindow( TrackbarName1, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName1, &s_ero1, max_s, erode_trackbar1);
erode_trackbar1(0,0);
waitKey();
namedWindow( TrackbarName, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName, &s_dil, max_s, dilate_trackbar);
dilate_trackbar(0,0);
waitKey();
namedWindow( TrackbarName2, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName2, &s_ero2, max_s, erode_trackbar2);
erode_trackbar2(0,0);
waitKey();
return(0);
}
/**
* #function bounding_box
*/
void bounding_box(Mat m){
int max_point_pos = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
// Find contours
findContours( m, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0) );
cout<<"Numero di blob: "<< contours.size()<<endl;
for(int i = 1; i < contours.size(); i++){
max_point_pos = contours[max_point_pos].size() > contours[i].size()? max_point_pos : i;
}
int max_point = contours[max_point_pos].size();
cout<< "il blob con più punti è associato alla posizione : " << max_point_pos << " con " << max_point << " punti"<< endl;
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<Point2f>center( contours.size() );
vector<float>radius( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minEnclosingCircle( (Mat)contours_poly[i], center[i], radius[i] );
}
/// Draw polygonal contour + bounding rects + circles
Mat drawing = src.clone();
for( size_t i = 0; i< contours.size(); i++ )
{
if(contours[i].size() > 0.6*max_point){
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
//drawContours( drawing, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point() );
//rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 7, 8, 0 );
}
}
/// Show in a window
namedWindow( "Contours", WINDOW_NORMAL );
imshow( "Contours", drawing );
}
/**
* #function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, max_BINARY_value, THRESH_BINARY_INV);
imshow(source_window, threshold_output);
}
/**
* #function dilate_trackbar
* #brief Callback for trackbar
*/
void dilate_trackbar( int, void* )
{
dilated = threshold_output.clone();
element = getStructuringElement(MORPH_ELLIPSE,Size(s_dil, s_dil) , Point(-1,-1));
dilate(dilated,dilated,element,Point(-1,-1),1);
imshow(TrackbarName, dilated);
}
/**
* #function erode_trackbar
* #brief Callback for trackbar
*/
void erode_trackbar1( int, void*)
{
eroded1 = threshold_output.clone();
element = getStructuringElement(MORPH_ELLIPSE,Size(s_ero1, s_ero1) , Point(-1,-1));
erode(eroded1,eroded1,element,Point(-1,-1),1);
imshow(TrackbarName1, eroded1);
}
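Since HoughCircles was already tried, it might still be worth re-running it on a more heavily smoothed image before giving up on it; the parameters below are only guesses and would need tuning for these images, so treat this as a rough sketch:
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    // Path taken from the question; adjust as needed.
    cv::Mat image = cv::imread("/media/Dati/image01.tif", cv::IMREAD_COLOR);
    if (image.empty()) return -1;

    cv::Mat gray;
    cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    // Heavier smoothing often helps HoughCircles on noisy or textured images.
    cv::medianBlur(gray, gray, 7);

    std::vector<cv::Vec3f> circles;
    // dp = 1, minDist = rows/8; the Canny and accumulator thresholds (100, 30)
    // and the radius range (20..200) are guesses that must be tuned.
    cv::HoughCircles(gray, circles, cv::HOUGH_GRADIENT, 1, gray.rows / 8,
                     100, 30, 20, 200);

    for (size_t i = 0; i < circles.size(); i++)
    {
        cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        cv::circle(image, center, radius, cv::Scalar(0, 255, 0), 2);
    }

    cv::namedWindow("Circles", cv::WINDOW_NORMAL);
    cv::imshow("Circles", image);
    cv::waitKey(0);
    return 0;
}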

Applying medianBlur as well as Laplacian and threshold filters

I have a function with the following signature:
Mat cartoonifyImage( Mat, Mat );
I also have a VS2010 program, shown below, in which I apply a number of filters to a webcam stream, as taught in this book: Mastering OpenCV
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
// imshow( "Camera Test", cameraFrame );
Mat displayedFrame( cameraFrame.size(), CV_8UC3 );
cartoonifyImage( cameraFrame, displayedFrame );
imshow( "Cartoonifier!", displayedFrame );
int keypress = waitKey( 20 );
if( keypress == 27 ) break;
}
}
Here is my function definition:
Mat cartoonifyImage( Mat srcColor, Mat mask )
{
Mat gray, edges;
cvtColor( srcColor, gray, CV_BGR2GRAY );
const int MEDIAN_BLUR_FILTER_SIZE = 7;
const int LAPLACIAN_FILTER_SIZE = 5;
const int EDGES_THRESHOLD = 80;
medianBlur( gray, gray, MEDIAN_BLUR_FILTER_SIZE );
Laplacian( gray, edges, CV_8U, LAPLACIAN_FILTER_SIZE );
threshold( edges, mask, EDGES_THRESHOLD, 255, THRESH_BINARY_INV );
return( mask );
}
When I run the program, I get a blank (gray) window.
Using the first imshow (commented out above), I made sure the webcam is working and that I can see my own image in the window, so the problem must be elsewhere.
Can anyone help me understand where the problem is and what I am doing wrong?
Thank you.
Your displayedFrame never gets filled.
(You pass it into the function by value, so the function only receives a copy of the Mat header; when threshold() reallocates mask inside, the caller's displayedFrame still refers to its old, untouched buffer.)
Either return a Mat from cartoonifyImage:
Mat displayed = cartoonifyImage( cameraFrame );
or pass references:
void cartoonifyImage( const Mat & cameraFrame, Mat & displayedFrame );
Mat cartoonifyImage( Mat srcColor )
{
Mat gray, edges, mask;
cvtColor( srcColor, gray, CV_BGR2GRAY );
const int MEDIAN_BLUR_FILTER_SIZE = 7;
const int LAPLACIAN_FILTER_SIZE = 5;
const int EDGES_THRESHOLD = 80;
medianBlur( gray, gray, MEDIAN_BLUR_FILTER_SIZE );
Laplacian( gray, edges, CV_8U, LAPLACIAN_FILTER_SIZE );
threshold( edges, mask, EDGES_THRESHOLD, 255, THRESH_BINARY_INV );
return ( mask );
}
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
//imshow( "Camera Test", cameraFrame );
Mat displayedFrame( cameraFrame.size(), CV_8UC3 );
displayedFrame = cartoonifyImage(cameraFrame);
imshow( "Cartoonifier!", displayedFrame );
int keypress = waitKey( 20 );
if( keypress == 27 ) break;
}
}
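For completeness, the pass-by-reference variant mentioned above could look like the sketch below, reusing the same includes and using-declarations as the original program:
// Output parameter taken by reference, so the caller sees the result.
void cartoonifyImage( const Mat& srcColor, Mat& mask )
{
    Mat gray, edges;
    cvtColor( srcColor, gray, CV_BGR2GRAY );
    const int MEDIAN_BLUR_FILTER_SIZE = 7;
    const int LAPLACIAN_FILTER_SIZE = 5;
    const int EDGES_THRESHOLD = 80;
    medianBlur( gray, gray, MEDIAN_BLUR_FILTER_SIZE );
    Laplacian( gray, edges, CV_8U, LAPLACIAN_FILTER_SIZE );
    // threshold() (re)allocates mask; because mask is a reference, the caller's
    // Mat header is updated as well.
    threshold( edges, mask, EDGES_THRESHOLD, 255, THRESH_BINARY_INV );
}

// In the capture loop:
Mat displayedFrame;
cartoonifyImage( cameraFrame, displayedFrame );
imshow( "Cartoonifier!", displayedFrame );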

OpenCv Convexity Defect

I am using OpenCV 2.4.6.
I am trying to get all the convexity defect depth_points of the biggest contour, but I am getting the exception below:
Assertion failed (mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0)) in unknown function
The current code is pasted below.
Thank you for any help.
vector<vector<Point> >hulls( 1 );
vector<Point> hull;
std::vector<Vec4i> defects;
if(contours.size()>1){
convexHull( Mat(contours[largest_contour_index]), hulls[0], false );
convexityDefects(contours[largest_contour_index], hulls[0], defects);
drawContours(sourceVideo,contours,largest_contour_index,Scalar(255, 0, 0));
drawContours(sourceVideo,hulls,0,Scalar(0, 255, 0));
}
The error is in this line:
convexityDefects(contours[largest_contour_index], hulls[0], defects);
Try this; it works for me:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <ctype.h>
#include <time.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
String window_name = "Hand_HSV";
Mat frame,copyFrame;
// Detect Skin from YCrCb
Mat DetectYCrCb(Mat img,Scalar min, Scalar max){
Mat skin;
cvtColor(img, skin, cv::COLOR_RGB2YCrCb);
inRange(skin, min, max, skin);
Mat rect_12 = getStructuringElement(cv::MORPH_RECT, Size(12,12) , Point(6,6));
erode(skin, skin, rect_12,Point(),1);
Mat rect_6 = getStructuringElement(cv::MORPH_RECT, Size(6,6) , Point(3,3));
dilate(skin,skin,rect_6,Point(),2);
return skin;
}
void DetectContour(Mat img){
Mat drawing = Mat::zeros( img.size(), CV_8UC3 );
vector<vector<Point> > contours;
vector<vector<Point> > bigContours;
vector<Vec4i> hierarchy;
findContours(img,contours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE, Point());
if(contours.size()>0)
{
vector<vector<int> >hull( contours.size() );
vector<vector<Vec4i>> convDef(contours.size() );
vector<vector<Point>> hull_points(contours.size());
vector<vector<Point>> defect_points(contours.size());
for( int i = 0; i < contours.size(); i++ )
{
if(contourArea(contours[i])>5000)
{
convexHull( contours[i], hull[i], false );
convexityDefects( contours[i],hull[i], convDef[i]);
// start_index, end_index, farthest_pt_index, fixpt_depth
for(int k=0;k<hull[i].size();k++)
{
int ind=hull[i][k];
hull_points[i].push_back(contours[i][ind]);
}
for(int k=0;k<convDef[i].size();k++)
{
if(convDef[i][k][3]>20*256)
{
int ind_0=convDef[i][k][0];
int ind_1=convDef[i][k][1];
int ind_2=convDef[i][k][2];
defect_points[i].push_back(contours[i][ind_2]);
cv::circle(drawing,contours[i][ind_0],5,Scalar(0,255,0),-1);
cv::circle(drawing,contours[i][ind_1],5,Scalar(0,255,0),-1);
cv::circle(drawing,contours[i][ind_2],5,Scalar(0,0,255),-1);
cv::line(drawing,contours[i][ind_2],contours[i][ind_0],Scalar(0,0,255),1);
cv::line(drawing,contours[i][ind_2],contours[i][ind_1],Scalar(0,0,255),1);
}
}
drawContours( drawing, contours, i, Scalar(0,255,0), 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull_points, i, Scalar(255,0,0), 1, 8, vector<Vec4i>(), 0, Point() );
}
}
}
namedWindow( "Hull demo",cv::WINDOW_AUTOSIZE );
imshow( "Hull demo", drawing );
}
int main( int argc, char** argv )
{
VideoCapture capture(0);
//VideoCapture capture("Video_Hand.MPG");
namedWindow( window_name, cv::WINDOW_AUTOSIZE );
if (capture.isOpened()){
while(true)
{
capture >> frame;
imshow( window_name, frame);
Mat skinYCrCb = DetectYCrCb(frame,Scalar(0, 100, 80), Scalar(255, 185, 135));
imshow("Result",skinYCrCb);
DetectContour(skinYCrCb);
int c = waitKey(10);
if( (char)c == 27 )
{
break;
}
}
}
return 0;
}
Are you sure vector<vector<Point> > hulls is correct? The documentation (docs.opencv.org) says, for the 2nd parameter of convexityDefects:
convexhull – Convex hull obtained using convexHull() that should contain indices of the contour points that make the hull.
So I think it should rather be vector<vector<int> > hulls.
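A minimal sketch of that change applied to the snippet from the question, assuming contours and largest_contour_index are defined as there:
// convexityDefects expects the hull as indices into the contour, not as points.
std::vector<int> hullIndices;
std::vector<cv::Vec4i> defects;
cv::convexHull( contours[largest_contour_index], hullIndices, false );
// A hull with more than 3 points is needed before defects can be computed.
if ( hullIndices.size() > 3 )
    cv::convexityDefects( contours[largest_contour_index], hullIndices, defects );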