I am trying to write code to track hands. I am using the convexity defects function to find fingers, but there always seems to be a problem with the last defect.
Here is a picture of what I'm talking about (sorry, I'm new to the forum, so I cannot post images inline).
The cyan line is the contour, the yellow line is the hull points, and the red lines are the defect points. As you can see, the last defect point picks up the defect from the wrong side of the contour.
Here is my code:
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
VideoCapture cap(0);
Mat src, gray, background, binary, diff;
cap >> background;
cvtColor(background, background, CV_BGR2GRAY);
vector<vector<Point>> contours;
vector < vector<int>> hullI = vector<vector<int>>(1);
vector < vector<Point>> hullP = vector<vector<Point>>(1);
vector<Vec4i> defects;
while (waitKey(30)!='q') {
cap >> src;
cvtColor(src, gray, CV_BGR2GRAY);
blur(gray, gray, Size(3, 3));
absdiff(gray, background, diff);
threshold(diff, binary, 15, 255, THRESH_BINARY);
erode(binary, binary, Mat(Size(5, 5), CV_8U));
imshow("binary", binary);
findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
if (!contours.empty()) {
sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
drawContours(src, contours, 0, Scalar(255, 255, 0));
convexHull(contours[0], hullI[0]);
convexHull(contours[0], hullP[0]);
drawContours(src, hullP, 0, Scalar(0, 255, 255));
if (hullI[0].size() > 2) {
convexityDefects(contours[0], hullI[0], defects);
for (Vec4i defect : defects) {
line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
}
}
}
imshow("src", src);
char key = waitKey(30);
if (key == 'q')break;
else if (key == 'p') waitKey();
else if (key == 'b') {
cap >> background;
cvtColor(background, background, CV_BGR2GRAY);
}
}
}
I have confirmed through experiments that it is always the last defect in the defect vector that this happens to. Is this a bug in OpenCV, or am I doing something wrong?
I tested your code (with a small modification) with the image below (OpenCV version is 3.2).
As you can see on the result image, it works as expected. You are probably using an old version of OpenCV and getting a buggy result (I believe this was a bug that was fixed recently).
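To rule the version out, you can print the version your binary actually links against; a minimal sketch (CV_VERSION is the standard OpenCV version macro):

#include <opencv2/core/version.hpp>
#include <iostream>

int main() {
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    return 0;
}

And here is the code I tested: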
#include "opencv2\opencv.hpp"
using namespace cv;
using namespace std;
int main() {
//VideoCapture cap(0);
Mat src, gray, background, binary, diff;
//cap >> background;
//cvtColor(background, background, CV_BGR2GRAY);
vector<vector<Point> > contours;
vector < vector<int> > hullI = vector<vector<int> >(1);
vector < vector<Point> > hullP = vector<vector<Point> >(1);
vector<Vec4i> defects;
src = imread("hand.png");
cvtColor(src, gray, CV_BGR2GRAY);
blur(gray, gray, Size(3, 3));
threshold(gray, binary, 150, 255, THRESH_BINARY_INV);
//erode(binary, binary, Mat(Size(5, 5), CV_8U));
imshow("binary", binary);
findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
if (!contours.empty()) {
sort(contours.begin(), contours.end(), [](vector<Point> a, vector<Point> b) { return a.size() > b.size(); });
drawContours(src, contours, 0, Scalar(255, 255, 0));
convexHull(contours[0], hullI[0]);
convexHull(contours[0], hullP[0]);
drawContours(src, hullP, 0, Scalar(0, 255, 255));
if (hullI[0].size() > 2) {
convexityDefects(contours[0], hullI[0], defects);
for (Vec4i defect : defects) {
line(src, contours[0][defect[0]], contours[0][defect[2]], Scalar(0, 0, 255));
line(src, contours[0][defect[1]], contours[0][defect[2]], Scalar(0, 0, 255));
}
}
}
imshow("result", src);
char key = waitKey(0);
return 0;
}
I have a solution which involves detecting the skin using OpenCV. I implemented it in Python, which you can easily convert to C++.
I obtained the HSV values of the image you uploaded using:
import cv2
import numpy as np

hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
This is the range for the HSV values of human skin:
l = np.array([0, 48, 80], dtype = "uint8")
u = np.array([20, 255, 255], dtype = "uint8")
skin_img = cv2.inRange(hsv_img, l, u)
cv2.imshow("Hand", skin_img)
I then performed morphological dilation and obtained the following:
You can now apply the convex hull and find the convexity defects.
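Since your original pipeline is in C++, here is a minimal C++ sketch of the same idea; the HSV bounds are the ones above, and the 5x5 ellipse kernel for the dilation is an assumption you may need to tune:

// Skin segmentation sketch, mirroring the Python steps above.
// Assumes 'src' is the BGR camera frame from the original code.
Mat hsv, skinMask;
cvtColor(src, hsv, CV_BGR2HSV);
inRange(hsv, Scalar(0, 48, 80), Scalar(20, 255, 255), skinMask);
// Morphological dilation; the kernel shape and size are assumptions to tune
dilate(skinMask, skinMask, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
// skinMask can now replace 'binary' in the findContours / convexHull /
// convexityDefects pipeline from the question.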
I have an input image:
I use a few functions to find contours in this image:
cv::Mat srcImg = cv::imread("input.png");
cv::Mat grayImg{};
cv::cvtColor(srcImg, grayImg, cv::COLOR_BGR2GRAY);
cv::Mat threshImg;
cv::adaptiveThreshold(grayImg, threshImg, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 11, 2);
My picture after thresholding looks like this:
Now I want to find contours:
std::vector<std::vector<cv::Point>> ptContours{};
cv::findContours(threshImg, ptContours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
And here is my question: how can I use drawContours() to fill the inner area of the character A?
I want to achieve something like this:
If you get the binary image via Otsu Thresholding, you get a nice binary blob without any "filling problems":
// Read the input image:
std::string imageName = "D://opencvImages//UZUd5.png";
cv::Mat testImage = cv::imread( imageName );

// Convert BGR to Gray:
cv::Mat grayImage;
cv::cvtColor( testImage, grayImage, cv::COLOR_BGR2GRAY );

// Get Binary via Otsu:
cv::Mat binaryImage;
cv::threshold( grayImage, binaryImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU );
This is the result:
If you don't want to use Otsu and instead stay with your current approach, this is a possible way of filling the blob. It filters every contour by area and hierarchy: I look for the outermost and innermost contours and perform flood fills accordingly, outside of the outer contour to fill the canvas, and inside the inner contour to fill the hole:
// Get Binary via Adaptive Thresh:
cv::Mat binaryImage;
cv::adaptiveThreshold( grayImage, binaryImage, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 11, 2 );

// Containers:
std::vector< std::vector<cv::Point> > contours;
std::vector< cv::Vec4i > hierarchy;

// Create a new matrix where things will be drawn:
cv::Mat filledBlob = cv::Mat::ones( binaryImage.size(), CV_8UC3 );

// Find contours:
cv::findContours(binaryImage, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);

// Filling colors:
cv::Scalar fillColor = cv::Scalar( 0, 0, 0 );
cv::Scalar canvasColor = cv::Scalar( 255, 255, 255 );

for( int i = 0; i < (int)contours.size(); i++ ){
    // Get blob area:
    float blobArea = cv::contourArea( contours[i] );
    // Filter smaller blobs:
    int minArea = 3000;
    if ( blobArea > minArea ) {
        // Get contour hierarchy (index of the parent contour, -1 if there is none):
        int contourHierarchy = hierarchy[i][3];
        if ( contourHierarchy != -1 ){
            // Child contour: draw the "hole" outline...
            cv::drawContours( filledBlob, contours, (int)i, fillColor, 1, cv::LINE_8, hierarchy, 0 );
            // ...then flood-fill it from its centroid with the canvas color.
            // Get bounding rectangle:
            cv::Rect bBox = cv::boundingRect( contours[i] );
            // Compute centroid:
            cv::Point centroid;
            centroid.x = bBox.x + 0.5 * bBox.width;
            centroid.y = bBox.y + 0.5 * bBox.height;
            cv::floodFill( filledBlob, centroid, canvasColor, (cv::Rect*)0, cv::Scalar(), 0 );
        }else{
            // Parent (outermost) contour: draw the outline...
            cv::drawContours( filledBlob, contours, (int)i, fillColor, 1, cv::LINE_8, hierarchy, 0 );
            // ...and flood-fill the canvas from a corner, outside of the contour.
            cv::floodFill( filledBlob, cv::Point( 1, 1 ), canvasColor, (cv::Rect*)0, cv::Scalar(), 0 );
        }
        // Show the intermediate image:
        cv::imshow( "Filled Blob", filledBlob );
        cv::waitKey(0);
    }
}
Which yields this image:
If you want an inverted image, just compute 255 - filledBlob:
cv::subtract( 255, filledBlob, filledBlob );
I have trained a Haar cascade and now I need to work with the detected objects. How can I crop them from the original image and show each in a new window (or show multiple windows if I find 2 objects in the image)? Here is my code (OpenCV ver 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the face feature data" << endl;
        return -1;
    }

    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");
    CvRect AssignRect = Rect(0, 0, src.cols, src.rows / 2);
    Mat srcImage = src(AssignRect);

    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);

    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3, 3));
    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }

    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of each found object. You already have the left and top coordinates, the width, and the height of each rectangle. All you need to do is crop each rectangle by creating a Mat from it and show each crop in its own window.
You can check here to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    // Each detection is already a Rect that can be used as a region of interest
    Rect crop_found(trafficLights[i].x, trafficLights[i].y, trafficLights[i].width, trafficLights[i].height);
    // Constructing a Mat from (src, rect) gives a view into src, not a copy
    Mat found(src, crop_found);
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}
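Note that Mat found(src, crop_found) only creates a header pointing into src's pixels, not a copy. If you need a crop that is independent of src (for example, to modify it without touching the original), call found.clone() instead.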
I'm trying to identify drops on a water-sensitive card, as you can see in the figure below. In addition to the drops, there are water drips on the card that I don't want to count. I'm using OpenCV's findContours function to detect these contours. The question is: can I separate the real drops from the water drips on the card? Here is an excerpt from my code.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src; Mat src_gray; Mat binary_image, goTo;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
cv::Scalar min_color_scanner = Scalar(0,0,0);
cv::Scalar max_color_scanner = Scalar(255,175,210);
int main(int argc, char** argv){
cv::Mat image, gray, thresh;
// MARK:- Load image, grayscale, Otsu's threshold
image = imread("/Users/user/Documents/Developer/Desktop/OpenCV-Teste3.3.1/normal1.png");
Mat circles_detect;
cvtColor( image, circles_detect, CV_BGR2GRAY );
GaussianBlur( circles_detect, circles_detect, Size(9, 9), 2, 2 );
//END CIRCLES
cvtColor(image, gray, CV_BGR2GRAY);
threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
Mat mask(image.rows, image.cols, CV_8UC3, Scalar(255,255,255));
cv::Mat bgr_image, inRangeImage;
cv::cvtColor(image, bgr_image, CV_RGB2BGR);
cv::inRange(bgr_image, min_color_scanner, max_color_scanner, binary_image);
//Find contours and filter using contour area
vector<vector<Point>> contours;
cv::findContours(thresh, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
// MARK:- data from image
double largest_area=0.0;
int largest_contour_index=0;
double smallest_area=0.0;
int smallest_contour_index=0;
int drop_derive=0;
Rect boundig_rect;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area > largest_area){
largest_area=area;
largest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
}
smallest_area = largest_area;
for(int i=0;i<contours.size();i++){
double area = contourArea(contours[i]);
if(area < smallest_area){
smallest_area=area;
smallest_contour_index = i;
//boundig_rect = boundingRect(contourArea(contours[i]));
}
if (area < 4){
drop_derive++;
cv::drawContours(image, contours, i, Scalar(255,0,0));
}
}
//show datas and images..
return(0);
}
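Regarding separating the drops from the drips: since drops are roughly circular while drips are elongated, one possible filter is contour circularity, 4*pi*area / perimeter^2, which is 1.0 for a perfect circle. This is only a sketch on top of your existing contours, and the 0.7 cutoff is an assumption to tune on your images:

// Sketch: classify each contour by circularity = 4*pi*A / P^2.
for (int i = 0; i < contours.size(); i++){
    double area = contourArea(contours[i]);
    double perimeter = arcLength(contours[i], true);
    if (perimeter <= 0) continue;
    double circularity = 4.0 * CV_PI * area / (perimeter * perimeter);
    if (circularity > 0.7)
        drawContours(image, contours, i, Scalar(0, 255, 0));   // likely a real drop
    else
        drawContours(image, contours, i, Scalar(0, 0, 255));   // likely a drip
}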
I have read the official documentation here about the Map class in OpenCV, to try out the reg module. This is my test image:
This is my code:
#include <opencv2/opencv.hpp>
#include "opencv2/reg/mapshift.hpp"
#include "opencv2/reg/mappergradshift.hpp"
#include "opencv2/reg/mapperpyramid.hpp"

using namespace cv;
using namespace std;
using namespace cv::reg;

Mat highlight1(const Mat src, const Mat t_mask) {
    Mat srcImg = src.clone(), mask = t_mask.clone();
    threshold(mask, mask, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);
    cvtColor(mask, mask, COLOR_GRAY2BGR);
    cvtColor(srcImg, srcImg, COLOR_GRAY2BGR);
    dilate(mask - Scalar(0, 0, 255), mask, Mat(), Point(-1, -1), 1);
    return srcImg - mask;
}

int main() {
    Mat img1 = imread("img.jpg", 0);
    Mat img2;

    // Warp original image
    Vec<double, 2> shift(5., 5.);
    MapShift mapTest(shift);
    mapTest.warp(img1, img2);

    // Register
    Ptr<MapperGradShift> mapper = makePtr<MapperGradShift>();
    MapperPyramid mappPyr(mapper);
    Ptr<Map> mapPtr = mappPyr.calculate(img1, img2);
    MapShift* mapShift = dynamic_cast<MapShift*>(mapPtr.get());

    // Display registration result
    Mat result;
    mapShift->inverseWarp(img2, result);
    Mat registration_before = highlight1(img1, img2);
    Mat registration_after = highlight1(img1, result);
    return 0;
}
But as we can see, registration_after is even worse than registration_before. What have I missed?
This is registration_before:
This is registration_after:
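One thing worth checking first, a diagnostic sketch (assuming your build's cv::reg::MapShift exposes the getShift() accessor): print the shift the mapper actually estimated and compare it against the known (5, 5) warp. If the estimate is far off, the registration itself failed rather than the display step:

// Diagnostic sketch: inspect the estimated translation.
if (mapShift) {
    cout << "estimated shift: " << mapShift->getShift() << endl;  // expect something close to (5, 5)
} else {
    cout << "calculate() did not return a MapShift" << endl;
}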
I want to use Kinect SDK 2.0 with OpenCV for shape detection. For that I want to get the contours of the image frame, but I am getting an assertion error when I use OpenCV's findContours function.
Here is the code I used:
void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{
    // Make sure we've received valid data
    if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
    {
        // Wrap the data in an OpenCV Mat (the RGBQUAD buffer is 4-channel BGRA)
        Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer);
        //Mat ColorImage;
        //ColorImage = imread("C:/Users/IWP/Desktop/Untitled.png", CV_LOAD_IMAGE_COLOR);
        //imshow("ColorImage", ColorImage);

        // Convert it to gray
        Mat src_gray;
        cvtColor(ColorImage, src_gray, CV_BGR2GRAY);

        // Canny
        Mat src_canny;
        Canny(src_gray, src_canny, 75, 125, 3);
        //threshold(src_gray, src_canny, 128, 255, CV_THRESH_BINARY);

        // Find the contours
        vector<vector<Point>> contours;
        findContours(src_canny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

        Mat showImage;
        resize(src_canny, showImage, Size(nWidth / 2, nHeight / 2));
        imshow("ColorImage", showImage); //imshow("ColorImage", ColorImage);
    }
}
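Not a confirmed fix, but a diagnostic sketch that can narrow down which call asserts: findContours requires an 8-bit single-channel image, so checking each stage's type shows where an assumption breaks. The lines below are meant to be pasted after the Canny call (they need <iostream> for the printout):

// Diagnostic sketch: verify the assumptions each call makes about its input.
CV_Assert(ColorImage.type() == CV_8UC4);   // the RGBQUAD buffer is 4-channel BGRA
std::cout << "gray channels: " << src_gray.channels() << std::endl;   // should print 1
CV_Assert(src_canny.type() == CV_8UC1);    // findContours needs an 8-bit, 1-channel image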