Assertion error "Vector subscript out of range" Line 1140 - C++

I am a beginner at OpenCV. What I am trying to do is crop the eye region from the main frame, but I get an error like this:
Assertion error "Vector subscript out of range" Line 1140.
Do you have any idea how to fix that?
Here is my code:
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv)
{
    CascadeClassifier eye;
    CascadeClassifier righteye;
    CascadeClassifier lefteye;
    eye.load("haarcascade_mcs_eyepair_big.xml");
    righteye.load("haarcascade_mcs_lefteye.xml");
    lefteye.load("haarcascade_mcs_righteye.xml");
    VideoCapture vid;
    vid.open(0);
    if (!vid.isOpened())
    {
        cout << "webcam cannot open" << endl;
        system("Pause");
        return -1;
    }
    Mat frame;
    Mat grires;
    Mat frame2;
    Mat frame3;
    namedWindow("algilanan", 1);
    namedWindow("algilanan2", 2);
    namedWindow("algilanan3", 3);
    while (true)
    {
        vid >> frame;
        cvtColor(frame, grires, CV_BGR2GRAY); // convert the image to grayscale
        //equalizeHist(grires, grires); // equalize the histogram if desired
        vector<Rect> eyepair;
        eye.detectMultiScale(grires, eyepair, 1.1, 3, 0, Size(30, 30));
        for (int i = 0; i < eyepair.size(); i++)
        {
            Point pt1(eyepair[i].x + eyepair[i].width, eyepair[i].y + eyepair[i].height);
            Point pt2(eyepair[i].x, eyepair[i].y);
            rectangle(frame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
            Mat ROI = grires(eyepair[i]);
            vector<Rect> reye;
            vector<Rect> leye;
            righteye.detectMultiScale(ROI, reye, 1.1, 3, 0, Size(30, 30));
            lefteye.detectMultiScale(ROI, leye, 1.1, 3, 0, Size(30, 30));
            for (int t = 0; t < reye.size(); t++)
            {
                Point pt1(eyepair[i].x + reye[t].x, eyepair[i].x + reye[t].y + reye[t].height);
                Point pt2(reye[t].x, reye[t].y);
                rectangle(ROI, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r1 = reye[t];
                frame3 = ROI(r1);
            }
            for (int z = 0; z < leye.size(); z++)
            {
                Point pt1(eyepair[i].x + leye[z].x, eyepair[i].x + leye[z].y + leye[z].height);
                Point pt2(leye[z].x, leye[z].y);
                rectangle(ROI, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r2 = reye[z];
                frame2 = ROI(r2);
            }
        }
        imshow("algilanan", frame);
        imshow("algilanan2", frame2);
        imshow("algilanan3", frame3);
        waitKey(33);
    }
    return 0;
}
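A likely cause, offered as a hedged sketch rather than a confirmed fix: in the left-eye loop the code indexes reye with z (Rect r2 = reye[z];) instead of leye, so whenever leye has more detections than reye the subscript goes out of range and the Debug runtime raises exactly this assertion. The imshow calls on frame2 and frame3 can also fail before the first detection, because those Mats are still empty. The sketch below keeps the structure of the question's code and the same cascade file names; it only corrects the indexing and guards the display calls:

// Minimal sketch of the likely fix (not the original author's final code).
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    CascadeClassifier eyepairCascade, rightEyeCascade, leftEyeCascade;
    eyepairCascade.load("haarcascade_mcs_eyepair_big.xml");
    rightEyeCascade.load("haarcascade_mcs_lefteye.xml");   // mirrored naming, as in the question
    leftEyeCascade.load("haarcascade_mcs_righteye.xml");

    VideoCapture vid(0);
    if (!vid.isOpened()) return -1;

    Mat frame, gray, rightCrop, leftCrop;
    while (true)
    {
        vid >> frame;
        if (frame.empty()) break;
        cvtColor(frame, gray, CV_BGR2GRAY);

        vector<Rect> eyepair;
        eyepairCascade.detectMultiScale(gray, eyepair, 1.1, 3, 0, Size(30, 30));
        for (size_t i = 0; i < eyepair.size(); i++)
        {
            rectangle(frame, eyepair[i], Scalar(0, 255, 0), 1, 8, 0);
            Mat roi = gray(eyepair[i]);

            vector<Rect> reye, leye;
            rightEyeCascade.detectMultiScale(roi, reye, 1.1, 3, 0, Size(30, 30));
            leftEyeCascade.detectMultiScale(roi, leye, 1.1, 3, 0, Size(30, 30));

            for (size_t t = 0; t < reye.size(); t++)
                rightCrop = roi(reye[t]);      // index reye with t
            for (size_t z = 0; z < leye.size(); z++)
                leftCrop = roi(leye[z]);       // index leye with z, not reye[z]
        }

        imshow("algilanan", frame);
        if (!rightCrop.empty()) imshow("algilanan2", rightCrop);  // only show once a crop exists
        if (!leftCrop.empty())  imshow("algilanan3", leftCrop);
        if (waitKey(33) == 27) break;
    }
    return 0;
}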

Related

Add a button with functionality in C++ using Visual Studio

I am a newbie to C++ and the IDE I am using is Visual Studio 2022. I have written code to detect a face (eyes and mouth too) and save the ROI to a folder on the PC. What it does now can be thought of as an auto-capture of the ROI as soon as the face is detected.
I now want to create a "force capture" function, for which I will need a button that runs pretty much the same code I wrote for auto-capture.
How do I add the button and make it perform its task?
I found related answers, but they use Qt and I am not sure how to apply that here.
Thanks a ton! Really need help.
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
//(1) include face header
#include "opencv2/face.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
//(2) include face header
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <opencv2/imgproc/types_c.h>
//file handling
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
using namespace ml;
using namespace cv::face;
//(3) Global variables
Ptr<Facemark> facemark; //mark detection
CascadeClassifier faceDetector, mouth, eye, eye1; //face detection
string name, filename;
void process(Mat img, Mat imgcol) {
vector<Rect> faces;
faceDetector.detectMultiScale(img, faces);
Mat imFace;
if (faces.size() != 0) {
for (size_t i = 0; i < faces.size(); i++)
{
cv::rectangle(imgcol, faces[i], Scalar(255, 0, 0));
imFace = imgcol(faces[i]);
resize(imFace, imFace, Size(imFace.cols * 5, imFace.rows * 5));
faces[i] = Rect(faces[i].x = 0, faces[i].y = 0, faces[i].width * 5,
(faces[i].height) * 5);
}
vector< vector<Point2f> > shapes;
//vector < Rect > measures;
if (facemark->fit(imFace, faces, shapes)) //fiiting predef shapes in faces// // imface is the size of faces
{
for (unsigned long i = 0; i < faces.size(); i++) {
for (unsigned long k = 0; k < shapes[i].size(); k++) {
cv::circle(imFace, shapes[i][k], 5, cv::Scalar(0, 0, 255), FILLED);
}
}
}
namedWindow("Detected_shape");
imshow("Detected_shape", imFace);
waitKey(5);
}
else {
cout << "Faces not detected." << endl;
}
}
int main()
{
facemark = FacemarkLBF::create();
facemark->loadModel("C:/Dev/HeadPose/HeadPose/lbfmodel.yml");
faceDetector.load("D:/opencv/build/install/etc/haarcascades/haarcascade_frontalface_alt2.xml");
mouth.load("D:/opencv/build/install/etc/haarcascades/haarcascade_smile.xml");
eye.load("D:/opencv/build/install/etc/haarcascades/haarcascade_eye.xml");
cout << "Loaded model" << endl;
Mat frame, grayframe, testframe, faceROI;
int x_axis, y_axis;
namedWindow("Detecting");
VideoCapture cap(0); //1 for diff cam
while (1)
{
cap.read(frame);
if (!cap.read(frame))
{
cout << "an error while taking the frame from cap" << endl;
}
//face
vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, grayframe, CV_BGR2GRAY);
equalizeHist(grayframe, testframe);
faceDetector.detectMultiScale(testframe, faces, 1.1, 3, CASCADE_SCALE_IMAGE, Size(30, 30));
Rect roi_b;
Rect roi_c;
size_t ic = 0;
int ac = 0;
size_t ib = 0;
int ab = 0;
for (int ic = 0; ic < faces.size(); ic++)
{
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
cvtColor(crop, gray, COLOR_BGR2GRAY);
stringstream ssfn;
filename = "C:\\Users\\Hp\\Desktop\\Faces\\";
ssfn << filename.c_str() << name <<"_"<< roi_b.width<<"_"<< roi_b.height << ".jpg";
filename = ssfn.str();
imwrite(filename, res);
rectangle(frame, faces[ic], Scalar(255, 0, 255), 2, 8, 0);
Mat face = frame(faces[ic]);
cvtColor(face, face, CV_BGR2GRAY);
//mouth
vector <Rect> mouthi;
mouth.detectMultiScale(face, mouthi);
for (int k = 0; k < mouthi.size(); k++)
{
Point pt1(mouthi[0].x + faces[ic].x, mouthi[0].y + faces[ic].y);
Point pt2(pt1.x + mouthi[0].width, pt1.y + mouthi[0].height);
rectangle(frame, pt1, pt2, Scalar(255, 0, 0), 1, 8, 0);
}
//eyes
faceROI = frame(faces[ic]);//Taking area of the face as Region of Interest for eyes//
vector<Rect>eyes;//declaring a vector named eyes//
eye.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5)); //detect eyes in every face//
/*eye1.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5));*/
for (size_t j = 0; j < eyes.size(); j++)
{ //for locating eyes//
Point center(faces[ic].x + eyes[j].x + eyes[j].width * 0.5, faces[ic].y + eyes[j].y + eyes[j].height * 0.5);//getting the centers of both eyes//
int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25); //declaring radius of the eye enclosing circles//
// cout << "radius" << radius << endl;
circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);//drawing circle around both eyes//
x_axis = eyes[j].x;//storing x axis location of eyes in x_axis//
y_axis = eyes[j].y;//storing y axis location of eyes in y_axis//
cout << "Position of the eyes is:" << "(" << x_axis << "," << y_axis << ")" << endl;//showing co-ordinate values//
}
}
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
if (!crop.empty()) {
imshow("detected", crop);
}
else destroyWindow("detected");
cout << "Name\n";
cin >> name;
Mat img; //image containers
Mat imgbw;
cap >> img; //image from webcam
resize(img, img, Size(460, 460), 0, 0, INTER_LINEAR_EXACT);
cvtColor(img, imgbw, COLOR_BGR2GRAY);
process(imgbw, img);
imshow("Detecting", frame);
if (waitKey(30) == 27) {
break;
}
}
return 0;
}
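One way to get a button without Qt, as a rough sketch rather than a drop-in solution: plain OpenCV highgui windows only provide real buttons (createButton) when OpenCV is built with the Qt backend, but a rectangle drawn on the frame plus setMouseCallback can act as a "force capture" button. The window name, button rectangle, flag name, and output file below are illustrative; the capture branch is where the same ROI-saving code from the question would go:

// Minimal sketch of a mouse-callback "button" for force capture.
#include <opencv2/opencv.hpp>
using namespace cv;

static bool forceCapture = false;                  // set when the on-screen button is clicked
static const Rect buttonRect(10, 10, 120, 40);     // button area drawn on every frame

static void onMouse(int event, int x, int y, int, void*)
{
    if (event == EVENT_LBUTTONDOWN && buttonRect.contains(Point(x, y)))
        forceCapture = true;
}

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    namedWindow("Detecting");
    setMouseCallback("Detecting", onMouse);

    Mat frame;
    while (cap.read(frame) && !frame.empty())
    {
        // draw the "button" on top of the frame
        rectangle(frame, buttonRect, Scalar(60, 60, 60), FILLED);
        putText(frame, "Capture", buttonRect.tl() + Point(10, 28),
                FONT_HERSHEY_SIMPLEX, 0.6, Scalar(255, 255, 255), 1);

        if (forceCapture)
        {
            forceCapture = false;
            imwrite("forced_capture.jpg", frame);  // placeholder: reuse the ROI-saving code here
        }

        imshow("Detecting", frame);
        if (waitKey(30) == 27) break;
    }
    return 0;
}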

Can't run C++ project with webcam, but it works with an image

I created a C++ project named "Document Scanner". With images it works perfectly, but with my webcam it gives me the error "vector subscript out of range".
Code for images:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
//////////////// Project 2 – Document Scanner ////////////
Mat imgOriginal, imgGray, imgCanny, imgThre, imgBlur, imgDil, imgErode, imgCrop, imgWarp;
vector<Point> initialPoints, docPoints;
float w = 420, h = 596; //dimensions of a4 paper multiplied by 2
Mat preProcessing(Mat img) {
cvtColor(img, imgGray, COLOR_BGR2GRAY);
GaussianBlur(imgGray, imgBlur, Size(3, 3), 3, 0);
Canny(imgBlur, imgCanny, 25, 75);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
dilate(imgCanny, imgDil, kernel);
//erode(imgDil, imgErode, kernel);
return imgDil;
}
vector<Point> getContours(Mat image) {
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(imgDil, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
//drawContours(img, contours, -1, Scalar(255, 0, 255), 2);
vector<vector<Point>> conPoly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Point> biggest;
int maxArea = 0;
for (int i = 0; i < contours.size(); i++)
{
int area = contourArea(contours[i]);
cout << area << endl;
string objectType;
if (area > 1000) {
float peri = arcLength(contours[i], true);
approxPolyDP(contours[i], conPoly[i], 0.02 * peri, true);
if (area > maxArea && conPoly[i].size() == 4) {
//drawContours(imgOriginal, conPoly, i, Scalar(255, 0, 255), 5);
biggest = { conPoly[i][0],conPoly[i][1], conPoly[i][2], conPoly[i][3] };
maxArea = area;
}
//drawContours(imgOriginal, conPoly, i, Scalar(255, 0, 255), 2);
//rectangle(imgOriginal, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 5);
}
}
return biggest;
}
void drawPoints(vector<Point> points, Scalar color) {
for (int i = 0; i < points.size(); i++) {
circle(imgOriginal, points[i], 10, color, FILLED);
putText(imgOriginal, to_string(i), points[i], FONT_HERSHEY_PLAIN, 4, color, 4);
}
}
vector<Point> reorder(vector<Point> points) {
vector<Point> newPoints;
vector<int> sumPoints, subPoints;
for (int i = 0; i < 4; i++) {
sumPoints.push_back(points[i].x + points[i].y);
subPoints.push_back(points[i].x - points[i].y);
}
newPoints.push_back(points[min_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); //0
newPoints.push_back(points[max_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); //1
newPoints.push_back(points[min_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); //2
newPoints.push_back(points[max_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); //3
return newPoints;
}
Mat getWarp(Mat img, vector<Point> points, float w, float h) {
Point2f src[4] = { points[0], points[1], points[2], points[3] };
Point2f dst[4] = { {0.0f,0.0f}, {w,0.0f}, {0.0f,h}, {w, h} };
Mat matrix = getPerspectiveTransform(src, dst);
warpPerspective(img, imgWarp, matrix, Point(w, h));
return imgWarp;
}
void main() {
string path = "Resources/paper.jpg";
imgOriginal = imread(path);
//resize(imgOriginal, imgOriginal, Size(), 0.5, 0.4);
// Preprocessing
imgThre = preProcessing(imgOriginal);
// Get Contours - Biggest
initialPoints = getContours(imgThre);
///drawPoints(initialPoints, Scalar(0,0,255));
docPoints = reorder(initialPoints);
//drawPoints(docPoints, Scalar(0, 255, 0));
// Warp
imgWarp = getWarp(imgOriginal, docPoints, w, h);
//Crop
int cropVal = 5;
Rect roi(cropVal, cropVal, w - (2 * cropVal), h - (2 * cropVal));
imgCrop = imgWarp(roi);
imshow("Image", imgOriginal);
imshow("Image Dilation", imgThre);
imshow("Image Warp", imgWarp);
imshow("Image Crop", imgCrop);
waitKey(0);
}
Code for webcam:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
//////////////// Project 2 – Document Scanner ////////////
Mat img, imgGray, imgCanny, imgThre, imgBlur, imgDil, imgErode, imgCrop, imgWarp;
vector<Point> initialPoints, docPoints;
float w = 420, h = 596; //dimensions of a4 paper multiplied by 2
Mat preProcessing(Mat img) {
cvtColor(img, imgGray, COLOR_BGR2GRAY);
GaussianBlur(imgGray, imgBlur, Size(3, 3), 3, 0);
Canny(imgBlur, imgCanny, 25, 75);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
dilate(imgCanny, imgDil, kernel);
//erode(imgDil, imgErode, kernel);
return imgDil;
}
vector<Point> getContours(Mat image) {
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(image, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
//drawContours(img, contours, -1, Scalar(255, 0, 255), 2);
vector<vector<Point>> conPoly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Point> biggest;
int maxArea = 0;
for (int i = 0; i < contours.size(); i++)
{
int area = contourArea(contours[i]);
cout << area << endl;
if (area > 1000) {
float peri = arcLength(contours[i], true);
approxPolyDP(contours[i], conPoly[i], 0.02 * peri, true);
if (area > maxArea && conPoly[i].size() == 4) {
//drawContours(imgOriginal, conPoly, i, Scalar(255, 0, 255), 5);
biggest = { conPoly[i][0],conPoly[i][1], conPoly[i][2], conPoly[i][3] };
maxArea = area;
}
//drawContours(imgOriginal, conPoly, i, Scalar(255, 0, 255), 2);
//rectangle(imgOriginal, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 5);
}
}
return biggest;
}
void drawPoints(vector<Point> points, Scalar color) {
for (int i = 0; i < points.size(); i++) {
circle(img, points[i], 10, color, FILLED);
putText(img, to_string(i), points[i], FONT_HERSHEY_PLAIN, 4, color, 4);
}
}
vector<Point> reorder(vector<Point> points) {
vector<Point> newPoints;
vector<int> sumPoints, subPoints;
for (int i = 0; i < 4; i++) {
sumPoints.push_back(points[i].x + points[i].y);
subPoints.push_back(points[i].x - points[i].y);
}
newPoints.push_back(points[min_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); //0
newPoints.push_back(points[max_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); //1
newPoints.push_back(points[min_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); //2
newPoints.push_back(points[max_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); //3
return newPoints;
}
Mat getWarp(Mat img, vector<Point> points, float w, float h) {
Point2f src[4] = { points[0], points[1], points[2], points[3] };
Point2f dst[4] = { {0.0f,0.0f}, {w,0.0f}, {0.0f,h}, {w, h} };
Mat matrix = getPerspectiveTransform(src, dst);
warpPerspective(img, imgWarp, matrix, Point(w, h));
return imgWarp;
}
void main() {
VideoCapture cap(0);
//string path = "Resources/paper.jpg";
//imgOriginal = imread(path);
//resize(imgOriginal, imgOriginal, Size(), 0.5, 0.4);
while (true) {
cap.read(img);
// Preprocessing
imgThre = preProcessing(img);
// Get Contours - Biggest
initialPoints = getContours(imgThre);
///drawPoints(initialPoints, Scalar(0,0,255));
docPoints = reorder(initialPoints);
//drawPoints(docPoints, Scalar(0, 255, 0));
// Warp
imgWarp = getWarp(img, docPoints, w, h);
//Crop
int cropVal = 5;
Rect roi(cropVal, cropVal, w - (2 * cropVal), h - (2 * cropVal));
imgCrop = imgWarp(roi);
//imshow("Image", imgOriginal);
//imshow("Image Dilation", imgThre);
//imshow("Image Warp", imgWarp);
imshow("Image Crop", imgCrop);
waitKey(1);
}
}
After debugging my program, I found that the error occurs in the "reorder" function, at the push_back() calls. How can I resolve that problem?
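A likely explanation, as a hedged sketch: with a webcam frame that contains no document, getContours() finds no 4-point contour and returns an empty vector, so reorder() reads points[0]..points[3] of an empty vector and the Debug runtime reports "vector subscript out of range". Guarding the loop so reorder()/getWarp() only run when exactly four points were found avoids the crash. This sketch assumes the same globals and helper functions as the webcam code above:

// Guarded main loop (sketch): skip frames where no document quadrilateral is found.
while (true) {
    cap.read(img);
    if (img.empty()) continue;

    imgThre = preProcessing(img);
    initialPoints = getContours(imgThre);

    if (initialPoints.size() == 4) {      // only proceed when a quadrilateral was found
        docPoints = reorder(initialPoints);
        imgWarp = getWarp(img, docPoints, w, h);

        int cropVal = 5;
        Rect roi(cropVal, cropVal, w - (2 * cropVal), h - (2 * cropVal));
        imgCrop = imgWarp(roi);
        imshow("Image Crop", imgCrop);
    } else {
        imshow("Image Crop", img);        // show the raw frame while no document is detected
    }
    waitKey(1);
}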

OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor

I have downloaded a program from a YouTube video; it should be able to detect a red rectangle and calculate the distance.
(https://www.youtube.com/watch?v=3Xl8yWvMPl8)
I've never used C++, so that's why I actually need some help.
The error I get when I launch the console is the following:
"OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor, file C:\builds\2_4_PackSlave-win64-vc12-shared\opencv\modules\imgproc\src\color.cpp, line 3739"
I'm not sure what it means, and I don't know how I should fix it.
I've managed to include all the files and use the correct libraries, but it won't work. Personally I don't have a webcam, but I asked a friend of mine, who has one, to try out the program, and he gets the same error.
So here is the code:
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "opencv2\imgproc\imgproc_c.h"
#include <fstream>
#include "math.h"
int _tmain(int argc, _TCHAR* argv[])
{
using namespace std;
using namespace cv;
Mat img, img_gray, channel[3];
VideoCapture cam(1);
double distance = 0;
//FILE *data;
//data = fopen("data320.csv","a");
cam.set(CV_CAP_PROP_FRAME_WIDTH, 1280);
cam.set(CV_CAP_PROP_FRAME_HEIGHT, 720);
cam.set(CV_CAP_PROP_CONVERT_RGB, 1);
namedWindow("Frame", WINDOW_AUTOSIZE);
while (waitKey(10) != 'a')
{
cam >> img;
cvtColor(img, img_gray, COLOR_RGB2GRAY);
split(img, channel);
subtract(channel[2], img_gray, img_gray);
//convertScaleAbs(img, img);
threshold(img_gray, img_gray, 90, 255, THRESH_BINARY);
erode(img_gray, img_gray, Mat(), Point(-1, -1), 4);
dilate(img_gray, img_gray, Mat(), Point(-1, -1), 4);
vector<vector<Size>> contors;
vector<Vec4i> heirarcy;
findContours(img_gray, contors, heirarcy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
vector<Rect> boundRect(contors.size());
vector<vector<Point>> contor_poly(contors.size());
for (int i = 0; i< contors.size(); i++)
{
approxPolyDP(Mat(contors[i]), contor_poly[i], 3, true);
boundRect[i] = boundingRect(Mat(contor_poly[i]));
}
int max_index = 0, max_area = 0;
for (int i = 0; i< boundRect.size(); i++)
{
int a = boundRect[i].area();
rectangle(img, boundRect[i].tl(), boundRect[i].br(), Scalar(255, 255, 0), 2, 8, 0);
if (a > max_area)
{
max_area = a;
max_index = i;
}
}
int confidence = 0;
for (int i = 0; i< boundRect.size(); i++)
{
if ((boundRect[i].x < boundRect[max_index].x + boundRect[max_index].width && boundRect[i].x > boundRect[max_index].x - int(0.1*boundRect[max_index].width)) && (boundRect[i].y > boundRect[max_index].y))
confidence += 45;
}
if (boundRect.size() > 0)
{
if (confidence > 99)
confidence = 0;
//try{
//Mat sub_image = Mat(img, Rect(max(boundRect[max_index].x-30, 0), max(boundRect[max_index].y-30, 0), min(int(boundRect[max_index].width*1.75), img.cols - boundRect[max_index].x+30), min(boundRect[max_index].height*3, img.rows - boundRect[max_index].y+30)));
//imshow("Frame", sub_image);
//}catch(int e){
// cout<<"Error occured"<<endl;
//}
rectangle(img, boundRect[max_index].tl(), boundRect[max_index].br(), Scalar(0, 255, 0), 2, 8, 0);
//fprintf(data,"%d , %d , %d\n", boundRect[max_index].width, boundRect[max_index].height, boundRect[max_index].area());
distance = 8414.7*pow(boundRect[max_index].area(), -0.468);
cout << distance << " cm." << " Confidence: " << confidence << endl;
imshow("Frame", img);
}
else
imshow("Frame", img);
}
//fflush(data);
//fclose(data);
cam.release();
return 0;
}
Check that the frame is not empty before calling cvtColor; it only works when the input actually contains image data (3 or 4 channels):
if (!img.empty())
    cvtColor(img, img_gray, COLOR_RGB2GRAY);
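The root cause here is most likely the capture itself rather than cvtColor: VideoCapture cam(1) opens the second camera, so with a single webcam (or none at all) every grabbed frame comes back empty and cvtColor then asserts because the input has zero channels. A minimal sketch of the guard, assuming the rest of the processing loop stays as in the question:

VideoCapture cam(0);                  // default camera instead of index 1
if (!cam.isOpened())
    return -1;                        // no camera available
while (waitKey(10) != 'a')
{
    cam >> img;
    if (img.empty())
        continue;                     // skip frames that were not grabbed
    cvtColor(img, img_gray, COLOR_RGB2GRAY);
    // ... rest of the processing from the question ...
}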

OpenCV convexity defects drawing

I have stored the defects from convexityDefects as 4-element integer vectors using Vec4i.
My convex hull array is in hull and my contours are in Contours.
What I want to do is draw a line from the start point of a convexity defect to its end point.
For this I need to access the start index element, which is stored in the Vec4i of the defects vector.
How do I do this?
#include <opencv\cv.h>
#include <opencv2\highgui\highgui.hpp>
#include<opencv\cvaux.h>
#include<opencv\cxcore.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
#include<conio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
int main(){
Mat img, frame, img2, img3;
VideoCapture cam(0);
while (true){
cam.read(frame);
cvtColor(frame, img, CV_BGR2HSV);
//thresholding
inRange(img, Scalar(0, 143, 86), Scalar(39, 255, 241), img2);
imshow("hi", img2);
//finding contours
vector<vector<Point>> Contours;
vector<Vec4i> hier;
//morphological transformations
erode(img2, img2, getStructuringElement(MORPH_RECT, Size(3, 3)));
erode(img2, img2, getStructuringElement(MORPH_RECT, Size(3, 3)));
dilate(img2, img2, getStructuringElement(MORPH_RECT, Size(8, 8)));
dilate(img2, img2, getStructuringElement(MORPH_RECT, Size(8, 8)));
//finding the contours required
findContours(img2, Contours, hier, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));
//finding the contour of largest area and storing its index
int lrgctridx = 0;
int maxarea = 0;
for (int i = 0; i < Contours.size(); i++)
{
double a = contourArea(Contours[i]);
if (a> maxarea)
{
maxarea = a;
lrgctridx = i;
}
}
//convex hulls
vector<vector<Point> >hull(Contours.size());
vector<vector<Vec4i>> defects(Contours.size());
for (int i = 0; i < Contours.size(); i++)
{
convexHull(Contours[i], hull[i], false);
convexityDefects(Contours[i], hull[i], defects[i]);
}
//REQUIRED contour is detected,then convex hell is found and also convexity defects are found and stored in defects
if (maxarea>100){
drawContours(frame, hull, lrgctridx, Scalar(255, 255, 255), 1, 8, vector<Vec4i>(), 0, Point());
// drawing the required lines joining the defects! I'm facing a problem with how to achieve this, since I don't know how to access the elements stored in defects
// line(frame, /* start point */, /* end point */, /* color */, 1);
}
imshow("output", frame);
char key = waitKey(33);
if (key == 27) break;
}
}
Also, my output window shows an error when I add the convexityDefects(...) line; I think it is in the wrong format.
Thanks in advance.
convexityDefects needs a convex hull obtained using convexHull() that contains the indices of the contour points that make up the hull, and that hull must contain more than 3 indices. So you need this:
vector<vector<Point> >hull(Contours.size());
vector<vector<int> > hullsI(Contours.size()); // Indices to contour points
vector<vector<Vec4i>> defects(Contours.size());
for (int i = 0; i < Contours.size(); i++)
{
convexHull(Contours[i], hull[i], false);
convexHull(Contours[i], hullsI[i], false);
if(hullsI[i].size() > 3 ) // You need more than 3 indices
{
convexityDefects(Contours[i], hullsI[i], defects[i]);
}
}
Then your drawing part is (adapted from here):
/// Draw convexityDefects
for (int i = 0; i < Contours.size(); ++i)
{
for(const Vec4i& v : defects[i])
{
float depth = v[3] / 256;
if (depth > 10) // filter defects by depth, e.g more than 10
{
int startidx = v[0]; Point ptStart(Contours[i][startidx]);
int endidx = v[1]; Point ptEnd(Contours[i][endidx]);
int faridx = v[2]; Point ptFar(Contours[i][faridx]);
line(frame, ptStart, ptEnd, Scalar(0, 255, 0), 1);
line(frame, ptStart, ptFar, Scalar(0, 255, 0), 1);
line(frame, ptEnd, ptFar, Scalar(0, 255, 0), 1);
circle(frame, ptFar, 4, Scalar(0, 255, 0), 2);
}
}
}
Complete code
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat img, frame, img2, img3;
VideoCapture cam(0);
while (true){
cam.read(frame);
cvtColor(frame, img, CV_BGR2HSV);
//thresholding
inRange(img, Scalar(0, 143, 86), Scalar(39, 255, 241), img2);
imshow("hi", img2);
//finding contours
vector<vector<Point>> Contours;
vector<Vec4i> hier;
//morphological transformations
erode(img2, img2, getStructuringElement(MORPH_RECT, Size(3, 3)));
erode(img2, img2, getStructuringElement(MORPH_RECT, Size(3, 3)));
dilate(img2, img2, getStructuringElement(MORPH_RECT, Size(8, 8)));
dilate(img2, img2, getStructuringElement(MORPH_RECT, Size(8, 8)));
//finding the contours required
findContours(img2, Contours, hier, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0, 0));
//finding the contour of largest area and storing its index
int lrgctridx = 0;
int maxarea = 0;
for (int i = 0; i < Contours.size(); i++)
{
double a = contourArea(Contours[i]);
if (a> maxarea)
{
maxarea = a;
lrgctridx = i;
}
}
//convex hulls
vector<vector<Point> >hull(Contours.size());
vector<vector<int> > hullsI(Contours.size());
vector<vector<Vec4i>> defects(Contours.size());
for (int i = 0; i < Contours.size(); i++)
{
convexHull(Contours[i], hull[i], false);
convexHull(Contours[i], hullsI[i], false);
if(hullsI[i].size() > 3 )
{
convexityDefects(Contours[i], hullsI[i], defects[i]);
}
}
//REQUIRED contour is detected,then convex hell is found and also convexity defects are found and stored in defects
if (maxarea>100){
drawContours(frame, hull, lrgctridx, Scalar(255, 0, 255), 3, 8, vector<Vec4i>(), 0, Point());
/// Draw convexityDefects
for(int j=0; j<defects[lrgctridx].size(); ++j)
{
const Vec4i& v = defects[lrgctridx][j];
float depth = v[3] / 256;
if (depth > 10) // filter defects by depth
{
int startidx = v[0]; Point ptStart(Contours[lrgctridx][startidx]);
int endidx = v[1]; Point ptEnd(Contours[lrgctridx][endidx]);
int faridx = v[2]; Point ptFar(Contours[lrgctridx][faridx]);
line(frame, ptStart, ptEnd, Scalar(0, 255, 0), 1);
line(frame, ptStart, ptFar, Scalar(0, 255, 0), 1);
line(frame, ptEnd, ptFar, Scalar(0, 255, 0), 1);
circle(frame, ptFar, 4, Scalar(0, 255, 0), 2);
}
}
}
imshow("output", frame);
char key = waitKey(33);
if (key == 27) break;
}
}

OpenCV C++ assertion failed (i < 0) in cv::_InputArray::getMat

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <iostream>
#include <windows.h>
using namespace cv;
using namespace std;
//initial min and max HSV filter values.
//these will be changed using trackbars
Mat src; Mat HSV; Mat roi; Mat range; Mat eroded; Mat gray;
int thresh = 100;
int max_thresh = 255;
/** #function main */
int main(int argc, char** argv)
{
createTrackbars();
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) // check if we succeeded
return -1;
namedWindow("background", 1);
int waitTime = 50;
int counter = 101;
int roiLeft = 20;
int roiTop = 50;
int roiRight = 200;
int roiBottom = 200;
Rect rRoi = Rect(roiLeft, roiTop, roiRight, roiBottom);
Mat background;
cap >> background;
background = background(rRoi);
//cvtColor(background, background, CV_BGR2HSV);
//imshow("background", background);
vector<vector<Point> > contours;
vector<vector < cv::Point >> hull(1);
vector<Vec4i> hierarchy;
vector<CvConvexityDefect> defects;
while (true)
{
cap >> src;
//Create the region of interest.
Mat iRoi = src.clone()(rRoi);
Mat iRoiSRC = src(rRoi);
//Draw a rectangle there.
rectangle(src, rRoi, Scalar(255, 128, 0), 1, 8, 0);
//imshow("roi", iRoi);
//Subtract the static background.
absdiff(iRoi, background, iRoi);
//imshow("diff", iRoi);
//Convert it to a GrayScale and threshold it.
cvtColor(iRoi, iRoi, CV_BGR2GRAY);
threshold(iRoi, gray, 15, 255, CV_THRESH_BINARY);
//Perform a closing.
Mat erodeElement = getStructuringElement(MORPH_ELLIPSE, Size(erodeSize, erodeSize));
Mat dilateElement = getStructuringElement(MORPH_ELLIPSE, Size(dilateSize, dilateSize));
for (int index = 0; index < loopAmount; index++)
{
erode(gray, gray, erodeElement);
dilate(gray, gray, dilateElement);
}
//imshow("range", gray);
//Find the contours.
findContours(gray, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
//Pick the biggest contour.
int biggestContourIndex = 0;
int largestArea = 0;
for (int i = 0; i < contours.size(); i++)
{
if (contours[i].size() > largestArea)
{
largestArea = contours[i].size();
biggestContourIndex = i;
}
}
vector<int> hullsI;
vector<Point> hullsP;
vector<Vec4i> defects;
//Find the convex hull.
if (contours.size() > 0)
{
convexHull(contours[biggestContourIndex], hullsI, true, true);
convexHull(contours[biggestContourIndex], hullsP, true, true);
}
//Find the convexity defects.
if (contours.size() > 0)
{
if (contours[biggestContourIndex].size() > 3)
{
convexityDefects(contours[biggestContourIndex], hullsI, defects);
}
}
//Draw the biggest contour and its convex hull.
Scalar colorOne = Scalar(255, 128, 0);
Scalar colorTwo = Scalar(0, 0, 255);
if (contours.size() > 0)
{
drawContours(iRoiSRC, contours, biggestContourIndex, colorOne, 2, 8, hierarchy, 0, Point());
drawContours(iRoiSRC, hullsP, 0, colorTwo, 1, 8, vector<Vec4i>(), 0, Point());
rectangle(iRoiSRC, boundingRect(contours[biggestContourIndex]), Scalar(0, 255, 0), 1, 8, 0);
}
imshow("src", src);
if (waitKey(waitTime) >= 0) break;
}
return(0);
}
There is a rectangle in the upper left of the screen, where my hand will be recognized once I hold it there.
The error that I get appears at the first drawContours call. The full error given to me by the console is: OpenCV Error: Assertion failed (i < 0) in cv::_InputArray::getMat, file C:\buildslave64\win64_amdoc1\2_4_PackSlave-win64-vc11-shared\opencv\modules\core\src\matrix.cpp, line 963
I've been extensively searching for a solution on multiple sites, including Stack Overflow, but none of the solutions seem to be working.
Any help would be appreciated.
I use Visual Studio 2013 with OpenCV 2.4.10.
Converting vector<CvConvexityDefect> defects; to points seemed to do the trick.
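For readers hitting the same assertion: it is raised by the second drawContours call, where hullsP (a single vector<Point>) is passed as the contours argument even though drawContours expects a vector of point vectors. A common fix, which may or may not be the exact change the answerer made, is to wrap the hull before drawing it:

// Wrap the single hull so drawContours receives an array of point arrays.
vector<vector<Point> > hullWrapped(1, hullsP);
drawContours(iRoiSRC, hullWrapped, 0, colorTwo, 1, 8, vector<Vec4i>(), 0, Point());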