OpenCV findContours() error with VS2015 and OpenCV 2.4.13 - C++

I'm having a problem with OpenCV findContours() at runtime. The project builds without errors, and I don't quite understand what the error means.
The error message was attached as a screenshot in the original post; the image link is broken, so it is not reproduced here.
Here is my code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture cap(0); //capture the video from web cam
if (!cap.isOpened()) // if not success, exit program
{
cout << "Cannot open the web cam" << endl;
return -1;
}
namedWindow("Control", CV_WINDOW_AUTOSIZE); //create a window called "Control"
int iLowH = 0;
int iHighH = 179;
int iLowS = 0;
int iHighS = 255;
int iLowV = 0;
int iHighV = 255;
//Create trackbars in "Control" window
cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
cvCreateTrackbar("HighH", "Control", &iHighH, 179);
cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
cvCreateTrackbar("HighS", "Control", &iHighS, 255);
cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
cvCreateTrackbar("HighV", "Control", &iHighV, 255);
while (true)
{
Mat imgOriginal;
bool bSuccess = cap.read(imgOriginal); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
Mat imgHSV;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
Mat imgThresholded;
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image
//morphological opening (remove small objects from the foreground)
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
//morphological closing (fill small holes in the foreground)
dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(imgThresholded, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
imshow("Thresholded Image", imgThresholded); //show the thresholded image
imshow("Original", imgOriginal); //show the original image
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}

Actually, this is a known compatibility issue between VS2013/VS2015 and OpenCV. The temporary workaround that worked for me is to add the two lines shown below before the final return (the Mat passed to imshow must still be in scope there, so declare imgThresholded before the while loop):
cv::imshow("img", imgThresholded);
cv::destroyAllWindows();
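Separately, note that in OpenCV 2.4.x findContours modifies the image it is given, so the imshow("Thresholded Image", imgThresholded) call that follows it in the question shows an already-altered mask; passing a clone avoids that. A minimal sketch of that pattern (the helper name drawDetectedContours is mine, not from the original post):
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

// Hypothetical helper (my naming): find contours on a clone of the binary mask, so the
// original mask stays intact, then draw them in green on a copy of the original frame.
static void drawDetectedContours(const cv::Mat& original, const cv::Mat& mask)
{
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;

    cv::Mat maskCopy = mask.clone();   // findContours (2.4.x) modifies its input image
    cv::findContours(maskCopy, contours, hierarchy,
                     CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

    cv::Mat view = original.clone();
    cv::drawContours(view, contours, -1, cv::Scalar(0, 255, 0), 2);   // -1 = all contours
    cv::imshow("Contours", view);
}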

I also ran into these errors, but while developing with Visual Studio 2010, and I spent a lot of time on them.
In the end I found that the cause was simply that I was linking against OpenCV libs built for the wrong toolset; you need the vc10 libs for VS2010 and the vc14 libs for VS2015.
Switching to the matching OpenCV libs fixed the error.
Building the vc10 OpenCV libs also cost me a lot of time. If someone has tried but still can't get OpenCV to build successfully, please contact me; I may be able to help.
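If you are not sure which libraries your project actually links, one way to make it explicit is to name them in the source. This is only a sketch of a common Visual Studio setup; it assumes the prebuilt OpenCV 2.4.13 vc14 libraries with their default names, so adjust the names to whatever your build produced:
// Sketch: explicit linking against OpenCV 2.4.13 built for the vc14 toolset (VS2015).
// Debug builds must use the "d" libraries; mixing a Debug build with Release OpenCV
// libs, or with libs built for another toolset, is a classic cause of crashes in
// functions such as findContours.
#ifdef _DEBUG
#pragma comment(lib, "opencv_core2413d.lib")
#pragma comment(lib, "opencv_imgproc2413d.lib")
#pragma comment(lib, "opencv_highgui2413d.lib")
#else
#pragma comment(lib, "opencv_core2413.lib")
#pragma comment(lib, "opencv_imgproc2413.lib")
#pragma comment(lib, "opencv_highgui2413.lib")
#endif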

Related

Draw a vertical line in an OpenCV VideoCapture stream

I followed a tutorial about face detection using C++ and Visual Studio 2012 and it worked well, but then I wanted to add a vertical line to the video capture (from the webcam) and nothing happened. I don't know what exactly went wrong, and I would really appreciate your help. Here is the code I'm working on:
int main() {
VideoCapture cap(0); // Open default camera
Mat frame;
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
line(frame, Point(frame.cols / 2 + 1, 0),
Point(frame.cols / 2 + 1, frame.rows - 1),
Scalar(255, 0, 128));
// Load preconstructed classifier
face_cascade.load("C:\\opencv24\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
while (cap.read(frame)) {
detectFaces(frame); // Call function to detect faces
if (waitKey(30) >= 0) // Pause key
break;
}
return 0;
}
After some modifications to the code I finally got the line drawn; here is the working code:
while (cap.read(frame)) {
// Call function to detect faces
Mat frame;
cap >> frame; // get a new frame from camera
//cvtColor(frame, frame, COLOR_BGR2GRAY);
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
line(frame, Point(frame.cols / 2 + 1, 0),
Point(frame.cols / 2 + 1, frame.rows - 1),
Scalar(255, 0, 0));
imshow("edges", frame);
detectFaces(frame);
if (waitKey(30) >= 0) // Pause key
break;
}
return 0;
}
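For reference, here is a self-contained minimal sketch of the same idea with the face-detection parts stripped out so it compiles on its own (detectFaces and face_cascade from the question are assumed to exist elsewhere and are omitted). The key points are that the frame only needs to be read once per iteration, the capture size only needs to be set once before the loop, and line() has to be called on a frame that has already been filled:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture cap(0);                     // open the default camera
    if (!cap.isOpened())
        return -1;

    // Set the capture size once, before the loop.
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    cv::Mat frame;
    while (cap.read(frame))                      // frame is valid from here on
    {
        // Draw the vertical centre line on the frame we just captured.
        cv::line(frame,
                 cv::Point(frame.cols / 2, 0),
                 cv::Point(frame.cols / 2, frame.rows - 1),
                 cv::Scalar(255, 0, 0), 2);

        cv::imshow("camera", frame);
        if (cv::waitKey(30) >= 0)                // any key stops the loop
            break;
    }
    return 0;
}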

OpenCV: changing code for color detection with trackbars from video input to a still image

I have a project where I need to detect specific colors in leaf images, such as green, brown and yellow.
I found this tutorial (http://opencv-srf.blogspot.com.br/2010/09/object-detection-using-color-seperation.html) that explains how to create real-time trackbars to find the best values for that, but it uses images from a webcam, and I want to use it with still pictures.
Can you please help me do that?
Thank you.
Here is the code for thresholding an HSV image, selecting the ranges with trackbars.
Note that, unlike the video version (as described here), I used morphologyEx to perform the morphological operations, and replaced the C-style cvCreateTrackbar with the C++ function createTrackbar.
The comments in the code should be clear; please ping me if something is not:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Load BGR image
Mat3b bgr = imread("path_to_image");
if (bgr.empty())
{
cout << "Cannot open the image" << endl;
return -1;
}
// Transform to HSV
Mat3b hsv;
cvtColor(bgr, hsv, COLOR_BGR2HSV);
// Create a window called "Control"
namedWindow("Control", CV_WINDOW_AUTOSIZE);
// Set starting values for ranges
int iLowH = 0;
int iHighH = 179;
int iLowS = 0;
int iHighS = 255;
int iLowV = 0;
int iHighV = 255;
//Create trackbars in "Control" window
createTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
createTrackbar("HighH", "Control", &iHighH, 179);
createTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
createTrackbar("HighS", "Control", &iHighS, 255);
createTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
createTrackbar("HighV", "Control", &iHighV, 255);
//Show the original image
imshow("Original", bgr);
// Create kernel for morphological operation
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
// Infinite loop, until the user presses "esc"
while (true)
{
Mat mask;
inRange(hsv, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), mask); //Threshold the image
//morphological opening (remove small objects from the foreground)
morphologyEx(mask, mask, MORPH_OPEN, kernel);
//morphological closing (fill small holes in the foreground)
morphologyEx(mask, mask, MORPH_CLOSE, kernel);
//Show the thresholded image
imshow("Thresholded Image", mask);
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
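As a small follow-up (my addition, not part of the original answer): once you have found good ranges, you may want to look at the selected pixels of the original leaf image rather than the binary mask. A helper along these lines would do it, reusing the bgr image and the mask from the loop above:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// Hypothetical helper (my naming): show only the pixels of the original BGR image
// that fall inside the chosen HSV range, i.e. where the mask is non-zero.
static void showSelectedPixels(const cv::Mat& bgr, const cv::Mat& mask)
{
    cv::Mat selected;
    bgr.copyTo(selected, mask);            // copy source pixels only where mask != 0
    cv::imshow("Selected color", selected);
}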

simple way to get Z "depth" in OpenCV

I need to detect a blue object and a red object with two different cameras. The task for now is to locate each object's position in 3D space, meaning we need the x, y, z coordinates of each object. I've seen this video and the one here, which do exactly what I'm trying to do, but there was no sample code in the case of the first video. My code so far looks like this; it gets me the x, y of the red/blue object, but no depth:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv/highgui.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
//int func(int argc, char** argv)
{
VideoCapture cap(0); //capture the video from webcam
VideoCapture cap1(1); //capture the video from extrenal camers
if (!cap.isOpened()) // if not success, exit program
{
cout << "Cannot open the web cam" << endl;
return -1;
}
if (!cap1.isOpened()) // if not success, exit program
{
cout << "Cannot open the External camera" << endl;
return -1;
}
namedWindow("Control", CV_WINDOW_AUTOSIZE); //create a window called "Control"
int iLowH = 170;
int iHighH = 179;
int iLowS = 150;
int iHighS = 255;
int iLowV = 60;
int iHighV = 255;
//Create trackbars in "Control" window to control the range of red detection
createTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
createTrackbar("HighH", "Control", &iHighH, 179);
createTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
createTrackbar("HighS", "Control", &iHighS, 255);
createTrackbar("LowV", "Control", &iLowV, 255);//Value (0 - 255)
createTrackbar("HighV", "Control", &iHighV, 255);
int iLastX = -1; //last known co-ordinates of red object
int iLastY = -1;
int iLastX1 = -1;
int iLastY1 = -1;
//Capture a temporary image from both cameras to obtain size
Mat imgTmp;
cap.read(imgTmp);
cap1.read(imgTmp);
//Create a black image with the size as the camera output
Mat imgLines = Mat::zeros(imgTmp.size(), CV_8UC3);
Mat imgLines1 = Mat::zeros(imgTmp.size(), CV_8UC3);
//loop of continuously capturing frames from video
while (true)
{
Mat imgOriginal;
Mat imgOriginal1;
bool bSuccess = cap.read(imgOriginal); // read a new frame from video webcam
bool bSuccess1 = cap1.read(imgOriginal1); // read a new frame from video external cam
if (!bSuccess || !bSuccess1) //if not success, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
//WebCam code for image and tracking/detecting
Mat imgHSV;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV to control range of color to obtain and be able to detect it
Mat imgThresholded;
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image at the colors within specified range
//morphological opening (removes noise and similar colored objects appearing in thresholded image)
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
//morphological closing (removes noise appearing inside our object in the thresholded image)
dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
//Calculate the moments of the thresholded image to calculate the object position
Moments oMoments = moments(imgThresholded);
double dM01 = oMoments.m01;
double dM10 = oMoments.m10;
double dArea = oMoments.m00;
// if the area is <= 10000, assume there is no object in the image and the non-zero area is just noise
if (dArea > 10000)
{
//calculate the position of the ball
int posX = dM10 / dArea;
int posY = dM01 / dArea;
if (iLastX >= 0 && iLastY >= 0 && posX >= 0 && posY >= 0)
{
//Draw a red line from the previous point to the current point
line(imgLines, Point(posX, posY), Point(iLastX, iLastY), Scalar(0, 0, 255), 2);
}
iLastX = posX; //current point becomes last known point and loop continues
iLastY = posY;
}
imshow("Thresholded Image", imgThresholded); //show the thresholded image
imgOriginal = imgOriginal + imgLines;
imshow("Original", imgOriginal); //show the original image with the tracking lines if exist
//External Cam code track/detect
Mat imgHSV1;
cvtColor(imgOriginal1, imgHSV1, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
Mat imgThresholded1;
inRange(imgHSV1, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded1); //Threshold the image
//morphological opening (removes noise and similar colored objects appearing in thresholded image)
erode(imgThresholded1, imgThresholded1, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(imgThresholded1, imgThresholded1, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
//morphological closing (removes noise appearing inside our object in the thresholded image)
dilate(imgThresholded1, imgThresholded1, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(imgThresholded1, imgThresholded1, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
//Calculate the moments of the thresholded image to calculate the object position
Moments oMoments1 = moments(imgThresholded1);
double dM011 = oMoments1.m01;
double dM101 = oMoments1.m10;
double dArea1 = oMoments1.m00;
// if the area is <= 10000, assume there is no object in the image and the non-zero area is just noise
if (dArea1 > 10000)
{
//calculate the position of the ball
int posX1 = dM101 / dArea1;
int posY1 = dM011 / dArea1;
if (iLastX1 >= 0 && iLastY1 >= 0 && posX1 >= 0 && posY1 >= 0)
{
//Draw a red line from the previous point to the current point
line(imgLines1, Point(posX1, posY1), Point(iLastX1, iLastY1), Scalar(0, 0, 255), 2);
}
iLastX1 = posX1;
iLastY1 = posY1;
}
imshow("Thresholded Image 2", imgThresholded1); //show the thresholded image
imgOriginal1 = imgOriginal1 + imgLines1;
imshow("Original 2", imgOriginal1); //show the original image
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
The answer to your question is stereo vision. You need to do a stereo calibration of the two cameras in order to obtain the transformation between them, which lets you produce a depth map of the scene from the two views. OpenCV provides functions for this (stereoCalibrate, stereoRectify and the block-matching classes).
Here is a tutorial to begin with.
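To give a rough idea of what that involves, here is a hedged sketch of the OpenCV 2.4 stereo pipeline, not a drop-in solution. It assumes you have already collected matching chessboard corner points from both cameras and initial intrinsics for each camera; every variable name here is mine, and the block-matching parameters are placeholders you would need to tune:
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

// Sketch only: objectPoints / imagePointsLeft / imagePointsRight come from a chessboard
// calibration of BOTH cameras; K1, D1, K2, D2 are the per-camera intrinsics; grayLeft
// and grayRight are a pair of 8-bit grayscale frames (ideally rectified before matching).
void sketchStereoDepth(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                       const std::vector<std::vector<cv::Point2f> >& imagePointsLeft,
                       const std::vector<std::vector<cv::Point2f> >& imagePointsRight,
                       cv::Mat K1, cv::Mat D1, cv::Mat K2, cv::Mat D2,
                       cv::Size imageSize,
                       const cv::Mat& grayLeft, const cv::Mat& grayRight)
{
    // 1. Stereo calibration: rotation R and translation T between the two cameras.
    cv::Mat R, T, E, F;
    cv::stereoCalibrate(objectPoints, imagePointsLeft, imagePointsRight,
                        K1, D1, K2, D2, imageSize, R, T, E, F);

    // 2. Rectification: Q is the 4x4 disparity-to-depth mapping matrix.
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q);

    // 3. Disparity between the two views (parameters are placeholders to tune).
    cv::StereoBM bm(cv::StereoBM::BASIC_PRESET, 16 * 5 /*ndisparities*/, 21 /*SAD window*/);
    cv::Mat disparity;
    bm(grayLeft, grayRight, disparity, CV_32F);

    // 4. Reproject the disparity map to 3D: xyz(y, x) holds the (X, Y, Z) of that pixel.
    cv::Mat xyz;
    cv::reprojectImageTo3D(disparity, xyz, Q, true);
}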

OpenCV convert Mat Object to array in C++

I am attempting to use OpenCV to grab frames from a webcam and convert them into an HSV (hue, saturation, value) Mat object.
Now I need to convert the HSV Mat object into an array that stores the pixel values.
This is as far as I have got right now.
int main()
{
Mat imgOriginal;
VideoCapture cap(0); //capture the video from web cam
int camOpen = cap.open(CV_CAP_ANY);
if ( !cap.isOpened() ) // if not success, exit program
{
cout << "Cannot open the web cam" << endl;
return -1;
}
namedWindow("Control", CV_WINDOW_AUTOSIZE); //create a window called "Control"
int iLowH = 0;
int iHighH = 179;
int iLowS = 0;
int iHighS = 255;
int iLowV = 0;
int iHighV = 255;
//Create trackbars in "Control" window
cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
cvCreateTrackbar("HighH", "Control", &iHighH, 179);
cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
cvCreateTrackbar("HighS", "Control", &iHighS, 255);
cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
cvCreateTrackbar("HighV", "Control", &iHighV, 255);
time_t start, end;
int counter = 0;
time(&start);
while (true)
{
time(&end);
counter++;
if(1<difftime (end, start))
{
cout<<"fps"<<counter<<endl;
counter=0;
time(&start);
cout<<"iHighH :"<<iLowH<<endl;
cout<<"iHighS :"<<iLowS<<endl;
cout<<"iHighV :"<<iLowV<<endl;
}
cap >> imgOriginal;
bool bSuccess = cap.read(imgOriginal); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
Mat imgHSV;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
Mat imgThresholded;
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image
//morphological opening (remove small objects from the foreground)
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//morphological closing (fill small holes in the foreground)
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//
// CONVERT imgHSV object in to an array
//
imshow("Thresholded Image", imgThresholded); //show the thresholded image
imshow("Original", imgOriginal); //show the original image
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
system("pause");
return 0;
}
This will copy the image pixels into a vector. If you need a C-like array, just use &pixels[0].
std::vector<cv::Vec3b> pixels;
cv::MatIterator_<Vec3b> it, end;
for(it = imgHSV.begin<Vec3b>(), end = imgHSV.end<Vec3b>(); it != end; ++it)
{
pixels.push_back(*it);
}
This is a more efficient way to fill the pixels vector:
std::vector<cv::Vec3b> pixels(imgHSV.rows * imgHSV.cols);
cv::Mat m(imgHSV.rows, imgHSV.cols, CV_8UC3, &pixels[0]);
imgHSV.copyTo(m);
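A short usage sketch (my addition, not part of the original answer): with either copy above the pixels are stored row by row, so pixel (x, y) of imgHSV sits at index y * imgHSV.cols + x, and &pixels[0] can be passed wherever a plain C array of Vec3b is expected:
#include <opencv2/core/core.hpp>
#include <vector>

// Hypothetical accessor (my naming): read back the HSV triple of pixel (x, y)
// from the row-major vector filled above.
static cv::Vec3b hsvAt(const std::vector<cv::Vec3b>& pixels, int cols, int x, int y)
{
    return pixels[y * cols + x];   // [0] = H (0-179), [1] = S (0-255), [2] = V (0-255)
}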

OpenCV: record footage in one window and display the same video in a second window with contours only

I want to capture a video and display it in one window, with a second window in which the contours are displayed simultaneously. I am struggling with how to get the processed video displayed in the second window. Please have a look at my code and suggest a solution, point out where I am going wrong, or give me some directions to an online tutorial or other sources. Thanks.
#include "iostream"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
Mat frame;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
//Capture Video
VideoCapture capCam(1);
if (!capCam.isOpened()){
cout<<"ERROR: Failed to Initialize Camera"<<endl;
return 1;
}
else{
cout<<"Camera Initialized"<<endl;
}
//Create Window
char* ImputFootage = "Source";
namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
imshow(ImputFootage, frame);
char* OutputFootage = "Processed";
namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
imshow(OutputFootage, frame);
while(1){
capCam>> frame;
imshow("Source", frame);
return(1);
if(capCam.read(ImputFootage)){
//Convert Image to gray & blur it
cvtColor( image,
image_gray,
CV_BGR2GRAY );
blur( image_gray,
image_gray2,
Size(3,3) );
//Threshold Gray&Blur Image
threshold(image_gray2,
threshold_output,
thresh,
max_thresh,
THRESH_BINARY);
//2D Container
vector<vector<Point>> contours;
//Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
findContours(threshold_output,
contours, // a vector of contours
CV_RETR_EXTERNAL,// retrieve the external contours
CV_CHAIN_APPROX_NONE,
Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(),CV_8U,Scalar(255));
drawContours(result,contours,
-1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
}
}
char CheckForEscKey = waitKey(10);
return 1;
}
You should call imshow("Processed", result); after calling drawContours.
You were trying to show frames before they had been captured from the camera. The compiler did not complain because the Mat objects were declared, but they were empty. Moreover, you were trying to display the Mat image, while what you capture from the camera is the Mat frame. You also had no exit sequence (Esc key handling), and your waitKey call was outside the camera loop.
Anyway, here is your code (rewritten); I hope this is what you wanted.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
using namespace cv;
Mat image;
Mat image_gray;
Mat image_gray2;
Mat threshold_output;
Mat frame;
int thresh = 100, max_thresh = 255;
int main(int argc, char** argv)
{
//Capture Video
VideoCapture capCam(0);
if (!capCam.isOpened())
{
cout << "ERROR: Failed to Initialize Camera" << endl;
return 1;
}
else
{
cout << "Camera Initialized" << endl;
}
//Create Window
char* ImputFootage = "Source";
namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
char* OutputFootage = "Processed";
namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
while (1)
{
capCam >> frame;
imshow(ImputFootage, frame);
if (capCam.read(frame))
{
//Convert Image to gray & blur it
cvtColor(frame, image_gray, CV_BGR2GRAY);
blur(image_gray, image_gray2, Size(3, 3));
//Threshold Gray&Blur Image
threshold(image_gray2, threshold_output, thresh, max_thresh, THRESH_BINARY);
//2D Container
vector<vector<Point> > contours;
//Find contour points (input image, storage, retrieval mode, approximation method, offset)
findContours(threshold_output, contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE, Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(), CV_8U, Scalar(255));
drawContours(result, contours, -1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
imshow(OutputFootage, result);
char CheckForEscKey = waitKey(10);
//If the key pressed by user is Esc(ASCII is 27) then break out of the loop
if (CheckForEscKey == 27)
{
break;
}
}
}
return 0;
}
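If you would rather see the contours overlaid on the live footage than drawn black on a white image, a small variation (my sketch, not part of the answer above) is to draw them onto a copy of the frame and show that instead of result:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

// Hypothetical variation (my naming): overlay the contours in green on a copy of the
// live frame instead of drawing them black on a blank white image.
static void showContourOverlay(const cv::Mat& frame,
                               const std::vector<std::vector<cv::Point> >& contours,
                               const char* windowName)
{
    cv::Mat overlay = frame.clone();
    cv::drawContours(overlay, contours, -1, cv::Scalar(0, 255, 0), 2);   // -1 = all contours
    cv::imshow(windowName, overlay);
}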