I have a problem with the code for SimpleBlobDetector. I can build and run all the code just fine, but the blobs that the program detects are only about a pixel in size. I've already tried to change params.minArea and params.maxArea, but it doesn't work. So I'm asking you guys for help. By the way, the image I was using is already in grayscale, so it isn't my threshold command that's the problem. Thanks beforehand!
Martin.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

int main()
{
    Mat src;
    Mat dst;
    src = imread("C:\\Users\\martin\\Desktop\\ThermalImage2.png", CV_LOAD_IMAGE_GRAYSCALE); // Load an image from directory path
    if (!src.data)
    {
        cout << "Could not open or find the image" << endl; // Look for invalid input
        return -1;
    }
    else
    {
        double thresh = 130;   // Threshold
        double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
        threshold(src, dst, thresh, maxValue, THRESH_BINARY); // Threshold the picture src and call it dst
        namedWindow("thresholdedPicture", WINDOW_AUTOSIZE);   // Create a window
        imshow("thresholdedPicture", dst); // Display thresholded picture in the window
    }

    SimpleBlobDetector::Params params; // Set parameters for the object detection
    params.minDistBetweenBlobs = 10;   // Minimum distance between blobs
    params.filterByColor = true;
    params.blobColor = 255;
    params.filterByArea = true; // Filter by area of the blob
    params.minArea = 1;         // Minimum area of the blob
    params.maxArea = 100000;    // Maximum area of the blob

    vector<KeyPoint> keypoints;
    cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
    detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs

    Mat dst_blob_dect; // New matrix to store the picture with the blobs detected
    drawKeypoints(dst, keypoints, dst_blob_dect, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS); // Drawing a red circle around the detected objects
    namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
    imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
    waitKey(0); // Press any key and the main function returns 0
    return 0;
}
Try this, and use different values for params.minDistBetweenBlobs. Note that this version also explicitly disables the circularity, convexity, and inertia filters; some of these are enabled by default and can reject blobs that would otherwise pass your area filter.
#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(){
Mat src;
Mat dst;
src = imread("C:\\Users\\sanche8x\\Pictures\\gather.png", CV_LOAD_IMAGE_GRAYSCALE); //Load an image from directory path
if (! src.data){
cout << "Could not open or find the image" << endl ; // Look for invalid input
return -1;
}
else{
double thresh = 130; // Threshold
double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
threshold(src, dst, thresh, maxValue, THRESH_BINARY); // threshold the picture src and call it dst
namedWindow("thresholdedPicture", WINDOW_AUTOSIZE); // Create a window
imshow("thresholdedPicture", dst); // display thresholded picture in the window
}
SimpleBlobDetector::Params params; // Set parameters for the object detection
params.minDistBetweenBlobs = 10; //Minimum distance between blobs
params.filterByColor = true;
params.blobColor = 255;
params.filterByCircularity = false;
params.filterByConvexity = false;
params.filterByInertia = false;
params.filterByArea = true; // filter by area of the blob
params.minArea = 1 ;// Minimum area of the blob
params.maxArea = 100000; // Maximum area of the blob
vector<KeyPoint> keypoints;
cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs
Mat dst_blob_dect; // New array to store the picture with the blobs detected
drawKeypoints( dst, keypoints, dst_blob_dect, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS ); //Drawing a red line around the detected objects
namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
waitKey(0); // Press any key and the main function returns 0
return 0;
}
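If the detector still reports tiny blobs, it helps to inspect what it actually found before drawing anything. A small optional debugging aid (it only assumes the keypoints vector from the code above):

    // Print the position and estimated diameter of each detected blob;
    // KeyPoint::size is the blob diameter in pixels, so this shows directly
    // whether the detector sees single pixels or full regions.
    for (size_t i = 0; i < keypoints.size(); i++)
        cout << "blob " << i << " at (" << keypoints[i].pt.x << ", " << keypoints[i].pt.y
             << "), diameter " << keypoints[i].size << endl;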
I'm trying to use SimpleBlobDetector in OpenCV 3 to detect blobs of heat in thermal images, for example people. Any simple code or example will be appreciated.
I tried:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "opencv2/features2d.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        cout << " Usage: display_image ImageToLoadAndDisplay" << endl;
        return 0;
    }

    Mat image;
    image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); // Read the file
    if (!image.data) // Check for invalid input
    {
        cout << "Could not open or find the image" << std::endl;
        return 0;
    }

    // Set up the detector with default parameters.
    //SimpleBlobDetector detector;

    // Setup SimpleBlobDetector parameters.
    SimpleBlobDetector::Params params;

    // Change thresholds
    params.minThreshold = 50;
    params.maxThreshold = 200;

    // Filter by Area.
    params.filterByArea = true;
    params.minArea = 1500;

    // Filter by Circularity
    params.filterByCircularity = true;
    params.minCircularity = 0.1;

    // Filter by Convexity
    params.filterByConvexity = true;
    params.minConvexity = 0.87;

    // Filter by Inertia
    params.filterByInertia = true;
    params.minInertiaRatio = 0.01;

    // Detect blobs.
    std::vector<KeyPoint> keypoints;
    cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
    //detector->detect(img, keypoints);
    detector->detect(image, keypoints);
    //params.detect(image, keypoints);

    // Draw detected blobs as red circles.
    // DrawMatchesFlags::DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
    Mat im_with_keypoints;
    drawKeypoints(image, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    // Show blobs
    imshow("keypoints", im_with_keypoints);
    waitKey(0);

    //namedWindow("Display window", WINDOW_AUTOSIZE); // Create a window for display.
    //imshow("Display window", image); // Show our image inside it.
    //waitKey(0); // Wait for a keystroke in the window
    //return 0;
}
but it just returns the gray image unchanged.
To detect blobs with OpenCV you need to:
Instantiate a SimpleBlobDetector type
Declare a vector of type KeyPoints
Call SimpleBlobDetector::detect()
There is a brilliant tutorial online here (where I nicked the code from): https://www.learnopencv.com/blob-detection-using-opencv-python-c/
#include <opencv2/opencv.hpp>
using namespace cv;

Mat im = imread("blob.jpg", IMREAD_GRAYSCALE);

// In OpenCV 3, SimpleBlobDetector is abstract and must be created through the factory method
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create();

std::vector<KeyPoint> keypoints;
detector->detect(im, keypoints);

Mat im_with_keypoints;
drawKeypoints(im, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

imshow("keypoints", im_with_keypoints); // Show blobs
waitKey(0);
You can also adjust parameters to select blobs with specific attributes, it's all listed in the tutorial. I'd suggest having a play, to get a feel for how it works.
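For example, here is a minimal sketch of passing tuned parameters through the OpenCV 3 factory method (the threshold and area values are illustrative, not tuned for any particular image; im and keypoints are from the snippet above):

    SimpleBlobDetector::Params params;
    params.minThreshold = 50;           // illustrative values only
    params.maxThreshold = 200;
    params.filterByArea = true;
    params.minArea = 100;               // reject specks smaller than 100 px
    params.filterByCircularity = false; // disable shape filters while experimenting
    params.filterByConvexity = false;
    params.filterByInertia = false;

    Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
    detector->detect(im, keypoints);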
I'm working on a motion detection script that tracks people.
I used the MOG2 foreground-extraction function and it does what I want. In the next step, I want to draw a rectangle around moving people, but I get an error when I run the program.
Are there any ideas on how to fix it?
The error:
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type) in cvGetMat, file /home/shar/opencv/modules/core/src/array.cpp, line 2494
terminate called after throwing an instance of 'cv::Exception'
what(): /home/shar/opencv/modules/core/src/array.cpp:2494: error: (-206) Unrecognized or unsupported array type in function cvGetMat
Aborted
This is my code:
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <vector>
#include <iostream>
#include <sstream>

using namespace std;
using namespace cv;

int main()
{
    // Open the video file
    cv::VideoCapture capture(0);
    cv::Mat frame;
    Mat colored;

    // Foreground binary image
    cv::Mat foreground;

    int largest_area = 0;
    int largest_contour_index = 0;
    Rect bounding_rect;

    cv::namedWindow("ExtractedForeground");

    vector<vector<Point> > contours; // Vector for storing contours
    vector<Vec4i> hierarchy;
    findContours(frame, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    // Check if video successfully opened
    if (!capture.isOpened())
        return 0;

    // Current video frame
    // The Mixture of Gaussians object, used with all default parameters
    cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG2 = cv::createBackgroundSubtractorMOG2();

    bool stop(false);
    // Testing the bounding box
    // For all frames in the video
    while (!stop)
    {
        // Read next frame, if any
        if (!capture.read(frame))
            break;

        // Update the background and return the foreground
        float learningRate = 0.01; // or whatever
        cv::Mat foreground;
        pMOG2->apply(frame, foreground, learningRate);

        // Learning rate
        // Complement the image
        cv::threshold(foreground, foreground, 128, 255, cv::THRESH_BINARY_INV);

        // Show foreground
        for (int i = 0; i < contours.size(); i++)
        {
            // Find the area of the contour
            double a = contourArea(contours[i], false);
            if (a > largest_area)
            {
                largest_area = a;
                cout << i << " area " << a << endl;
                // Store the index of the largest contour
                largest_contour_index = i;
                // Find the bounding rectangle for the biggest contour
                bounding_rect = boundingRect(contours[i]);
            }
        }

        Scalar color(255, 255, 255); // Color of the contour
        // Draw the contour and rectangle
        drawContours(frame, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy);
        rectangle(frame, bounding_rect, Scalar(0, 255, 0), 2, 8, 0);

        cv::imshow("ExtractedForeground", foreground);
        cv::imshow("colord", colored);

        // Introduce a delay, or press a key to stop
        if (cv::waitKey(10) >= 0)
            stop = true;
    }
}
Your code fails because you are calling findContours on frame, which is not initialized until the while loop.
You also have issues in finding the largest contour: since you don't reset largest_area and largest_contour_index at each iteration, the code will fail in case you don't find a contour in the current frame.
This code should be what you meant to do.
You can find a related answer here.
The code here is the port to OpenCV 3.0.0, plus noise removal using morphological open.
#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    Ptr<BackgroundSubtractorMOG2> bg = createBackgroundSubtractorMOG2(500, 16, false);
    VideoCapture cap(0);

    Mat3b frame;
    Mat1b fmask;
    Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));

    for (;;)
    {
        // Capture frame
        cap >> frame;

        // Background subtraction
        bg->apply(frame, fmask, -1);

        // Clean foreground from noise
        morphologyEx(fmask, fmask, MORPH_OPEN, kernel);

        // Find contours
        vector<vector<Point>> contours;
        findContours(fmask.clone(), contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

        if (!contours.empty())
        {
            // Get largest contour
            int idx_largest_contour = -1;
            double area_largest_contour = 0.0;
            for (int i = 0; i < contours.size(); ++i)
            {
                double area = contourArea(contours[i]);
                if (area_largest_contour < area)
                {
                    area_largest_contour = area;
                    idx_largest_contour = i;
                }
            }

            if (area_largest_contour > 200)
            {
                // Draw
                Rect roi = boundingRect(contours[idx_largest_contour]);
                drawContours(frame, contours, idx_largest_contour, Scalar(0, 0, 255));
                rectangle(frame, roi, Scalar(0, 255, 0));
            }
        }

        imshow("frame", frame);
        imshow("mask", fmask);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}
I am using OpenCV to capture video and save output images with a fixed size (92 by 112). It captures the frames of the video and crops them. However, the output images do not have the correct size (sometimes 147 by 147, 140 by 140, ...). What is the problem in my code? Here are the crop code and the whole program. Thanks in advance.
Crop image:
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
See the whole code below for the meaning of the parameters:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay(Mat frame);
// Global variables
// Copy this file from opencv/data/haarscascades to target folder
string face_cascade_name = "haarcascade_frontalface_alt.xml";
CascadeClassifier face_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
// Function main
int main(void)
{
VideoCapture capture(0);
if (!capture.isOpened()) // check if we succeeded
return -1;
// Load the cascade
if (!face_cascade.load(face_cascade_name))
{
printf("--(!)Error loading\n");
return (-1);
};
// Read the video stream
Mat frame;
for (;;)
{
capture >> frame;
// Apply the classifier to the frame
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
printf(" --(!) No captured frame -- Break!");
break;
}
int c = waitKey(10);
if (27 == char(c))
{
break;
}
}
return 0;
}
// Function detectAndDisplay
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element
if (ac > ab)
{
ib = ic;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
}
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
// Form a filename
filename = "";
stringstream ssfn;
ssfn << filenumber << ".pgm";
filename = ssfn.str();
filenumber++;
imwrite(filename, gray);
Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[ic].x + faces[ic].height), (faces[ic].y + faces[ic].width));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
// Show image
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
putText(frame, text, cvPoint(30, 30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
imshow("original", frame);
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");
}
You got it wrong in the 'resize image' part. cv::resize is defined as:
void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation=INTER_LINEAR )
thus in your code, when you call
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR);
the resulting resized image is stored in 'res', not in 'crop'.
The correct code for the crop part is:
crop = frame(roi_b);
resize(crop, res, Size(92, 112), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(res, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
and 'gray' is the grayscale result of your resized, cropped image.
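Alternatively, if you want to keep working with the variable crop itself, resize accepts the same Mat as source and destination (the face-cropping example further down this page uses the same pattern):

    crop = frame(roi_b);
    resize(crop, crop, Size(92, 112), 0, 0, INTER_LINEAR); // resize in place
    cvtColor(crop, gray, CV_BGR2GRAY);

Either way, the point is to convert and save the resized image, not the raw crop.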
How can I properly get one resolution feed from a camera in OpenCV (640x480), but cut it in half and display only one half of the frame (320x240)? So not scale down, but actually crop. I am using OpenCV 2.4.5, VS2010 and C++.
This quite standard code gets 640x480 input resolution, and I made some changes to crop the resolution to 320x240. Should I use Mat instead of IplImage, and if so, what would be the best way?
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
char key;
int main()
{
cvNamedWindow("Camera_Output", 1); //Create window
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 640 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 480 );
while(1){ //Create loop for live streaming
IplImage* framein = cvQueryFrame(capture); //Create image frames from capture
/* sets the Region of Interest - rectangle area has to be __INSIDE__ the image */
cvSetImageROI(framein, cvRect(0, 0, 320, 240));
/* create destination image - cvGetSize will return the width and the height of ROI */
IplImage *frameout = cvCreateImage(cvGetSize(framein), framein->depth, framein->nChannels);
/* copy subimage */
cvCopy(framein, frameout, NULL);
/* always reset the Region of Interest */
cvResetImageROI(framein);
cvShowImage("Camera_Output", frameout); //Show image frames on created window
key = cvWaitKey(10); //Capture Keyboard stroke
if (char(key) == 27){
break; //ESC key loop will break.
}
}
cvReleaseCapture(&capture); //Release capture.
cvDestroyWindow("Camera_Output"); //Destroy Window
return 0;
}
I think you don't check whether you are actually getting a CvCapture. On my system with only one camera your code doesn't work, because you query camera 1, but the first camera index should be 0. Thus change this code:
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
to (note I changed 1 to 0):
CvCapture* capture = cvCaptureFromCAM(0); // Capture using camera 0 connected to system
if (!capture)
{
    /* your error handling */
}
Beyond that, your code seems to work for me. You might also check the other pointer values to make sure you are not getting NULL.
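As for the Mat vs. IplImage question: with the C++ API the crop becomes a simple ROI expression, and no manual release calls are needed. A minimal sketch under the same assumptions (camera 0, 640x480 input, top-left 320x240 crop):

    #include <opencv2/opencv.hpp>

    using namespace cv;

    int main()
    {
        VideoCapture capture(0);
        if (!capture.isOpened())
            return -1; // your error handling
        capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
        capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

        Mat frame;
        while (true)
        {
            capture >> frame;
            if (frame.empty())
                break;
            Mat cropped = frame(Rect(0, 0, 320, 240)); // ROI header, no pixel copy
            imshow("Camera_Output", cropped);          // clone() it if you need an independent copy
            if (char(waitKey(10)) == 27)
                break; // ESC
        }
        return 0;
    }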
You can easily crop a video by registering a mouse callback:
cvSetMouseCallback("image", mouseHandler, NULL);
The mouseHandler function looks like this:
// Globals used by the handler (shared with the main loop):
// Mat img, roiImg; Rect rect; Point point1, point2; int drag = 0, select_flag = 0;
void mouseHandler(int event, int x, int y, int flags, void* param)
{
    if (event == CV_EVENT_LBUTTONDOWN && !drag)
    {
        /* left button clicked. ROI selection begins */
        select_flag = 0;
        point1 = Point(x, y);
        drag = 1;
    }
    if (event == CV_EVENT_MOUSEMOVE && drag)
    {
        /* mouse dragged. ROI being selected */
        Mat img1 = img.clone();
        point2 = Point(x, y);
        rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        imshow("image", img1);
    }
    if (event == CV_EVENT_LBUTTONUP && drag)
    {
        point2 = Point(x, y);
        rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
        drag = 0;
        roiImg = img(rect);
    }
    if (event == CV_EVENT_LBUTTONUP)
    {
        /* ROI selected */
        select_flag = 1;
        drag = 0;
    }
}
For the details you can visit the following link: How to Crop Video from Webcam using OpenCV
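For completeness, here is a minimal sketch of how the handler above plugs into a capture loop. The global names match the comment above the handler; they are assumptions reconstructed from the snippet, not code from the linked tutorial:

    Mat img, roiImg;
    Rect rect;
    Point point1, point2;
    int drag = 0, select_flag = 0;

    // ... mouseHandler as defined above ...

    int main()
    {
        VideoCapture cap(0);
        namedWindow("image");
        setMouseCallback("image", mouseHandler, NULL); // C++ counterpart of cvSetMouseCallback
        while (cap.read(img))
        {
            imshow("image", img);
            if (select_flag)
                imshow("cropped", roiImg); // show the selection once a ROI exists
            if (char(waitKey(10)) == 27)
                break; // ESC
        }
        return 0;
    }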
This is easy in Python... the key idea is that cv2 images are NumPy arrays, which can be referenced and sliced, so all you need is a slice of framein.
The following code takes a slice from (0, 0) to (320, 240). Note that NumPy arrays are indexed [row, column], i.e., [y, x].
# Required modules
import cv2

# Constants for the crop size
xMin = 0
yMin = 0
xMax = 320
yMax = 240

# Open cam, decode image, show in window
cap = cv2.VideoCapture(0)  # use 1 or 2 or ... for another camera
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")

key = -1
while key < 0:
    success, img = cap.read()
    cropImg = img[yMin:yMax, xMin:xMax]  # this is all there is to cropping
    cv2.imshow("Original", img)
    cv2.imshow("Cropped", cropImg)
    key = cv2.waitKey(1)

cv2.destroyAllWindows()
A working example of cropping faces from a live camera:
void CropFaces::DetectAndCropFaces(Mat frame, string locationToSaveFaces)
{
    std::vector<Rect> faces;
    Mat frame_gray;

    // Convert to gray scale
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);

    // Equalize histogram
    equalizeHist(frame_gray, frame_gray);

    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3,
                                  0 | CASCADE_SCALE_IMAGE, Size(30, 30));

    // Iterate over all of the faces
    for (size_t i = 0; i < faces.size(); i++)
    {
        // Find center of faces
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        Mat face = frame_gray(faces[i]);
        std::vector<Rect> eyes;

        Mat croppedRef(frame, faces[i]);
        cv::Mat cropped;
        // Copy the data into a new matrix
        croppedRef.copyTo(cropped);

        string fileName = locationToSaveFaces + "\\face_" + to_string(faces[i].x) + ".jpg";
        resize(cropped, cropped, Size(65, 65));
        imwrite(fileName, cropped);
    }

    // Display frame
    imshow("DetectAndSave", frame);
}

void CropFaces::PlayVideoForCropFaces(string locationToSaveFaces)
{
    VideoCapture cap(0); // Open default camera
    Mat frame;
    face_cascade.load("haarcascade_frontalface_alt.xml"); // Load faces

    while (cap.read(frame))
    {
        DetectAndCropFaces(frame, locationToSaveFaces); // Call function to detect faces
        if (waitKey(30) >= 0) // pause
            break;
    }
}
I want to capture a video and display it in one window, and have a second window in which the contours are displayed simultaneously. I am struggling with how to have the processed video displayed in the second window. Please analyze my code and suggest a solution, or indicate where I am going wrong; maybe give me some directions to an online tutorial or sources. Thanks.
#include "iostream"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
Mat frame;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
//Capture Video
VideoCapture capCam(1);
if (!capCam.isOpened()){
cout<<"ERROR: Failed to Initialize Camera"<<endl;
return 1;
}
else{
cout<<"Camera Initialized"<<endl;
}
//Create Window
char* ImputFootage = "Source";
namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
imshow(ImputFootage, frame);
char* OutputFootage = "Processed";
namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
imshow(OutputFootage, frame);
while(1){
capCam>> frame;
imshow("Source", frame);
return(1);
if(capCam.read(ImputFootage)){
//Convert Image to gray & blur it
cvtColor( image,
image_gray,
CV_BGR2GRAY );
blur( image_gray,
image_gray2,
Size(3,3) );
//Threshold Gray&Blur Image
threshold(image_gray2,
threshold_output,
thresh,
max_thresh,
THRESH_BINARY);
//2D Container
vector<vector<Point>> contours;
//Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
findContours(threshold_output,
contours, // a vector of contours
CV_RETR_EXTERNAL,// retrieve the external contours
CV_CHAIN_APPROX_NONE,
Point(0, 0)); // all pixels of each contours
// Draw black contours on a white image
Mat result(threshold_output.size(),CV_8U,Scalar(255));
drawContours(result,contours,
-1, // draw all contours
Scalar(0), // in black
2); // with a thickness of 2
}
}
char CheckForEscKey = waitKey(10);
return 1;
}
You should call imshow("Processed", result); after calling drawContours.
You were trying to show frames even before they were captured from the camera. The compiler did not complain because the Mats were declared, but they held no data (were empty). Moreover, you were trying to display the Mat image, while what you capture from the camera is the Mat frame. Also, you lacked an exit (Esc) sequence, and your waitKey call was outside the camera loop.
Anyway, here is your code, rewritten; I hope this is what you wanted.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

using namespace std;
using namespace cv;

Mat image;
Mat image_gray;
Mat image_gray2;
Mat threshold_output;
Mat frame;
int thresh = 100, max_thresh = 255;

int main(int argc, char** argv)
{
    // Capture video
    VideoCapture capCam(0);
    if (!capCam.isOpened())
    {
        cout << "ERROR: Failed to Initialize Camera" << endl;
        return 1;
    }
    else
    {
        cout << "Camera Initialized" << endl;
    }

    // Create windows
    char* ImputFootage = "Source";
    namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
    char* OutputFootage = "Processed";
    namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);

    while (1)
    {
        capCam >> frame;
        imshow(ImputFootage, frame);

        if (capCam.read(frame))
        {
            // Convert image to gray & blur it
            cvtColor(frame, image_gray, CV_BGR2GRAY);
            blur(image_gray, image_gray2, Size(3, 3));

            // Threshold the gray, blurred image
            threshold(image_gray2, threshold_output, thresh, max_thresh, THRESH_BINARY);

            // 2D container
            vector<vector<Point> > contours;

            // Find contour points (input image, storage, mode1, mode2, offset??)
            findContours(threshold_output, contours, // a vector of contours
                         CV_RETR_EXTERNAL,           // retrieve the external contours
                         CV_CHAIN_APPROX_NONE, Point(0, 0)); // all pixels of each contour

            // Draw black contours on a white image
            Mat result(threshold_output.size(), CV_8U, Scalar(255));
            drawContours(result, contours, -1, // draw all contours
                         Scalar(0),            // in black
                         2);                   // with a thickness of 2
            imshow(OutputFootage, result);

            char CheckForEscKey = waitKey(10);
            // If the key pressed by the user is Esc (ASCII 27), break out of the loop
            if (CheckForEscKey == 27)
            {
                break;
            }
        }
    }
    return 0;
}