Background subtraction using OpenCV MOG detector and C++ from a live camera feed

I am trying to use this code for background subtraction on a live camera feed, but it gives a white image in both windows. When tested with a video file recorded from the same camera it works fine, without any error, but when the video file is replaced by the live camera feed the output turns white and the terminal repeatedly prints this error:
HIGHGUI ERROR: V4L2: Unable to get property (1) - Invalid argument
Please help me solve this problem.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <cv.h>
#include <iostream>
#include <sstream>

using namespace cv;
using namespace std;

Mat frame;      //current frame
Mat fgMaskMOG;  //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG;  //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;

int main()
{
    //create GUI windows
    namedWindow("Frame",0);
    //namedWindow("FG Mask MOG",0);
    namedWindow("FG Mask MOG 2",0);
    namedWindow("eroded",0);
    namedWindow("eroded2",0);
    //create Background Subtractor objects
    pMOG = new BackgroundSubtractorMOG();   //MOG approach
    pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach
    //create the capture object
    VideoCapture capture(0);
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 )
    {
        //read the current frame
        if(!capture.read(frame))
        {
            cerr << "Unable to read next frame." << endl;
            cerr << "Exiting..." << endl;
            exit(EXIT_FAILURE);
        }
        //update the background model
        //AND HERE!!!
        //pMOG->operator()(frame, fgMaskMOG);
        pMOG2->operator()(frame, fgMaskMOG2);
        //get the frame number and write it on the current frame
        stringstream ss;
        rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
                  cv::Scalar(255,255,255), -1);
        ss << capture.get(CV_CAP_PROP_POS_FRAMES);
        string frameNumberString = ss.str();
        putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG", fgMaskMOG);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );
    }
    //delete capture object
    capture.release();
    //destroy GUI windows
    destroyAllWindows();
    return EXIT_SUCCESS;
}

I think the property CV_CAP_PROP_POS_FRAMES is not valid for a camera. A frame position only exists for video files, so capture.get(CV_CAP_PROP_POS_FRAMES) fails with the V4L2 "Unable to get property" error on every loop iteration.
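A minimal sketch of the workaround, keeping the rest of the program unchanged: count the frames yourself instead of querying the capture (only the main loop is shown).

long frameNumber = 0; // manual counter replaces CV_CAP_PROP_POS_FRAMES
while( (char)keyboard != 'q' && (char)keyboard != 27 )
{
    if(!capture.read(frame))
    {
        cerr << "Unable to read next frame." << endl;
        exit(EXIT_FAILURE);
    }
    ++frameNumber;
    //update the background model
    pMOG2->operator()(frame, fgMaskMOG2);
    //write the manually tracked frame number on the current frame
    stringstream ss;
    ss << frameNumber; // was: capture.get(CV_CAP_PROP_POS_FRAMES)
    rectangle(frame, cv::Point(10, 2), cv::Point(100,20), cv::Scalar(255,255,255), -1);
    putText(frame, ss.str(), cv::Point(15, 15), FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
    imshow("Frame", frame);
    imshow("FG Mask MOG 2", fgMaskMOG2);
    keyboard = waitKey(30);
}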

Related

Drawn rectangle isn't visible in Video Stream (OpenCV)

I'm trying to make a simple program that draws a rectangle over a video stream coming from my webcam. The following code compiles and runs, but the rectangle isn't visible. I've tried various line thicknesses, colors, and positions, as well as drawing rectangles on still images rather than a video stream.
After looking through examples and tutorials as well as the OpenCV docs, I still can't figure it out. If anyone could help me make the rectangle visible, it would be greatly appreciated.
#include <opencv2/video.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;

VideoCapture vid(0);
Mat frame;

int main()
{
    while(true)
    {
        vid.read(frame);
        imshow("Webcam", frame);
        rectangle(frame, Point(100, 100), Point(300, 300), Scalar(255), 10, 8, 0);
        if (waitKey(30) == 27)
            break;
    }
}
Simply draw the rectangle before you show the image; in your loop imshow runs first, so the window always displays the frame before the rectangle has been drawn onto it:

#include <opencv2/opencv.hpp> // It's just easier to #include only this

using namespace cv;

int main() {
    // Don't use global variables if they are not needed!
    VideoCapture vid(0);
    Mat frame;
    while(true)
    {
        // Read frame
        vid.read(frame);
        // Draw rectangle
        rectangle(frame, Point(100, 100), Point(300, 300), Scalar(255, 0, 0) /*blue*/, 10, 8, 0);
        // Show image
        imshow("Webcam", frame);
        if ((waitKey(30) & 0xFF) == 27) { // for portability
            break;
        }
    }
}

Object detection with simpleBlobDetector, but it doesn't work

I have a problem with the code for the SimpleBlobDetector. I can build and run the code just fine, but the blobs that the program detects are only about a pixel in size. I've already tried to change params.minArea and maxArea, but it doesn't work, so I'm asking you guys for help. By the way, the image I'm using is already grayscale, so it isn't my threshold command that's at fault. Thanks beforehand!
Martin.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

int main(){
    Mat src;
    Mat dst;
    src = imread("C:\\Users\\martin\\Desktop\\ThermalImage2.png", CV_LOAD_IMAGE_GRAYSCALE); //Load an image from directory path
    if (! src.data){
        cout << "Could not open or find the image" << endl; // Look for invalid input
        return -1;
    }
    else{
        double thresh = 130;   // Threshold
        double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
        threshold(src, dst, thresh, maxValue, THRESH_BINARY); // threshold the picture src and call it dst
        namedWindow("thresholdedPicture", WINDOW_AUTOSIZE);   // Create a window
        imshow("thresholdedPicture", dst); // display thresholded picture in the window
    }
    SimpleBlobDetector::Params params; // Set parameters for the object detection
    params.minDistBetweenBlobs = 10;   // Minimum distance between blobs
    params.filterByColor = true;
    params.blobColor = 255;
    params.filterByArea = true; // filter by area of the blob
    params.minArea = 1;         // Minimum area of the blob
    params.maxArea = 100000;    // Maximum area of the blob
    vector<KeyPoint> keypoints;
    cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
    detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs
    Mat dst_blob_dect; // New array to store the picture with the blobs detected
    drawKeypoints(dst, keypoints, dst_blob_dect, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS); //Drawing a red line around the detected objects
    namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
    imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
    waitKey(0); // Press any key and the main function returns 0
    return 0;
}
Try this, and use different values for params.minDistBetweenBlobs. Compared to your code, it also explicitly disables the circularity, convexity, and inertia filters, which are otherwise left at their defaults and can silently discard blobs that pass the area filter:
#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv2/opencv.hpp>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(){
Mat src;
Mat dst;
src = imread("C:\\Users\\sanche8x\\Pictures\\gather.png", CV_LOAD_IMAGE_GRAYSCALE); //Load an image from directory path
if (! src.data){
cout << "Could not open or find the image" << endl ; // Look for invalid input
return -1;
}
else{
double thresh = 130; // Threshold
double maxValue = 255; // Value assigned to the pixel if it is over 'thresh'
threshold(src, dst, thresh, maxValue, THRESH_BINARY); // threshold the picture src and call it dst
namedWindow("thresholdedPicture", WINDOW_AUTOSIZE); // Create a window
imshow("thresholdedPicture", dst); // display thresholded picture in the window
}
SimpleBlobDetector::Params params; // Set parameters for the object detection
params.minDistBetweenBlobs = 10; //Minimum distance between blobs
params.filterByColor = true;
params.blobColor = 255;
params.filterByCircularity = false;
params.filterByConvexity = false;
params.filterByInertia = false;
params.filterByArea = true; // filter by area of the blob
params.minArea = 1 ;// Minimum area of the blob
params.maxArea = 100000; // Maximum area of the blob
vector<KeyPoint> keypoints;
cv::SimpleBlobDetector detector(params); // Set up the blob detector with the parameters (params)
detector.detect(dst, keypoints); // Input thresholded picture for detection of the blobs
Mat dst_blob_dect; // New array to store the picture with the blobs detected
drawKeypoints( dst, keypoints, dst_blob_dect, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS ); //Drawing a red line around the detected objects
namedWindow("keypoints", WINDOW_AUTOSIZE); // Create a window
imshow("keypoints", dst_blob_dect); // Show the picture with the blobs detected in the window "keypoints"
waitKey(0); // Press any key and the main function returns 0
return 0;
}
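If the drawn keypoints still look pixel-sized, it can also help to print what the detector actually returned before blaming the rendering. A small debugging sketch, reusing the keypoints vector from the code above:

// Dump each detected blob's center and diameter
for (size_t i = 0; i < keypoints.size(); i++) {
    cout << "Blob " << i << ": center=" << keypoints[i].pt
         << " diameter=" << keypoints[i].size << endl;
}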

OpenCV crop live feed from camera

How can I get one resolution feed from the camera in OpenCV (640x480), but cut it and actually display only part of the frame (320x240)? So not to scale down, but to actually crop. I am using OpenCV 2.4.5, VS2010 and C++.
This quite standard code captures at 640x480 input resolution, and I made some changes to crop the output to 320x240. Should I use Mat instead of IplImage, and if so, what would be the best way?
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
char key;
int main()
{
cvNamedWindow("Camera_Output", 1); //Create window
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 640 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 480 );
while(1){ //Create loop for live streaming
IplImage* framein = cvQueryFrame(capture); //Create image frames from capture
/* sets the Region of Interest - rectangle area has to be __INSIDE__ the image */
cvSetImageROI(framein, cvRect(0, 0, 320, 240));
/* create destination image - cvGetSize will return the width and the height of ROI */
IplImage *frameout = cvCreateImage(cvGetSize(framein), framein->depth, framein->nChannels);
/* copy subimage */
cvCopy(framein, frameout, NULL);
/* always reset the Region of Interest */
cvResetImageROI(framein);
cvShowImage("Camera_Output", frameout); //Show image frames on created window
key = cvWaitKey(10); //Capture Keyboard stroke
if (char(key) == 27){
break; //ESC key loop will break.
}
}
cvReleaseCapture(&capture); //Release capture.
cvDestroyWindow("Camera_Output"); //Destroy Window
return 0;
}
I think the problem is that you don't check whether you actually get a CvCapture. On my system, with only one camera, your code doesn't work because you query camera 1, but the first camera is index 0. So change this code:

CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system

to this (note the 1 changed to 0):

CvCapture* capture = cvCaptureFromCAM(0); //Capture using the first camera connected to the system
if (!capture){
    /*your error handling*/
}

Beyond that, your code seems to work for me. You might also check the other pointer values to make sure you are not getting NULL.
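As for the Mat question: with the C++ API a crop is just a rectangular ROI into the frame, so no explicit ROI set/reset or manual copy is needed for display. A minimal sketch under the same assumptions (camera at index 0, 640x480 input):

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main()
{
    VideoCapture cap(0); // first camera
    if (!cap.isOpened())
        return -1;
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    Mat frame;
    while (cap.read(frame))
    {
        // ROI view into frame; call .clone() on it if you need an independent copy
        Mat cropped = frame(Rect(0, 0, 320, 240));
        imshow("Camera_Output", cropped);
        if ((waitKey(10) & 0xFF) == 27) // ESC to quit
            break;
    }
    return 0;
}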
You can also crop the video interactively by registering a mouse callback:

cvSetMouseCallback("image", mouseHandler, NULL);

The mouseHandler function looks like this (it relies on a handful of globals from the linked tutorial; a sketch of their declarations follows the handler):
void mouseHandler(int event, int x, int y, int flags, void* param){
    if (event == CV_EVENT_LBUTTONDOWN && !drag)
    {
        /* left button clicked. ROI selection begins */
        select_flag = 0;
        point1 = Point(x, y);
        drag = 1;
    }
    if (event == CV_EVENT_MOUSEMOVE && drag)
    {
        /* mouse dragged. ROI being selected */
        Mat img1 = img.clone();
        point2 = Point(x, y);
        rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        imshow("image", img1);
    }
    if (event == CV_EVENT_LBUTTONUP && drag)
    {
        point2 = Point(x, y);
        rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
        drag = 0;
        roiImg = img(rect);
    }
    if (event == CV_EVENT_LBUTTONUP)
    {
        /* ROI selected */
        select_flag = 1;
        drag = 0;
    }
}
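The snippet above does not show the globals it uses. A plausible sketch of their declarations (names inferred from the handler; treat them as placeholders):

// Globals assumed by mouseHandler (hypothetical declarations)
Mat img;              // current frame shown in the "image" window
Mat roiImg;           // cropped result, valid once a selection is made
Point point1, point2; // corners of the selection rectangle
Rect rect;            // the selected region
int drag = 0;         // non-zero while the mouse is being dragged
int select_flag = 0;  // non-zero once a selection is complete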
For the details you can visit the following link: How to Crop Video from Webcam using OpenCV.
This is easy in Python, where the key idea is that cv2 images are NumPy arrays that can be referenced and sliced; all you need is a slice of framein. The following code takes a slice from (0,0) to (320,240). Note that NumPy arrays are indexed row-first, so the y range comes before the x range, and that a slice is a view into the same buffer (use .copy() if you need an independent image).
# Required modules
import cv2

# Constants for the crop size
xMin = 0
yMin = 0
xMax = 320
yMax = 240

# Open cam, decode image, show in window
cap = cv2.VideoCapture(0)  # use 1 or 2 or ... for other camera
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")
key = -1
while key < 0:
    success, img = cap.read()
    cropImg = img[yMin:yMax, xMin:xMax]  # this is all there is to cropping
    cv2.imshow("Original", img)
    cv2.imshow("Cropped", cropImg)
    key = cv2.waitKey(1)
cv2.destroyAllWindows()
A working example of cropping faces from a live camera:
void CropFaces::DetectAndCropFaces(Mat frame, string locationToSaveFaces) {
    std::vector<Rect> faces;
    Mat frame_gray;
    // Convert to gray scale
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    // Equalize histogram
    equalizeHist(frame_gray, frame_gray);
    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3,
                                  0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    // Iterate over all of the faces
    for (size_t i = 0; i < faces.size(); i++) {
        // Find center of faces
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        Mat face = frame_gray(faces[i]);
        std::vector<Rect> eyes;
        Mat croppedRef(frame, faces[i]);
        cv::Mat cropped;
        // Copy the data into new matrix
        croppedRef.copyTo(cropped);
        string fileName = locationToSaveFaces + "\\face_" + to_string(faces[i].x) + ".jpg";
        resize(cropped, cropped, Size(65, 65));
        imwrite(fileName, cropped);
    }
    // Display frame
    imshow("DetectAndSave", frame);
}

void CropFaces::PlayVideoForCropFaces(string locationToSaveFaces) {
    VideoCapture cap(0); // Open default camera
    Mat frame;
    face_cascade.load("haarcascade_frontalface_alt.xml"); // load faces
    while (cap.read(frame)) {
        DetectAndCropFaces(frame, locationToSaveFaces); // Call function to detect faces
        if (waitKey(30) >= 0) // pause
            break;
    }
}

opencv cvblob render issue

I just installed cvblob for object detection. When I try to run the program, the image does not show up and I get this error:
"VIDIOC_QUERYMENU: Invalid argument"
Here is the code.
#include "highgui.h"
#include "cv.h"
#include "cvaux.h"
#include "iostream"
#include <stdio.h>
#include <ctype.h>
#include <cvblob.h>
using namespace cv;
using namespace std;
using namespace cvb;
int main(int argc, char** argv) {
CvTracks tracks;
namedWindow("frame", CV_WINDOW_AUTOSIZE);
cvMoveWindow("frame", 50, 100);
CvCapture* capture;
IplImage* frame = 0;
// frame = cvLoadImage("fruits.jpg", 1);
capture = cvCreateCameraCapture( 1 ); //capture frames from cam on index 0: /dev/video0/
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 240);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 320);
frame = cvQueryFrame(capture);
while(capture) {
IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, gray, CV_BGR2GRAY);
cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);
IplImage *labelImg=cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
CvBlobs blobs;
unsigned int result=cvLabel(gray, labelImg, blobs);
cvFilterByArea(blobs, 500, 1000000);
cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
cvUpdateTracks(blobs, tracks, 200., 5);
cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
// for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it) {
// cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
// }
cvShowImage("frame", frame);
frame = cvQueryFrame(capture);
}
}
If I uncomment the commented part, the blob information will be shown.
Can anyone help me find out why the image is not showing?
Thanks,
Milo
That error is coming from the video capture system, not cvBlob.
I see a few issues:
You must test capture after creating it to make sure you have successfully opened a camera.
Your while loop should be testing frame, not capture, to make sure you've successfully received a frame of video.
Are you sure you have a camera at index 1?
Try this simplified version and see if it works. Note that I'm testing capture, looping while frame is not 0, and opening the camera at index 0. This works on my system.
int main(int argc, char** argv) {
    namedWindow("frame", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("frame", 50, 100);
    CvCapture* capture;
    IplImage* frame = 0;
    capture = cvCreateCameraCapture( 0 ); //capture frames from cam on index 0: /dev/video0/
    if (!capture) {
        return -1;
    }
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 240);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 320);
    frame = cvQueryFrame(capture);
    while(frame) {
        cvShowImage("frame", frame);
        if ((cvWaitKey(10) & 0xFF) == 27) // give HighGUI time to draw; ESC quits
            break;
        frame = cvQueryFrame(capture);
    }
}
If this works for you, try changing the cvCreateCameraCapture argument to 1. Then try adding back your code a little at a time.
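When you add the blob code back, also release the per-frame allocations; the original loop creates gray and labelImg on every iteration without freeing them, and the blob list owns memory too. A sketch of the loop under those assumptions (cvReleaseBlobs is cvBlob's cleanup call; the setup stays as in the simplified version above):

while (frame) {
    IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    cvCvtColor(frame, gray, CV_BGR2GRAY);
    cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);
    IplImage *labelImg = cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
    CvBlobs blobs;
    cvLabel(gray, labelImg, blobs);
    cvFilterByArea(blobs, 500, 1000000);
    cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
    cvShowImage("frame", frame);
    // free per-frame resources that the original loop leaked
    cvReleaseBlobs(blobs);
    cvReleaseImage(&labelImg);
    cvReleaseImage(&gray);
    if ((cvWaitKey(10) & 0xFF) == 27)
        break;
    frame = cvQueryFrame(capture);
}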

OpenCV unable to capture image from isight webcam

I cannot capture an image from my webcam using the following OpenCV code.
The code can show images from a local AVI file or a video device, and it works fine on a "test.avi" file.
When I use my default webcam (CvCapture* capture = cvCreateCameraCapture(0)), the program detects the size of the image from the webcam but is unable to display it.
(I forgot to mention that I can see the iSight is working, because its LED indicator turns on.)
Has anyone encountered the same problem?
cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
CvCapture* capture = cvCreateFileCapture( "C:\\test.avi" ); // display images from avi file, works well
// CvCapture* capture = cvCreateCameraCapture(0); // display the frames (images) from default webcam: does not work
assert( capture );
IplImage* image;
while(1) {
    image = cvQueryFrame( capture );
    if( !image ) break;
    cvShowImage( "Example2", image );
    char c = cvWaitKey(33);
    if( c == 27 ) break;
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
OpenCV 2.2
Debug library *d.lib
iSight webcam
MacBook, Windows 7 32-bit
VS2008
I'm working with OpenCV 2.3 on a MacBook Pro (Mid 2012) and I had that problem with the iSight cam. Somehow I managed to make it work in OpenCV by simply adjusting the parameters of the CvCapture, setting the frame width and height:
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 500 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 );
You can also change these numbers to the frame width and height you want.
Did you try the example from the OpenCV page? Namely:
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat edges;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
cvtColor(frame, edges, CV_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
This works on a MacBook Pro for me (although on OS X). If it doesn't work, some kind of error message would be helpful.
Try this:
int main(int, char**) {
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) { // check if we succeeded
        cout << "===couldn't open camera" << endl;
        return -1;
    }
    Mat edges, frame;
    frame = cv::Mat(10, 10, CV_8U);
    namedWindow("edges", 1);
    for (;;) {
        cap >> frame; // get a new frame from camera
        cout << "frame size: " << frame.cols << endl;
        if (frame.cols > 0 && frame.rows > 0) {
            imshow("edges", frame);
        }
        if (waitKey(30) >= 0)
            break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
Latest update: problem solved!
This happens to be one of OpenCV 2.2's bugs.
Here is how to fix it:
http://dusijun.wordpress.com/2011/01/11/opencv-unable-to-capture-image-from-isight-webcam/
Why don't you try
capture = cvCaptureFromCAM(0);
I think this may work.
Let me know whether it works or not.