opencv cvblob render issue - c++

I just installed cvblob for object detection.
When I run the program, the image does not show up and I get an error:
"VIDIOC_QUERYMENU: Invalid argument"
Here is the code.
#include "highgui.h"
#include "cv.h"
#include "cvaux.h"
#include "iostream"
#include <stdio.h>
#include <ctype.h>
#include <cvblob.h>
using namespace cv;
using namespace std;
using namespace cvb;
int main(int argc, char** argv) {
    CvTracks tracks;
    namedWindow("frame", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("frame", 50, 100);

    CvCapture* capture;
    IplImage* frame = 0;
    // frame = cvLoadImage("fruits.jpg", 1);
    capture = cvCreateCameraCapture( 1 ); //capture frames from cam on index 0: /dev/video0/
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 240);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 320);

    frame = cvQueryFrame(capture);
    while(capture) {
        IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        cvCvtColor(frame, gray, CV_BGR2GRAY);
        cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);

        IplImage *labelImg = cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
        CvBlobs blobs;
        unsigned int result = cvLabel(gray, labelImg, blobs);
        cvFilterByArea(blobs, 500, 1000000);
        cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
        cvUpdateTracks(blobs, tracks, 200., 5);
        cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);

        // for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it) {
        //     cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        // }

        cvShowImage("frame", frame);
        frame = cvQueryFrame(capture);
    }
}
If I uncomment the commented-out part, the blob information is printed.
Can anyone help me find out why the image is not showing?
Thanks,
Milo

That error is coming from the video capture system, not cvBlob.
I see a few issues:
You must test capture after creating it, to make sure you have successfully opened a camera.
Your while loop should test frame, not capture, to make sure you've successfully received a frame of video.
Are you sure you have a camera at index 1?
Try this simplified version and see if it works. Note that I'm testing capture, looping while frame is not 0, and opening the camera at index 0. This works on my system.
int main(int argc, char** argv) {
    namedWindow("frame", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("frame", 50, 100);

    CvCapture* capture;
    IplImage* frame = 0;
    capture = cvCreateCameraCapture( 0 ); //capture frames from cam on index 0: /dev/video0/
    if (!capture) {
        return -1;
    }
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 240);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 320);

    frame = cvQueryFrame(capture);
    while(frame) {
        cvShowImage("frame", frame);
        frame = cvQueryFrame(capture);
    }
}
If this works for you, try changing the cvCreateCameraCapture argument to 1. Then try adding back your code a little at a time.
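For reference, here is a sketch of what the loop might look like with the blob code added back in (just a sketch, assuming a working camera at index 0 and the cvb API from your code). Note the cvWaitKey call, which HighGUI needs in order to actually repaint the window, and the per-frame releases, which the original loop was missing:
#include "cv.h"
#include "highgui.h"
#include <cvblob.h>

using namespace cvb;

int main()
{
    cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);

    CvCapture* capture = cvCreateCameraCapture(0);
    if (!capture)
        return -1;

    CvTracks tracks;
    IplImage* frame = cvQueryFrame(capture);

    while (frame)
    {
        // segment the frame
        IplImage* gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        cvCvtColor(frame, gray, CV_BGR2GRAY);
        cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);

        // label connected components, filter them and update the tracks
        IplImage* labelImg = cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
        CvBlobs blobs;
        cvLabel(gray, labelImg, blobs);
        cvFilterByArea(blobs, 500, 1000000);
        cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
        cvUpdateTracks(blobs, tracks, 200., 5);
        cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX);

        cvShowImage("frame", frame);

        // release per-frame resources so the loop does not leak
        cvReleaseBlobs(blobs);
        cvReleaseImage(&labelImg);
        cvReleaseImage(&gray);

        if (cvWaitKey(10) == 27)   // Esc quits; this also lets HighGUI repaint the window
            break;

        frame = cvQueryFrame(capture);
    }

    cvReleaseTracks(tracks);
    cvReleaseCapture(&capture);
    cvDestroyWindow("frame");
    return 0;
}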

Related

Background subtraction using OpenCV MOG detector and C++ from live camera feed

I am trying to use this code for background subtraction on a live camera feed, but it shows a white image in both windows. The problem is that when tested with a video file from the same camera it works fine, without any error, but when the video file is replaced by the camera feed the windows turn white and the terminal repeatedly prints this error:
HIGHGUI ERROR: V4L2: Unable to get property (1) - Invalid argument
Please help me solve this problem.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <cv.h>
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;
int main()
{
    //create GUI windows
    namedWindow("Frame",0);
    //namedWindow("FG Mask MOG",0);
    namedWindow("FG Mask MOG 2",0);
    namedWindow("eroded",0);
    namedWindow("eroded2",0);

    //create Background Subtractor objects
    pMOG = new BackgroundSubtractorMOG();   //MOG approach
    pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach

    //create the capture object
    VideoCapture capture(0);

    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 )
    {
        //read the current frame
        if(!capture.read(frame))
        {
            cerr << "Unable to read next frame." << endl;
            cerr << "Exiting..." << endl;
            exit(EXIT_FAILURE);
        }

        //update the background model
        //AND HERE!!!
        //pMOG->operator()(frame, fgMaskMOG);
        pMOG2->operator()(frame, fgMaskMOG2);

        //get the frame number and write it on the current frame
        stringstream ss;
        rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
                  cv::Scalar(255,255,255), -1);
        ss << capture.get(CV_CAP_PROP_POS_FRAMES);
        string frameNumberString = ss.str();
        putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));

        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG", fgMaskMOG);
        imshow("FG Mask MOG 2", fgMaskMOG2);

        //get the input from the keyboard
        keyboard = waitKey( 30 );
    }

    //delete capture object
    capture.release();

    //destroy GUI windows
    destroyAllWindows();
    return EXIT_SUCCESS;
}
I think the property CV_CAP_PROP_POS_FRAMES is not valid for a camera.
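If so, a simple workaround is to count the frames yourself instead of asking the capture for CV_CAP_PROP_POS_FRAMES. A minimal sketch of that idea (the background-subtraction part is left out here and stays as in your code):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>

using namespace cv;
using namespace std;

int main()
{
    VideoCapture capture(0);
    if (!capture.isOpened())
        return -1;

    namedWindow("Frame", 0);

    Mat frame;
    long frameNumber = 0;   // counted by hand instead of CV_CAP_PROP_POS_FRAMES
    int keyboard = 0;

    while ((char)keyboard != 'q' && (char)keyboard != 27)
    {
        if (!capture.read(frame))
            break;
        ++frameNumber;

        // write our own counter on the frame
        stringstream ss;
        ss << frameNumber;
        rectangle(frame, Point(10, 2), Point(100, 20), Scalar(255, 255, 255), -1);
        putText(frame, ss.str(), Point(15, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));

        imshow("Frame", frame);
        keyboard = waitKey(30);
    }
    return 0;
}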

Unable to read video file in OpenCV

I am trying to capture a video, store it in a file, and then read the same video file back. I am able to write it but not able to read the same file. On pressing Escape, the program is supposed to stop the webcam capture and play the recorded video, but it displays the following error instead:
[mpeg1video @ 0x2a16f40] ac-tex damaged at 14 28
[mpeg1video @ 0x2a16f40] Warning MVs not available
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type) in cvGetMat, file /home/ujjwal/Downloads/OpenCV-2.4.0/modules/core/src/array.cpp, line 2482
terminate called after throwing an instance of 'cv::Exception'
what(): /home/ujjwal/Downloads/OpenCV-2.4.0/modules/core/src/array.cpp:2482: error: (-206) Unrecognized or unsupported array type in function cvGetMat
The code is:
#include <sstream>
#include <string>
#include <iostream>
#include <opencv/highgui.h>
#include <opencv/cv.h>
using namespace cv;
int main(int argc, char* argv[])
{
    Mat inputVideo;
    Mat frame;
    Mat HSV;
    Mat tracking;
    char checkKey;

    VideoCapture capture;
    capture.open(0);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    VideoWriter writer("OutputFile.mpeg", CV_FOURCC('P','I','M','1'), 50, Size(640, 480));

    while(1){
        capture.read(inputVideo);
        imshow("Original Video", inputVideo);
        writer.write(inputVideo);
        checkKey = cvWaitKey(20);
        if(checkKey == 27)
            break;
    }

    capture.open("OutputFile.mpeg");
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    while(1){
        capture.read(inputVideo);
        imshow("Tracking Video", inputVideo);
    }
    return 0;
}
Can someone please help me? Thanks!
You need to correct several things to make it work:
You have to create the window before showing images in the window.
You have to release the writer to finish writing the file before you open it again for reading.
You need to add cvWaitKey(20) after the second imshow (check out here for why this is essential).
The whole fixed code is as follows:
#include <sstream>
#include <string>
#include <iostream>
#include <opencv/highgui.h>
#include <opencv/cv.h>
using namespace cv;
int main(int argc, char* argv[])
{
    Mat inputVideo;
    Mat frame;
    Mat HSV;
    Mat tracking;
    char checkKey;

    VideoCapture capture;
    capture.open(0);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    VideoWriter writer("OutputFile.mpeg", CV_FOURCC('P','I','M','1'), 50, Size(640, 480));
    namedWindow("Original Video", WINDOW_AUTOSIZE);

    while(1){
        capture.read(inputVideo);
        imshow("Original Video", inputVideo);
        writer.write(inputVideo);
        checkKey = cvWaitKey(20);
        if(checkKey == 27)
            break;
    }
    writer.release();

    capture.open("OutputFile.mpeg");
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    namedWindow("Tracking Video", WINDOW_AUTOSIZE);

    while(1){
        capture.read(inputVideo);
        if (!inputVideo.empty())
        {
            imshow("Tracking Video", inputVideo);
            checkKey = cvWaitKey(20);
        }
        else
            break;
    }
    return 0;
}
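One more thing worth checking, in case the MPEG-1 writer still misbehaves on your build (codec support varies between OpenCV builds): verify that the writer actually opened, and try a more widely available codec. A small sketch of that idea, assuming MJPG in an .avi container is available on your system:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace cv;

int main()
{
    VideoCapture capture(0);
    if (!capture.isOpened())
        return -1;

    // MJPG in an .avi container is widely supported; adjust FOURCC/extension to your build
    Size frameSize((int)capture.get(CV_CAP_PROP_FRAME_WIDTH),
                   (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    VideoWriter writer("OutputFile.avi", CV_FOURCC('M', 'J', 'P', 'G'), 25, frameSize);
    if (!writer.isOpened())
    {
        std::cerr << "Could not open OutputFile.avi for writing" << std::endl;
        return -1;
    }

    Mat frame;
    while (capture.read(frame))
    {
        writer.write(frame);
        imshow("Recording", frame);
        if (waitKey(20) == 27)   // Esc stops recording
            break;
    }
    writer.release();            // flush and close the file before reading it back
    return 0;
}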

OpenCV: record footage in one window and display the same video in a 2nd window but with contours only

I want to capture video and display it in one window, with a second window showing the contours simultaneously. I am struggling with how to display the processed video in the second window. Please analyze my code and suggest a solution, or point out where I am going wrong; directions to an online tutorial or other sources would also help. Thanks.
#include "iostream"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
Mat frame;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
    //Capture Video
    VideoCapture capCam(1);
    if (!capCam.isOpened()){
        cout<<"ERROR: Failed to Initialize Camera"<<endl;
        return 1;
    }
    else{
        cout<<"Camera Initialized"<<endl;
    }

    //Create Window
    char* ImputFootage = "Source";
    namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
    imshow(ImputFootage, frame);

    char* OutputFootage = "Processed";
    namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
    imshow(OutputFootage, frame);

    while(1){
        capCam >> frame;
        imshow("Source", frame);
        return(1);

        if(capCam.read(ImputFootage)){
            //Convert Image to gray & blur it
            cvtColor( image,
                      image_gray,
                      CV_BGR2GRAY );
            blur( image_gray,
                  image_gray2,
                  Size(3,3) );

            //Threshold Gray&Blur Image
            threshold(image_gray2,
                      threshold_output,
                      thresh,
                      max_thresh,
                      THRESH_BINARY);

            //2D Container
            vector<vector<Point>> contours;

            //Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
            findContours(threshold_output,
                         contours,         // a vector of contours
                         CV_RETR_EXTERNAL, // retrieve the external contours
                         CV_CHAIN_APPROX_NONE,
                         Point(0, 0));     // all pixels of each contours

            // Draw black contours on a white image
            Mat result(threshold_output.size(), CV_8U, Scalar(255));
            drawContours(result, contours,
                         -1,         // draw all contours
                         Scalar(0),  // in black
                         2);         // with a thickness of 2
        }
    }
    char CheckForEscKey = waitKey(10);
    return 1;
}
You should call imshow("Processed", result); after calling drawContours
You were trying to show frames even before they were captured from the camera. The compiler gave you no error because the Mat objects were declared, but they were empty (null). Moreover, you were trying to display Mat image, but what you capture from the camera is Mat frame. Also, you lack an exit (Esc) sequence, and your waitKey call was outside the camera loop.
Anyway, here is your code (rewritten); I hope this is what you wanted.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
using namespace cv;
Mat image;
Mat image_gray;
Mat image_gray2;
Mat threshold_output;
Mat frame;
int thresh = 100, max_thresh = 255;
int main(int argc, char** argv)
{
    //Capture Video
    VideoCapture capCam(0);
    if (!capCam.isOpened())
    {
        cout << "ERROR: Failed to Initialize Camera" << endl;
        return 1;
    }
    else
    {
        cout << "Camera Initialized" << endl;
    }

    //Create Window
    char* ImputFootage = "Source";
    namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
    char* OutputFootage = "Processed";
    namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);

    while (1)
    {
        capCam >> frame;
        imshow(ImputFootage, frame);

        if (capCam.read(frame))
        {
            //Convert Image to gray & blur it
            cvtColor(frame, image_gray, CV_BGR2GRAY);
            blur(image_gray, image_gray2, Size(3, 3));

            //Threshold Gray&Blur Image
            threshold(image_gray2, threshold_output, thresh, max_thresh, THRESH_BINARY);

            //2D Container
            vector<vector<Point> > contours;

            //Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
            findContours(threshold_output, contours,            // a vector of contours
                         CV_RETR_EXTERNAL,                      // retrieve the external contours
                         CV_CHAIN_APPROX_NONE, Point(0, 0));    // all pixels of each contours

            // Draw black contours on a white image
            Mat result(threshold_output.size(), CV_8U, Scalar(255));
            drawContours(result, contours, -1,  // draw all contours
                         Scalar(0),             // in black
                         2);                    // with a thickness of 2
            imshow(OutputFootage, result);

            char CheckForEscKey = waitKey(10);
            //If the key pressed by user is Esc(ASCII is 27) then break out of the loop
            if (CheckForEscKey == 27)
            {
                break;
            }
        }
    }
    return 0;
}

How can I convert a cv::Mat to grayscale in OpenCV?

How can I convert a cv::Mat to grayscale?
I am trying to run the drawKeypoints function from OpenCV, but I keep getting an Assertion Failed error. My guess is that it needs to receive a grayscale image rather than a color image as its parameter.
void SurfDetector(cv::Mat img){
    vector<cv::KeyPoint> keypoints;
    cv::Mat featureImage;
    cv::drawKeypoints(img, keypoints, featureImage, cv::Scalar(255,255,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::namedWindow("Picture");
    cv::imshow("Picture", featureImage);
}
Using the C++ API, the function name has changed slightly, and it now reads:
#include <opencv2/imgproc/imgproc.hpp>
cv::Mat greyMat, colorMat;
cv::cvtColor(colorMat, greyMat, CV_BGR2GRAY);
The main difficulties are that the function lives in the imgproc module (not in core), and that by default cv::Mat images are stored in Blue-Green-Red (BGR) order instead of the more common RGB.
OpenCV 3
Starting with OpenCV 3.0, there is yet another convention.
Conversion codes live in the cv:: namespace and are prefixed with COLOR_.
So the example then becomes:
#include <opencv2/imgproc/imgproc.hpp>
cv::Mat greyMat, colorMat;
cv::cvtColor(colorMat, greyMat, cv::COLOR_BGR2GRAY);
As far as I have seen, the include file path hasn't changed (this is not a typo).
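Applied to the function in the question, a minimal sketch using the OpenCV 3 names could look like the following. Note that drawKeypoints only draws whatever keypoints you hand it, so some detector (ORB here, purely as an example) has to fill the vector first:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

void SurfDetector(cv::Mat img)
{
    // convert to grayscale first; feature detectors expect a single-channel image
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);

    // detect some keypoints so there is something to draw
    std::vector<cv::KeyPoint> keypoints;
    cv::Ptr<cv::ORB> detector = cv::ORB::create();
    detector->detect(gray, keypoints);

    // draw the keypoints on top of the original colour image
    cv::Mat featureImage;
    cv::drawKeypoints(img, keypoints, featureImage, cv::Scalar(255, 255, 255),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    cv::namedWindow("Picture");
    cv::imshow("Picture", featureImage);
    cv::waitKey(0);
}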
This may be helpful for latecomers.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
    if (argc != 2) {
        cout << "Usage: display_Image ImageToLoadandDisplay" << endl;
        return -1;
    } else {
        Mat image;
        Mat grayImage;
        image = imread(argv[1], IMREAD_COLOR);
        if (!image.data) {
            cout << "Could not open the image file" << endl;
            return -1;
        }
        else {
            int height = image.rows;
            int width = image.cols;
            cvtColor(image, grayImage, CV_BGR2GRAY);

            namedWindow("Display window", WINDOW_AUTOSIZE);
            imshow("Display window", image);

            namedWindow("Gray Image", WINDOW_AUTOSIZE);
            imshow("Gray Image", grayImage);

            cvWaitKey(0);
            image.release();
            grayImage.release();
            return 0;
        }
    }
}

OpenCV unable to capture image from iSight webcam

I cannot capture an image from my webcam using the following OpenCV code.
The code can show images from a local AVI file or a video device. It works fine on a "test.avi" file.
When I use my default webcam (CvCapture* capture = cvCreateCameraCapture(0)), the program can detect the size of the image from the webcam, but it is unable to display the image.
(I forgot to mention that I can see the iSight is working, because the LED indicator is turned on.)
Has anyone encountered the same problem?
cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
CvCapture* capture = cvCreateFileCapture( "C:\\test.avi" ); // display images from avi file, works well
// CvCapture* capture = cvCreateCameraCapture(0);           // display the frame(images) from default webcam not work

assert( capture );
IplImage* image;

while(1) {
    image = cvQueryFrame( capture );
    if( !image ) break;
    cvShowImage( "Example2", image );
    char c = cvWaitKey(33);
    if( c == 27 ) break;
}

cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
opencv 2.2
Debug library *d.lib
WebCam isight
Macbook OS win7 32
VS2008
I'm working with OpenCV 2.3 on a MacBook Pro (Mid 2012) and I had that problem with the iSight cam. I managed to make it work in OpenCV by simply adjusting the CvCapture parameters for frame width and height:
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 500 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 );
You can also change these numbers to the frame width and height you want.
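The same thing with the C++ API, in case you prefer it (again only a sketch; whether the iSight actually honours these exact values depends on the driver):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened())
        return -1;

    // ask the driver for a specific frame size; it may pick the closest supported one
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 500);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 600);

    Mat frame;
    while (cap.read(frame))
    {
        imshow("iSight", frame);
        if (waitKey(30) >= 0)
            break;
    }
    return 0;
}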
Did you try the example from the OpenCV page? Namely:
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
        return -1;

    Mat edges;
    namedWindow("edges", 1);
    for(;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        cvtColor(frame, edges, CV_BGR2GRAY);
        GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
        Canny(edges, edges, 0, 30, 3);
        imshow("edges", edges);
        if(waitKey(30) >= 0) break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
Works on a MacBook Pro for me (although on OS X). If it doesn't work, some kind of error message would be helpful.
Try this:
int main(int, char**) {
    VideoCapture cap(0);   // open the default camera
    if (!cap.isOpened()) { // check if we succeeded
        cout << "===couldn't open camera" << endl;
        return -1;
    }

    Mat edges, frame;
    frame = cv::Mat(10, 10, CV_8U);
    namedWindow("edges", 1);

    for (;;) {
        cap >> frame; // get a new frame from camera
        cout << "frame size: " << frame.cols << endl;

        if (frame.cols > 0 && frame.rows > 0) {
            imshow("edges", frame);
        }
        if (waitKey(30) >= 0)
            break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
Latest update! Problem solved!
This happens to be one of OpenCV 2.2's bugs.
Here is how to fix it:
http://dusijun.wordpress.com/2011/01/11/opencv-unable-to-capture-image-from-isight-webcam/
Why don't you try
capture = cvCaptureFromCAM(0);
I think this may work.
Let me know whether it works or not.