I read a logitech c200 webcam on usb-port with this code in c++ with opencv:
// Grab one frame from the first webcam, flip it, and count "bright" (hell)
// vs. "dark" (dunkel) pixels. A pixel is "bright" when its blue [0] and
// green [1] channels both exceed 230 (webcam frames are 3-channel BGR --
// there is no alpha channel to read).
Mat result;
IplImage* frame;
int hell = 0;   // number of bright pixels
int dunkel = 0; // number of dark pixels
CvCapture* capture = 0;
capture = cvCaptureFromCAM( CV_CAP_ANY );
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
frame = cvQueryFrame( capture );
if( !frame ) break;
// BUG FIX: `result = frame;` only wrapped the IplImage buffer that is OWNED
// by the capture. cvReleaseCapture() below frees that buffer, so the pixel
// loop was reading freed memory (use-after-free). Take a deep copy instead.
result = Mat(frame, true); // copyData = true
flip(result, result, -1);
cvReleaseCapture( &capture );
// FIX: iterate over the actual frame size instead of hard-coding 320x240 --
// many camera drivers silently ignore the requested capture resolution.
for (int i = 0; i < result.rows; ++i) {
    for (int j = 0; j < result.cols; ++j) {
        if ((result.at<Vec3b>(i, j)[1] > 230) && (result.at<Vec3b>(i, j)[0] > 230)) {
            hell++;
        } else {
            dunkel++;
        }
    }
}
How can I get the alpha channel — which would be the fourth element, i.e. index [3], of one element in the Mat matrix in OpenCV?
Thanks for Help
there is no alpha channel in images from a webcam.
also, please use opencv's c++ api, the venerable c one is a dead end.
is there a possibility to read the alpha channel from a webcam?
like the hsv-color model:
http://en.wikipedia.org/wiki/HSL_and_HSV
it's the hue(Farbton)
Related
I am writing OpenCV code for playing video. I want to add a trackbar to adjust the video speed, but the trackbar doesn't move and I can't focus the video window. This is my OpenCV C++ code. What should I do?
/**
 * Trackbar callback: interprets the slider position as a speed divisor and
 * stores the resulting factor in the global `sec` (sec = 1 / pos).
 * NOTE(review): `sec` is assumed to be a global floating-point variable
 * declared elsewhere in this file -- confirm.
 * @param pos current slider position (0..slider_max)
 */
void onTrackbarSlide(int pos, void *){
    if (pos <= 0) pos = 1; // BUG FIX: the slider starts at 0 -> division by zero below
    sec = 1;
    sec /= pos;
    printf("pos = %d\n", pos);
}
int main(void)
{
strcat(InputFile, FileName); // input video file
//VideoCapture cap(0); // Create an object and open the default(0) camera. C++ Syntax: VideoCapture::VideoCapture(int device)
VideoCapture cap; // Class for video capturing from video files or cameras.
cap.open(InputFile); // Open video file or a capturing device for video capturing
if (!cap.isOpened()) { // Check if the file is opened sucessfully.
printf("\nFail to open a video file");
return -1;
}
// When querying a property that is not supported by the backend used by the VideoCapture class, value 0 is returned.
double fps = cap.get(CV_CAP_PROP_FPS); printf("\nCV_CAP_PROP_FPS = %f", fps);
double ToTalFrames = cap.get(CV_CAP_PROP_FRAME_COUNT); printf("\nCV_CAP_PROP_FRAME_COUNT = %f", fps);
CvSize FrameSize;
FrameSize.width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
FrameSize.height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
printf("\nWidth * Height = %d * %d\n", FrameSize.width, FrameSize.height);
VideoWriter wrt;
#define CODEC -1
namedWindow("original", 1);
int slider_position = 0;
int slider_max = 255;
createTrackbar("video speed", "original", &slider_position, slider_max, onTrackbarSlide);
wrt.open(OutputFile, CODEC, fps, FrameSize);
Mat frame, dst1;
Mat dst2;
for (int i = 1; i< ToTalFrames; i++)
{
cap.read(frame);
if (frame.data == NULL) { printf("\nNo image found!\n"); return(0); }
imshow("original", frame);
if( waitKey(sec /fps) == 0x1b ) break; // Break if key input is escape code.
}
return 0;
}
I am using Haar detection in my hobby project, the detection is done on a video stream. Once Haar detects something I imshow it, this is how it looks like: Mat faceROI = frame_gray(faces[i]);
imshow( "Detection", faceROI);
While the video is running I am getting detections, and the Mat is getting updated/overwritten with a new image of the object. What I want to do now is to save the Mat so that when a new detection occurs I get both the previous and the current frame. I guess I have to save the Mat in some way and then update it so that current -> previous, and so on.
imshow( "Previous detection", previousROI);` <- want to be able to do this
In case you want to see the whole code, I am doing this: http://docs.opencv.org/2.4/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
Help is much appreciated!
You might have an easier time if you don't split the detect/display into a separate function. I've modified the OpenCV documentation code below. Keep in mind I haven't compiled or run this, so there may be some errors, but it should give you an idea of a different way to address the issue.
/** #function main */
int main( int argc, const char** argv )
{
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
//store faces here
std::vector<Mat> prev_faces;
std::vector<Mat> current_faces;
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ )
{
Mat faceROI = frame_gray( faces[i] );
current_faces.push_back(faceROI); //adds all of the current detections to a vector
}
if (prev_faces.size() > 0 && current_faces.size() > 0)
{
// do stuff with prev_faces and current_faces
// for(int i = 0; i < prev_faces.size(); i++){
// imshow("previous", prev_faces[i])
// }
// for(int i = 0; i < prev_faces.size(); i++){
// imshow("current", current_faces[i])
// }
// imshow("stuff", other_cool_Mats_I_made_by_analysing_and_comparing_prev_and_current)
}
prev_faces = current_faces;
current_faces.erase(current_faces.begin(), current_faces.end());
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
I'm trying to write a simple C++ script using opencv to detect faces and save one facial square box for each new face showing up in the camera. The frame should be right, but I'm struggling with the pointer / esp. "total" around cvseq. Could someone help me? Whenever it gets to the row "cvSeqPush(faces_new, r);", faces_new shows up with 60 or 70 in the faces_new->total, without ever being allocated with anything.... Very frustrated, and would really appreciate some help.
// memory allocation: three pools -- current faces, last-frame faces, new faces
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
static CvMemStorage* storage3 = 0;
storage3 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 );// request capture width (not window size)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // request capture height (not window size)
// Declare images
IplImage *imgCamera; // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn; // image with drawing (rect containing faces)
IplImage *imgFace; // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage); ; // sequence of faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
CvSeq *faces_new = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage3); // sequence of faces in the camera image - NEW FACES
float scale = 1.0/5; // how far do we want to scale down the haar detect objects images for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
//*************************************************************************************/
//Step 1: stream video. Video to images
//*************************************************************************************/
// capture frame from video and then turn it into one single image-imgCamera
// NOTE(review): capture_frame() is defined elsewhere; for it to set imgCamera
// it must take the IplImage pointer by reference -- confirm its signature.
capture_frame(capture, imgCamera);
// allocate an image to be used later
// NOTE(review): imgDrawn and imgFace are re-created on EVERY iteration and
// never released inside the loop -> per-frame memory leak.
imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
cvCopy(imgCamera, imgDrawn);
if (counter == 10) { // take action for every 10 frames
counter = 1;
//*************************************************************************************/
//Step 2: Detection
//*************************************************************************************/
find_faces(imgCamera, storage, cascade, faces, scale);
//printf("Last faces seq had %d faces detected. \n",faces_last->total);
//*************************************************************************************/
//Step 4: Draw every face in the picture
//*************************************************************************************/
// for each face found in the image
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
// draw the rectangle around the face on the imgDrawn
draw_rect(imgDrawn, r, scale);
}
cvShowImage("Window", imgDrawn);
// press escape to quit
if( cvWaitKey(33) == 27 ) break;
//*************************************************************************************/
//Step 3: Recognize the new faces
//*************************************************************************************/
//TO DO: Combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
cout << "faces_new"<< faces_new->total<< "\n";
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
if (faces_last->total == 0) {
//cout << "New PERSON!!";
cvSeqPush(faces_new, r);
}
else {
for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
// NOTE(review): this pushes once per NON-matching previous face, so one
// current face can be pushed faces_last->total times; it should be pushed
// only if it matches none of the previous faces.
if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
cvSeqPush(faces_new, r);
//cout << "faces_new"<< faces_new->total<< "\n";
}
}
}
}
//*************************************************************************************/
//Step 3: Process faces - save new faces, report new faces
//*************************************************************************************/
if ((faces_new->total)>0) {
// To change to save only faces_new
save_faces(faces_new, imgCamera, imgFace, scale, filecounter);
// report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
filecounter = filecounter+(faces_new-> total);}
// NOTE(review): likely cause of the garbage faces_new->total values reported
// in the question: cvClearMemStorage(storage2) releases the memory backing
// faces_last INCLUDING its CvSeq header (allocated from storage2), and the
// next line then pushes `faces` -- a CvSeq* -- into a sequence whose element
// size is sizeof(CvRect), copying header bytes as if they were a rect.
// The individual CvRect elements should be pushed instead, and the sequence
// recreated after its storage is cleared.
cvClearMemStorage(storage2);
cvSeqPush(faces_last, faces);
//cout << "face_last:" << faces_last->total << "\n";}
// NOTE(review): clearing these storages also invalidates the `faces` and
// `faces_new` sequence headers created from them before the loop; the stale
// pointers keep being used on the next iteration.
cvClearMemStorage(storage);
cvClearMemStorage(storage3);
}
counter++;
// NOTE(review): pointer aliasing, not a copy -- imgCamera_last ends up
// pointing at the same buffer as imgCamera, so same_face() may compare a
// frame with itself. A deep copy (cvCloneImage/cvCopy) is presumably wanted.
imgCamera_last = imgCamera;
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
// NOTE(review): the window was created as "Window" (capital W); this call
// does not match it and destroys nothing.
cvDestroyWindow("window");
Finally got it to work.
// Webcam loop: every 10th frame, detect faces, draw them, and save any face
// that was not present in the previous detection pass.
// FIXES vs. the original: (1) imgDrawn/imgFace were re-created on every loop
// iteration and never released inside the loop -- a steady per-frame memory
// leak, now released at the end of each iteration; (2) cvDestroyWindow was
// called with "window" while the window was created as "Window".
// memory allocation: two pools -- current faces and last-frame faces
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 ); // request capture width
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // request capture height
// Declare images
IplImage *imgCamera;      // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn;       // image with drawing (rect containing faces)
IplImage *imgFace;        // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage);       // faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // faces in the camera image - LAST FRAME
float scale = 1.0/5; // how far to scale down the haar-detect images for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
    //*********************************************************************/
    //Step 1: stream video. Video to images
    //*********************************************************************/
    // NOTE(review): capture_frame() must take the IplImage pointer by
    // reference for this to set imgCamera -- confirm its signature.
    capture_frame(capture, imgCamera);
    // allocate the per-iteration work images (released at the loop's end)
    imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
    imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
    cvCopy(imgCamera, imgDrawn);
    if (counter == 10) { // take action for every 10 frames
        counter = 1;
        //*****************************************************************/
        //Step 2: Detection
        //*****************************************************************/
        find_faces(imgCamera, storage, cascade, faces, scale);
        //printf("Last faces seq had %d faces detected. \n",faces_last->total);
        //*****************************************************************/
        //Step 4: Draw every face in the picture
        //*****************************************************************/
        for(int i = 0; i < (faces ? faces->total : 0); i++ ){
            // get the rect from the sequence and draw it on imgDrawn
            r = (CvRect*)cvGetSeqElem(faces, i);
            draw_rect(imgDrawn, r, scale);
        }
        cvShowImage("Window", imgDrawn);
        // press escape to quit
        if( cvWaitKey(33) == 27 ) break;
        //*****************************************************************/
        //Step 3: Recognize the new faces
        //*****************************************************************/
        for(int i = 0; i < (faces ? faces->total : 0); i++ ){
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            if (faces_last->total == 0) {
                cout<<"a face appeared: "<<"there are total faces of "<<faces_last->total<<"\n";
                save_face(r, imgCamera, imgFace, scale, filecounter);
                filecounter++;
                //report_faces(filecounter, faces_new->total, model);
            }
            else {
                // NOTE(review): saves once per non-matching PREVIOUS face, so
                // one new face can be saved several times -- should save only
                // when it matches none of the previous faces.
                for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
                    CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
                    if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
                        save_face(r, imgCamera, imgFace, scale, filecounter);
                        filecounter++;
                        //report_faces(filecounter, faces_new->total, model);
                    }
                }
            }
        }
        // empty faces_last, then refill it with this frame's rects (popping
        // keeps storage2 valid, unlike cvClearMemStorage which would also
        // invalidate the faces_last header living in storage2)
        while (faces_last->total > 0) {
            cvSeqPop(faces_last);
        }
        for(int i = 0; i < (faces ? faces->total : 0); i++ ){
            r = (CvRect*)cvGetSeqElem(faces, i);
            cvSeqPush(faces_last, r);
        }
        //cout << "face_last:" << faces_last->total << "\n";
        // NOTE(review): this also invalidates the `faces` header allocated
        // from `storage` before the loop; it only keeps working because the
        // memory is not reused before find_faces() runs again -- fragile.
        cvClearMemStorage(storage);
    }
    counter++;
    // NOTE(review): pointer aliasing, not a copy -- a deep copy
    // (cvCloneImage) is presumably intended here.
    imgCamera_last = imgCamera;
    // FIX: release the per-iteration images to stop the memory leak.
    // cvReleaseImage() nulls the pointers, so the releases after the loop
    // remain safe no-ops.
    cvReleaseImage(&imgDrawn);
    cvReleaseImage(&imgFace);
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("Window"); // FIX: must match the name used at creation
// return 0 to indicate successful execution of the program
return 0;
I have few cameras in system. I initialise them this way
cap1 = cvCreateCameraCapture(0);
cap2 = cvCreateCameraCapture(1); // or -1
But after each execution their behaviour is different: they work together or both or them don't work or one of them captures well and other shows green screen. And sometimes system shows me dialogue box for choosing device.
Here is this part of source code:
// NOTE(review): cap1/cap2 are never initialised to 0. cvCreateCameraCapture
// returns NULL on failure, but if a loop below finds no camera at all, the
// cvGetCaptureProperty calls further down dereference an indeterminate
// pointer (undefined behaviour).
CvCapture* cap2;
CvCapture* cap1;
printf("- Searching first cam : \n");
// NOTE(review): `i` must be declared (and zero-initialised) above this
// excerpt -- the first clause `i;` is a no-op expression, not an init.
for (i; i < LASTCAM; i++)
{
cap1 = cvCreateCameraCapture(i);
if (!cap1)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
i++;
break;
}
}
printf("- Searching second cam : \n");
// continues from the index after the first camera found; if the first loop
// exhausted every index, this loop never runs and cap2 stays uninitialised
for (; i < LASTCAM; i++)
{
cap2 = cvCreateCameraCapture(i);
if (!cap2)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
break;
}
} printf("Frame propeties:\n");
double width = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH);
double height = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT);
printf("First cam : %.0f x %.0f\n", width, height );
double width2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH);
double height2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT);
printf("Second cam : %.0f x %.0f\n\n", width2, height2 );
IplImage* frame1=0;
IplImage* frame2=0;
cvNamedWindow("cam1", CV_WINDOW_AUTOSIZE);
cvNamedWindow("cam2", CV_WINDOW_AUTOSIZE);
int counter=0;
char filename[512];
while(true){
// cvQueryFrame returns a pointer into the capture's internal buffer;
// these frames must not be released by the caller
frame1 = cvQueryFrame( cap1 );
frame2 = cvQueryFrame( cap2 );
cvShowImage("cam1", frame1);
cvShowImage("cam2", frame2);
...
what's wrong with it?
Camera indices 1-9 are empty; index 10 is the first cam; indices 11 and above return cams that show "green screens".
Thanks beforehand.
Have you looked at the stereo mode? It looks like it's required if you want to run multiple cameras.
USB cameras (at least through directshow on windows) can be a little difficult.
Some things to try:
// A small delay between the captures, so the first device finishes
// initialising before the second one is opened
cap1 = cvCreateCameraCapture(0);
Sleep(100); // Windows-only (<windows.h>); argument is milliseconds
cap2 = cvCreateCameraCapture(1);
or
// call all the setup functions for camera0 before capturing camera1
// (the "......" arguments are placeholders for the property id)
cap1 = cvCreateCameraCapture(0);
cvGetCaptureProperty(cap1,......)
cap2 = cvCreateCameraCapture(1);
cvGetCaptureProperty(cap2,......)
I can not capture image from my webcam using following OpenCV code.
The code can show images from a local AVI file or a video device. It works fine on a "test.avi" file.
When I use my default webcam (CvCapture* capture = cvCreateCameraCapture(0)), the program can detect the size of the image from the webcam, but it is unable to display the image.
(I forgot to mention that I can see the iSight is working, because the LED indicator turns on.)
Anyone encounter the same problem?
cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
CvCapture* capture =cvCreateFileCapture( "C:\\test.avi" ) ;// display images from avi file, works well
// CvCapture* capture =cvCreateCameraCapture(0); //display the frame(images) from default webcam not work
assert( capture );
IplImage* image;
while(1) {
image = cvQueryFrame( capture );
if( !image ) break;
cvShowImage( "Example2", image );
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
opencv 2.2
Debug library *d.lib
WebCam isight
Macbook OS win7 32
VS2008
I'm working on opencv 2.3 with Macbook pro Mid 2012 and I had that problem with the Isight cam. Somehow I managed to make it work on opencv by simply adjusting the parameters of the Cvcapture and adjusting the frame width and height:
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 500 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 );
You can also change these numbers to the frame width and height you want.
Did you try the example from the opencv page?
namely,
#include "cv.h"
#include "highgui.h"
using namespace cv;
/**
 * Canonical OpenCV capture example: shows a Canny edge view of the default
 * camera until any key is pressed. Returns -1 if the camera cannot be opened.
 */
int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
        return -1;
    Mat edges;
    namedWindow("edges",1);
    for(;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        // ROBUSTNESS FIX: a dropped frame or end-of-stream yields an empty
        // Mat, and cvtColor would assert/crash on it.
        if (frame.empty())
            break;
        cvtColor(frame, edges, CV_BGR2GRAY);
        GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
        Canny(edges, edges, 0, 30, 3);
        imshow("edges", edges);
        if(waitKey(30) >= 0) break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
Works on a macbook pro for me (although on OS X). If it doesn't work, some kind of error message would be helpful.
Try this:
/**
 * Diagnostic capture loop: logs the width of every frame grabbed from the
 * default camera and shows it only when it is non-empty, so a camera that
 * opens but delivers no frames is visible in the log instead of crashing.
 * Returns -1 if the camera cannot be opened.
 */
int main(int, char**) {
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) { // check if we succeeded
        cout << "===couldn't open camera" << endl;
        return -1;
    }
    // FIX: dropped the unused `edges` Mat and the dead 10x10 pre-allocation
    // of `frame` -- `cap >> frame` replaces the header and data anyway.
    Mat frame;
    namedWindow("edges", 1);
    for (;;) {
        cap >> frame; // get a new frame from camera
        cout << "frame size: " << frame.cols << endl;
        if (frame.cols > 0 && frame.rows > 0) {
            imshow("edges", frame);
        }
        if (waitKey(30) >= 0)
            break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
Latest update! Problem solved!
This happens to be one of OpenCV 2.2's bugs.
Here is how to fix it:
http://dusijun.wordpress.com/2011/01/11/opencv-unable-to-capture-image-from-isight-webcam/
Why don't you try
capture = cvCaptureFromCAM(0);
(note the capitalization — the function is cvCaptureFromCAM)? I think this may work.
Let me know whether it's working or not.