I'm trying to write a simple C++ program using OpenCV to detect faces and save one facial square box for each new face that shows up in the camera. The frame capture seems right, but I'm struggling with the pointers, especially the "total" field of CvSeq. Could someone help me? Whenever execution reaches the line "cvSeqPush(faces_new, r);", faces_new->total already reads 60 or 70, even though nothing was ever pushed into it. Very frustrating, and I would really appreciate some help.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
static CvMemStorage* storage3 = 0;
storage3 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 ); // set capture frame width
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // set capture frame height
// Declare images
IplImage *imgCamera; // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn; // image with drawing (rect containing faces)
IplImage *imgFace; // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage); // sequence of faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
CvSeq *faces_new = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage3); // sequence of faces in the camera image - NEW FACES
float scale = 1.0/5; // how much to scale the image down for Haar detection, for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
//*************************************************************************************/
//Step 1: stream video. Video to images
//*************************************************************************************/
// capture frame from video and then turn it into one single image-imgCamera
capture_frame(capture, imgCamera);
// allocate an image to be used later
imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
cvCopy(imgCamera, imgDrawn);
if (counter == 10) { // process every 10th frame
counter = 1;
//*************************************************************************************/
//Step 2: Detection
//*************************************************************************************/
find_faces(imgCamera, storage, cascade, faces, scale);
//printf("Last faces seq had %d faces detected. \n",faces_last->total);
//*************************************************************************************/
//Step 4: Draw every face in the picture
//*************************************************************************************/
// for each face found in the image
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
// draw the rectangle around the face on imgDrawn
draw_rect(imgDrawn, r, scale);
}
cvShowImage("Window", imgDrawn);
// press escape to quit
if( cvWaitKey(33) == 27 ) break;
//*************************************************************************************/
//Step 3: Recognize the new faces
//*************************************************************************************/
// TODO: Combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
cout << "faces_new"<< faces_new->total<< "\n";
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
if (faces_last->total == 0) {
//cout << "New PERSON!!";
cvSeqPush(faces_new, r);
}
else {
for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
cvSeqPush(faces_new, r);
//cout << "faces_new"<< faces_new->total<< "\n";
}
}
}
}
//*************************************************************************************/
//Step 3: Process faces - save new faces, report new faces
//*************************************************************************************/
if ((faces_new->total)>0) {
// To change to save only faces_new
save_faces(faces_new, imgCamera, imgFace, scale, filecounter);
// report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
filecounter = filecounter + faces_new->total;
}
cvClearMemStorage(storage2);
cvSeqPush(faces_last, faces);
//cout << "face_last:" << faces_last->total << "\n";}
cvClearMemStorage(storage);
cvClearMemStorage(storage3);
}
counter++;
imgCamera_last = imgCamera;
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("window");
Finally got it to work.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in
CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1000 ); // set capture frame width
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 ); // set capture frame height
// Declare images
IplImage *imgCamera; // captured from camera
IplImage *imgCamera_last; // last camera image
IplImage *imgDrawn; // image with drawing (rect containing faces)
IplImage *imgFace; // face picture extracted from the camera
CvRect *r; // rects containing faces
CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage); // sequence of faces in the camera image - CURRENT
CvSeq *faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
float scale = 1.0/5; // how much to scale the image down for Haar detection, for speed
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// file name where to save the file
std::stringstream filename;
int counter = 1;
int filecounter = 1;
while(1) {
//*************************************************************************************/
//Step 1: stream video. Video to images
//*************************************************************************************/
// capture a frame from the video stream into a single image, imgCamera
capture_frame(capture, imgCamera);
// allocate an image to be used later
imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
cvCopy(imgCamera, imgDrawn);
if (counter == 10) { // process every 10th frame
counter = 1;
//*************************************************************************************/
//Step 2: Detection
//*************************************************************************************/
find_faces(imgCamera, storage, cascade, faces, scale);
//printf("Last faces seq had %d faces detected. \n",faces_last->total);
//*************************************************************************************/
//Step 4: Draw every face in the picture
//*************************************************************************************/
// for each face found in the image
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
// draw the rectangle around the face on imgDrawn
draw_rect(imgDrawn, r, scale);
}
cvShowImage("Window", imgDrawn);
// press escape to quit
if( cvWaitKey(33) == 27 ) break;
//*************************************************************************************/
//Step 3: Recognize the new faces
//*************************************************************************************/
// TODO: Combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
if (faces_last->total == 0) {
cout<<"a face appeared: "<<"there are total faces of "<<faces_last->total<<"\n";
save_face(r, imgCamera, imgFace, scale, filecounter);
filecounter++;
//report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
}
else {
for(int k = 0; k < (faces_last ? faces_last->total : 0); k++ ){
CvRect *r_last = (CvRect*)cvGetSeqElem(faces_last, k);
if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
save_face(r, imgCamera, imgFace, scale, filecounter);
filecounter++;
//report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
}
}
}
}
//cvClearMemStorage(storage2);
while (faces_last->total > 0) {
cvSeqPop(faces_last);
}
for(int i = 0; i < (faces ? faces->total : 0); i++ ){
// get the rect from the sequence
r = (CvRect*)cvGetSeqElem(faces, i);
cvSeqPush(faces_last, r);
}
//cout << "face_last:" << faces_last->total << "\n";}
cvClearMemStorage(storage);
}
counter++;
imgCamera_last = imgCamera;
}
cvReleaseCapture( &capture );
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("window");
// return 0 to indicate successful execution of the program
return 0;
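A side note on the snapshot step above: instead of popping faces_last empty and re-pushing every rect, the whole sequence can be copied in one call. A sketch of the alternative, assuming the same storages as above (cvCloneSeq copies the header and the elements into the given storage, so the old header may safely be discarded first):
cvClearMemStorage(storage2); // discards the old faces_last header
faces_last = cvCloneSeq(faces, storage2); // fresh header + elements, valid afterwards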
Currently I'm trying to use OpenCV for a fruit detector project with a camera. I took the source code from https://github.com/JackThePie/fruit-detector. I use the QT Creator app on Ubuntu. I am new to OpenCV; can someone help me with this issue? Note: I use the OpenCV-MinGW-Build-OpenCV-4.5.0-with-contrib library, which I am sure is OpenCV 2.
This is some part of my .cpp file for Blob detection.
// Blob detection
void MainWindow::on_pushButton_4_clicked()
{
// Capture the video from web cam
VideoCapture capture(filename1);
// If not success, exit program
if (!capture.isOpened())
{
cout << "Cannot open the video file" << endl;
}
namedWindow("Ustawienia Blob", WINDOW_AUTOSIZE); // Create a window called "Ustawienia Blob"
int colour = 10;
int iLowTresh = 0;
int iHighTresh = 300;
int minArea = 150;
int minCirc = 0.1;
int minConv = 0.087;
int minInertia = 0.0001;
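// NOTE: createTrackbar takes int* value pointers, so the fractional initial
// values above (0.1, 0.087, 0.0001) silently truncate to 0 and the sliders move
// in integer steps; these parameters likely need rescaling (e.g. store 10 for 0.10).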
// Create trackbars in "Ustawienia Blob" window
createTrackbar("Colour", "Ustawienia Blob", &colour, 255); //Hue (0 - 179)
createTrackbar("minTresh", "Ustawienia Blob", &iLowTresh, 100); //Hue (0 - 179)
createTrackbar("maxTresh", "Ustawienia Blob", &iHighTresh, 700);
createTrackbar("minArea", "Ustawienia Blob", &minArea, 700); //Saturation (0 - 255)
createTrackbar("minCirc", "Ustawienia Blob", &minCirc, 200); //Value (0 - 255)
createTrackbar("minConv", "Ustawienia Blob", &minConv, 200);
createTrackbar("minInertia", "Ustawienia Blob", &minInertia, 100);
while (true)
{
Mat imgOriginal;
// Read a new frame from video
bool bSuccess = capture.read(imgOriginal);
// If not success, break loop
if (!bSuccess)
{
break;
}
// Setup SimpleBlobDetector parameters.
SimpleBlobDetector::Params params;
params.filterByColor = true;
params.blobColor = colour;
// Change thresholds
params.minThreshold = iLowTresh;
params.maxThreshold = iHighTresh;
// Filter by Area
params.filterByArea = true;
params.minArea = minArea;
// Filter by Circularity
params.filterByCircularity = true;
params.minCircularity = minCirc;
// Filter by Convexity
params.filterByConvexity = true;
params.minConvexity = minConv;
// Filter by Inertia
params.filterByInertia = true;
params.minInertiaRatio = minInertia;
// Set up detector with params
SimpleBlobDetector detector(params); // HERE IS WHERE THE ERROR IS
// Storage for blobs
std::vector<KeyPoint> keypoints;
// Detect blobs
detector.detect(imgOriginal, keypoints);
// Draw detected blobs as red circles.
// DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures
// the size of the circle corresponds to the size of blob
Mat im_with_keypoints;
drawKeypoints(imgOriginal, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
QImage qigm=MatToQImage(imgOriginal);
image=imgOriginal.clone();
ui->label->setPixmap(QPixmap::fromImage(qigm).scaledToWidth(ui->label->size().width(),Qt::FastTransformation));
QImage qigm2=MatToQImage(im_with_keypoints);
image=im_with_keypoints.clone();
ui->label_2->setPixmap(QPixmap::fromImage(qigm2).scaledToWidth(ui->label->size().width(),Qt::FastTransformation));
// Wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
if (waitKey(30) == 27)
{
break;
}
}
}
When I try to run the code, an error occurs at this line:
SimpleBlobDetector detector(params);
The error is shown in the attached picture (line 147).
Thank You!
I tried to find a solution here https://docs.opencv.org/4.x/d0/d7a/classcv_1_1SimpleBlobDetector.html#details but could not find one.
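The failing line is almost certainly an API-version issue rather than anything in the parameters: since OpenCV 3.x the SimpleBlobDetector constructor is protected, and instances must be created through the static create() factory, which returns a smart pointer. A minimal sketch of the replacement for the failing lines, reusing the params and keypoints from the code above:
// OpenCV 3.x/4.x: construct through the factory, not the constructor
cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
// detect() is then called through the pointer
detector->detect(imgOriginal, keypoints);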
I am detecting shapes in real time with the help of OpenCV in the C++ programming language. I found code that reads from a file and detects shapes, but in my case the camera should detect in real time. How can I use raspicam::RaspiCam_Cv capture; instead of CvCapture *capture = cvCaptureFromAVI("a.avi"); in C++?
#include <cv.h>
#include <highgui.h>
using namespace std;
IplImage* imgTracking=0;
int lastX1 = -1;
int lastY1 = -1;
int lastX2 = -1;
int lastY2 = -1;
void trackObject(IplImage* imgThresh){
CvSeq* contour; //hold the pointer to a contour
CvSeq* result; //hold sequence of points of a contour
CvMemStorage *storage = cvCreateMemStorage(0); //storage area for all contours
//finding all contours in the image
cvFindContours(imgThresh, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
//iterating through each contour
while(contour)
{
//obtain a sequence of points of the contour, pointed to by the variable 'contour'
result = cvApproxPoly(contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0);
//if there are 3 vertices in the contour and the area of the triangle is more than 100 pixels
if(result->total==3 && fabs(cvContourArea(result, CV_WHOLE_SEQ))>100 )
{
//iterating through each point
CvPoint *pt[3];
for(int i=0;i<3;i++){
pt[i] = (CvPoint*)cvGetSeqElem(result, i);
}
int posX=( pt[0]->x + pt[1]->x + pt[2]->x )/3;
int posY=( pt[0]->y + pt[1]->y + pt[2]->y )/3;
if(posX > 360 ){
if(lastX1>=0 && lastY1>=0 && posX>=0 && posY>=0){
// Draw a red line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX1, lastY1), cvScalar(0,0,255), 4);
}
lastX1 = posX;
lastY1 = posY;
}
else{
if(lastX2>=0 && lastY2>=0 && posX>=0 && posY>=0){
// Draw a blue line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX2, lastY2), cvScalar(255,0,0), 4);
}
lastX2 = posX;
lastY2 = posY;
}
}
//obtain the next contour
contour = contour->h_next;
}
cvReleaseMemStorage(&storage);
}
int main(){
//load the video file to the memory
CvCapture *capture = cvCaptureFromAVI("a.avi");
if(!capture){
printf("Capture failure\n");
return -1;
}
IplImage* frame=0;
frame = cvQueryFrame(capture);
if(!frame) return -1;
//create a blank image, assigned to 'imgTracking', which has the same size as the original video
imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);
cvZero(imgTracking); //convert the image 'imgTracking' to black
cvNamedWindow("Video");
//iterate through each frame of the video
while(true){
frame = cvQueryFrame(capture);
if(!frame) break;
frame=cvCloneImage(frame);
//smooth the original image using Gaussian kernel
cvSmooth(frame, frame, CV_GAUSSIAN,3,3);
//converting the original image into grayscale
IplImage* imgGrayScale = cvCreateImage(cvGetSize(frame), 8, 1);
cvCvtColor(frame,imgGrayScale,CV_BGR2GRAY);
//thresholding the grayscale image to get better results
cvThreshold(imgGrayScale,imgGrayScale,100,255,CV_THRESH_BINARY_INV);
//track the position of the ball
trackObject(imgGrayScale);
// Add the tracking image and the frame
cvAdd(frame, imgTracking, frame);
cvShowImage("Video", frame);
//Clean up used images
cvReleaseImage(&imgGrayScale);
cvReleaseImage(&frame);
//Wait 10mS
int c = cvWaitKey(10);
//If 'ESC' is pressed, break the loop
if((char)c==27 ) break;
}
cvDestroyAllWindows();
cvReleaseImage(&imgTracking);
cvReleaseCapture(&capture);
return 0;
}
I cannot use raspicam::RaspiCam_Cv capture; instead of CvCapture *capture = cvCaptureFromAVI();. I need to detect shapes in real time, for example calling some function when a triangle appears. Please help me.
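For what it's worth, a minimal sketch of the substitution, assuming the userland raspicam library is installed and linked (-lraspicam -lraspicam_cv): RaspiCam_Cv hands out cv::Mat frames through open()/grab()/retrieve() instead of cvQueryFrame, and a Mat can be wrapped as an IplImage where the legacy C-API functions still need one.
#include <raspicam/raspicam_cv.h>
#include <opencv2/opencv.hpp>
#include <cstdio>
int main(){
raspicam::RaspiCam_Cv capture; // replaces CvCapture*
capture.set(CV_CAP_PROP_FORMAT, CV_8UC3); // request 3-channel BGR frames
if(!capture.open()){ // replaces cvCaptureFromAVI(...)
printf("Capture failure\n");
return -1;
}
cv::Mat frame;
while(true){
capture.grab();
capture.retrieve(frame); // replaces cvQueryFrame(capture)
if(frame.empty()) break;
IplImage ipl = frame; // wrap for the legacy functions (no copy)
// ... cvSmooth / trackObject / cvShowImage as in the code above, using &ipl ...
if((char)cv::waitKey(10) == 27) break; // ESC quits
}
capture.release();
return 0;
}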
Some warnings appear in terminal during running:
OpenCV Error: Assertion failed(s>=0) in setSize, file /home/me/opencv2.4/modules/core/src/matrix.cpp, line 116
The program compiles without error and executes; the problem is that the eye ROI size changes when the user moves closer to or farther from the webcam, and this change of size triggers the warning. I managed to silence the warnings by setting the eye ROI size equal to my eye template size. However, the program then fails to classify the user's eyes as open or closed, because the minVal obtained is 0. The method used is OpenCV template matching. Alternatively, fixing my distance from the webcam and fixing the eye template size avoids the warning. Every time the warning appears, the program fails to classify open/closed eyes. The program doesn't work reliably because it sometimes mistakenly classifies open eyes as closed and vice versa.
Questions:
Is there any alternative to template matching for identifying open and closed eyes?
Any ideas how to improve the program's classification of blinks?
Any working example you know of in the OpenCV C/C++ API that can classify open and closed eyes and accurately count the number of blinks?
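On the setSize assertion itself: matchTemplate requires the searched image to be at least as large as the template, because the result matrix has size (W-w+1) x (H-h+1); once the detected eye ROI becomes smaller than the template, those dimensions go negative and the assertion fires. A guard along these lines (using the roiImg and templ names from the code below) keeps the sizes legal without fixing the user's distance:
// Skip or rescale frames where the ROI is smaller than the template,
// so result_cols/result_rows in MatchingMethod stay positive.
if(roiImg.cols < templ.cols || roiImg.rows < templ.rows)
cv::resize(roiImg, roiImg, templ.size()); // or simply: return;
Note also that cv::normalize(result, result, 0, 1, NORM_MINMAX, ...) forces the minimum of result to exactly 0, so a test like (minVal < 0) can never fire; comparing the raw CV_TM_SQDIFF score against a small positive threshold, before normalizing, is more robust.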
static CvMemStorage* storage = 0;
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
// Function prototype for detecting and drawing an object from an image
bool detect_and_draw( IplImage* image ,CvHaarClassifierCascade* cascade);
const char *cascade_name[1]={"eyes.xml"};
cv::Mat roiImg;
int threshold_value = 200;
int threshold_type = 3;
int const max_value = 255;
int const max_type = 4;
int const max_BINARY_value = 255;
int hough_thr = 35;
cv::Mat src_gray, dst;
using namespace cv;
Mat img1; Mat img2; Mat templ; Mat result;
const char* image_window = "Source Image";
const char* result_window = "Result window";
int match_method=0;
int max_Trackbar = 5;
int eye_open=0;
int eye_close=0;
//Matching with 2 images ,eye closed or open
void MatchingMethod(cv::Mat templ,int id )
{
/// Source image to display
cv::Mat img_display;
roiImg.copyTo( img_display );
/// Create the result matrix
int result_cols = roiImg.cols - templ.cols + 1;
int result_rows = roiImg.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 );
/// Do the Matching and Normalize
cv::matchTemplate( roiImg, templ, result, match_method );
cv::normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
cv::Point matchLoc;
cv::minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
///Just checking whether the template match value reaches the threshold
if(id == 0 && (minVal < 0))
{
eye_open=eye_open+1;
if(eye_open == 1)
{
std::cout<<"Eye Open"<<std::endl;
eye_open=0;
eye_close=0;
}
}
else if(id == 1 && (minVal < 0))
eye_close=eye_close+1;
if(eye_close == 1)
{
std::cout<<"Eye Closed"<<std::endl;
eye_close=0;
system("python send_arduino.py");
}
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
/// Show me what you got
cv::rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
cv::rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
cv::imshow( image_window, img_display );
cv::imshow( result_window, result );
return;
}
void detect_blink(cv::Mat roi)
{
try
{
MatchingMethod(img1,0);
MatchingMethod(img2,1);
}
catch( cv::Exception& e )
{
std::cout<<"An exception occued"<<std::endl;
}
}
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
if(argc <= 1)
{
std::cout<<"\n Help "<<std::endl;
std::cout<<"\n ------------------------------------\n"<<std::endl;
std::cout<<"./blink_detect open_eye.jpg close_eye.jpg\n"<<std::endl;
std::cout<<"Eg :: ./blink_detect 2.jpg 3.jpg\n"<<std::endl;
std::cout<<"\n ------------------------------------\n"<<std::endl;
exit(0);
}
// Structure for getting video from camera or avi
CvCapture* capture = 0;
// Images to capture the frame from video or camera or from file
IplImage *frame, *frame_copy = 0;
// Used for calculations
int optlen = strlen("--cascade=");
// Input file name for avi or image file.
const char* input_name;
img1 = imread( argv[1], 1 );
img2 = imread( argv[2], 1 );
// Load the HaarClassifierCascade
/// Create windows
cv::namedWindow( image_window, CV_WINDOW_AUTOSIZE );
cv::namedWindow( result_window, CV_WINDOW_AUTOSIZE );
// Allocate the memory storage
storage = cvCreateMemStorage(0);
capture = cvCaptureFromCAM( 0);
// Create a new named window with title: result
cvNamedWindow( "original_frame", 1 );
// If loaded successfully, then:
if( capture )
{
// Capture from the camera.
for(;;)
{
// Capture the frame and load it in IplImage
if( !cvGrabFrame( capture ))
break;
frame = cvRetrieveFrame( capture );
// If the frame does not exist, quit the loop
if( !frame )
break;
// Allocate framecopy as the same size of the frame
if( !frame_copy )
frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
IPL_DEPTH_8U, frame->nChannels );
// Check the origin of image. If top left, copy the image frame to frame_copy.
if( frame->origin == IPL_ORIGIN_TL )
cvCopy( frame, frame_copy, 0 );
// Else flip and copy the image
for(int i=0;i<1;i++)
{
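// NOTE: cvLoad runs inside the capture loop here, so the cascade is re-read
// from eyes.xml on every frame (and the previous one is never released);
// hoisting this load above the for(;;) loop would avoid that cost.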
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name[i], 0, 0, 0 );
// Check whether the cascade has loaded successfully. Else report an error and quit
if( !cascade )
{
fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
return -1;
}
// Call the function to detect and draw the face
if(detect_and_draw(frame_copy,cascade))
{
std::cout<<"Detected"<<std::endl;
}
}
// Wait for a while before proceeding to the next frame
if( cvWaitKey( 1 ) >= 0 )
break;
}
// Release the images, and capture memory
cvReleaseHaarClassifierCascade(&cascade);
cvReleaseImage( &frame_copy );
cvReleaseCapture( &capture );
cvReleaseMemStorage(&storage);
}
return 0;
}
// Function to detect and draw any faces that are present in an image
bool detect_and_draw( IplImage* img,CvHaarClassifierCascade* cascade )
{
int scale = 1;
// Create a new image based on the input image
IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );
// Create two points to represent the face locations
CvPoint pt1, pt2;
int i = 0;
// Clear the memory storage which was used before
cvClearMemStorage( storage );
// Find whether the cascade is loaded, to find the faces. If yes, then:
if( cascade )
{
// There can be more than one face in an image. So create a growable sequence of faces.
// Detect the objects and store them in the sequence
CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
1.1, 8, CV_HAAR_DO_CANNY_PRUNING,
cvSize(40, 40) );
// Loop the number of faces found.
for( i = 0; i < (faces ? faces->total : 0); i++ )
{
// Create a new rectangle for drawing the face
CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
// Find the dimensions of the face,and scale it if necessary
pt1.x = r->x*scale;
pt2.x = (r->x+r->width)*scale;
pt1.y = r->y*scale;
pt2.y = (r->y+r->height)*scale;
// Draw the rectangle in the input image
cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
cv::Mat image(img);
cv::Rect rect;
rect = cv::Rect(pt1.x,pt1.y,(pt2.x-pt1.x),(pt2.y-pt1.y));
roiImg = image(rect);
cv::imshow("roi",roiImg);
///Send to arduino
detect_blink(roiImg);
}
}
cvShowImage( "original_frame", img );
cvReleaseImage( &temp );
if(i > 0)
return true;
else
return false;
}
Reference:
Website referred
How can I properly get one resolution feed from the camera in OpenCV (640x480) but cut it and display only one part of the frame (320x240)? So not scale down, but actually crop. I am using OpenCV 2.4.5, VS2010 and C++.
This quite standard code gets 640x480 input resolution, and I made some changes to crop the output to 320x240. Should I use Mat instead of IplImage, and if so what would be the best way?
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
char key;
int main()
{
cvNamedWindow("Camera_Output", 1); //Create window
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 640 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 480 );
while(1){ //Create loop for live streaming
IplImage* framein = cvQueryFrame(capture); //Create image frames from capture
/* sets the Region of Interest - rectangle area has to be __INSIDE__ the image */
cvSetImageROI(framein, cvRect(0, 0, 320, 240));
/* create destination image - cvGetSize will return the width and the height of ROI */
IplImage *frameout = cvCreateImage(cvGetSize(framein), framein->depth, framein->nChannels);
/* copy subimage */
cvCopy(framein, frameout, NULL);
/* always reset the Region of Interest */
cvResetImageROI(framein);
cvShowImage("Camera_Output", frameout); //Show image frames on created window
key = cvWaitKey(10); //Capture Keyboard stroke
if (char(key) == 27){
break; //ESC key loop will break.
}
}
cvReleaseCapture(&capture); //Release capture.
cvDestroyWindow("Camera_Output"); //Destroy Window
return 0;
}
I think you don't check whether you are actually getting a CvCapture. On my system with only one camera your code doesn't work because you query camera 1, but the first camera should be 0. Thus change this code:
CvCapture* capture = cvCaptureFromCAM(1); //Capture using camera 1 connected to system
to (note I changed 1 to 0):
CvCapture* capture = cvCaptureFromCAM(0); //Capture using camera 0 connected to system
if (! capture ){
/*your error handling*/
}
Other than that, your code seems to be working for me. You might also check the other pointer values to make sure you are not getting NULL.
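On the Mat question: yes, with the C++ API the crop becomes a one-liner, because a Mat constructed from a Rect is just a view into the frame. A minimal sketch of the same program with cv::VideoCapture and cv::Mat (call clone() on the crop only if it must own its pixels):
#include "opencv2/highgui/highgui.hpp"
int main(){
cv::VideoCapture capture(0);
if(!capture.isOpened()) return -1;
capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
cv::Mat frame;
while(capture.read(frame)){
cv::Mat cropped = frame(cv::Rect(0, 0, 320, 240)); // a view, no copy
cv::imshow("Camera_Output", cropped);
if(char(cv::waitKey(10)) == 27) break; // ESC quits
}
return 0;
}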
You can easily crop a video by calling the following function.
cvSetMouseCallback("image", mouseHandler, NULL);
The mouseHandler function looks like this:
void mouseHandler(int event, int x, int y, int flags, void* param){
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/* left button clicked. ROI selection begins */
select_flag=0;
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/* mouse dragged. ROI being selected */
Mat img1 = img.clone();
point2 = Point(x, y);
rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
imshow("image", img1);
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
point2 = Point(x, y);
rect = Rect(point1.x,point1.y,x-point1.x,y-point1.y);
drag = 0;
roiImg = img(rect);
}
if (event == CV_EVENT_LBUTTONUP)
{
/* ROI selected */
select_flag = 1;
drag = 0;
}
}
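For completeness, the handler above relies on a handful of globals that the snippet does not show; declarations along these lines (names taken from the snippet, types assumed, with using namespace cv as in the snippet) make it compile:
Mat img, roiImg; // current frame and selected region
Point point1, point2; // ROI corners
Rect rect; // selected rectangle
int drag = 0, select_flag = 0;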
For the details you can visit the following link: How to Crop Video from Webcam using OpenCV.
This is easy in Python: the key idea is that cv2 arrays can be referenced and sliced, so all you need is a slice of framein.
The following code takes a slice from (0,0) to (320,240). Note that NumPy arrays are indexed row-first, so y comes before x.
# Required modules
import cv2
# Constants for the crop size
xMin = 0
yMin = 0
xMax = 320
yMax = 240
# Open cam, decode image, show in window
cap = cv2.VideoCapture(0) # use 1 or 2 or ... for other camera
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")
key = -1
while(key < 0):
success, img = cap.read()
cropImg = img[yMin:yMax,xMin:xMax] # this is all there is to cropping
cv2.imshow("Original", img)
cv2.imshow("Cropped", cropImg)
key = cv2.waitKey(1)
cv2.destroyAllWindows()
Working Example of cropping Faces from live camera
void CropFaces::DetectAndCropFaces(Mat frame, string locationToSaveFaces) {
std::vector<Rect> faces;
Mat frame_gray;
// Convert to gray scale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
// Equalize histogram
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3,
0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Iterate over all of the faces
for (size_t i = 0; i < faces.size(); i++) {
// Find center of faces
Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
Mat face = frame_gray(faces[i]);
std::vector<Rect> eyes;
Mat croppedRef(frame, faces[i]);
cv::Mat cropped;
// Copy the data into new matrix
croppedRef.copyTo(cropped);
string fileName = locationToSaveFaces+ "\\face_" + to_string(faces[i].x) + ".jpg";
resize(cropped, cropped, Size(65, 65));
imwrite(fileName, cropped);
}
// Display frame
imshow("DetectAndSave", frame);
}
void CropFaces::PlayVideoForCropFaces(string locationToSaveFaces) {
VideoCapture cap(0); // Open default camera
Mat frame;
face_cascade.load("haarcascade_frontalface_alt.xml"); // load faces
while (cap.read(frame)) {
DetectAndCropFaces(frame, locationToSaveFaces); // Call function to detect faces
if (waitKey(30) >= 0) // pause
break;
}
}
I have a few cameras in the system. I initialise them this way:
cap1 = cvCreateCameraCapture(0);
cap2 = cvCreateCameraCapture(1); // or -1
But after each execution their behaviour differs: sometimes both work, sometimes neither works, and sometimes one captures well while the other shows a green screen. And sometimes the system shows me a dialogue box for choosing a device.
Here is this part of the source code:
CvCapture* cap2;
CvCapture* cap1;
printf("- Searching first cam : \n");
int i = 0;
for (; i < LASTCAM; i++)
{
cap1 = cvCreateCameraCapture(i);
if (!cap1)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
i++;
break;
}
}
printf("- Searching second cam : \n");
for (; i < LASTCAM; i++)
{
cap2 = cvCreateCameraCapture(i);
if (!cap2)
{
printf("-- Camera %d is empty \n", i);
}
else
{
printf("-- Camera %d is OK \n", i);
break;
}
} printf("Frame propeties:\n");
double width = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH);
double height = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT);
printf("First cam : %.0f x %.0f\n", width, height );
double width2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH);
double height2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT);
printf("Second cam : %.0f x %.0f\n\n", width2, height2 );
IplImage* frame1=0;
IplImage* frame2=0;
cvNamedWindow("cam1", CV_WINDOW_AUTOSIZE);
cvNamedWindow("cam2", CV_WINDOW_AUTOSIZE);
int counter=0;
char filename[512];
while(true){
frame1 = cvQueryFrame( cap1 );
frame2 = cvQueryFrame( cap2 );
cvShowImage("cam1", frame1);
cvShowImage("cam2", frame2);
...
What's wrong with it?
Cameras 1-9 are empty; 10 is the first cam, and 11 onwards return cams which are "green screens".
Thanks beforehand.
Have you looked at the stereo mode? It looks like it's required if you want to run multiple cameras.
USB cameras (at least through DirectShow on Windows) can be a little difficult.
Some things to try:
// A small delay between the captures
cap1 = cvCreateCameraCapture(0);
Sleep(100);
cap2 = cvCreateCameraCapture(1);
or
// call all the setup functions for camera0 before capturing camera1
cap1 = cvCreateCameraCapture(0);
cvGetCaptureProperty(cap1,......)
cap2 = cvCreateCameraCapture(1);
cvGetCaptureProperty(cap2,......)
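A sketch combining both ideas into one open-and-verify routine (Sleep is the Windows call from the snippet above; on Linux usleep plays the same role):
CvCapture* cap1 = cvCreateCameraCapture(0);
if(!cap1){ printf("Camera 0 failed\n"); return -1; }
// finish configuring the first camera before touching the second
cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH, 640);
cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT, 480);
Sleep(100); // small delay before opening the second device
CvCapture* cap2 = cvCreateCameraCapture(1);
if(!cap2){ printf("Camera 1 failed\n"); cvReleaseCapture(&cap1); return -1; }
cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH, 640);
cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT, 480);
// in the grab loop, treat a NULL frame as that camera dropping out:
IplImage* f1 = cvQueryFrame(cap1);
IplImage* f2 = cvQueryFrame(cap2);
if(!f1 || !f2){ /* handle the dropout instead of showing a stale image */ }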