I am working on an OpenCV and Qt project. It runs very quickly on Linux but very slowly on Windows. Both the Linux and Windows machines are 64-bit. I don't understand the problem. Thanks (sorry for my English).
Here is the code:
#include <opencv2/core/base.hpp>
#include <opencv2/core/mat.hpp>
#include <opencv2/core/types.hpp>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect/objdetect_c.h>
#include <opencv2/objdetect.hpp>
#include <opencv2/videoio.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
    VideoCapture cap(0); // open video camera no. 0
    namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); // create a window called "MyVideo"

    // Load the face cascade (.xml file)
    CascadeClassifier face_cascade;
    face_cascade.load("xml/haarcascade_frontalface_alt.xml");

    while (1)
    {
        Mat frame;
        bool bSuccess = cap.read(frame); // read a new frame from the video
        if (!bSuccess) // if not successful, break the loop
        {
            cout << "Cannot read a frame from video stream" << endl;
            break;
        }

        // Detect faces
        std::vector<Rect> faces;
        face_cascade.detectMultiScale(frame, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

        // Draw ellipses on the detected faces
        for (size_t i = 0; i < faces.size(); i++)
        {
            Point center(faces[i].x + faces[i].width * 0.5, faces[i].y + faces[i].height * 0.5);
            ellipse(frame, center, Size(faces[i].width * 0.5, faces[i].height * 0.5),
                    0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
        }
        imshow("Detected Face", frame); // show the frame whether or not faces were found

        if (waitKey(30) == 27) // wait 30 ms for the Esc key; if pressed, break the loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
I'm working on a project to detect motion from a camera.
I need to start recording video when motion is detected, for example:
Record while motion is being detected
Continue recording for 10 seconds after motion detection stops
I have a working example that only detects motion and draws rectangles on the moving parts.
I searched for examples of how to record when motion is detected, but found no good results.
Here is my working code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>
using namespace cv;
using namespace std;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int main(int argc, char* argv[])
{
    // create the background subtractor object
    Ptr<BackgroundSubtractor> pBackSub;
    pBackSub = createBackgroundSubtractorMOG2();

    VideoCapture capture(0);
    if (!capture.isOpened()) {
        // error opening the video input
        cerr << "Unable to open: " << endl;
        return 0;
    }

    Mat frame, fgMask;
    sleep(3);
    while (true) {
        capture >> frame;
        if (frame.empty())
            break;

        // update the background model
        pBackSub->apply(frame, fgMask);
        imshow("FG Mask", fgMask);

        RNG rng(12345);
        findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
        vector<Rect> boundRect(contours.size());
        vector<vector<Point> > contours_poly(contours.size());
        for (size_t i = 0; i < contours.size(); i++) {
            if (contourArea(contours[i]) < 500)
            {
                continue;
            }
            putText(frame, "Motion Detected", Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 255), 2);
            approxPolyDP(contours[i], contours_poly[i], 3, true);
            boundRect[i] = boundingRect(contours_poly[i]);
            Scalar color = Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
            rectangle(frame, boundRect[i].tl(), boundRect[i].br(), color, 2);
        }

        imshow("Frame", frame);
        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;
    }
    return 0;
}
I tried adding this to the code:
int frameWidth = 320;
int frameHeight = 240;
cv::Size frameSize = cv::Size(frameWidth, frameHeight);
/* Output file */
int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
cv::VideoWriter outputVideo;
outputVideo.open("rr.mp4", codec, capture.get(cv::CAP_PROP_FPS), frameSize, true);
and after drawing the rectangle I write the frame to the video:
outputVideo.write(frame);
but with that change, the output video stays empty and the program crashes.
I already took a look at Motion but I didn't find an example.
How can I achieve this?
Thanks,
Talel
I resolved the issue.
I was opening the output video with specific dimensions (320, 240) while saving captured frames that are bigger.
So the solution is to resize the captured frame to fit into the output video.
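Concretely, the fix is to resize just before writing. A minimal sketch (the final code below does the resize in place instead of using a copy):
cv::Mat out;
cv::resize(frame, out, frameSize); // frameSize is the size the VideoWriter was opened with
outputVideo.write(out);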
Here is the final solution, if anyone is interested:
Turn the laptop camera into an IP camera with: cam2ip
Here is the source code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>
using namespace cv;
using namespace std;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int main(int argc, char* argv[])
{
    // create the background subtractor object
    Ptr<BackgroundSubtractor> pBackSub;
    pBackSub = createBackgroundSubtractorMOG2();

    const std::string videoStreamAddress = "http://192.168.20.100:56000/mjpeg";
    cv::VideoCapture vcap;
    if (!vcap.open(videoStreamAddress)) {
        std::cout << "Error opening video stream or file" << std::endl;
        return -1;
    }

    Mat frame, fgMask;

    int frameWidth = 320;
    int frameHeight = 240;
    cv::Size frameSize = cv::Size(frameWidth, frameHeight);

    /* Output file */
    int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
    cv::VideoWriter outputVideo;
    outputVideo.open("rr.mp4", codec, vcap.get(cv::CAP_PROP_FPS), frameSize, true);

    sleep(3);
    while (true) {
        vcap >> frame;
        if (frame.empty())
            break;

        // update the background model
        pBackSub->apply(frame, fgMask);
        imshow("FG Mask", fgMask);

        RNG rng(12345);
        findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
        vector<Rect> boundRect(contours.size());
        vector<vector<Point> > contours_poly(contours.size());
        for (size_t i = 0; i < contours.size(); i++) {
            if (contourArea(contours[i]) < 500)
            {
                continue;
            }
            putText(frame, "Motion Detected", Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 255), 2);
            approxPolyDP(contours[i], contours_poly[i], 3, true);
            boundRect[i] = boundingRect(contours_poly[i]);
            Scalar color = Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
            rectangle(frame, boundRect[i].tl(), boundRect[i].br(), color, 2);
            resize(frame, frame, frameSize); // resize to match the size the writer was opened with
            outputVideo.write(frame);
        }

        imshow("Frame", frame);
        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;
    }
    outputVideo.release();
    return 0;
}
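Note that the code above only writes frames while motion is being detected. To also meet the original second requirement (keep recording for 10 seconds after motion stops), one approach is to remember the time of the last detection and keep writing until 10 seconds have passed. A hedged sketch of how the loop body could be restructured (not tested; it assumes #include <chrono> and reuses frame, contours, frameSize, and outputVideo from the code above):
// before the while loop: start "expired" so nothing is written until motion occurs
auto lastMotion = std::chrono::steady_clock::now() - std::chrono::hours(1);

// inside the loop, after findContours:
bool motionNow = false;
for (size_t i = 0; i < contours.size(); i++) {
    if (contourArea(contours[i]) < 500)
        continue;
    motionNow = true; // a large enough contour counts as motion
    // ... draw the rectangle as above ...
}
if (motionNow)
    lastMotion = std::chrono::steady_clock::now();
if (std::chrono::steady_clock::now() - lastMotion < std::chrono::seconds(10)) {
    resize(frame, frame, frameSize);
    outputVideo.write(frame); // records during motion and for 10 s afterwards
}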
Further enhancement suggestions:
Make sure that lighting changes are not picked up as motion
Open the output video with the same dimensions as the capture (see the sketch below)
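For the last suggestion, the capture's actual dimensions can be queried before opening the writer, so no resize is needed. A small sketch, assuming the same vcap and codec as in the code above:
int capWidth  = (int)vcap.get(cv::CAP_PROP_FRAME_WIDTH);
int capHeight = (int)vcap.get(cv::CAP_PROP_FRAME_HEIGHT);
cv::Size capSize(capWidth, capHeight);

cv::VideoWriter outputVideo;
outputVideo.open("rr.mp4", codec, vcap.get(cv::CAP_PROP_FPS), capSize, true);
// frames from vcap can now be written without resizing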
I am developing a face detection algorithm in C++ in Visual Studio, using the Haar cascade method.
At the line below the application quits, while the same application works perfectly in a Linux environment.
Is there any OpenCV configuration which might cause this?
Any help would be really appreciated.
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 |
CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
Thanks.
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <thread>
#include <stdlib.h>
#include <corecrt_math_defines.h>
using namespace std;
using namespace cv;
vector<Mat> Frames;
void getFrames(char* fileName);
cv::CascadeClassifier frontfaceCascade;
cv::CascadeClassifier sidefaceCascade;
int main(int argc, char *argv[]) {
    if (argc < 2) {
        cout << " enter file name: \n";
        exit(1);
    }
    char* fileName = argv[1];

    // if (!frontfaceCascade.load("lbpcascade_frontalface.xml")) {
    if (!frontfaceCascade.load("haarcascade_frontalface_default.xml")) {
        std::cout << "Error Loading frontface haarcascade" << std::endl;
    }
    if (!sidefaceCascade.load("haarcascade_profileface.xml"))
    {
        std::cout << "Error Loading sideface haarcascade" << std::endl;
    }

    namedWindow("Faces", WINDOW_NORMAL);
    resizeWindow("Faces", 100, 400);
    moveWindow("Faces", 30, 100);

    getFrames(fileName);
    return 0;
}

void getFrames(char* fileName) {
    cv::VideoCapture vcap(fileName);
    if (!vcap.isOpened()) {
        cout << "Error opening video stream or file" << endl;
        return;
    }

    cv::Mat grey_image;
    Mat img;
    namedWindow("Frames", CV_WINDOW_AUTOSIZE);
    resizeWindow("Frames", 640, 360);
    moveWindow("Frames", 130, 100);

    int init = 0;
    while (1)
    {
        vcap >> img;
        // If the frame is empty, break immediately
        if (img.empty())
            break;

        unsigned int prevSize = 0;
        resize(img, img, cv::Size(640, 360));
        cv::cvtColor(img, grey_image, CV_RGB2GRAY);
        // cv::equalizeHist(grey_image, grey_image);

        std::vector<cv::Rect> faces;
        frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30)); // minimum size of obj
        // sidefaceCascade.detectMultiScale(grey_image, faces, 1.1, 4, 0 | 1, Size(40, 40), Size(100, 100)); // minimum size of obj
        /****/
    }
    destroyAllWindows();
    vcap.release();
}
I have the following code:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
String object_cascade_name = "haarcascade_frontalface_alt.xml";
CascadeClassifier object_cascade;
string window_name = "Capture - detector";
int main( void )
{
    VideoCapture capture;
    Mat frame;
    std::vector<Rect> objects;
    Mat frame_gray;

    if (!object_cascade.load(object_cascade_name)) { std::cout << "ERROR: Cascade not loaded!\n"; return -1; }

    capture.open(0);
    if (capture.isOpened()) {
        for (;;) {
            capture >> frame;
            capture.retrieve(frame);

            //-- 3. Apply the classifier to the frame
            if (!frame.empty()) {
                // Start
                cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
                equalizeHist(frame_gray, frame_gray);

                //-- Detect objects
                object_cascade.detectMultiScale(frame_gray, objects, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
                for (size_t i = 0; i < objects.size(); i++) {
                    Point pt1(objects[i].x, objects[i].y);
                    Point pt2(objects[i].x + objects[i].width, objects[i].y + objects[i].height);
                    rectangle(frame, pt1, pt2, Scalar(34, 92, 241), 2, 8, 0);
                    Mat faceROI = frame_gray(objects[i]);
                }

                //-- Show what you got
                imshow(window_name, frame);
                // End
            }
            else { std::cout << "ERROR: frame.empty returns 1!"; break; }

            int c = waitKey(10);
            if ((char)c == 'c') { break; }
        }
    }
    return 0;
}
which plays video from the built-in webcam and detects faces. My idea is that I want the video to stop when an object (a face) is detected, and then display a window containing only the detected object from the last frame.
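One way to get that behavior from the loop above (a minimal sketch, not tested): after detectMultiScale, check whether anything was found, show only the detected region from the current frame, wait for a key, and break out of the capture loop.
if (!objects.empty()) {
    // crop the first detected object out of the last frame
    Mat detected = frame(objects[0]).clone();
    imshow("Detected object", detected);
    waitKey(0); // freeze here until a key is pressed
    break;      // stop reading further frames
}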
Could anyone please help me write the output video file? I have read many similar questions on how to write the program and followed the exact steps to write the video file in .avi format, but I am not able to find out where I am wrong. The face_output.avi file is created, but it only contains one frame; my program is not adding all the frames to the video file. Below is the complete code:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame);
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
double fps;
/** @function main */
int main( int argc, const char** argv )
{
    VideoCapture cap("/home/pradeep/Downloads/President Obama Lectures Romney.mp4"); // open the video file for reading
    if (!cap.isOpened()) // if not successful, exit the program
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }

    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading\n"); return -1; }
    if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading\n"); return -1; }

    fps = cap.get(CV_CAP_PROP_FPS); // get the frames per second of the video
    cout << "Frame per seconds : " << fps << endl;

    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    Size S(dWidth, dHeight);

    while (1)
    {
        Mat frame;
        int skip_frame = 4;
        while (skip_frame)
        {
            bool bSuccess = cap.read(frame); // read a new frame from the video
            skip_frame--;
            if (!bSuccess) // if not successful, break the loop
            {
                cout << "Cannot read the frame from video file" << endl;
                break;
            }
        }

        //-- 3. Apply the classifier to the frame
        if (frame.empty())
        { printf(" --(!) No captured frame -- Break!"); break; }

        std::vector<Rect> faces;
        Mat frame_gray;
        cvtColor(frame, frame_gray, CV_BGR2GRAY);
        equalizeHist(frame_gray, frame_gray);

        //-- Detect faces
        face_cascade.detectMultiScale(frame_gray, faces, 1.3, 5, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t i = 0; i < faces.size(); i++)
        {
            Point center(faces[i].x + faces[i].width * 0.5, faces[i].y + faces[i].height * 0.5);
            ellipse(frame, center, Size(faces[i].width * 0.5, faces[i].height * 0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
            Mat faceROI = frame_gray(faces[i]);

            std::vector<Rect> eyes;
            //-- In each face, detect eyes
            eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 0, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
            for (size_t j = 0; j < eyes.size(); j++)
            {
                Point center(faces[i].x + eyes[j].x + eyes[j].width * 0.5, faces[i].y + eyes[j].y + eyes[j].height * 0.5);
                int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
                circle(frame, center, radius, Scalar(255, 0, 0), 4, 8, 0);
            }
        }

        VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true);
        if (!Video.isOpened())
        {
            printf("unable to write video file");
        }
        Video.write(frame);

        //-- Show what you got
        imshow(window_name, frame);
        int c = waitKey(10);
        if ((char)c == 'c') { break; }
    }
    return 0;
}
You are creating VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true); inside the while(1), so you create a new Video in each iteration. Since you only write one frame per iteration, that single frame will be the only content of your face_output.avi file.
Try moving that line before the while(1):
// ...
Size S(dWidth,dHeight);
VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true);
while(1)
{
// ...
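One more note: release the writer once the loop ends, so the file is properly finalized (the VideoWriter destructor also does this when Video goes out of scope):
Video.release();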
I already have optical flow code implemented in C++ using OpenCV. However, I would like to detect optical flow in only half of the image frame. Which part should I edit? Is it this function below?
cvCalcOpticalFlowPyrLK(
frame1_1C, frame2_1C,
pyramid1, pyramid2,
frame1_features,
frame2_features,
number_of_features,
optical_flow_window,
5,
optical_flow_found_feature,
optical_flow_feature_error,
optical_flow_termination_criteria,
0 );
No. There are no changes necessary in the function itself. All you need to do is pass only the part of the image on which you want to calculate optical flow to the function.
You can define the region of the image that you want to carry out the optical flow calculations on using
wanted_image = image(Range(row_start, row_end), Range(col_start, col_end));
Note that the first Range selects rows and the second selects columns; for example, the top-left quadrant of a 640x480 image is image(Range(0, 240), Range(0, 320)).
The following is working code based on lkdemo.cpp in the samples folder. The only worthwhile change is
gray = gray(Range(0, 480), Range(0, 320));
// gives the left half of a 640x480 frame
which defines the region of interest.
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
    cout << "*** Using OpenCV version " << CV_VERSION << " ***" << endl;
    cout << "\n\nUsage: \n"
            "\tESC - quit the program\n"
            "\tr - auto-initialize tracking\n"
            "\tc - delete all the points\n"
            "\tn - switch the \"night\" mode on/off\n" << endl;
}
int main( int argc, char** argv )
{
    help();

    // terminate after 20 iterations or when the accuracy drops below 0.3
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3);
    Size subPixWinSize(10,10), winSize(31,31);

    const int MAX_COUNT = 500;
    bool needToInit = false;
    bool nightMode = false;

    // video capture is from the default device, i.e. the webcam
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }

    namedWindow("Half screen Optical flow Demo!", 1);
    Mat gray, prevGray, image;
    vector<Point2f> points[2];

    for (;;)
    {
        Mat frame;
        // output from the VideoCapture is piped to 'frame'
        cap >> frame;
        if (frame.empty())
            break;

        frame.copyTo(image);
        cvtColor(image, gray, COLOR_BGR2GRAY);

        // in night mode, blank the image so only the tracked points are drawn
        if (nightMode)
            image = Scalar::all(0);

        gray = gray(Range(0, 480), Range(0, 320)); // left half of a 640x480 frame

        if (needToInit || points[0].size() <= 5)
        {
            goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.4);
            cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);
        }
        else if (!points[0].empty())
        {
            vector<uchar> status;
            vector<float> err;
            if (prevGray.empty())
                gray.copyTo(prevGray);
            calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0, 0.001);

            size_t i, k;
            for (i = k = 0; i < points[1].size(); i++)
            {
                if (!status[i])
                    continue;
                points[1][k++] = points[1][i];
                circle(image, points[1][i], 3, Scalar(0,255,0), -1, 8);
            }
            points[1].resize(k);
        }

        needToInit = false;
        imshow("Half screen Optical flow Demo!", image);

        char c = (char)waitKey(10);
        if (c == 27)
            break;
        switch (c)
        {
        case 'r':
            needToInit = true;
            break;
        case 'c':
            points[0].clear();
            points[1].clear();
            break;
        case 'n':
            nightMode = !nightMode;
            break;
        }

        std::swap(points[1], points[0]);
        cv::swap(prevGray, gray);
    }

    cap.release();
    return 0;
}
If you want to detect optical flow in only half of the image, you can simply pass halves of the images (frame1_1C, frame2_1C) as parameters. For example, the following code creates a matrix header for the left half of frame1_1C:
cv::Mat frame1_1C_half(frame1_1C, cv::Range(0, frame1_1C.rows), cv::Range(0, frame1_1C.cols/2));
Note that this is a view sharing data with frame1_1C rather than a copy, and any feature coordinates computed on it are relative to the half image.