I am pretty new to OpenCV, but for a school project I wrote a program that detects circles in a video. The problem: in my video (.avi) there are some circles that are not being detected, and some "things" that are definitely not circles but ARE detected. I am using OpenCV 3.0.0 on Windows 7, and my programming language is C++. Below you can find the code.
Any ideas on how I could share the video file with you (maybe by e-mail on request)?
I think the problem is the parameters of the HoughCircles method.
Any suggestions on how I can find the wanted circles? (I also posted a screenshot of a grayscale frame from the video.)
As you can see, I want to track the circles on the start/landing runway for planes.
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    VideoCapture cap("C:/Users/Julian Büchel/Documents/Screen Video/Circle_Rotenburg.avi"); // open the video file for reading
    if (!cap.isOpened()) // if not success, exit program
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }

    double fps = cap.get(CV_CAP_PROP_FPS); // get the frames per second of the video
    cout << "Frames per second: " << fps << endl;

    namedWindow("MyVideo", CV_WINDOW_NORMAL);  // create a window called "MyVideo"
    namedWindow("Trackbar", CV_WINDOW_NORMAL); // create a window called "Trackbar"
    resizeWindow("MyVideo", 1200, 700);
    resizeWindow("Trackbar", 1200, 200);

    // These are the parameters for my HoughCircles method. I added sliders to be able to adjust them.
    int iSliderValue1 = 1;
    createTrackbar("dp", "Trackbar", &iSliderValue1, 4);
    int iSliderValue2 = 200;
    createTrackbar("Min. Distance", "Trackbar", &iSliderValue2, 500);
    int iSliderValue3 = 200;
    createTrackbar("P1", "Trackbar", &iSliderValue3, 500);
    int iSliderValue4 = 100;
    createTrackbar("P2", "Trackbar", &iSliderValue4, 500);
    int iSliderValue5 = 0;
    createTrackbar("Min.R", "Trackbar", &iSliderValue5, 40);
    int iSliderValue6 = 15;
    createTrackbar("Max.R", "Trackbar", &iSliderValue6, 50);

    while (1)
    {
        Mat frame, gray;
        bool bSuccess = cap.read(frame); // read a new frame from video
        if (!bSuccess) // if not success, break loop
        {
            cout << "Cannot read the frame from video file" << endl;
            waitKey(10000);
            break;
        }

        // Should I add the GaussianBlur? Right now you can see the circles more clearly, but it takes up more CPU.
        cvtColor(frame, gray, CV_BGR2GRAY); // VideoCapture delivers BGR frames, so convert from BGR (not RGB)
        //GaussianBlur(gray, gray, Size(9, 9), 2, 2);

        vector<Vec3f> circles; // store the circles

        // Apply the Hough transform to find the circles
        //            src   vector   method            dp dist p1  p2  minR maxR
        //HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, 200, 200, 100, 25, 35);
        int dp = max(iSliderValue1, 1); // dp must be >= 1; guard against the slider being dragged to 0
        HoughCircles(gray, circles, CV_HOUGH_GRADIENT, dp, iSliderValue2, iSliderValue3, iSliderValue4, iSliderValue5, iSliderValue6);

        // Draw the circles on a BGR copy; drawing colored circles directly on the
        // single-channel 'gray' would collapse the Scalar colors to one intensity.
        Mat display;
        cvtColor(gray, display, CV_GRAY2BGR);
        for (size_t i = 0; i < circles.size(); i++)
        {
            Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
            int radius = cvRound(circles[i][2]);
            circle(display, center, 3, Scalar(0, 255, 0), -1, 8, 0);     // circle center
            circle(display, center, radius, Scalar(0, 0, 255), 3, 8, 0); // circle outline
            cout << "center : " << center << "\nradius : " << radius << endl;
        }

        imshow("MyVideo", display); // show the frame in the "MyVideo" window

        if (waitKey(25) == 27) // wait 25 ms per frame (controls playback speed); break on the Esc key
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
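One thing worth trying before tuning the sliders further: HoughCircles is quite sensitive to noise, so smoothing first usually helps. Below is a minimal preprocessing sketch that would slot in where the commented-out GaussianBlur sits; the medianBlur kernel size and every Hough parameter are assumed starting points to tune against the video, not values verified on it:

        Mat blurred;
        medianBlur(gray, blurred, 5); // 5x5 median; keeps edges sharper than a Gaussian, kernel size is a guess
        vector<Vec3f> candidates;
        HoughCircles(blurred, candidates, CV_HOUGH_GRADIENT,
                     1,    // dp: accumulator at full image resolution
                     20,   // minDist: allow fairly close circles on the lane
                     100,  // param1: upper Canny edge threshold
                     30,   // param2: accumulator threshold; lower it to find more (and more false) circles
                     10,   // minRadius
                     30);  // maxRadius

Constraining minRadius/maxRadius tightly to the true circle size and then lowering param2 step by step is usually the quickest way to keep the real circles while dropping the false positives.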
I'm working on a project to detect motion on a camera.
I need to start recording video when motion is detected, for example:
Record while motion is being detected
Continue recording for 10 seconds after the motion detection has stopped
I have a working example that only detects the motion and draws rectangles on the moving parts.
I searched for examples of how to record when motion is detected, but found no good results.
Here is my working code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>

using namespace cv;
using namespace std;

vector<vector<Point> > contours;
vector<Vec4i> hierarchy;

int main(int argc, char* argv[])
{
    // create the background subtractor object
    Ptr<BackgroundSubtractor> pBackSub;
    pBackSub = createBackgroundSubtractorMOG2();

    VideoCapture capture(0);
    if (!capture.isOpened()) {
        // error opening the video input
        cerr << "Unable to open the camera" << endl;
        return 0;
    }

    Mat frame, fgMask;
    sleep(3); // give the camera time to settle
    RNG rng(12345); // seed once, outside the loop, so rectangle colors actually vary
    while (true) {
        capture >> frame;
        if (frame.empty())
            break;

        // update the background model
        pBackSub->apply(frame, fgMask);
        imshow("FG Mask", fgMask);

        findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
        vector<Rect> boundRect(contours.size());
        vector<vector<Point> > contours_poly(contours.size());
        for (size_t i = 0; i < contours.size(); i++) {
            if (contourArea(contours[i]) < 500)
            {
                continue;
            }
            putText(frame, "Motion Detected", Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 255), 2);
            approxPolyDP(contours[i], contours_poly[i], 3, true);
            boundRect[i] = boundingRect(contours_poly[i]);
            Scalar color = Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
            rectangle(frame, boundRect[i].tl(), boundRect[i].br(), color, 2);
        }
        imshow("Frame", frame);

        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;
    }
    return 0;
}
I tried adding this to the code:
int frameWidth = 320;
int frameHeight = 240;
cv::Size frameSize = cv::Size(frameWidth, frameHeight);
/* Output file */
int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
cv::VideoWriter outputVideo;
outputVideo.open("rr.mp4", codec, capture.get(cv::CAP_PROP_FPS), frameSize, true);
and after drawing the rectangle I write the frame to the video:
outputVideo.write(frame);
but the resulting video is empty, and the program crashes.
I already took a look at Motion, but I didn't find an example.
How can I achieve this?
Thanks,
Talel
I resolved the issue.
I was opening the output video with specific dimensions (320, 240) but saving captured frames that are bigger.
So the solution is to resize the captured frame to fit the output video.
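In code, the fix is just the two lines that also appear in the full listing below:

resize(frame, frame, frameSize); // shrink to the size the VideoWriter was opened with
outputVideo.write(frame);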
Here is the final solution, if anyone is interested.
I also turned the laptop camera into an IP camera with cam2ip, which is why the code below opens an MJPEG stream.
Here is the source code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>

using namespace cv;
using namespace std;

vector<vector<Point> > contours;
vector<Vec4i> hierarchy;

int main(int argc, char* argv[])
{
    // create the background subtractor object
    Ptr<BackgroundSubtractor> pBackSub;
    pBackSub = createBackgroundSubtractorMOG2();

    const std::string videoStreamAddress = "http://192.168.20.100:56000/mjpeg";
    cv::VideoCapture vcap;
    if (!vcap.open(videoStreamAddress)) {
        std::cout << "Error opening video stream or file" << std::endl;
        return -1;
    }

    Mat frame, fgMask;
    int frameWidth = 320;
    int frameHeight = 240;
    cv::Size frameSize = cv::Size(frameWidth, frameHeight);

    /* Output file */
    int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
    cv::VideoWriter outputVideo;
    outputVideo.open("rr.mp4", codec, vcap.get(cv::CAP_PROP_FPS), frameSize, true);

    sleep(3); // give the stream time to settle
    RNG rng(12345); // seed once, outside the loop
    while (true) {
        vcap >> frame;
        if (frame.empty())
            break;

        // update the background model
        pBackSub->apply(frame, fgMask);
        imshow("FG Mask", fgMask);

        findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
        vector<Rect> boundRect(contours.size());
        vector<vector<Point> > contours_poly(contours.size());
        bool motionDetected = false;
        for (size_t i = 0; i < contours.size(); i++) {
            if (contourArea(contours[i]) < 500)
            {
                continue;
            }
            motionDetected = true;
            putText(frame, "Motion Detected", Point(10, 20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 255), 2);
            approxPolyDP(contours[i], contours_poly[i], 3, true);
            boundRect[i] = boundingRect(contours_poly[i]);
            Scalar color = Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
            rectangle(frame, boundRect[i].tl(), boundRect[i].br(), color, 2);
        }
        imshow("Frame", frame);

        // resize and write once per frame, after all rectangles are drawn;
        // resizing inside the contour loop would write duplicate frames and
        // invalidate the coordinates the remaining rectangles are drawn with
        if (motionDetected) {
            Mat out;
            resize(frame, out, frameSize);
            outputVideo.write(out);
        }

        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;
    }
    outputVideo.release();
    return 0;
}
Further enhancement suggestions:
Make sure that lighting changes are not picked up as motion
Open the output video with the same dimensions as the capture
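One piece of the original requirement, continuing to record for 10 seconds after motion stops, is not covered above. A minimal sketch of that logic under the same loop structure; the variable names (lastMotion, withinTail, everDetected) are made up for illustration:

#include <chrono>

// before the loop:
std::chrono::steady_clock::time_point lastMotion; // time of the most recent detection
bool everDetected = false;

// inside the loop, after the contour pass has set 'motionDetected':
if (motionDetected) {
    lastMotion = std::chrono::steady_clock::now();
    everDetected = true;
}
bool withinTail = everDetected &&
    std::chrono::duration_cast<std::chrono::seconds>(
        std::chrono::steady_clock::now() - lastMotion).count() < 10;
if (motionDetected || withinTail) {
    Mat out;
    resize(frame, out, frameSize); // same fix as above: match the writer's frame size
    outputVideo.write(out);
}

Per the second suggestion, you could instead open the writer with the capture's own dimensions, Size((int)vcap.get(cv::CAP_PROP_FRAME_WIDTH), (int)vcap.get(cv::CAP_PROP_FRAME_HEIGHT)), and skip the resize entirely.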
I am new to RealSense and C++. I know this question might be an easy one; however, I could not solve it even after searching for half a day.
I am trying to use RealSense with OpenCV face detection (Haar cascade).
But when I call face_cascade.detectMultiScale, the project gets an access violation error
(just like in the picture underneath).
Here is my code:
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include <rs.hpp>             // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

using namespace std;
using namespace cv;

//String face_cascade_name;
CascadeClassifier face_cascade;
string window_name = "Face detection";

void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;

    // check for an empty frame before converting; cvtColor would throw on empty input
    if (frame.empty()) {
        printf("error, no data\n");
        return;
    }

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    //face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(500, 500));
    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2),
                0, 0, 360, Scalar(0, 0, 255), 4, 8, 0);
    }
    imshow(window_name, frame);
}

int main(int argc, char * argv[]) try
{
    // Declare depth colorizer for pretty visualization of depth data
    rs2::colorizer color_map;
    // Declare RealSense pipeline, encapsulating the actual device and sensors
    rs2::pipeline pipe;

    face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
    //if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading face cascade\n"); };

    // Start streaming with the default recommended configuration
    pipe.start();

    const auto window_name = "Display Image";
    namedWindow(window_name, WINDOW_AUTOSIZE);

    while (waitKey(1) < 0 && cvGetWindowHandle(window_name))
    {
        rs2::frameset data = pipe.wait_for_frames(); // Wait for the next set of frames from the camera
        //rs2::frame depth = color_map(data.get_depth_frame());
        rs2::frame color = data.get_color_frame();

        // Query frame size (width and height)
        //const int w = depth.as<rs2::video_frame>().get_width();
        //const int h = depth.as<rs2::video_frame>().get_height();
        const int color_w = color.as<rs2::video_frame>().get_width();
        const int color_h = color.as<rs2::video_frame>().get_height();

        // Create an OpenCV matrix wrapping the color frame's data
        //Mat image(Size(w, h), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
        Mat image(Size(color_w, color_h), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);

        // Update the window with new data
        //imshow(window_name, image);
        detectAndDisplay(image);
    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception& e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}
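One thing worth checking first: CascadeClassifier::load() returns false on a wrong path and leaves the classifier empty, which is a common way to end up with a crash inside detectMultiScale. A minimal guard, reusing the path from the code above:

// bail out early if the cascade file could not be loaded;
// calling detectMultiScale on an empty classifier is a frequent crash cause
if (!face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml")
    || face_cascade.empty())
{
    printf("--(!)Error loading face cascade\n");
    return EXIT_FAILURE;
}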
I am working in Visual Studio 2010 with C++ and OpenCV 2.3.1, on an HP laptop running 32-bit Windows 7. I have tried many times to solve this problem, but it keeps happening: with some code my built-in webcam sometimes works and sometimes does not, and with other code it always displays a gray window instead of the camera feed.
Could anyone help?
Thanks in advance.
For example, the first code below sometimes displays the camera feed, while the second one always displays a gray window.
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) // check if we succeeded
return -1;
Ptr<BackgroundSubtractor> pMOG = new BackgroundSubtractorMOG2();
Mat fg_mask;
Mat frame;
int count = -1;
for (;;)
{
// Get frame
cap >> frame; // get a new frame from camera
// Update counter
++count;
// Background subtraction
pMOG->operator()(frame, fg_mask);
imshow("frame", frame);
imshow("fg_mask", fg_mask);
// Save foreground mask
string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
imwrite("D:\\SO\\temp\\" + name, fg_mask);
if (waitKey(1) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
////////////////////////
second code:
// WriteVideo.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char* argv[])
{
    VideoCapture cap(0); // open video camera no. 0
    if (!cap.isOpened()) // if not success, exit program
    {
        cout << "ERROR: Cannot open the video file" << endl;
        return -1;
    }

    namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); // create a window called "MyVideo"

    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH);   // get the width of frames of the video
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); // get the height of frames of the video
    cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

    Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

    VideoWriter oVideoWriter("D:/visual outs/mix/WriteVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); // initialize the VideoWriter object
    if (!oVideoWriter.isOpened()) // if the VideoWriter was not initialized successfully, exit the program
    {
        cout << "ERROR: Failed to write the video" << endl;
        return -1;
    }

    while (1)
    {
        Mat frame;
        bool bSuccess = cap.read(frame); // read a new frame from video
        if (!bSuccess) // if not success, break loop
        {
            cout << "ERROR: Cannot read a frame from video file" << endl;
            break;
        }

        oVideoWriter.write(frame); // write the frame into the file
        imshow("MyVideo", frame);  // show the frame in the "MyVideo" window

        if (waitKey(10) == 27) // wait 10 ms for the 'Esc' key; if pressed, break loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
    return 0;
}
Without the actual code, there's nothing we can really do to pinpoint the issue; could you reformulate your question, please? Anyway, to instantiate a VideoCapture object in OpenCV, the C++ code would be pretty much like this:
#include "opencv2/opencv.hpp"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat edges;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
cvtColor(frame, edges, CV_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(30) >= 0) break; // Wait 30 ms for a key feed, then break out of the code if a keypress is found in the message queue
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
As for the plain C version, you'd have to leverage the old yet still reliable CvCapture struct from the old OpenCV API. In this case, you'd call cvCreateCameraCapture(0), which simply returns a pointer to the newly created camera instance (CvCapture*). The code then evaluates to the snippet below.
# include "highgui.h"
# include "cv.h"
int main( int argc, char** argv )
{
CvCapture* capture;
capture = cvCreateCameraCapture(0);
if (!capture)
return -1;
IplImage* bgr_frame = cvQueryFrame(capture); // Query the next frame
CvSize size = cvSize(
(int)cvGetCaptureProperty(capture,
CV_CAP_PROP_FRAME_WIDTH),
(int)cvGetCaptureProperty(capture,
CV_CAP_PROP_FRAME_HEIGHT)
);
cvNamedWindow("OpenCV via CvCapture", CV_WINDOW_AUTOSIZE); // Create a window and assign it a title
/* Open a CvVideoWriter to save the video to a file. Think of it as a stream */
CvVideoWriter *writer = cvCreateVideoWriter(argv[1],
CV_FOURCC('D','I','V','X'),
30,
size
);
while((bgr_frame = cvQueryFrame(capture)) != NULL)
{
cvWriteFrame(writer, bgr_frame);
cvShowImage("OpenCV via CvCapture", bgr_frame);
char c = cvWaitKey(30); // Same as the snippet above. Wait for 30 ms
if(c == 27) break; // If the key code is 0x27 (ESC), break
}
cvReleaseVideoWriter(&writer); // Dispose the CvVideoWriter instance
cvReleaseCapture(&capture); // Dispose the CvCapture instance
cvDestroyWindow("OpenCV via CvCapture"); // Destroy the window
return 0;
}
I want to calculate optical flow on the left half and the right half of the captured video separately. Using the results from the left and right halves, I want to eventually detect whether the user (i.e. the camera) is moving forward or backward.
Currently, I have this working for the left half of the screen. The code looks like this:
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
cout << "*** Using OpenCV version " << CV_VERSION <<" ***"<< endl;
cout << "\n\nUsage: \n"
"\tESC - quit the program\n"
"\tr - auto-initialize tracking\n"
"\tc - delete all the points\n"
"\tn - switch the \"night\" mode on/off\n"<< endl;
}
int main( int argc, char** argv )
{
help();
//Termination of the algo after 20 iterations or accuracy going under 0.03
TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3);
Size subPixWinSize(10,10), winSize(31,31);
const int MAX_COUNT = 500;
bool needToInit = false;
bool nightMode = false;
//Video capture is from the default device i.e. the webcam
VideoCapture cap(0);
if( !cap.isOpened() )
{
cout << "Could not initialize capturing...\n";
return 0;
}
namedWindow( "Half screen Optical flow Demo!", 1 );
Mat gray, prevGray, image;
vector<Point2f> points[2];
for(;;)
{
Mat frame;
//Output from the Videocapture is piped to 'frame'
cap >> frame;
if( frame.empty() )
break;
frame.copyTo(image);
cvtColor(image, gray, COLOR_BGR2GRAY);
// Night mode not disabled
if( nightMode )
image = Scalar::all(0);
//This line saves into 'gray' only the left half of the original image. Thus making the detection of optical only on the left side possible.
gray = gray(Range(1,480), Range(1,320));
if( needToInit || points[0].size()<=5)
{
goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.4);
cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);
}
else if( !points[0].empty() )
{
vector<uchar> status;
vector<float> err;
if(prevGray.empty())
gray.copyTo(prevGray);
calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0, 0.001);
size_t i, k;
for( i = k = 0; i < points[1].size(); i++ )
{
if( !status[i] )
continue;
points[1][k++] = points[1][i];
circle(image, points[1][i], 3, Scalar(0,255,0), -1, 8);
}
points[1].resize(k);
}
needToInit = false;
imshow("Half screen Optical flow Demo!", image);
char c = (char)waitKey(10);
if( c == 27 )
break;
switch( c )
{
case 'r':
needToInit = true;
break;
case 'c':
points[0].clear();
points[1].clear();
break;
case 'n':
nightMode = !nightMode;
break;
}
std::swap(points[1], points[0]);
cv::swap(prevGray, gray);
}
cap.release();
return 0;
}
How do I do this for the right half of the screen as well?
If I simply replicate the functions from above, say by modifying the image = image(Range(...), Range(...)) values and so on, I don't get any output.
EDIT: Should I move the part where the optical flow is calculated out into a separate function and call it twice, once for the left half of the image and once for the right half? (A sketch of that refactor follows below.)
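That refactor seems like the cleanest route. Here is a minimal sketch under that assumption; the helper name flowOnRoi and its arguments are made up for illustration, and the point coordinates it produces are relative to the ROI, so the ROI offset must be added back before drawing on the full image:

// Hypothetical helper: runs LK optical flow on one rectangular region
// of the frame pair. 'roi' selects the left or the right half.
static void flowOnRoi(const Mat& prevGray, const Mat& gray, Rect roi,
                      vector<Point2f>& prevPts, vector<Point2f>& pts)
{
    Mat prevHalf = prevGray(roi), half = gray(roi);
    if (prevPts.size() <= 5)
    {
        // (re)seed features inside this half only
        goodFeaturesToTrack(half, pts, 500, 0.01, 10);
        return;
    }
    vector<uchar> status;
    vector<float> err;
    calcOpticalFlowPyrLK(prevHalf, half, prevPts, pts, status, err);
    // keep only the points that were successfully tracked
    size_t k = 0;
    for (size_t i = 0; i < pts.size(); i++)
        if (status[i])
            pts[k++] = pts[i];
    pts.resize(k);
}

// In the main loop, assuming 640x480 frames, each half keeps its own point buffers:
//   flowOnRoi(prevGray, gray, Rect(0, 0, 320, 480), leftPts[0], leftPts[1]);
//   flowOnRoi(prevGray, gray, Rect(320, 0, 320, 480), rightPts[0], rightPts[1]);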
I already have an optical flow implementation in C++ with OpenCV. However, I would like to detect optical flow in only half of the image frame. Which part should I edit? Is it this function below?
cvCalcOpticalFlowPyrLK(
    frame1_1C, frame2_1C,
    pyramid1, pyramid2,
    frame1_features,
    frame2_features,
    number_of_features,
    optical_flow_window,
    5,
    optical_flow_found_feature,
    optical_flow_feature_error,
    optical_flow_termination_criteria,
    0 );
No. There are no changes necessary in the function itself. All you need to do is pass to the function only the part of the image on which you want to calculate optical flow.
You can define the region of the image that you want to carry out the optical flow calculations on using
wanted_image = image(Range(row_start, row_end), Range(col_start, col_end));
where the first Range selects rows and the second selects columns.
The following is working code based on the lkdemo.cpp in the samples folder. The only worthwhile change is
gray = gray(Range(1,480), Range(1,320)); // gives the left half of the image
which defines the region of interest.
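For the right half of a 640x480 frame, the analogous crop (a sketch, assuming that input size) keeps the same rows and shifts the columns:

gray_right = gray(Range(0, 480), Range(320, 640)); // right half

Keep in mind that point coordinates found inside this ROI are relative to its top-left corner, so add an x offset of 320 before drawing them on the full image.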
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
cout << "*** Using OpenCV version " << CV_VERSION <<" ***"<< endl;
cout << "\n\nUsage: \n"
"\tESC - quit the program\n"
"\tr - auto-initialize tracking\n"
"\tc - delete all the points\n"
"\tn - switch the \"night\" mode on/off\n"<< endl;
}
int main( int argc, char** argv )
{
help();
//Termination of the algo after 20 iterations or accuracy going under 0.03
TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3);
Size subPixWinSize(10,10), winSize(31,31);
const int MAX_COUNT = 500;
bool needToInit = false;
bool nightMode = false;
//Video capture is from the default device i.e. the webcam
VideoCapture cap(0);
if( !cap.isOpened() )
{
cout << "Could not initialize capturing...\n";
return 0;
}
namedWindow( "Half screen Optical flow Demo!", 1 );
Mat gray, prevGray, image;
vector<Point2f> points[2];
for(;;)
{
Mat frame;
//Output from the Videocapture is piped to 'frame'
cap >> frame;
if( frame.empty() )
break;
frame.copyTo(image);
cvtColor(image, gray, COLOR_BGR2GRAY);
// Night mode not disabled
if( nightMode )
image = Scalar::all(0);
gray = gray(Range(1,480), Range(1,320));
if( needToInit || points[0].size()<=5)
{
goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.4);
cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);
}
else if( !points[0].empty() )
{
vector<uchar> status;
vector<float> err;
if(prevGray.empty())
gray.copyTo(prevGray);
calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0, 0.001);
size_t i, k;
for( i = k = 0; i < points[1].size(); i++ )
{
if( !status[i] )
continue;
points[1][k++] = points[1][i];
circle(image, points[1][i], 3, Scalar(0,255,0), -1, 8);
}
points[1].resize(k);
}
needToInit = false;
imshow("Half screen Optical flow Demo!", image);
char c = (char)waitKey(10);
if( c == 27 )
break;
switch( c )
{
case 'r':
needToInit = true;
break;
case 'c':
points[0].clear();
points[1].clear();
break;
case 'n':
nightMode = !nightMode;
break;
}
std::swap(points[1], points[0]);
cv::swap(prevGray, gray);
}
cap.release();
return 0;
}
If you want to detect optical flow only in half of the image, then you can simply pass halves of the images (frame1_1C, frame2_1C) as parameters. For example, the following code initializes a matrix referencing the left half of frame1_1C:
cv::Mat frame1_1C_half(frame1_1C, cv::Range(0, frame1_1C.rows), cv::Range(0, frame1_1C.cols/2));
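By the same logic, a matrix referencing the right half would shift the column range (mirroring the line above):

cv::Mat frame1_1C_right(frame1_1C, cv::Range(0, frame1_1C.rows), cv::Range(frame1_1C.cols / 2, frame1_1C.cols));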