I have 3 video sources connected to my PC and I want to show them on one screen.
I initially put the video sources next to each other, and that works fine, but I want to be able to enable/disable each video source at run time.
So I want to use the keyboard keys r (right) and l (left) to change which cameras are being shown at the moment.
I want to move the declarations of the following 3 variables outside the while loop so I can access them in the if-cases and change them:
cv::Mat3b combinedFrame(camRightSize.height, camMiddleSize.width + camRightSize.width);
cv::Mat3b leftSideOfScreen(combinedFrame, Rect(0, 0, camMiddleSize.width, camMiddleSize.height));
cameraMiddleFrameMirroredResize.copyTo(leftSideOfScreen);
cv::Mat3b rightSideOfScreen(combinedFrame, Rect(camMiddleSize.width, 0, camRightSize.width, camRightSize.height));
Below is my whole code:
int main(int argc, char **argv) {
int combinedScreenWidth = 1440;
int combinedScreenHeight = 540;
int rearCameraBiggerByThis = 200;
int combinedScreenWidthHalv = combinedScreenWidth / 2;
bool showRight = true;
bool showLeft = false;
//initialize and allocate memory to load the video stream from camera
cv::VideoCapture cameraRight(0); // RIGHT
cv::VideoCapture cameraMiddle(3); // REAR
cv::VideoCapture cameraLeft(3); // LEFT
if (!cameraRight.isOpened()) return 1;
if (!cameraMiddle.isOpened()) return 1;
if (!cameraLeft.isOpened()) return 1;
cv::Mat3b cameraRightFrame;
cv::Mat3b cameraMiddleFrame;
cv::Mat3b cameraLeftFrame;
cv::Mat3b cameraRightFrameMirrored;
cv::Mat3b cameraMiddleFrameMirrored;
cv::Mat3b cameraLeftFrameMirrored;
Size camRightSize;
Size camMiddleSize;
Size camLeftSize;
cv::Mat3b cameraRightFrameMirroredResize;
cv::Mat3b cameraMiddleFrameMirroredResize;
cv::Mat3b cameraLeftFrameMirroredResize;
while (true) {
// Grab and retrieve each frame of the video sequentially
cameraRight >> cameraRightFrame;
cameraMiddle >> cameraMiddleFrame;
cameraLeft >> cameraLeftFrame;
// Mirror
cv::flip(cameraRightFrame, cameraRightFrameMirrored, 1);
cv::flip(cameraMiddleFrame, cameraMiddleFrameMirrored, 1);
cv::flip(cameraLeftFrame, cameraLeftFrameMirrored, 1);
// Resize
camRightSize = cameraRightFrame.size();
camMiddleSize = cameraMiddleFrame.size();
camLeftSize = cameraLeftFrame.size();
resize(cameraMiddleFrameMirrored, cameraMiddleFrameMirroredResize, Size(combinedScreenWidthHalv + rearCameraBiggerByThis, combinedScreenHeight));
resize(cameraRightFrameMirrored, cameraRightFrameMirroredResize, Size(combinedScreenWidthHalv - rearCameraBiggerByThis, combinedScreenHeight));
// Composition
camRightSize = cameraRightFrameMirroredResize.size();
camMiddleSize = cameraMiddleFrameMirroredResize.size();
camLeftSize = cameraLeftFrameMirroredResize.size();
if (showRight && showLeft) { // LEFT + REAR + RIGHT
} else if (showRight) { // REAR + RIGHT
} else if (showLeft) { // LEFT + REAR
} else { // REAR
}
cv::Mat3b combinedFrame(camRightSize.height, camMiddleSize.width + camRightSize.width);
cv::Mat3b leftSideOfScreen(combinedFrame, Rect(0, 0, camMiddleSize.width, camMiddleSize.height));
cameraMiddleFrameMirroredResize.copyTo(leftSideOfScreen);
cv::Mat3b rightSideOfScreen(combinedFrame, Rect(camMiddleSize.width, 0, camRightSize.width, camRightSize.height));
cameraRightFrameMirroredResize.copyTo(rightSideOfScreen);
// declare windows
cv::namedWindow("Combined", CV_WINDOW_NORMAL);
cv::setWindowProperty("Combined", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
cv::putText(combinedFrame, "REAR", cv::Point(500, 50), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 255), 2 );
cv::putText(combinedFrame, "RIGHT", cv::Point(950, 50), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 255), 2 );
cv::imshow("Combined", combinedFrame); // 1440 x 540 Screen size
//cv::imshow("Right Cam", cameraRightFrame);
//cv::imshow("Middle Cam", cameraMiddleFrame);
//cv::imshow("Left Cam", cameraLeftFrame);
//wait 1 ms for a key press
int c = cvWaitKey(1);
//exit the loop if user press "Esc" key (ASCII value of "Esc" is 27)
if (27 == char(c)) {
break;
}
else if (114 == char(c)) { // 114 == 'r': toggle the right camera
showRight = !showRight;
}
else if (108 == char(c)) { // 108 == 'l': toggle the left camera
showLeft = !showLeft;
}
}
return 0;
}
According to your description, I think what you want to write is:
combinedFrame(Rect(0, 0, camMiddleSize.width, camMiddleSize.height)).copyTo(leftSideOfScreen);
That is:
copy one cv::Mat into another (a call to cv::Mat::copyTo(cv::Mat&))
the "source matrix" is obtained by extracting a rectangle from combinedFrame: combinedFrame(Rect(0, 0, camMiddleSize.width, camMiddleSize.height))
Related
I'm trying to detect multiple faces in a video file, according to user input.
The code detects the desired number of faces, but sometimes the detection "jumps" to someone else, different from the faces in the first frame.
The video file contains 5-6 faces and the user can select a number from 1 to 4.
The program should detect the first X faces and track them throughout the video; in addition, the program opens another window with the detected faces.
Here is my algorithm code:
capture >> cap_img;
waitKey(2);
cvtColor(cap_img, gray_img, CV_BGR2GRAY);
equalizeHist(gray_img, gray_img);
while (1)
{
capture >> cap_img;
waitKey(2);
cvtColor(cap_img, gray_img, CV_BGR2GRAY);
equalizeHist(gray_img, gray_img);
// Detect faces
face_cascade.detectMultiScale(gray_img, faces, 1.1, 5,0 | CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING | CASCADE_SCALE_IMAGE, Size(1, 1));
// Set Region of Interest
cv::Rect reg_b;
cv::Rect reg_c;
int i = 0; // i is index of current element
int ac = 0; // ac is area of current element
int ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
// caution: this permanently lowers numOfFaces, so after one frame with
// fewer detections, later frames will never go back to tracking more faces
if (numOfFaces > faces.size())
{
numOfFaces = faces.size();
}
for (i = 0; i < numOfFaces; i++) // Iterate through all current elements (detected faces)
{
reg_c.x = faces[i].x;
reg_c.y = faces[i].y;
reg_c.width = (faces[i].width);
reg_c.height = (faces[i].height);
// Get the area of current element (detected face), at beginning it is same as "current" element
ac = reg_c.width * reg_c.height;
reg_b.x = faces[ib].x;
reg_b.y = faces[ib].y;
reg_b.width = (faces[ib].width);
reg_b.height = (faces[ib].height);
ab = reg_b.width * reg_b.height;
// Get the area of biggest element, at beginning it is same as "current" element
if (ac > ab)
{
ib = i;
reg_b.x = faces[ib].x;
reg_b.y = faces[ib].y;
reg_b.width = (faces[ib].width);
reg_b.height = (faces[ib].height);
}
crop = cap_img(reg_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
Point pt1(faces[i].x, faces[i].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[i].x + faces[i].width), (faces[i].y + faces[i].height)); // width goes with x, height with y
rectangle(cap_img, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
// Show image
resize(cap_img, cap_img, Size(cap_img.cols / 2, cap_img.rows / 2)); // to half size or even smaller
imshow("original", cap_img);
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");
}
"numOfFaces" is the number of faces to detect.
What am I doing wrong?
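The original post does not include a fix, but one hedged observation: detectMultiScale() returns its hits in no guaranteed order, so "the first X faces" can be a different set of people on every frame, which would look exactly like the detection jumping. Below is a sketch of one possible stabilization step — the helper names and the 0.3 threshold are assumptions for illustration, not part of the original code: keep the X faces picked on the first frame and, on later frames, snap each one to the detection that overlaps it most.
// Hypothetical helper: intersection-over-union of two face rectangles.
static double overlapRatio(const cv::Rect& a, const cv::Rect& b)
{
    double inter = (a & b).area();              // intersection rectangle area
    double uni = a.area() + b.area() - inter;   // union area
    return uni > 0 ? inter / uni : 0.0;
}
// previousFaces holds the X rectangles selected on the first frame.
std::vector<cv::Rect> matchToPrevious(const std::vector<cv::Rect>& detections,
                                      const std::vector<cv::Rect>& previousFaces)
{
    std::vector<cv::Rect> matched;
    for (size_t p = 0; p < previousFaces.size(); p++)
    {
        double bestIoU = 0.0;
        cv::Rect best = previousFaces[p]; // fall back to the last known box
        for (size_t d = 0; d < detections.size(); d++)
        {
            double iou = overlapRatio(previousFaces[p], detections[d]);
            if (iou > bestIoU) { bestIoU = iou; best = detections[d]; }
        }
        matched.push_back(bestIoU > 0.3 ? best : previousFaces[p]); // 0.3 is an assumed threshold
    }
    return matched;
}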
I am trying to detect colored balls (like PS3 Move controller balls) from a distance of 2 m. I have 10 cameras in the same room hanging from the ceiling. The room is dark and the balls have LEDs inside. I have 4-5 balls (red, blue, green, yellow, pink). I want to track their positions with OpenCV. What's the right method for doing this in OpenCV? Can you give a link or example for this?
I use this code but I have a delay problem. When I comment out my trackFilteredObject line there is no lag, but when using this code I get a lot of latency. I can't understand why this happens: my normal CPU usage is ~15% and RAM usage is 6.3 GB/15 GB (~40%); when I run this code, CPU usage is ~20-23% and RAM usage is 6.4 GB. I think it's not about CPU/RAM performance. What am I doing wrong?
Video: https://www.youtube.com/watch?v=_BKtJpPrkO4 (You can see the lag in the first 10 seconds. After 10 seconds I comment out the tracking code.)
Note: kameraSayisi means camera count. My track function:
void trackFilteredObject(Object theObject,Mat threshold,Mat HSV, Mat &cameraFeed){
//max number of objects to be detected in frame
const int FRAME_WIDTH = 5120;
const int FRAME_HEIGHT = 480;
const int MAX_NUM_OBJECTS=50;
//minimum and maximum object area
const int MIN_OBJECT_AREA = 10*10;
const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
vector <Object> objects;
Mat temp;
threshold.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
bool objectFound = false;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
if(numObjects<MAX_NUM_OBJECTS){
for (int index = 0; index >= 0; index = hierarchy[index][0]) { // walk the top-level contours via the hierarchy's "next" link
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
//if the area is less than 20 px by 20px then it is probably just noise
//if the area is as big as 2/3 of the image size, it is probably just a bad filter
//we only want the object with the largest area so we save a reference area each
//iteration and compare it to the area in the next iteration.
if(area>MIN_OBJECT_AREA){
Object object;
object.setXPos(moment.m10/area);
object.setYPos(moment.m01/area);
object.setType(theObject.getType());
object.setColor(theObject.getColor());
objects.push_back(object);
objectFound = true;
}else objectFound = false;
}
//let user know you found an object
if(objectFound ==true){
//draw object location on screen
drawObject(objects,cameraFeed,temp,contours,hierarchy);}
}else putText(cameraFeed,"TOO MUCH NOISE! ADJUST FILTER",Point(0,50),1,2,Scalar(0,0,255),2);
}
}
Main Code:
void Run()
{
int w, h;
_fps = 30;
IplImage *pCapImage[kameraSayisi];
IplImage *pDisplayImage;
PBYTE pCapBuffer = NULL;
// Create camera instance
for(int i = 0; i < kameraSayisi; i++)
{
_cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
if(_cam[i] == NULL) return;
// Get camera frame dimensions
CLEyeCameraGetFrameDimensions(_cam[i], w, h);
// Create the OpenCV images
pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
// Set some camera parameters
CLEyeSetCameraParameter(_cam[i], CLEYE_GAIN, 0);
CLEyeSetCameraParameter(_cam[i], CLEYE_EXPOSURE, 511);
// Start capturing
CLEyeCameraStart(_cam[i]);
}
pDisplayImage = cvCreateImage(cvSize(w*kameraSayisi / 2, h * kameraSayisi/4 ), IPL_DEPTH_8U ,1);
if(_cam == NULL) return;
int iLastX = -1;
int iLastY = -1;
//Capture a temporary image from the camera
//program
bool trackObjects = true;
bool useMorphOps = true;
Mat HSV;
//Create a black image with the size as the camera output
Mat imgLines;
// imgLines = Mat::zeros( cvarrToMat(image).size(), CV_8UC3 );;
Mat threshold;
//x and y values for the location of the object
int x=0, y=0;
bool calibrationMode = false;
if(calibrationMode){
//create slider bars for HSV filtering
createTrackbars();
}
// image capturing loop
while(_running)
{
PBYTE pCapBuffer;
// Capture camera images
for(int i = 0; i < kameraSayisi; i++)
{
cvGetImageRawData(pCapImage[i], &pCapBuffer);
CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0)?2000:0);
}
// Display stereo image
for(int i = 0; i < kameraSayisi; i++)
{
cvSetImageROI(pDisplayImage, cvRect(w * (i%4) ,i/4 * h, w, h));
cvCopy(pCapImage[i], pDisplayImage);
}
cvResetImageROI(pDisplayImage);
Mat imgOriginal;
Mat imgConverted = cvarrToMat(pDisplayImage);
if(calibrationMode==true)
{
//need to find the appropriate color range values
// calibrationMode must be false
//if in calibration mode, we track objects based on the HSV slider values.
//cvtColor(imgOriginal,imgOriginal,CV_BayerRG2RGB);
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
cvtColor(imgOriginal,HSV,CV_BGR2HSV);
inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
morphOps(threshold);
imshow(_windowName + 'T',threshold);
//the folowing for canny edge detec
/// Create a matrix of the same type and size as src (for dst)
dst.create( imgOriginal.size(), src.type() );
/// Convert the image to grayscale
cvtColor( imgOriginal, src_gray, CV_BGR2GRAY );
/// Create a window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
/// Create a Trackbar for user to enter threshold
// createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
/// Show the image
Object a = Object(H_MIN,S_MIN,V_MIN,H_MAX,S_MAX,V_MAX);
trackFilteredObject(a,threshold,HSV,imgOriginal);
}
else{
//we can use their member functions/information
Object blue("blue"), yellow("yellow"), red("red"), orange("orange"),white("white");
cvtColor(imgConverted,imgOriginal,CV_BayerGB2BGR);
//first find blue objects
cvtColor(imgOriginal,HSV,CV_RGB2HSV);
inRange(HSV,blue.getHSVmin(),blue.getHSVmax(),threshold);
morphOps(threshold);
//then yellows -- note that this and each following inRange call overwrites
//threshold, so all five tracking calls below end up using the orange mask
inRange(HSV,yellow.getHSVmin(),yellow.getHSVmax(),threshold);
//then reds
inRange(HSV,red.getHSVmin(),red.getHSVmax(),threshold);
//then white
inRange(HSV,white.getHSVmin(),white.getHSVmax(),threshold);
//then orange
inRange(HSV,orange.getHSVmin(),orange.getHSVmax(),threshold);
trackFilteredObject(yellow,threshold,HSV,imgOriginal);
trackFilteredObject(white,threshold,HSV,imgOriginal);
trackFilteredObject(red,threshold,HSV,imgOriginal);
trackFilteredObject(blue,threshold,HSV,imgOriginal);
trackFilteredObject(orange,threshold,HSV,imgOriginal);
}
//wait up to 30 ms so that the screen can refresh.
//The image will not appear without this waitKey() call.
if (cvWaitKey(30) == 27) //break the loop if the user presses the Esc key (ASCII 27)
{
cout << "esc key is pressed by user" << endl;
break;
}
// cvShowImage(_windowName, image);
imshow(_windowName,imgOriginal);
}
for(int i = 0; i < kameraSayisi; i++)
{
// Stop camera capture
CLEyeCameraStop(_cam[i]);
// Destroy camera object
CLEyeDestroyCamera(_cam[i]);
// Destroy the allocated OpenCV image
cvReleaseImage(&pCapImage[i]);
_cam[i] = NULL;
}
}
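The original thread does not say what finally fixed the lag, but one thing worth flagging in the posted code (an observation, not a confirmed cause): every inRange() call writes into the same threshold matrix, so all five trackFilteredObject() calls scan the orange mask, and each one runs findContours() over the full 5120-pixel-wide composite. Below is a hedged sketch of the else branch with one mask per color; it assumes the same Object class and helpers as above, and the mask names are mine.
// One mask per color, so each tracking pass sees the pixels for its own color
// instead of whatever the last inRange() call left behind.
Mat maskBlue, maskYellow, maskRed, maskWhite, maskOrange;
cvtColor(imgConverted, imgOriginal, CV_BayerGB2BGR);
cvtColor(imgOriginal, HSV, CV_RGB2HSV);
inRange(HSV, blue.getHSVmin(), blue.getHSVmax(), maskBlue);
inRange(HSV, yellow.getHSVmin(), yellow.getHSVmax(), maskYellow);
inRange(HSV, red.getHSVmin(), red.getHSVmax(), maskRed);
inRange(HSV, white.getHSVmin(), white.getHSVmax(), maskWhite);
inRange(HSV, orange.getHSVmin(), orange.getHSVmax(), maskOrange);
morphOps(maskBlue);   // the original only cleaned up the blue mask;
morphOps(maskYellow); // here the same cleanup is applied to every color
morphOps(maskRed);
morphOps(maskWhite);
morphOps(maskOrange);
trackFilteredObject(blue, maskBlue, HSV, imgOriginal);
trackFilteredObject(yellow, maskYellow, HSV, imgOriginal);
trackFilteredObject(red, maskRed, HSV, imgOriginal);
trackFilteredObject(white, maskWhite, HSV, imgOriginal);
trackFilteredObject(orange, maskOrange, HSV, imgOriginal);
If the five full-width contour passes are the real bottleneck, running the tracking per camera frame (or downscaling the masks before findContours) would cut the per-frame work further, but that is a guess without profiling.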
I am trying to calculate HOG features on the GPU for different levels and then save the features of each level to a YAML file. Below is the function that I am using.
void App::run()
{
unsigned int count = 0;
FileStorage fs;
running = true;
int width = 640;
int height = 480;
Size win_size(args.win_width, args.win_width * 2);
Size win_stride(args.win_stride_width, args.win_stride_height);
cv::gpu::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9,
cv::gpu::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr,
cv::gpu::HOGDescriptor::DEFAULT_NLEVELS);
VideoCapture vc("/home/ubuntu/Desktop/getdescriptor/images/image%d.jpg");
Mat frame;
Mat Left;
Mat img_aux, img, img_to_show, img_new;
cv::Mat temp;
gpu::GpuMat gpu_img, descriptors, new_img;
char cbuff[20];
while (running)
{
vc.read(frame);
if (!frame.empty())
{
workBegin();
sprintf (cbuff, "%04d", count);
// Change format of the image
if (make_gray) cvtColor(frame, img_aux, CV_BGR2GRAY);
else if (use_gpu) cvtColor(frame, img_aux, CV_BGR2BGRA);
else Left.copyTo(img_aux);
// Resize image
if (args.resize_src) resize(img_aux, img, Size(args.width, args.height));
else img = img_aux;
img_to_show = img;
gpu_hog.nlevels = nlevels;
hogWorkBegin();
if (use_gpu)
{
gpu_img.upload(img);
new_img.upload(img_new);
fs.open(cbuff, FileStorage::WRITE);
//double scale = 1.05;
for(int levels = 0; levels < nlevels; levels++)
{
gpu_hog.getDescriptors(gpu_img, win_stride, descriptors, cv::gpu::HOGDescriptor::DESCR_FORMAT_ROW_BY_ROW);
descriptors.download(temp);
printf("size %d %d\n", temp.rows, temp.cols);
fs <<"level" << levels;
fs << "features" << temp;
cout<<"("<<width<<","<<height<<")"<<endl;
width = round(width/scale);
height = round(height/scale);
cout<<"Levels "<<levels<<endl;
if(width < win_size.width || height < win_size.height)
break;
resize(img,img_new,Size(width,height));
scale *= scale;
}
cout<<count<<endl;
count++;
}
hogWorkEnd();
fs.release();
}
else running = false;
}
}
For the first image it correctly calculates HOG features for all levels, but for the next image it takes the old values of width and height, and in that case it breaks out of the following loop.
if(width < win_size.width || height < win_size.height)
break;
Can someone point out my mistake? I tried to debug, but unfortunately no success yet.
The HOG feature calculation for each image takes the old values of the following three parameters:
1. Width
2. Height
3. Scale
So when it calculates HOG features for the next image, it breaks the loop immediately. A common programming mistake.
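For what it's worth, that symptom matches width, height, and scale being set once before the while loop and never restored. A minimal sketch of the fix (the initial scale of 1.05 is an assumption taken from the commented-out line in the code above) is to reset all three at the top of each frame:
while (running)
{
    vc.read(frame);
    if (!frame.empty())
    {
        // Reset the per-image pyramid state so every frame starts from full
        // resolution instead of wherever the previous image's loop stopped.
        width = 640;
        height = 480;
        scale = 1.05; // assumed initial value
        // ... the rest of the per-frame work, including the levels loop,
        // then shrinks width/height from these fresh values ...
    }
    else running = false;
}
The line scale *= scale; is also worth a second look: it squares the factor at every level (1.05, 1.1025, 1.2155, ...), which shrinks the pyramid much faster than a constant 1.05 step would.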
I want to calibrate my stereo camera, so I am trying to capture 10 images of a chessboard from the right and the left camera.
What the code should do:
capture 10 images of a chessboard from the right and the left camera. The program saves the two images if both contain clear corner points. The program should wait 20 frames for me to change the position of the chessboard.
What is happening:
it captures the 10 good images and saves them, but it does not wait for me to change the position of the chessboard.
this is my code:
int captureImages_stereoCal()
{
CvCapture* captureL = cvCreateCameraCapture(1);
assert(captureL);
waitKey(10000);
CvCapture* captureR = cvCreateCameraCapture(2);
assert(captureR);
waitKey(10000);
/* Mat imageL;
Mat imageR; */
int nx=8 , nh=5;
int frame=0;
int s =1;
int ss;
CvPoint2D32f* cornersL = new CvPoint2D32f[nx*nh];
int corner_countL;
CvPoint2D32f* cornersR = new CvPoint2D32f[nx*nh];
int corner_countR;
IplImage *imageL=cvQueryFrame(captureL);
IplImage *gray_imageL=cvCreateImage(cvGetSize(imageL),8,1);
IplImage *CimageL=cvCreateImage(cvGetSize(imageL),32,3);
IplImage *imageR=cvQueryFrame(captureR);
IplImage *gray_imageR=cvCreateImage(cvGetSize(imageR),8,1);
IplImage *CimageR=cvCreateImage(cvGetSize(imageL),32,3);
const int board_dt=20;
while(s<=12)
{
if(frame++ % board_dt == 0)
{
string Result;
ostringstream convert;
ss=s-2;
convert << ss;
Result = convert.str();
//n=(char)s;
//waitKey(1000);
//Left -----------------------------------------------------------------------------------------------------------
string nameL="L.jpg";
//Find chessboard corners:
int foundL = cvFindChessboardCorners(imageL, Size(nx,nh), cornersL, &corner_countL,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
//Get Subpixel accuracy on those corners
cvCvtColor(imageL, gray_imageL, CV_BGR2GRAY);
cvFindCornerSubPix(gray_imageL, cornersL, corner_countL,cvSize(11,11),cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
//Draw it
Mat MimageL(imageL);
CimageL=imageL;
cvDrawChessboardCorners(CimageL, Size(nx,nh), cornersL,corner_countL, foundL);
//Right -----------------------------------------------------------------------------------------------
string nameR="R.jpg";
//Find chessboard corners:
int foundR = cvFindChessboardCorners(imageR, Size(nx,nh), cornersR, &corner_countR,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
//Get Subpixel accuracy on those corners
cvCvtColor(imageR, gray_imageR, CV_BGR2GRAY);
cvFindCornerSubPix(gray_imageR, cornersR, corner_countR,cvSize(11,11),cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
//Draw it
Mat MimageR(imageR);
CimageR=imageR;
cvDrawChessboardCorners(CimageR, Size(nx,nh), cornersR,corner_countR, foundR);
cvShowImage( "CalibrationL", CimageL );
cvShowImage( "CalibrationR", CimageR );
if(s>2)
{
if((corner_countL==(nx*nh)) && (corner_countR==(nx*nh)) )
{
nameL.insert(1,Result);
imwrite(nameL,MimageL);
nameR.insert(1,Result);
imwrite(nameR,MimageR);
s++;
}
}
int c = cvWaitKey(15);
if(c == 'p')
{
c = 0;
while(c != 'p' && c != 27)
{
c = cvWaitKey(250);
}
}
if(c == 27)
return 0;
imageL = cvQueryFrame(captureL);
imageR = cvQueryFrame(captureR);
if(s<3)
s++;
}// frame++ end
} // while end
return 0;
}
Also, after it draws the corners, it saves the image with the corners drawn on it. I want to save the images without any changes.
If I understand the problem correctly, it's because you're using a post-increment in your if-statement condition:
if(frame++ % board_dt == 0)
You initialize frame to 0 above, so on the first call the if-statement is effectively testing (0 % 20) == 0, which is true.
You can change the if-statement to use a pre-increment, e.g.
if(++frame % board_dt == 0)
to get the behavior you are looking for.
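For the second issue — saving the images without the drawn corners — one option (a sketch, not part of the answer above) is to take a deep copy of each frame before cvDrawChessboardCorners() draws on its pixel buffer, and write the copies instead. Note that Mat MimageL(imageL) only wraps the existing IplImage buffer, so drawing on CimageL changes MimageL too:
// Deep-copy the raw frames before any drawing happens on their buffers.
cv::Mat cleanL = cv::cvarrToMat(imageL).clone();
cv::Mat cleanR = cv::cvarrToMat(imageR).clone();
cvDrawChessboardCorners(CimageL, Size(nx, nh), cornersL, corner_countL, foundL);
cvDrawChessboardCorners(CimageR, Size(nx, nh), cornersR, corner_countR, foundR);
// ... later, when both views yielded a full set of corners:
imwrite(nameL, cleanL); // saved without the corner overlay
imwrite(nameR, cleanR);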
I am developing some video analytics algorithms using OpenCV. However, after I process a frame and want to display it in a window, the program hangs at the imshow() function. I have searched for this issue online but still cannot find the problem. Here is the code, where I am using multithreading and OpenCV:
void CFeatureExtraction::extract(){
boost::thread OpFlowThread, BGSThread;
while (m_nRun){
ImgPtr frame_ptr;
m_pQueue->wait_and_pop(frame_ptr);
Mat frame1, frame2;
(*frame_ptr).copyTo(frame1);
(*frame_ptr).copyTo(frame2);
OpFlowThread = boost::thread(&COpFlow::op_flow, m_pAlgo,frame1);
BGSThread = boost::thread(&CBGSub::bgsub, m_pBGS, frame2);
OpFlowThread.join();
BGSThread.join();
}
}
Inside the op_flow and bgsub functions I am using imshow() and cvWaitKey() together, but the whole program keeps hanging.
If my question is still not clear to you, please feel free to ask me for more detail.
Here is the detail code of calling imshow():
CFarnebackAlgo::CFarnebackAlgo() : COpFlow()
{
namedWindow("Optical Flow");
}
CFarnebackAlgo::~CFarnebackAlgo()
{
destroyWindow("Optical Flow");
}
int CFarnebackAlgo::op_flow(Mat frame)
{
Mat flow;
cvtColor(frame, gray, COLOR_BGR2GRAY);
if (prevgray.data)
{
calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
for (int y = 0; y < frame.rows; y += 8)
for (int x = 0; x < frame.cols; x += 8){
const Point2f& fxy = flow.at<Point2f>(y, x);
line(frame, Point(x, y), Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), Scalar(0, 255, 0));
circle(frame, Point(x, y), 2, Scalar(0, 255, 0), -1);
}
if (frame.data == NULL){
cout << "No Img!" << endl;
exit(0);
}
imshow("Optical Flow", frame);
waitKey(50);
}
std::swap(prevgray, gray);
return 0;
}
If I put the namedWindow() call in op_flow(), it works. But if I put it in the constructor, it doesn't work and the program freezes. Does anyone know why?
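A likely explanation, though it is not confirmed in the thread: OpenCV's HighGUI is not thread-safe, and on most backends a window has to be created, drawn to, and pumped (via waitKey) from one and the same thread. When the constructor runs namedWindow() on the main thread but op_flow() calls imshow()/waitKey() from a boost::thread, the window's event loop never services that window and the call blocks. Below is a hedged sketch of the usual fix — keep every GUI call on the thread that owns the loop and let the workers only compute — reusing the names from the question:
void CFeatureExtraction::extract() {
    boost::thread OpFlowThread, BGSThread;
    cv::namedWindow("Optical Flow"); // created on the same thread that will show frames
    while (m_nRun) {
        ImgPtr frame_ptr;
        m_pQueue->wait_and_pop(frame_ptr);
        cv::Mat frame1, frame2;
        (*frame_ptr).copyTo(frame1);
        (*frame_ptr).copyTo(frame2);
        // Workers draw into frame1/frame2 (Mat headers share pixel data)
        // but make no HighGUI calls of their own.
        OpFlowThread = boost::thread(&COpFlow::op_flow, m_pAlgo, frame1);
        BGSThread = boost::thread(&CBGSub::bgsub, m_pBGS, frame2);
        OpFlowThread.join();
        BGSThread.join();
        cv::imshow("Optical Flow", frame1); // GUI work stays on this thread
        cv::waitKey(1);
    }
}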