I want to calibrate my stereo camera, so I am trying to capture 10 images of a chessboard from the right and the left camera.
What the code should do:
Capture 10 images of a chessboard from the right and the left camera. The program saves the two images only if both contain clear corner points. It should then wait 20 frames to give me time to change the position of the chessboard.
What is happening:
It captures the 10 good images and saves them, but it does not wait for me to change the position of the chessboard.
This is my code:
int captureImages_stereoCal()
{
    CvCapture* captureL = cvCreateCameraCapture(1);
    assert(captureL);
    waitKey(10000);
    CvCapture* captureR = cvCreateCameraCapture(2);
    assert(captureR);
    waitKey(10000);
    /* Mat imageL;
       Mat imageR; */
    int nx = 8, nh = 5;
    int frame = 0;
    int s = 1;
    int ss;
    CvPoint2D32f* cornersL = new CvPoint2D32f[nx*nh];
    int corner_countL;
    CvPoint2D32f* cornersR = new CvPoint2D32f[nx*nh];
    int corner_countR;
    IplImage *imageL = cvQueryFrame(captureL);
    IplImage *gray_imageL = cvCreateImage(cvGetSize(imageL), 8, 1);
    IplImage *CimageL = cvCreateImage(cvGetSize(imageL), 32, 3);
    IplImage *imageR = cvQueryFrame(captureR);
    IplImage *gray_imageR = cvCreateImage(cvGetSize(imageR), 8, 1);
    IplImage *CimageR = cvCreateImage(cvGetSize(imageL), 32, 3);
    const int board_dt = 20;
    while(s <= 12)
    {
        if(frame++ % board_dt == 0)
        {
            string Result;
            ostringstream convert;
            ss = s - 2;
            convert << ss;
            Result = convert.str();
            //n=(char)s;
            //waitKey(1000);
            //Left ------------------------------------------------------------
            string nameL = "L.jpg";
            //Find chessboard corners:
            int foundL = cvFindChessboardCorners(imageL, Size(nx,nh), cornersL, &corner_countL, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
            //Get subpixel accuracy on those corners
            cvCvtColor(imageL, gray_imageL, CV_BGR2GRAY);
            cvFindCornerSubPix(gray_imageL, cornersL, corner_countL, cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
            //Draw it
            Mat MimageL(imageL);
            CimageL = imageL;
            cvDrawChessboardCorners(CimageL, Size(nx,nh), cornersL, corner_countL, foundL);
            //Right -----------------------------------------------------------
            string nameR = "R.jpg";
            //Find chessboard corners:
            int foundR = cvFindChessboardCorners(imageR, Size(nx,nh), cornersR, &corner_countR, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
            //Get subpixel accuracy on those corners
            cvCvtColor(imageR, gray_imageR, CV_BGR2GRAY);
            cvFindCornerSubPix(gray_imageR, cornersR, corner_countR, cvSize(11,11), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
            //Draw it
            Mat MimageR(imageR);
            CimageR = imageR;
            cvDrawChessboardCorners(CimageR, Size(nx,nh), cornersR, corner_countR, foundR);
            cvShowImage( "CalibrationL", CimageL );
            cvShowImage( "CalibrationR", CimageR );
            if(s > 2)
            {
                if((corner_countL == (nx*nh)) && (corner_countR == (nx*nh)))
                {
                    nameL.insert(1, Result);
                    imwrite(nameL, MimageL);
                    nameR.insert(1, Result);
                    imwrite(nameR, MimageR);
                    s++;
                }
            }
            int c = cvWaitKey(15);
            if(c == 'p')
            {
                c = 0;
                while(c != 'p' && c != 27)
                {
                    c = cvWaitKey(250);
                }
            }
            if(c == 27)
                return 0;
            imageL = cvQueryFrame(captureL);
            imageR = cvQueryFrame(captureR);
            if(s < 3)
                s++;
        } // frame++ end
    } // while end
    return 0;
}
Also, after it draws the corners, it saves the image with the corners drawn on it. I want to save the images without any changes.
If I understand the problem correctly, it's because you're using a post-increment in your if-statement conditional:
if(frame++ % board_dt == 0)
You initialize frame to 0 above, so on the 1st call the if-statement is effectively testing (0 % 20) == 0, which is true.
You can change the if-statement to use a pre-increment, e.g.
if(++frame % board_dt == 0)
to get the behavior you are looking for.
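A minimal sketch of the difference, using a hypothetical counting loop with the same board_dt as the question:

int frame = 0;
const int board_dt = 20;
for (int i = 0; i < 40; i++) {
    // post-increment tests the OLD value, so (frame++ % board_dt == 0)
    // fires immediately on the very first iteration (0 % 20 == 0);
    // pre-increment tests the NEW value, so the first hit is deferred
    // until frame reaches board_dt, i.e. the 20th iteration
    if (++frame % board_dt == 0) {
        // grab and save a board position here
    }
}

For the follow-up about saving images without the drawn corners, one option (not in the original code) is to clone each frame first, e.g. with cvCloneImage() or Mat::clone(), draw the corners on the clone for display, and pass the untouched frame to imwrite.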
I've written some code to describe motion at interest points over an .avi video file.
Here is the code:
#include "opencv2/video/tracking.hpp"
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace cv;
using namespace std;
int main() {
VideoCapture capture("video.avi");
if (!capture.isOpened()) {
cout << "ERROR OPENING VIDEO\n\n";
return(0);
}
double rate = capture.get(CV_CAP_PROP_FPS);
unsigned int numberFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
int width = (unsigned int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
int height = (unsigned int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
unsigned int codec = (unsigned int) capture.get(CV_CAP_PROP_FOURCC);
Mat currentGray;
Mat previousGray;
vector< Point2f > points[2];
vector< Point2f > initial;
vector< Point2f > features;
vector< uchar > status;
vector< float > error;
int maxCorners = 500; // maximum number of features to detect
double qualityLevel = 0.01; // quality level for feature detection
double minDistance = 10; // min distance between two points
Mat frame, output;
VideoWriter createdVideo("output.avi", codec, rate, Size(width,height), 1);
for (unsigned frameCounter = 0; frameCounter < numberFrames; frameCounter++) {
capture >> frame;
if (frame.empty())
break;
imshow("Video", frame);
cvtColor(frame, currentGray, CV_BGR2GRAY);
frame.copyTo(output);
if (points[0].size() <= 10){
goodFeaturesToTrack(currentGray, // the image
features, // the output detected features
maxCorners, // the maximum number of features
qualityLevel, // quality level
minDistance); // min distance between two features
// add the detected features to
// the currently tracked features
points[0].insert(points[0].end(),
features.begin(), features.end());
initial.insert(initial.end(),
features.begin(), features.end());
}
if (previousGray.empty())
currentGray.copyTo(previousGray);
calcOpticalFlowPyrLK(previousGray, currentGray, // 2 consecutive images
points[0], // input point positions in first image
points[1], // output point positions in the 2nd image
status, // tracking success
error); // tracking error
        int k = 0;
        for (int i = 0; i < points[1].size(); i++) {
            // do we keep this point?
            if (status[i] && // if point has moved
                (abs(points[0][i].x - points[1][i].x) +
                 abs(points[0][i].y - points[1][i].y)) > 2) {
                initial[k] = initial[i];
                points[1][k++] = points[1][i];
            }
        }
        points[1].resize(k);
        initial.resize(k);
        for (int i = 0; i < points[1].size(); i++) {
            // draw line and circle
            line(output,
                 initial[i],   // initial position
                 points[1][i], // new position
                 Scalar(0, 255, 0), 2);
            circle(output,
                   points[1][i],
                   2,
                   Scalar(0, 0, 255), -1);
        }
        std::swap(points[1], points[0]);
        cv::swap(previousGray, currentGray);
        createdVideo.write(output);
    }
    waitKey(0);
    return(0);
}
My code tracks the displacement of points frame by frame and keeps each point's first location until the end of the video.
However, I don't want to keep the point locations from the first frame; I want to update them over time, i.e. replace each point's first location with its location in the previous frame, so that huge lines do not appear and only the displacement between two consecutive frames is drawn.
Is there any way of doing this?
Since you only want the position of points in two consecutive frames, just use two vectors: one holding the keypoints from the current frame, and one holding the keypoints from the previous frame. At the end of each iteration, just set the previous points to the current points. Something like this pseudocode:
// first frame
// detect keypoints
prev_frame_points = keypoints

// rest of the frames
for frame in frames:
    // detect keypoints
    curr_frame_points = keypoints
    line(..., prev_frame_points, curr_frame_points, ...)
    prev_frame_points = curr_frame_points
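A minimal C++ sketch of that idea using the same tracking calls as the question (the variable names here are hypothetical, not from the original code):

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

int main() {
    VideoCapture capture("video.avi"); // assumed input file
    Mat frame, gray, prevGray, output;
    std::vector<Point2f> prevPts, currPts;
    std::vector<uchar> status;
    std::vector<float> err;
    while (capture.read(frame)) {
        cvtColor(frame, gray, CV_BGR2GRAY);
        frame.copyTo(output);
        // (re)detect features when too few points survive
        if (prevPts.size() <= 10)
            goodFeaturesToTrack(prevGray.empty() ? gray : prevGray, prevPts, 500, 0.01, 10);
        if (prevGray.empty())
            gray.copyTo(prevGray);
        calcOpticalFlowPyrLK(prevGray, gray, prevPts, currPts, status, err);
        // draw only the displacement between the two most recent frames
        for (size_t i = 0; i < currPts.size(); i++)
            if (status[i])
                line(output, prevPts[i], currPts[i], Scalar(0, 255, 0), 2);
        imshow("Flow", output);
        if (waitKey(30) == 27)
            break;
        // the current frame and points become the previous ones next iteration
        prevPts = currPts;
        gray.copyTo(prevGray);
    }
    return 0;
}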
I have 3 video sources connected to my PC and I want to show them on one screen.
I initially started by putting the video sources next to each other, and that works fine, but I want to be able to enable/disable each video source at run time.
So I want to use the keyboard keys r (right) and l (left) to change which cameras are being shown at the moment.
I want to move the declaration of the following 3 variables outside the while loop, so I can access them in the if-cases and change them.
cv::Mat3b combinedFrame(camRightSize.height, camMiddleSize.width + camRightSize.width);
cv::Mat3b leftSideOfScreen(combinedFrame, Rect(0, 0, camMiddleSize.width, camMiddleSize.height));
cameraMiddleFrameMirroredResize.copyTo(leftSideOfScreen);
cv::Mat3b rightSideOfScreen(combinedFrame, Rect(camMiddleSize.width, 0, camRightSize.width, camRightSize.height));
Below is my whole code:
int main(int argc, char **argv) {
    int combinedScreenWidth = 1440;
    int combinedScreenHeight = 540;
    int rearCameraBiggerByThis = 200;
    int combinedScreenWidthHalv = combinedScreenWidth / 2;
    bool showRight = true;
    bool showLeft = false;
    // initialize and allocate memory to load the video stream from camera
    cv::VideoCapture cameraRight(0);  // RIGHT
    cv::VideoCapture cameraMiddle(3); // REAR
    cv::VideoCapture cameraLeft(3);   // LEFT
    if (!cameraRight.isOpened()) return 1;
    if (!cameraMiddle.isOpened()) return 1;
    if (!cameraLeft.isOpened()) return 1;
    cv::Mat3b cameraRightFrame;
    cv::Mat3b cameraMiddleFrame;
    cv::Mat3b cameraLeftFrame;
    cv::Mat3b cameraRightFrameMirrored;
    cv::Mat3b cameraMiddleFrameMirrored;
    cv::Mat3b cameraLeftFrameMirrored;
    Size camRightSize;
    Size camMiddleSize;
    Size camLeftSize;
    cv::Mat3b cameraRightFrameMirroredResize;
    cv::Mat3b cameraMiddleFrameMirroredResize;
    cv::Mat3b cameraLeftFrameMirroredResize;
    while (true) {
        // Grab and retrieve each frame of the video sequentially
        cameraRight >> cameraRightFrame;
        cameraMiddle >> cameraMiddleFrame;
        cameraLeft >> cameraLeftFrame;
        // Mirror
        cv::flip(cameraRightFrame, cameraRightFrameMirrored, 1);
        cv::flip(cameraMiddleFrame, cameraMiddleFrameMirrored, 1);
        cv::flip(cameraLeftFrame, cameraLeftFrameMirrored, 1);
        // Resize
        camRightSize = cameraRightFrame.size();
        camMiddleSize = cameraMiddleFrame.size();
        camLeftSize = cameraLeftFrame.size();
        resize(cameraMiddleFrameMirrored, cameraMiddleFrameMirroredResize, Size(combinedScreenWidthHalv + rearCameraBiggerByThis, combinedScreenHeight));
        resize(cameraRightFrameMirrored, cameraRightFrameMirroredResize, Size(combinedScreenWidthHalv - rearCameraBiggerByThis, combinedScreenHeight));
        // Compilation
        camRightSize = cameraRightFrameMirroredResize.size();
        camMiddleSize = cameraMiddleFrameMirroredResize.size();
        camLeftSize = cameraLeftFrameMirroredResize.size();
        if (showRight && showLeft) { // LEFT + REAR + RIGHT
        } else if (showRight) {      // REAR + RIGHT
        } else if (showLeft) {       // LEFT + REAR
        } else {                     // REAR
        }
        cv::Mat3b combinedFrame(camRightSize.height, camMiddleSize.width + camRightSize.width);
        cv::Mat3b leftSideOfScreen(combinedFrame, Rect(0, 0, camMiddleSize.width, camMiddleSize.height));
        cameraMiddleFrameMirroredResize.copyTo(leftSideOfScreen);
        cv::Mat3b rightSideOfScreen(combinedFrame, Rect(camMiddleSize.width, 0, camRightSize.width, camRightSize.height));
        cameraRightFrameMirroredResize.copyTo(rightSideOfScreen);
        // declare windows
        cv::namedWindow("Combined", CV_WINDOW_NORMAL);
        cv::setWindowProperty("Combined", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        cv::putText(combinedFrame, "REAR", cv::Point(500, 50), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 255), 2);
        cv::putText(combinedFrame, "RIGHT", cv::Point(950, 50), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 255, 255), 2);
        cv::imshow("Combined", combinedFrame); // 1440 x 540 screen size
        //cv::imshow("Right Cam", cameraRightFrame);
        //cv::imshow("Middle Cam", cameraMiddleFrame);
        //cv::imshow("Left Cam", cameraLeftFrame);
        // wait for 1 millisecond
        int c = cvWaitKey(1);
        // exit the loop if the user presses the "Esc" key (ASCII value of "Esc" is 27)
        if (27 == char(c)) {
            break;
        }
        else if ('r' == char(c)) {
            showRight = !showRight;
        }
        else if ('l' == char(c)) {
            showLeft = !showLeft;
        }
    }
    return 0;
}
According to your description, I think what you want to write is:
combinedFrame(Rect(0, 0, camMiddleSize.width, camMiddleSize.height)).copyTo(leftSideOfScreen);
That is:
create a cv::Mat from another one (a call to cv::Mat::copyTo(cv::Mat&))
the "source matrix" is obtained by extracting a rectangle from combinedFrame: combinedFrame(Rect(0, 0, camMiddleSize.width, camMiddleSize.height))
I am trying to set an ROI on a real-time camera feed and copy a picture into the ROI.
However, I have tried many methods from the Internet, but without success.
Part of my code is shown below:
while(!protonect_shutdown)
{
    listener.waitForNewFrame(frames);
    libfreenect2::Frame *ir = frames[libfreenect2::Frame::Ir];
    //! [loop start]
    cv::Mat(ir->height, ir->width, CV_32FC1, ir->data).copyTo(irmat);
    Mat img = imread("button.png");
    cv::Rect r(1, 1, 100, 200);
    cv::Mat dstroi = img(Rect(0, 0, r.width, r.height));
    irmat(r).convertTo(dstroi, dstroi.type(), 1, 0);
    cv::imshow("ir", irmat / 4500.0f);
    int key = cv::waitKey(1);
    protonect_shutdown = protonect_shutdown || (key > 0 && ((key & 0xFF) == 27));
    listener.release(frames);
}
My real-time camera shows the video normally, and the program reports no errors, but the picture is not shown in the ROI.
Does anyone have any ideas?
Any help is appreciated.
I hope I understood your question right: you want an image displayed inside a rectangle on the video feed.
I have created a rectangle of size 100x200 on the video feed and display an image in that rectangle.
Here is the code:
int main()
{
    Mat frame, overlayFrame;
    VideoCapture cap("video.avi"); // use 0 for webcam
    overlayFrame = imread("picture.jpg");
    if (!cap.isOpened())
    {
        cout << "Could not capture video";
        return -1;
    }
    Rect roi(1, 1, 100, 200); // a rectangle of size 100x200 at point (1,1) on the video feed
    namedWindow("CameraFeed");
    while ((cap.get(CV_CAP_PROP_POS_FRAMES) + 1) < cap.get(CV_CAP_PROP_FRAME_COUNT))
    {
        cap.read(frame);
        resize(overlayFrame, overlayFrame, Size(roi.width, roi.height)); // resize the image to fit in the roi
        overlayFrame.copyTo(frame(roi)); // copy the picture into the roi
        imshow("CameraFeed", frame);
        if (waitKey(27) >= 0)
            break;
    }
    destroyAllWindows();
    return 0;
}
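For the asker's libfreenect2 loop specifically, note that irmat(r).convertTo(dstroi, ...) copies from the frame's ROI into the picture, not the other way around, and that the IR frame is CV_32FC1 while the loaded picture is 8-bit. A hedged adaptation (the grayscale load, the resize, and the 4500 display scale are assumptions based on the posted code):

// inside the capture loop, after irmat has been filled:
cv::Mat button = cv::imread("button.png", 0); // load single channel, like irmat
cv::Rect r(1, 1, 100, 200);
cv::resize(button, button, r.size());
cv::Mat buttonF;
button.convertTo(buttonF, CV_32FC1, 4500.0 / 255.0); // match irmat's type and display scale
buttonF.copyTo(irmat(r)); // copy INTO the ROI of the frame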
I am trying to detect colored balls, like PS3 Move controller balls, from a 2 m distance. I have 10 cameras in the same room, hanging from the ceiling. The room is dark and the balls have LEDs inside. I have 4-5 balls (red, blue, green, yellow, pink). I want to track their positions with OpenCV. What is the right method for doing this in OpenCV? Can you give a link or example for this?
I use this code, but I have a latency problem. When I comment out my trackFilteredObject line there is no lag, but when using this code I have a lot of latency. I can't understand why this happens: my normal CPU usage is ~15% and RAM usage 6.3 GB/15 GB (40%); when running this code, CPU usage is ~20-23% and RAM usage 6.4 GB. I don't think it is a CPU/RAM performance issue. What am I doing wrong?
Video: https://www.youtube.com/watch?v=_BKtJpPrkO4 (You can see the lag in the first 10 seconds. After 10 seconds I comment out the tracking code.)
Note: kameraSayisi means camera count. My track function:
void trackFilteredObject(Object theObject, Mat threshold, Mat HSV, Mat &cameraFeed)
{
    // max number of objects to be detected in frame
    const int FRAME_WIDTH = 5120;
    const int FRAME_HEIGHT = 480;
    const int MAX_NUM_OBJECTS = 50;
    // minimum and maximum object area
    const int MIN_OBJECT_AREA = 10*10;
    const int MAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;
    vector<Object> objects;
    Mat temp;
    threshold.copyTo(temp);
    // these two vectors are needed for the output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    // find contours of filtered image using the OpenCV findContours function
    findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    // use the moments method to find our filtered object
    double refArea = 0;
    bool objectFound = false;
    if (hierarchy.size() > 0) {
        int numObjects = hierarchy.size();
        // if the number of objects is greater than MAX_NUM_OBJECTS we have a noisy filter
        if (numObjects < MAX_NUM_OBJECTS) {
            for (int index = 0; index >= 0; index = hierarchy[index][0]) {
                Moments moment = moments((cv::Mat)contours[index]);
                double area = moment.m00;
                // if the area is less than 20 px by 20 px then it is probably just noise;
                // if the area is the same as 3/2 of the image size, it's probably just a bad filter;
                // we only want the object with the largest area, so we save a reference area each
                // iteration and compare it to the area in the next iteration
                if (area > MIN_OBJECT_AREA) {
                    Object object;
                    object.setXPos(moment.m10/area);
                    object.setYPos(moment.m01/area);
                    object.setType(theObject.getType());
                    object.setColor(theObject.getColor());
                    objects.push_back(object);
                    objectFound = true;
                } else objectFound = false;
            }
            // let user know you found an object
            if (objectFound == true) {
                // draw object location on screen
                drawObject(objects, cameraFeed, temp, contours, hierarchy);
            }
        } else putText(cameraFeed, "TOO MUCH NOISE! ADJUST FILTER", Point(0,50), 1, 2, Scalar(0,0,255), 2);
    }
}
Main Code:
void Run()
{
    int w, h;
    _fps = 30;
    IplImage *pCapImage[kameraSayisi];
    IplImage *pDisplayImage;
    PBYTE pCapBuffer = NULL;
    // Create camera instances
    for(int i = 0; i < kameraSayisi; i++)
    {
        _cam[i] = CLEyeCreateCamera(_cameraGUID[i], _mode, _resolution, _fps);
        if(_cam[i] == NULL) return;
        // Get camera frame dimensions
        CLEyeCameraGetFrameDimensions(_cam[i], w, h);
        // Create the OpenCV images
        pCapImage[i] = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
        // Set some camera parameters
        CLEyeSetCameraParameter(_cam[i], CLEYE_GAIN, 0);
        CLEyeSetCameraParameter(_cam[i], CLEYE_EXPOSURE, 511);
        // Start capturing
        CLEyeCameraStart(_cam[i]);
    }
    pDisplayImage = cvCreateImage(cvSize(w*kameraSayisi / 2, h * kameraSayisi/4), IPL_DEPTH_8U, 1);
    if(_cam == NULL) return;
    int iLastX = -1;
    int iLastY = -1;
    // Capture a temporary image from the camera
    bool trackObjects = true;
    bool useMorphOps = true;
    Mat HSV;
    // Create a black image with the same size as the camera output
    Mat imgLines;
    // imgLines = Mat::zeros( cvarrToMat(image).size(), CV_8UC3 );
    Mat threshold;
    // x and y values for the location of the object
    int x = 0, y = 0;
    bool calibrationMode = false;
    if(calibrationMode){
        // create slider bars for HSV filtering
        createTrackbars();
    }
    // image capturing loop
    while(_running)
    {
        PBYTE pCapBuffer;
        // Capture camera images
        for(int i = 0; i < kameraSayisi; i++)
        {
            cvGetImageRawData(pCapImage[i], &pCapBuffer);
            CLEyeCameraGetFrame(_cam[i], pCapBuffer, (i==0) ? 2000 : 0);
        }
        // Display stereo image
        for(int i = 0; i < kameraSayisi; i++)
        {
            cvSetImageROI(pDisplayImage, cvRect(w * (i%4), i/4 * h, w, h));
            cvCopy(pCapImage[i], pDisplayImage);
        }
        cvResetImageROI(pDisplayImage);
        Mat imgOriginal;
        Mat imgConverted = cvarrToMat(pDisplayImage);
        if(calibrationMode==true)
        {
            // need to find the appropriate color range values
            // calibrationMode must be false
            // if in calibration mode, we track objects based on the HSV slider values
            //cvtColor(imgOriginal,imgOriginal,CV_BayerRG2RGB);
            cvtColor(imgConverted, imgOriginal, CV_BayerGB2BGR);
            cvtColor(imgOriginal, HSV, CV_BGR2HSV);
            inRange(HSV, Scalar(H_MIN,S_MIN,V_MIN), Scalar(H_MAX,S_MAX,V_MAX), threshold);
            morphOps(threshold);
            imshow(_windowName + 'T', threshold);
            // the following is for Canny edge detection
            /// Create a matrix of the same type and size as src (for dst)
            dst.create(imgOriginal.size(), src.type());
            /// Convert the image to grayscale
            cvtColor(imgOriginal, src_gray, CV_BGR2GRAY);
            /// Create a window
            namedWindow(window_name, CV_WINDOW_AUTOSIZE);
            /// Create a trackbar for the user to enter a threshold
            // createTrackbar("Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold);
            /// Show the image
            Object a = Object(H_MIN,S_MIN,V_MIN,H_MAX,S_MAX,V_MAX);
            trackFilteredObject(a, threshold, HSV, imgOriginal);
        }
        else{
            // we can use their member functions/information
            Object blue("blue"), yellow("yellow"), red("red"), orange("orange"), white("white");
            cvtColor(imgConverted, imgOriginal, CV_BayerGB2BGR);
            // first find blue objects
            cvtColor(imgOriginal, HSV, CV_RGB2HSV);
            inRange(HSV, blue.getHSVmin(), blue.getHSVmax(), threshold);
            morphOps(threshold);
            // then yellows
            inRange(HSV, yellow.getHSVmin(), yellow.getHSVmax(), threshold);
            // then reds
            inRange(HSV, red.getHSVmin(), red.getHSVmax(), threshold);
            // then white
            inRange(HSV, white.getHSVmin(), white.getHSVmax(), threshold);
            // then orange
            inRange(HSV, orange.getHSVmin(), orange.getHSVmax(), threshold);
            trackFilteredObject(yellow, threshold, HSV, imgOriginal);
            trackFilteredObject(white, threshold, HSV, imgOriginal);
            trackFilteredObject(red, threshold, HSV, imgOriginal);
            trackFilteredObject(blue, threshold, HSV, imgOriginal);
            trackFilteredObject(orange, threshold, HSV, imgOriginal);
        }
        // delay so the screen can refresh;
        // the image will not appear without this waitKey() command
        if (cvWaitKey(30) == 27) // wait 30 ms for an 'Esc' key press; if pressed, break the loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
        // cvShowImage(_windowName, image);
        imshow(_windowName, imgOriginal);
    }
    for(int i = 0; i < kameraSayisi; i++)
    {
        // Stop camera capture
        CLEyeCameraStop(_cam[i]);
        // Destroy camera object
        CLEyeDestroyCamera(_cam[i]);
        // Destroy the allocated OpenCV image
        cvReleaseImage(&pCapImage[i]);
        _cam[i] = NULL;
    }
}
I have a C++/CLI and OpenCV program that is running fine, but it has a memory leak in part of it. I have included the part where the memory leak is worst.
I already fixed the leaks in contour0 and contour1, and that reduced the memory leak by 1/3, but there is still a leak somewhere. Is there a way to reduce the memory leak further? Thanks.
// capture video frame and convert to grayscale
const int nFrames0 = (int) cvGetCaptureProperty( capture0, CV_CAP_PROP_FRAME_COUNT );
printf("LICENSECOUNT=%d\n", nFrames0);
img = cvQueryFrame( capture0 );
IplImage* frame1;
cvReleaseImage(&frame1);
frame1 = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvConvertImage(img, frame1, 0);
// create blank images for storing
cvReleaseImage(&img00);
img00 = cvCreateImage(cvSize(img->width, img->height), img->depth, 3);
cvReleaseImage(&img10);
img10 = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvReleaseImage(&img20);
img20 = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvReleaseImage(&img30);
img30 = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvReleaseImage(&imggray1);
imggray1 = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvReleaseImage(&imgdiff);
imgdiff = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
cvReleaseImage(&imgco);
imgco = cvCreateImage(cvSize(img->width, img->height), img->depth, 1);
int flagp = 1;
int licf = 0;
CvSeq *contour0;
CvSeq* result0;
storage0 = cvCreateMemStorage(0);
CvRect r0;
// skip a few frames
for (int i = 0; i < cf1-1; i++)
    img = cvQueryFrame( capture0 );
// go through all frames to find frames that contain a square with certain dimensions
while ( key != 'q')
{
    img = cvQueryFrame( capture0 );
    if( !img ) break;
    cvConvertImage(img, img00, 0);
    cvSetImageROI(img, cvRect(0, img->height-35, img->width, 35));
    cvZero(img);
    cvResetImageROI(img);
    cvConvertImage(img, img10, 0);
    cvConvertImage(img, img20, 0);
    cvConvertImage(img, imggray1, 0);
    int flagp = 1;
    cvAbsDiff(img10, frame1, imgdiff);
    cvThreshold(imgdiff, imgdiff, 60, 255, CV_THRESH_BINARY);
    mem0 = cvCreateMemStorage(0);
    CvSeq *ptr, *polygon;
    // vary threshold levels for segmentation
    for (int thr = 1; thr < 11; thr++)
    {
        // do morphology if segmentation does not work
        if (thr == 10)
        {
            cvEqualizeHist( img20, img10 );
            cvSetImageROI(img10, cvRect(0, 0, 20, img->height));
            cvZero(img10);
            cvResetImageROI(img10);
            cvMorphologyEx(img20, img10, img20, cvCreateStructuringElementEx(20,10,10,5,CV_SHAPE_RECT,NULL), CV_MOP_TOPHAT, 1);
            IplImage *frame_copy1 = 0;
            frame_copy1 = cvCreateImage(cvSize(img10->width, img10->height), IPL_DEPTH_16S, 1);
            cvSobel(img10, frame_copy1, 1, 0, 3);
            cvConvertScaleAbs(frame_copy1, img10, 1, 0);
            cvSetImageROI(img10, cvRect(0, 0, 20, img->height));
            cvZero(img10);
            cvResetImageROI(img10);
            cvSetImageROI(img10, cvRect(img->width-20, 0, 20, img->height));
            cvZero(img10);
            cvResetImageROI(img10);
            cvMorphologyEx(img10, img10, img20, cvCreateStructuringElementEx(16,5,8,3,CV_SHAPE_RECT,NULL), CV_MOP_CLOSE, 1);
            cvThreshold(img10, img10, 180, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
            cvErode(img10, img10, cvCreateStructuringElementEx(10,5,5,2,CV_SHAPE_RECT,NULL), 1);
            cvErode(img10, img10, cvCreateStructuringElementEx(5,10,2,5,CV_SHAPE_RECT,NULL), 1);
            cvDilate(img10, img10, cvCreateStructuringElementEx(5,10,2,5,CV_SHAPE_RECT,NULL), 1);
            cvDilate(img10, img10, cvCreateStructuringElementEx(10,5,5,2,CV_SHAPE_RECT,NULL), 1);
            cvErode(img10, img10, cvCreateStructuringElementEx(10,5,5,2,CV_SHAPE_RECT,NULL), 2);
            cvDilate(img10, img10, cvCreateStructuringElementEx(10,5,5,2,CV_SHAPE_RECT,NULL), 1);
        }
        // segmentation
        else
        {
            cvThreshold(img20, img10, thr*255/11, 255, CV_THRESH_BINARY);
            cvDilate(img10, img10, cvCreateStructuringElementEx(10,5,5,2,CV_SHAPE_RECT,NULL), 1);
            cvDilate(img10, img10, cvCreateStructuringElementEx(20,30,10,15,CV_SHAPE_RECT,NULL), 1);
        }
        // trim the sides of the image
        cvSetImageROI(img10, cvRect(0, 0, 20, img->height));
        cvZero(img10);
        cvResetImageROI(img10);
        cvSetImageROI(img10, cvRect(img->width-20, 0, 20, img->height));
        cvZero(img10);
        cvResetImageROI(img10);
        cvReleaseImage(&imgco);
        imgco = cvCloneImage(img10);
        // find contours to find squares with certain dimensions
        cvRelease((void**)&contour0);
        int Nc0;
        Nc0 = cvFindContours(imgco, storage0, &contour0, sizeof(CvContour),
                             CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
        float k;
        int white = 0;
        while( contour0 )
        {
            r0 = cvBoundingRect(contour0, 0);
            double s, t;
            if( ((r0.width*r0.height)>2000 || (r0.width*r0.height && thr==10)>1000) && (r0.width*r0.height) < 40000 && (float(r0.width)/float(r0.height))>1.7 && (float(r0.width)/float(r0.height))<5 )
            {
                k = 0.8;
                if (thr==10 && licf<2)
                    k = 0.6;
                cvSetImageROI(img10, r0);
                cc = cvCountNonZero(img10);
                cvResetImageROI(img10);
                // if area of contour is a percentage of area of rectangle surrounding contour
                if (cc > k*r0.width*r0.height && (cvCountNonZero(imgdiff) > 10000))
                {
                    cvSetImageROI(img, cvRect(0, img->height-35, img->width, 35));
                    cvSet(img, cvScalar(255,255,255));
                    cvResetImageROI(img);
                    // process the image contained inside the contour area
                    cvSetImageROI(img, cvRect(r0.x-5, r0.y-10, r0.width+10, r0.height+20));
                    img30 = cvCreateImage( cvGetSize( img), IPL_DEPTH_8U, 1);
                    cvCvtColor( img, img30, CV_RGB2GRAY );
                    IplImage* img_temp = cvCreateImage(cvSize(2*r0.width, 2*r0.height+20), img->depth, 1);
                    IplImage* img_tempo = cvCreateImage(cvSize(2*r0.width, 2*r0.height+20), img->depth, 1);
                    cvResize(img30, img_tempo);
                    CvMemStorage *storage1;
                    CvSeq *contour1;
                    CvSeq* result1;
                    storage1 = cvCreateMemStorage(0);
                    CvRect r1;
                    // segment inside squares; check if square contains letters or numbers with certain dimensions
                    for (int th = 20; th < 200; th += 5)
                    {
                        cvThreshold(img_tempo, img_temp, th, 255, CV_THRESH_BINARY);
                        cvThreshold(img_temp, img_temp, 0, 255, CV_THRESH_BINARY_INV);
                        {
                            cvErode(img_temp, img_temp);
                            cvDilate(img_temp, img_temp);
                            cvErode(img_temp, img_temp);
                        }
                        cvResize(img_temp, img30);
                        cvRelease((void**)&contour1);
                        int Nc = cvFindContours(img30, storage1, &contour1, sizeof(CvContour),
                                                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
                        int count = 0;
                        while( contour1 )
                        {
                            r1 = cvBoundingRect(contour1, 0);
                            int s_y1av = 0;
                            int s_y2av = 0;
                            int s_x1av = 0;
                            {
                                int s_x1 = r1.x;
                                int s_y1 = r1.y;
                                float width1 = r1.width;
                                float height1 = r1.height;
                                float ratio1 = width1/height1;
                                // if contours match certain dimensions
                                if (ratio1 > 0.05 && ratio1 < 1 && height1 > 0.3*r0.height && width1 > 0.05*r0.width && width1 < 0.3*r0.width && width1*height1 > 60 && width1*height1 < 2000)
                                {
                                    count += 1;
                                }
                                s_y1av = s_y1;
                                s_y2av = s_y1 + height1;
                            }
                            contour1 = contour1->h_next;
                        }
                        // if there are more than 3 letters/numbers and less than 9
                        if (count >= 3 && count < 9)
                        {
                            th = 200;
                            thr = 11;
                            if (thr != 10)
                                licf = 1;
                            if (a)
                            {
                                cvNamedWindow( "license", 1 );
                                cvShowImage( "license", img00 );
                                cvWaitKey(1);
                            }
                            int jpeg_params[] = { CV_IMWRITE_JPEG_QUALITY, 80, 0 };
                            CvMat* buf0 = cvEncodeImage(".jpeg", img00, jpeg_params);
                            int img_sz = buf0->width * buf0->height;
                            array<Byte>^ hh = gcnew array<Byte>(img_sz);
                            Marshal::Copy( (IntPtr)buf0->data.ptr, hh, 0, img_sz );
                            if (!myResult->TryGetValue("PLATE", thisList4))
                            {
                                thisList4 = gcnew List<array<Byte>^>();
                                myResult->Add("PLATE", thisList4);
                            }
                            thisList4->Add(hh);
                        }
                        cvResetImageROI(img);
                    }
                }
            }
            contour0 = contour0->h_next;
        }
    }
}
Using a memory leak detection tool such as Valgrind could be helpful, and is a good way to start debugging as well.
The newer OpenCV C++ interface automatically handles memory for you - allocations and deallocations. You should look at a sample in the samples/cpp folder and take it as a model.
With it, you can forget about memory leaks.
A part of your code written with the new interface would look like:
VideoCapture cap("SomeVideo.avi");
if(!cap.isOpened())
    return 0;
const int nFrames = (int) cap.get(CV_CAP_PROP_FRAME_COUNT);
...
cv::Mat img;
cap >> img;
You should keep in mind that all the functions and data types that start with cv or Cv, like CvSeq, are from the C interface, and each has a better counterpart in C++.
For example:
IplImage -> cv::Mat
CvPoint -> cv::Point
CvSeq -> std::vector<>
etc.
Most of the functions in the new interface keep the same name, just without the "cv" prefix; the list above covers the main exceptions to the rule.
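For instance, the CvSeq-based contour loop from the question could be rewritten with std::vector, which frees its memory automatically; a hedged sketch (the input file and the size-filter value are placeholders):

#include <opencv2/opencv.hpp>
#include <vector>

int main() {
    cv::Mat binary = cv::imread("plate.png", 0); // placeholder input, assumed already binarized
    std::vector<std::vector<cv::Point> > contours; // replaces CvSeq* + CvMemStorage
    cv::findContours(binary, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); i++) {
        cv::Rect r = cv::boundingRect(contours[i]); // replaces cvBoundingRect
        if (r.width * r.height > 2000) { // same kind of size filter as the question
            // ... process the candidate region ...
        }
    }
    return 0; // contours and binary are released automatically here
}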
By the way, some of your operations seem to be redundant or inefficient. You should look carefully to see which of them are needed, and also reuse some matrices in order to minimize memory allocations; a sketch of that pattern follows.
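Reuse here means allocating once and letting OpenCV write into the same buffer on every iteration; a small self-contained sketch of the pattern, loosely mirroring the question's absdiff/threshold pipeline (camera index and threshold value are illustrative):

#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cap(0); // placeholder source
    if (!cap.isOpened()) return 1;
    cv::Mat frame, gray, diff, background;
    cap >> frame;
    if (frame.empty()) return 1;
    cv::cvtColor(frame, background, CV_BGR2GRAY); // first frame as reference
    while (cap.read(frame)) {
        // gray and diff are allocated on the first pass only; afterwards
        // cvtColor/absdiff/threshold reuse the existing buffers because the
        // size and type already match, so no per-frame allocation occurs
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        cv::absdiff(gray, background, diff);
        cv::threshold(diff, diff, 60, 255, CV_THRESH_BINARY);
        cv::imshow("diff", diff);
        if (cv::waitKey(30) == 27) break;
    }
    return 0;
}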
I would suggest taking a look at the new, improved smart pointers in C++11. They won't provide automatic garbage collection, but at least they deal with the pain of C++ memory management. You can also take a look at JavaCV; it is just a wrapper, but it takes away some of the pain of the memory leaks.
If you are not using the latest C++ standard, then take a look at auto_ptr. Otherwise, it could be a bug in OpenCV.
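As a sketch of the C++11 smart-pointer idea applied to the C API types in the question (assuming you must stay on IplImage rather than switching to cv::Mat), a std::unique_ptr with a custom deleter guarantees cvReleaseImage runs on every path:

#include <opencv2/opencv.hpp>
#include <memory>

// deleter that forwards to the C API's release function
struct IplImageDeleter {
    void operator()(IplImage* img) const {
        if (img) cvReleaseImage(&img);
    }
};

using IplImagePtr = std::unique_ptr<IplImage, IplImageDeleter>;

int main() {
    IplImagePtr frame(cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1));
    cvZero(frame.get()); // use .get() wherever a raw IplImage* is expected
    // no cvReleaseImage call needed: the deleter runs when frame goes out of
    // scope, even on early returns or exceptions
    return 0;
}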