OpenCV VideoCapture Output image cut to top left quarter - c++

I'm trying to integrate an IDS uEye camera with OpenCV, and it mostly works so far.
The problem I'm facing is that when I use the IDS SDK to view the camera image, I get the full image. But using OpenCV's VideoCapture, I only get the top-left quarter of the image.
To clarify: the full image would be the entire rectangle, and what I get from VideoCapture is the top-left quarter only.
I've already tried to adjust the image width and height via cap.set, and since the VideoCapture call comes after setting the uEye camera's parameters, I'm fairly certain it's not a settings issue with the camera but something to do with VideoCapture itself.
char strCamFileName[256];
char* pcImageMemory;
int memId;
int nRet = 0;
SENSORINFO sInfo;
IplImage* img;
HIDS hCam = 0; // index 0 means taking first camera available
RECT rc;
MSG msg;
Mat frame(MaxImageSizeY, MaxImageSizeX, CV_8UC1);
nRet = is_InitCamera(&hCam, hWndDisplay);
if (nRet != IS_SUCCESS)
{
cout << endl << "Error Connecting to Camera" << endl;
cout << "Closing program..." << endl;
return 0;
}
else
{
cout << endl << "Camera initialisation was successful!" << endl << endl;
}
// you can query information about the sensor type of the camera
nRet = is_GetSensorInfo(hCam, &sInfo);
if (nRet == IS_SUCCESS)
{
cout << "Cameramodel: \t\t" << sInfo.strSensorName << endl;
cout << "Maximum image width: \t" << sInfo.nMaxWidth << endl;
cout << "Maximum image height: \t" << sInfo.nMaxHeight << endl << endl << endl;
}
MaxImageSizeX = sInfo.nMaxWidth;
MaxImageSizeY = sInfo.nMaxHeight;
DisplayWidth = MaxImageSizeX;
DisplayHeight = MaxImageSizeY;
int nColorMode = IS_COLORMODE_CBYCRY;
int nBitsPerPixel = 32;
// Get number of available formats and size of list
UINT count;
UINT bytesNeeded = sizeof(IMAGE_FORMAT_LIST);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count));
bytesNeeded += (count - 1) * sizeof(IMAGE_FORMAT_INFO);
void* ptr = malloc(bytesNeeded);
// Create and fill list
IMAGE_FORMAT_LIST* pformatList = (IMAGE_FORMAT_LIST*)ptr;
pformatList->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
pformatList->nNumListElements = count;
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_LIST, pformatList, bytesNeeded);
// Prepare for creating image buffers
char* pMem = NULL;
int memID = 0;
// Set each format and then capture an image
IMAGE_FORMAT_INFO formatInfo;
// Allocate image mem for current format, set format
nRet = is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pMem, &memID);
nRet = is_SetImageMem(hCam, pMem, memID);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatInfo.nFormatID, sizeof(formatInfo.nFormatID));
// Sets the color mode to be used when image data are saved or displayed by the graphics card
is_SetColorMode(hCam, nColorMode);
// allocates an image memory for an image, activates it and sets the way in which the images will be displayed on the screen
int nMemoryId;
is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pcImageMemory, &nMemoryId);
is_SetImageMem(hCam, pcImageMemory, nMemoryId);
is_SetDisplayMode(hCam, IS_SET_DM_DIB);
is_HotPixel(hCam, IS_HOTPIXEL_DISABLE_CORRECTION, NULL, NULL);
IS_RECT AAOI; // IS_RECT type variable for Auto AOI parameters
AAOI.s32X = MaxImageSizeX / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Width = MaxImageSizeX / 3;
AAOI.s32Y = MaxImageSizeY / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Height = MaxImageSizeY / 3;
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
VideoCapture cap; //--- INITIALIZE VIDEOCAPTURE
int deviceID = 0; // 0 = open default camera
int apiID = cv::CAP_ANY; // 0 = autodetect default API
if (cap.open(deviceID, apiID))
{
cout << "cap opened" << endl;
}
else
{
cout << "cap not opened" << endl;
}
cout << "Press 1 to capture image" << endl
<< "Press 2 to use (last) captured image" << endl;
cap.read(frame);
From what I know, VideoCapture should be able to obtain the entire image from the camera, right?
I'm honestly just really confused why VideoCapture cuts off 3/4 of the image, and I would appreciate any help.

Alright, I found out the problem...
Again, I left out too much code in the original post (because there's a lot of irrelevant code related to USB stuff), so I'll include the most important part I left out here:
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
//// Acquires a single image from the camera
//is_FreezeVideo(hCam, IS_WAIT);
//// Output an image from an image memory in the specified window
//int nRenderMode = IS_RENDER_FIT_TO_WINDOW;
//is_RenderBitmap(hCam, nMemoryId, hWndDisplay, nRenderMode);
is_ExitCamera(hCam); // exit camera so that OpenCV can access as camera parameters have been set
CalibSet CS; // declaring variable 'CS' under the class 'CalibSet'
Mat livemap1, livemap2;
FileStorage tfs(inputCalibFile, FileStorage::READ); // Read the settings
if (!tfs.isOpened())
{
cout << "Could not open the calibration file: \"" << inputCalibFile << "\"" << endl;
return -1;
}
tfs["camera_matrix"] >> CS.CamMat;
tfs["distortion_coefficients"] >> CS.DistCoeff;
tfs["image_width"] >> CS.image.width;
tfs["image_height"] >> CS.image.height;
tfs.release(); // close Settings file
So, basically, the class CalibSet holds values from a .xml file that stores the results of undistortion calibration.
More about that here Camera calibration data retrieval
But the issue that prevented cap.set from working was likely these last few lines:
tfs["image_width"] >> CS.image.width; and tfs["image_height"] >> CS.image.height;, which took the values in "image_width" and "image_height" and stored them in the respective variables in the class CalibSet.
And guess what... the width and height in the .xml file were 640x480...
I modified that portion of the .xml to the intended 1280x1024, and the live feed from the camera was fixed; I finally got the full image instead of the 1/4 that I got before.
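For reference, here is a minimal sketch of the fixed flow (assuming the corrected calibration values are read into CS exactly as above; the property names are standard OpenCV, but I have not re-tested this exact snippet):
// after reading the corrected .xml, hand the real sensor size to the capture
cap.set(cv::CAP_PROP_FRAME_WIDTH, CS.image.width);   // 1280 after the fix
cap.set(cv::CAP_PROP_FRAME_HEIGHT, CS.image.height); // 1024 after the fix
cap.read(frame); // frame is now the full 1280x1024 image instead of a quarter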

Related

How to disable shadow detection in MoG2

I am using C++ and OpenCV 2.3.1 for background subtraction. I have tried many times to change the parameters of MOG2 in order to disable the shadow detection feature, and I have also tried what other people suggest on the internet. However, shadow detection is still enabled.
Could you please tell me how to disable it?
See the sample code and the generated mask below.
//opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG2; //fg mask generated by the MOG2 method
Ptr<BackgroundSubtractor> pMOG2; //MOG2 background subtractor
int keyboard; //input from keyboard
//new variables
int history = 1250;
float varThreshold = 16;
bool bShadowDetection = true;
/*
//added to remove the shadow
unsigned char nShadowDetection = 0;
float fTau = 0.5;
//static const unsigned char nShadowDetection =( unsigned char)0;
*/
// Function Headers
void help();
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process images (-img)." << endl
<< "Usage:" << endl
<< "./bs -img <image filename>}" << endl
<< "for example: ./bs -img /data/images/1.png" << endl
<< endl;
}
// morphological operation
void morphOps(Mat &thresh){
//create structuring elements that will be used to "dilate" and "erode" the image.
//the erode element chosen here is a 2px by 2px rectangle
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2));
//dilate element (note: a 1x1 element makes the dilation effectively a no-op)
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1));
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
// main function
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG2 ");
//create Background Subtractor objects
//pMOG2 = new BackgroundSubtractorMOG2();
pMOG2 = new BackgroundSubtractorMOG2( history, varThreshold, bShadowDetection);
//BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
//function processImages
void processImages(char* firstFrameFilename) {
//read the first file of the sequence
frame = imread(firstFrameFilename);
if(frame.empty()){
//error in opening the first image
cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(firstFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
pMOG2->operator()(frame, fgMaskMOG2,-1);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
morphOps(fgMaskMOG2);
imshow("FG Mask MOG2 ", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey(1);
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(frame.empty()){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
// save subtracted images
string imageToSave =("output_MOG_" + frameNumberString + ".png");
bool saved = imwrite( "D:\\SO\\temp\\" +imageToSave,fgMaskMOG2);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
}
}
Take a look at the documentation.
In your code you have
bool bShadowDetection = true;
change it to
bool bShadowDetection = false;
EDIT:
OpenCV 3's BackgroundSubtractorMOG2 class has a setShadowValue(int value) function to set the gray value of shadows.
Setting that gray value to zero will remove the shadows.
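A minimal sketch of that OpenCV 3 API (assuming OpenCV >= 3.0; the constructor arguments mirror the question's history/varThreshold values):
// OpenCV 3: factory function replaces 'new BackgroundSubtractorMOG2(...)'
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2(1250, 16, true);
mog2->setShadowValue(0);          // shadows are drawn as 0 instead of gray (127)
// mog2->setDetectShadows(false); // or switch shadow detection off entirely
mog2->apply(frame, fgMaskMOG2);   // replaces pMOG2->operator()(frame, fgMaskMOG2)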
It depends on what you really want to see - if you want to separate the shadows from your segmentation:
bool bShadowDetection = true;
and use
cv::threshold(Mask,Mask,254,255,cv::THRESH_BINARY);
after MOG2->apply()
you'll get exactly the part which is 255 (pure foreground) in your image.
And sorry for reanimating this...
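Put together, the mask post-processing is just (a sketch; Mask stands for the foreground mask from your code):
MOG2->apply(frame, Mask);                               // shadows come out gray (127)
cv::threshold(Mask, Mask, 254, 255, cv::THRESH_BINARY); // keep only pure foreground (255)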

PvAPI OpenCV built-in code: FPS mismatch and how to use multiple cameras?

I have two Manta G125B cameras (B stands for black, meaning monochrome). These are GigE interface cameras, and I am using the PvAPI C++ application programmer's interface to read camera data into my Windows laptop, using the Microsoft Visual Studio Community 2015 IDE.
Recently I came across Steven Puttemans' GitHub account, where he shared the code AVT_Manta_opencv_builtin.cpp at this link:
https://github.com/StevenPuttemans/opencv_tryout_code/blob/master/camera_interfacing/AVT_Manta_opencv_builtin.cpp
I downloaded the OpenCV 3.0.0 source files from the
Itseez GitHub page and built all required files using CMake (I selected the "use default native compilers" and "Visual Studio 14 2015 64" options after clicking configure, as I am using a 64-bit CPU and MVS Community 2015). I enabled the WITH_PVAPI option after the first configuration (actually, it was already selected) and noticed that the PVAPI_INCLUDE_PATH and PVAPI_LIBRARY options were automatically detected correctly as C:/Program Files/Allied Vision Technologies/GigESDK/inc-pc and C:/Program Files/Allied Vision Technologies/GigESDK/lib-pc/x64/PvAPI.lib, respectively. I clicked configure again, and then generate. (A side note: if I don't unselect the BUILD_PERF_TESTS and BUILD_TESTS options, which are selected after configuration, Visual Studio shows three errors when I open OpenCV.sln and build ALL_BUILD and INSTALL. After removing the ticks from BUILD_PERF_TESTS and BUILD_TESTS, the errors are gone.)
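For reference, the equivalent command-line configuration would look roughly like this (a sketch of the GUI steps above, not the exact command I ran):
cmake -G "Visual Studio 14 2015 Win64" -D WITH_PVAPI=ON -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF C:\opencv\sources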
After building OpenCV from scratch, I made a Visual Studio project and modified Steven Puttemans' code slightly to see real-time camera acquisition from one of my cameras while printing frame number and frame rate on console. Here is my code:
int main()
{
Mat frame, imgResized;
double f = 0.4; /* f is a scalar in [0-1] range that scales the raw image. Output image is displayed in the screen. */
DWORD timeStart, timeEnd; // these variables are used for computing fps and avg_fps.
double fps = 1.0; // frame per second
double sum_fps(0.);
double avg_fps(0.); // average fps
int frameCount = 0;
VideoCapture camera(0 + CV_CAP_PVAPI); /* open the default camera; VideoCapture is class, camera is object. */
if (!camera.isOpened())
{
cerr << "Cannot open the camera." << endl;
return EXIT_FAILURE;
}
double rows = camera.get(CV_CAP_PROP_FRAME_HEIGHT); /* Height of the frames in the video stream. */
double cols = camera.get(CV_CAP_PROP_FRAME_WIDTH); /* Width of the frames in the video stream. */
double exposure = camera.get(CV_CAP_PROP_EXPOSURE);
cout << "Exposure value of the camera at the beginning is " << exposure << endl;
double exposureTimeInSecond = 0.02; /* As exposureTimeInSecond variable decreases, fps should increase */
exposure = exposureTimeInSecond * 1000000; /* exposure time in us */
camera.set(CV_CAP_PROP_EXPOSURE, exposure);
double frameRate; /* built-in fps */
cout << "Frame size of the camera is " << cols << "x" << rows << "." << endl;
cout << "Exposure value of the camera is set to " << exposure << endl;
char* winname = "Manta Camera";
namedWindow(winname, WINDOW_AUTOSIZE);
cout << "Press ESC to terminate default camera." << endl;
while (true)
{
timeStart = GetTickCount();
camera >> frame;
frameCount++;
frameRate = camera.get(CV_CAP_PROP_FPS); /* Built-in frame rate in Hz. */
/* resize() built-in function is in imgproc main module, in Geometric Image Transformations module. I resize the image by f (where f is a scalar in [0-1] range) for display. */
resize(frame, imgResized, Size(), f, f, INTER_LINEAR); /* void cv::resize(InputArray src, OutputArray dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) */
imshow(winname, imgResized);
moveWindow(winname, 980, 50);
int key = waitKey(10);
if (key == VK_ESCAPE)
{
destroyWindow(winname);
break;
}
/* Calculating FPS in my own way. */
timeEnd = GetTickCount();
fps = 1000.0 / (double)(timeEnd - timeStart); /* 1s = 1000ms */
sum_fps += fps;
avg_fps = sum_fps / frameCount;
cout << "FPS = " << frameRate << ", frame #" << frameCount << ", my_fps = " << fps << ", avg_fps = "<< avg_fps << endl;
}
cout << "Compiled with OpenCV version " << CV_VERSION << endl; /* thanks to Shervin Emami */
system("pause");
return EXIT_SUCCESS;
}
and this is the screenshot of the output during real-time acquisition:
Can someone explain why the built-in fps is 30.9? I am pretty sure that the real fps on my screen is around 15-16 Hz, because I computed the time between two different frame numbers that I see in my console and then computed the fps, and the result is the same as the avg_fps value in the console. I also ran the SampleViewer.exe file that is in the C:\Program Files\Allied Vision Technologies\GigESDK\bin-pc\x64 directory, and when I click on the "Show camera's attributes" icon, I see StatFrameRate = 30.2 approximately.
My second question is: how can I open the second camera that is connected to the network switch? And how can I trigger them at the same moment? I examined the cap_pvapi.cpp file in the source files that I downloaded from the Itseez GitHub page, and as far as I understand, the camera's FrameStartTriggerMode is "Freerun". The other options are SyncIn1, SyncIn2, FixedRate and Software. My main camera is the left camera, and I call the second camera the right camera. What should the corresponding FrameStartTriggerMode be for my left and right cameras?
I am able to open both of the cameras:
stereo camera real-time acquisition with OpenCV PvAPI built-in option
#include <iostream>
#include "Windows.h" /* data type DWORD and function GetTickCount() are defined in Windows.h */
#include "opencv2/opencv.hpp"
#include <cassert>
#include <ppl.h> // Parallel patterns library, concurrency namespace. Young-Jin uses this, so do I. It's very important; if not used, when images are processed, fps drops dramatically.
#if !defined VK_ESCAPE /* VK stands for virtual key*/
#define VK_ESCAPE 0x1B /* ASCII code for ESC character is 27 */
#endif
using namespace std;
using namespace cv;
const unsigned long numberOfCameras = 2;
bool displayImages = true;
int main()
{
Mat frame[numberOfCameras], imgResized[numberOfCameras];
double f = 0.4; /* f is a scalar in [0-1] range that scales the raw image. Output image is displayed in the screen. */
DWORD timeStart, timeEnd; // these variables are used for computing fps and avg_fps.
double fps = 1.0; // frame per second
double sum_fps(0.);
double avg_fps(0.); // average fps
int frameCount = 0;
VideoCapture camera[numberOfCameras]; // (0 + CV_CAP_PVAPI); /* open the default camera; VideoCapture is class, camera is object. */
for (int i = 0; i < numberOfCameras; i++)
{
camera[i].open(i + CV_CAP_PVAPI);
if (!camera[i].isOpened())
{
cerr << "Cannot open camera " << i << "." << endl;
return EXIT_FAILURE;
}
}
double rows = camera[0].get(CV_CAP_PROP_FRAME_HEIGHT); /* Height of the frames in the video stream. */
double cols = camera[0].get(CV_CAP_PROP_FRAME_WIDTH); /* Width of the frames in the video stream. */
if (numberOfCameras == 2)
assert(rows == camera[1].get(CV_CAP_PROP_FRAME_HEIGHT) && cols == camera[1].get(CV_CAP_PROP_FRAME_WIDTH));
for (int i = 0; i<numberOfCameras; i++) // initializing monochrome images.
{
frame[i] = Mat(Size(cols, rows), CV_8UC1); /* Mat(Size size, int type) */
resize(frame[i], imgResized[i], Size(0, 0), f, f, INTER_LINEAR);
}
/* combo is a combined image consisting of left and right resized images. images are resized in order to be displayed at a smaller region on the screen. */
Mat combo(Size(imgResized[0].size().width * 2, imgResized[0].size().height), imgResized[0].type()); /* This is the merged image (i.e., side by side) for real-time display. */
Rect roi[numberOfCameras]; /* roi stands for region of interest. */
for (int i = 0; i < numberOfCameras; i++)
roi[i] = Rect(0, 0, imgResized[0].cols, imgResized[0].rows);
/* Setting locations of images coming from different cameras in the combo image. */
if (numberOfCameras > 1) /* I assume max camera number is 2. */
{
roi[1].x = imgResized[0].cols;
roi[1].y = 0;
}
double exposure, exposureTimeInSecond = 0.06; /* As exposureTimeInSecond variable decreases, fps should increase */
for (int i = 0; i < numberOfCameras; i++)
{
exposure = camera[i].get(CV_CAP_PROP_EXPOSURE);
cout << "Exposure value of the camera " << i << " at the beginning is " << exposure << endl;
exposure = exposureTimeInSecond * 1000000; /* exposure time in us */
camera[i].set(CV_CAP_PROP_EXPOSURE, exposure);
}
double frameRate[numberOfCameras]; /* built-in fps */
cout << "Frame size of the camera is " << cols << "x" << rows << "." << endl;
cout << "Exposure value of both cameras is set to " << exposure << endl;
char* winname = "real-time image acquisition";
if (displayImages)
namedWindow(winname, WINDOW_AUTOSIZE);
cout << "Press ESC to terminate real-time acquisition." << endl;
while (true)
{
timeStart = GetTickCount();
Concurrency::parallel_for((unsigned long)0, numberOfCameras, [&](unsigned long i)
{
camera[i] >> frame[i];
frameRate[i] = camera[i].get(CV_CAP_PROP_FPS); /* Built-in frame rate in Hz. */
resize(frame[i], imgResized[i], Size(), f, f, INTER_LINEAR); /* void cv::resize(InputArray src, OutputArray dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) */
imgResized[i].copyTo(combo(roi[i])); /* This is C++ API. */
});
frameCount++;
if (displayImages)
{
imshow(winname, combo);
moveWindow(winname, 780, 50);
}
int key = waitKey(10);
if (key == VK_ESCAPE)
{
destroyWindow(winname);
break;
}
/* Calculating FPS in my own way. */
timeEnd = GetTickCount();
fps = 1000.0 / (double)(timeEnd - timeStart); /* 1s = 1000ms */
sum_fps += fps;
avg_fps = sum_fps / frameCount;
for (int i = 0; i < numberOfCameras; i++)
cout << "FPScam" << i << "=" << frameRate[i] << " ";
cout << "frame#" << frameCount << " my_fps=" << fps << " avg_fps=" << avg_fps << endl;
}
cout << "Compiled with OpenCV version " << CV_VERSION << endl; /* thanks to Shervin Emami */
system("pause");
return EXIT_SUCCESS;
}
// double triggerMode = camera.get(CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE);
////camera.set(CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE, 4.0);
//
//if (triggerMode == 0.)
//cout << "Trigger mode is Freerun" << endl;
//else if (triggerMode == 1.0)
//cout << "Trigger mode is SyncIn1" << endl;
//else if (triggerMode == 2.0)
//cout << "Trigger mode is SyncIn2" << endl;
//else if (triggerMode == 3.0)
//cout << "Trigger mode is FixedRate" << endl;
//else if (triggerMode == 4.0)
//cout << "Trigger mode is Software" << endl;
//else
//cout << "There is no trigger mode!!!";
But the fps is still not the value that I desire... :(
I also noticed that there is a cap_pvapi.cpp file (contributed by Justin G. Eskesen), implicitly used by my code, in this subdirectory:
C:\opencv\sources\modules\videoio\src
and when I examined it, I saw that both cameras are set to the "Freerun" FrameStartTriggerMode. The original command in PvAPI is:
PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun");
In my opinion, when using stereo cameras, the cameras should be set to the "Software" FrameStartTriggerMode. I already have code running in this configuration for my stereo camera setup, but after some time my console and the running application freeze. I tested my code for memory leaks, and I am quite sure there are none.
Average fps is about 16, but sometimes the fps drops as low as 2 Hz, which is not good. The good side of this code is that it doesn't stop, but it is still not reliable. I am still wondering whether it's possible to transfer all images at more than 25 fps. Actually, in my other code that freezes after some time (the one triggered by the "Software" option), if I don't display the images on screen, avg_fps gets close to 27 Hz. But it stops and the screen freezes due to an unknown failure.
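I have not verified this on hardware, but since the OpenCV PvAPI backend exposes the trigger mode as a capture property (see the commented block above, where 4.0 maps to "Software"), a sketch of switching both cameras to software triggering would be:
// sketch only: 4.0 = "Software", per the trigger-mode mapping printed above
for (int i = 0; i < numberOfCameras; i++)
    camera[i].set(CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE, 4.0);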

Change resolution on openni2 not working

I want to read depth frames at 640x480.
I am using Windows 8.1 64-bit, OpenNI2 32-bit, and a Kinect: PSMP05000, PSCM04900 (PrimeSense).
I took code references from here:
cannot set VGA resolution
Simple Read
Combined into this code:
main.cpp
OniSampleUtilities.h
SimpleRead.vcxproj
It should compile if you install OpenNI2 32-bit from here:
OpenNI 2
#include "iostream"
#include "OpenNI.h"
#include "OniSampleUtilities.h"
#define SAMPLE_READ_WAIT_TIMEOUT 2000 //2000ms
using namespace openni;
using namespace std;
int main()
{
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
cout << "Initialize failed:" << endl << OpenNI::getExtendedError() << endl;
return 1;
}
Device device;
rc = device.open(ANY_DEVICE);
if (rc != STATUS_OK)
{
cout << "Couldn't open device" << endl << OpenNI::getExtendedError() << endl;
return 2;
}
VideoStream depth;
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK)
{
cout << "Couldn't create depth stream" << endl << OpenNI::getExtendedError() << endl;
return 3;
}
}
rc = depth.start();
if (rc != STATUS_OK)
{
cout << "Couldn't start the depth stream" << endl << OpenNI::getExtendedError() << endl;
return 4;
}
VideoFrameRef frame;
// set resolution
// depth modes
cout << "Depth modes" << endl;
const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH); // select index=4 640x480, 30 fps, 1mm
const openni::Array<openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
for (int i = 0; i < modesDepth.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n", i, modesDepth[i].getResolutionX(), modesDepth[i].getResolutionY(),
modesDepth[i].getFps(), modesDepth[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM
}
rc = depth.setVideoMode(modesDepth[0]);
if (openni::STATUS_OK != rc)
{
cout << "error: depth fromat not supprted..." << endl;
}
system("pause");
while (!wasKeyboardHit())
{
int changedStreamDummy;
VideoStream* pStream = &depth;
rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
if (rc != STATUS_OK)
{
cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << " ms)" << endl << OpenNI::getExtendedError() << endl;
continue;
}
rc = depth.readFrame(&frame);
if (rc != STATUS_OK)
{
cout << "Read failed!" << endl << OpenNI::getExtendedError() << endl;
continue;
}
if (frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM && frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM)
{
cout << "Unexpected frame format" << endl;
continue;
}
DepthPixel* pDepth = (DepthPixel*)frame.getData();
int middleIndex = (frame.getHeight()+1)*frame.getWidth()/2;
printf("[%08llu] %8d\n", (long long)frame.getTimestamp(), pDepth[middleIndex]);
}
depth.stop();
depth.destroy();
device.close();
OpenNI::shutdown();
return 0;
}
There are 6 modes of operation:
0: 320x240, 30 fps, 100 format
1: 320x240, 30 fps, 101 format
2: 320x240, 60 fps, 100 format
3: 320x240, 60 fps, 101 format
4: 640x480, 30 fps, 100 format
5: 640x480, 30 fps, 101 format
It can read only from modes 0-3.
At modes 4 and 5, I get a timeout.
How can I read depth frames at 640x480?
Thanks for the help,
Tal.
====================================================
new information:
I also used this line, and I get the same results:
const openni::SensorInfo* sinfo = &(depth.getSensorInfo());
This line never executes, in any mode:
cout << "error: depth format not supported..." << endl;
At modes 4 and 5, this line always executes:
cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << " ms)" << endl << OpenNI::getExtendedError() << endl;
I think it may be a bug in OpenNI2.
With OpenNI 1, I can read the depth image at 640x480 on the same computer, OS, and device.
Maybe I am wrong, but I am almost sure that the problem is the order in which you are doing it.
I think you should set the video mode before depth.start() and after depth.create(device, SENSOR_DEPTH).
If I remember correctly, once the stream has started you may not change its resolution.
So it should be something like this
...
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK)
{
cout << "Couldn't create depth stream" << endl << OpenNI::getExtendedError() << endl;
return 3;
}
}
// set resolution
// depth modes
cout << "Depth modes" << endl;
const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH);
const openni::Array<openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
rc = depth.setVideoMode(modesDepth[0]);
if (openni::STATUS_OK != rc)
{
cout << "error: depth fromat not supprted..." << endl;
}
rc = depth.start();
if (rc != STATUS_OK)
{
cout << "Couldn't start the depth stream" << endl << OpenNI::getExtendedError() << endl;
return 4;
}
VideoFrameRef frame;
...
I hope that this helps you; if not, please add a comment. I have similar code working in the git repository I showed you the other day, tested with a PrimeSense Carmine camera.
In my case (Asus Xtion PRO in a USB 3.0 port, OpenNI2, Windows 8.1), it seems there is something wrong with OpenNI2 (or its driver) that prevents me from changing the resolution in code. NiViewer simply hangs, or frame rates drop significantly, if the color resolution is set to 640x480.
However, on Windows, I managed to change the resolution by changing the settings in PS1080.ini in the OpenNI2/Tools/OpenNI2/Drivers folder. In the ini file, for Asus, make sure
UsbInterface = 2
is enabled. By default it's zero. Then set Resolution = 1 for the depth and image sections.
My Asus Xtion firmware is v5.8.22.
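For clarity, the relevant PS1080.ini entries look roughly like this (the section names are from memory, so treat this as a sketch):
[Device]
UsbInterface = 2 ; default is 0
[Depth]
Resolution = 1   ; 1 = 640x480 (VGA), per the description above
[Image]
Resolution = 1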
I've tried the method @api55 mentioned and it works. The code and results are below.
But there is a problem when I make a similar change to the OpenNI sample code "SampleViewer" so that I can change the resolution freely. When I set the resolution to 320x240, all is well. However, when I change it to 640x480, although the program still reads frames in (at an apparently slower rate), the program's display just gets stuck.
Update (2015-12-27):
I then tested the aforementioned sample viewer with a Kinect 1.0 depth camera. Since its color camera has a resolution of no less than 640x480, I could not try the 320x240 resolution, but the program works well with Kinect 1.0 at a resolution of 640x480. In conclusion, I think there must be some problem with the ASUS Xtion camera.
#include <iostream>
#include <cstdio>
#include <vector>
#include <OpenNI.h>
#include "OniSampleUtilities.h"
#pragma comment(lib, "OpenNI2")
#define SAMPLE_READ_WAIT_TIMEOUT 2000 //2000ms
using namespace openni;
using namespace std;
int main()
{
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
printf("Initialize failed:\n%s\n", OpenNI::getExtendedError());
return 1;
}
Device device;
openni::Array<openni::DeviceInfo> deviceInfoList;
OpenNI::enumerateDevices(&deviceInfoList);
for (int i = 0; i < deviceInfoList.getSize(); i++)
{
printf("%d: Uri: %s\n"
"Vendor: %s\n"
"Name: %s\n", i, deviceInfoList[i].getUri(), deviceInfoList[i].getVendor(), deviceInfoList[i].getName());
}
rc = device.open(deviceInfoList[0].getUri());
if (rc != STATUS_OK)
{
printf("Counldn't open device\n%s\n", OpenNI::getExtendedError());
return 2;
}
VideoStream depth;
// set resolution
// depth modes
printf("\nDepth modes\n");
const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH); // select index=4 640x480, 30 fps, 1mm
if (sinfo == NULL)
{
printf("Couldn't get device info\n%s\n", OpenNI::getExtendedError());
return 3;
}
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK)
{
printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
return 4;
}
const openni::Array<openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
vector<int> item;
for (int i = 0; i < modesDepth.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n", i, modesDepth[i].getResolutionX(), modesDepth[i].getResolutionY(),
modesDepth[i].getFps(), modesDepth[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM
if (modesDepth[i].getResolutionX() == 640 && modesDepth[i].getResolutionY() == 480)
item.push_back(i);
}
int item_idx = item[0];
printf("Choose mode %d\nWidth: %d, Height: %d\n", item_idx, modesDepth[item_idx].getResolutionX(), modesDepth[item_idx].getResolutionY());
rc = depth.setVideoMode(modesDepth[item_idx]);
if (rc != STATUS_OK)
{
printf("error: depth format not supported...\n");
return 5;
}
rc = depth.start();
if (rc != STATUS_OK)
{
printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
return 6;
}
VideoFrameRef frame;
printf("\nCurrent resolution:\n");
printf("Width: %d Height: %d\n", depth.getVideoMode().getResolutionX(), depth.getVideoMode().getResolutionY());
system("pause");
while (!wasKeyboardHit())
{
int changedStreamDummy;
VideoStream* pStream = &depth;
rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
if (rc != STATUS_OK)
{
printf("Wait failed! (timeout is \" %d \" ms)\n%s\n", SAMPLE_READ_WAIT_TIMEOUT, OpenNI::getExtendedError());
continue;
}
rc = depth.readFrame(&frame);
if (rc != STATUS_OK)
{
printf("Read failed!\n%s\n", OpenNI::getExtendedError());
continue;
}
if (frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM && frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM)
{
printf("Unexpected frame format\n");
continue;
}
DepthPixel* pDepth = (DepthPixel*)frame.getData();
int middleIndex = (frame.getHeight() + 1)*frame.getWidth() / 2;
printf("[%08llu] %8d\n", (long long)frame.getTimestamp(), pDepth[middleIndex]);
printf("Width: %d Height: %d\n", frame.getWidth(), frame.getHeight());
}
depth.stop();
depth.destroy();
device.close();
OpenNI::shutdown();
return 0;
}
I had the same problem, but I have now solved it by referencing the NiViewer example in OpenNI2. Apparently, after you start a stream (either depth or color), you have to stop it to change the resolution, and then start it again:
const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH);
const openni::Array<openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
depth.stop();
rc = depth.setVideoMode(modesDepth[4]);
depth.start();
I confirmed that this works with an Asus Xtion on OpenNI2.
Hope this helps!
Final conclusion:
Actually, it is the Xtion's own problem (maybe hardware-related).
If you want just one of depth or color to be 640x480, and the other to be 320x240, it'll work. I can post my code if you want.
Details
Some of the answers above made a mistake: even NiViewer.exe itself doesn't allow depth at 640x480 and color at 640x480 at the same time.
Note: don't be misled by the visualization in NiViewer.exe; the video stream displayed is large, but that does not actually mean 640x480. It is initialised with
depth: 320x240
color: 320x240
When you set either of the modes to 640x480, it still works, which is
depth: 640x480
color: 320x240
or
depth: 320x240
color: 640x480
But when you want both of them at the highest resolution:
depth: 640x480
color: 640x480
the viewer program starts encountering severe frame drops in the depth stream (in my case), but since the viewer retrieves the depth frames in a non-blocking way (the default code is written in a blocking way), you still see the color update normally, while the depth updates every two seconds or even more.
To conclude
You can only set one of depth or color to 640x480; the other has to be 320x240.

OpenCV: memory corruption

I am trying to make a program work, but for some reason I get a memory corruption error.
I am only using the "processVideo" method, not the processImages method.
When I target a specific video, it works only once. When I try to use it again on the same video, I get this kind of error:
*** Error in `./camack': malloc(): memory corruption: 0x000000000234b2c0 ***
I can't figure out where the problem is...
Here's the code:
//opencv
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by the MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;
//function declarations
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "--------------------------------------------------------------------------" << endl
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process both videos (-vid) and images (-img)." << endl
<< endl
<< "Usage:" << endl
<< "./bs {-vid <video filename>|-img <image filename>}" << endl
<< "for example: ./bs -vid video.avi" << endl
<< "or: ./bs -img /data/images/1.png" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
namedWindow("FG Mask MOG 2");
//create Background Subtractor objects
//NOTE HERE!!!!
pMOG= new BackgroundSubtractorMOG(); //MOG approach
pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
else if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
//VideoCapture capture(0);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
//AND HERE!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
//delete capture object
capture.release();
}
void processImages(char* fistFrameFilename) {
//read the first file of the sequence
frame = imread(fistFrameFilename);
if(!frame.data){
//error in opening the first image
cerr << "Unable to open first image frame: " << fistFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(fistFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
//ALSO HERE!!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(!frame.data){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
}
}
Can you help me with this one? Thank you a lot.
Global vars are evil! You should not use them.
Here, Ptr<BackgroundSubtractor> pMOG2; will never get released.
Move it into main, and pass it as an argument to functions like processVideo().
Also, all images passed to the BackgroundSubtractor must have the same size.
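A minimal sketch of that restructuring (same calls as in the question, just without globals; the Ptr instances are released automatically when main() returns):
void processVideo(char* videoFilename, Ptr<BackgroundSubtractor>& pMOG, Ptr<BackgroundSubtractor>& pMOG2);
int main(int argc, char* argv[])
{
    Ptr<BackgroundSubtractor> pMOG = new BackgroundSubtractorMOG();   // released on scope exit
    Ptr<BackgroundSubtractor> pMOG2 = new BackgroundSubtractorMOG2();
    if (argc == 3 && strcmp(argv[1], "-vid") == 0)
        processVideo(argv[2], pMOG, pMOG2);
    return EXIT_SUCCESS;
}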
If you don't need the processImages part, then remove it so that you have short code which is easy to understand. I have removed the irrelevant part. Here is the short code, which is working fine on my system:
//opencv
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by the MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;
//function declarations
void processVideo(char* videoFilename);
int main(int argc, char* argv[])
{
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
namedWindow("FG Mask MOG 2");
//create Background Subtractor objects
//NOTE HERE!!!!
pMOG= new BackgroundSubtractorMOG(); //MOG approach
pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
//VideoCapture capture(0);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
//AND HERE!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
//delete capture object
capture.release();
}

OpenCv Memory clearing for reuse

I have a problem with memory management in OpenCV after using the function:
cvCreateMemStorage(0);
Here is what I want to do: I am looping over many images, and I am using cvExtractSURF() to extract the keypoints and descriptors. I do that for every frame, and for every two frames I do a certain processing step. I only need to keep two frames in memory, so I created memory storage using:
cvCreateMemStorage(0);
I would like, once the processing is done on a frame, to completely clean the memory storage in order to reuse it to store the keypoints and descriptors of the next frame. I need to do that because I am processing a lot of frames, and therefore creating a memory storage of a huge size is not the best option.
I tried to use cvClearMemStorage(), cvClearSeq() and cvRelease(), but nothing works and I always end up with errors. Here is the code with the important part (I have removed all the lines not directly related to the question, so of course this code won't compile):
CvMemStorage* storageSrc = cvCreateMemStorage(0);
CvMemStorage* storageDest = cvCreateMemStorage(0);
// loop on all .bmp files in the selected directory
cout << "STARTING READING THE FRAME" << endl;
while(myHandle!=INVALID_HANDLE_VALUE && myFile.cFileName!= buffer){
buffer=myFile.cFileName;
fileLocation = dirName + buffer;
frameNames.push_back(fileLocation);
frame = cvLoadImage(fileLocation.c_str(), CV_LOAD_IMAGE_COLOR);
frameResized = cvCreateImage(cvSize(processingSizeX, processingSizeY), 8, 3);
cvResize(frame, frameResized, CV_INTER_AREA);
resizedGray = cvCreateImage(cvSize(processingSizeX, processingSizeY), 8, 1);
cvCvtColor( frameResized, resizedGray, CV_RGB2GRAY );
if(!frame){
fprintf(stderr, "Error when loading the images.");
exit(-1);
}
if(nbFrameRead == 0){
cout << endl;
cout << "ZONE 1" << endl;
cout << endl;
cvSetImageROI( correspond, cvRect( 0, 0, processingSizeX, processingSizeY) );
cvCopy( frameResized, correspond );
cvResetImageROI( correspond );
cvExtractSURF( resizedGray, 0, &srcFrameKeypoints, &srcFrameDescriptors, storageSrc, params );
nbFrameRead++;
}
else if(nbFrameRead == 1){
cout << endl;
cout << "ZONE 2" << endl;
cout << endl;
cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
//printf("Nb Key Points in frame %d: %d\n", nbFrameRead, srcFrameDescriptors->total);
// clear memory and switch current frame to last frame
cvClearSeq(srcFrameKeypoints);
cvClearSeq(srcFrameDescriptors);
cvClearSeq(descriptorsOrderedSrc);
cvClearMemStorage(storageSrc);
srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
cvClearSeq(destFrameKeypoints);
cvClearSeq(destFrameDescriptors);
cvClearSeq(descriptorsOrderedDest);
cvClearMemStorage(storageDest);
cvSetImageROI( correspond, cvRect( 0, 0, processingSizeX, processingSizeY) );
cvCopy( frameResized, correspond );
cvResetImageROI( correspond );
nbFrameRead++;
}
else if(nbFrameRead < bufferSize + 2){
cout << endl;
cout << "ZONE 3" << endl;
cout << endl;
cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
//printf("Nb Key Points in frame %d: %d\n", nbFrameRead, srcFrameDescriptors->total);
//clear memory and switch current frame to last frame
cvClearSeq(srcFrameKeypoints);
cvClearSeq(srcFrameDescriptors);
cvClearSeq(descriptorsOrderedSrc);
cvClearMemStorage(storageSrc);
srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
cvClearSeq(destFrameKeypoints);
cvClearSeq(destFrameDescriptors);
cvClearSeq(descriptorsOrderedDest);
cvClearMemStorage(storageDest);
nbFrameRead++;
}
else{
cout << endl;
cout << "ZONE 4" << endl;
cout << endl;
cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
// clear memory and switch current frame to last frame
cvClearSeq(srcFrameKeypoints);
cvClearSeq(srcFrameDescriptors);
cvClearSeq(descriptorsOrderedSrc);
cvClearMemStorage(storageSrc);
srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
cvClearSeq(destFrameKeypoints);
cvClearSeq(destFrameDescriptors);
cvClearSeq(descriptorsOrderedDest);
cvClearMemStorage(storageDest);
nbFrameRead++;
nbFrameSmoothed++;
}
FindNextFile(myHandle,&myFile);
}
Is there anything wrong in this code? If yes, what should I do to be able to completely clean storageSrc and storageDest in order to reuse them as many times as needed?
Thank you in advance for your answer.
The counterpart of cvCreateMemStorage() is cvReleaseMemStorage().
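So, as a minimal sketch for the code above: keep cvClearMemStorage() inside the loop to reuse the storages, and release them once after the loop:
// inside the loop: reset the storages for reuse (does not free the memory)
cvClearMemStorage(storageSrc);
cvClearMemStorage(storageDest);
// after the loop: actually free them (note the double pointer)
cvReleaseMemStorage(&storageSrc);
cvReleaseMemStorage(&storageDest);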