I have a problem with memory management in OpenCV after using the function:
cvCreateMemStorage(0);
Here is what I want to do: I am looping over many images and using cvExtractSURF() to extract the keypoints and descriptors of every frame, and for every pair of consecutive frames I do some processing. I only need to keep two frames in memory, so I created memory storage using:
cvCreateMemStorage(0);
Once the processing of a frame is done, I would like to completely clean the memory storage so I can reuse it to store the keypoints and descriptors of the next frame. I need to do this because I am processing a lot of frames, and therefore creating a huge amount of memory storage is not the best option.
I tried cvClearMemStorage(), cvClearSeq() and cvRelease(), but nothing works and I always end up with errors. Here is the code with the important part (I have removed all the lines not directly related to the question, so of course this code won't compile):
CvMemStorage* storageSrc = cvCreateMemStorage(0);
CvMemStorage* storageDest = cvCreateMemStorage(0);
// loop on all .bmp files in the selected directory
cout << "STARTING READING THE FRAME" << endl;
while(myHandle != INVALID_HANDLE_VALUE && myFile.cFileName != buffer){
    buffer = myFile.cFileName;
    fileLocation = dirName + buffer;
    frameNames.push_back(fileLocation);
    frame = cvLoadImage(fileLocation.c_str(), CV_LOAD_IMAGE_COLOR);
    if(!frame){
        fprintf(stderr, "Error when loading the images.");
        exit(-1);
    }
    frameResized = cvCreateImage(cvSize(processingSizeX, processingSizeY), 8, 3);
    cvResize(frame, frameResized, CV_INTER_AREA);
    resizedGray = cvCreateImage(cvSize(processingSizeX, processingSizeY), 8, 1);
    cvCvtColor( frameResized, resizedGray, CV_RGB2GRAY );
    if(nbFrameRead == 0){
        cout << endl << "ZONE 1" << endl << endl;
        cvSetImageROI( correspond, cvRect( 0, 0, processingSizeX, processingSizeY) );
        cvCopy( frameResized, correspond );
        cvResetImageROI( correspond );
        cvExtractSURF( resizedGray, 0, &srcFrameKeypoints, &srcFrameDescriptors, storageSrc, params );
        nbFrameRead++;
    }
    else if(nbFrameRead == 1){
        cout << endl << "ZONE 2" << endl << endl;
        cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
        //printf("Nb Key Points in frame %d: %d\n", nbFrameRead, srcFrameDescriptors->total);
        // clear memory and switch current frame to last frame
        cvClearSeq(srcFrameKeypoints);
        cvClearSeq(srcFrameDescriptors);
        cvClearSeq(descriptorsOrderedSrc);
        cvClearMemStorage(storageSrc);
        srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
        descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
        cvClearSeq(destFrameKeypoints);
        cvClearSeq(destFrameDescriptors);
        cvClearSeq(descriptorsOrderedDest);
        cvClearMemStorage(storageDest);
        cvSetImageROI( correspond, cvRect( 0, 0, processingSizeX, processingSizeY) );
        cvCopy( frameResized, correspond );
        cvResetImageROI( correspond );
        nbFrameRead++;
    }
    else if(nbFrameRead < bufferSize + 2){
        cout << endl << "ZONE 3" << endl << endl;
        cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
        //printf("Nb Key Points in frame %d: %d\n", nbFrameRead, srcFrameDescriptors->total);
        //clear memory and switch current frame to last frame
        cvClearSeq(srcFrameKeypoints);
        cvClearSeq(srcFrameDescriptors);
        cvClearSeq(descriptorsOrderedSrc);
        cvClearMemStorage(storageSrc);
        srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
        descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
        cvClearSeq(destFrameKeypoints);
        cvClearSeq(destFrameDescriptors);
        cvClearSeq(descriptorsOrderedDest);
        cvClearMemStorage(storageDest);
        nbFrameRead++;
    }
    else{
        cout << endl << "ZONE 4" << endl << endl;
        cvExtractSURF( resizedGray, 0, &destFrameKeypoints, &destFrameDescriptors, storageDest, params );
        // clear memory and switch current frame to last frame
        cvClearSeq(srcFrameKeypoints);
        cvClearSeq(srcFrameDescriptors);
        cvClearSeq(descriptorsOrderedSrc);
        cvClearMemStorage(storageSrc);
        srcFrameKeypoints = cvCloneSeq(destFrameKeypoints, storageSrc);
        descriptorsOrderedSrc = cvCloneSeq(descriptorsOrderedDest, storageSrc);
        cvClearSeq(destFrameKeypoints);
        cvClearSeq(destFrameDescriptors);
        cvClearSeq(descriptorsOrderedDest);
        cvClearMemStorage(storageDest);
        nbFrameRead++;
        nbFrameSmoothed++;
    }
    FindNextFile(myHandle, &myFile);
}
Is there anything wrong in this code ? If yes, what should I do to be able to completely clean storageSrc and storageDest in order to reuse it as many time as needed?
Thank you in advance for your answer.
The counterpart of cvCreateMemStorage() is cvReleaseMemStorage().
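cvClearMemStorage() only resets the storage for reuse (its allocated blocks are kept), and cvClearSeq() does not return memory to the storage at all; cvReleaseMemStorage() is what actually frees it. A minimal sketch of the intended lifecycle (variable names are illustrative; gray and params as in the question's code):
CvMemStorage* storage = cvCreateMemStorage(0);
while (framesRemain()) { // hypothetical loop condition
    CvSeq* keypoints = 0;
    CvSeq* descriptors = 0;
    cvExtractSURF(gray, 0, &keypoints, &descriptors, storage, params);
    // ... use keypoints/descriptors ...
    cvClearMemStorage(storage); // reuse the same blocks for the next frame;
                                // every CvSeq in this storage is now invalid
}
cvReleaseMemStorage(&storage);  // finally return the memory to the system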
I'm trying to integrate an IDS uEye camera with OpenCV, and it mostly works so far.
The problem I'm facing is that when I use the IDS SDK to view the camera image, I get the full image. But using OpenCV's VideoCapture, I only get the top-left quarter of the image.
To clarify what the full image should be, picture a rectangle split into quarters: the entire rectangle is what I should get, and the top-left quarter is what I actually get from VideoCapture (source: kheper.net).
I've already tried to adjust the image width and height via cap.set, and since the VideoCapture line comes after setting the uEye camera's parameters, I'm fairly certain it's not a settings issue with the camera but something to do with VideoCapture itself.
char strCamFileName[256];
char* pcImageMemory;
int memId;
int nRet = 0;
SENSORINFO sInfo;
IplImage* img;
HIDS hCam = 0; // index 0 means taking first camera available
RECT rc;
MSG msg;
Mat frame(MaxImageSizeY, MaxImageSizeX, CV_8UC1);
nRet = is_InitCamera(&hCam, hWndDisplay);
if (nRet != IS_SUCCESS)
{
    cout << endl << "Error Connecting to Camera" << endl;
    cout << "Closing program..." << endl;
    return 0;
}
else
{
    cout << endl << "Camera initialisation was successful!" << endl << endl;
}
// you can query information about the sensor type of the camera
nRet = is_GetSensorInfo(hCam, &sInfo);
if (nRet == IS_SUCCESS)
{
    cout << "Cameramodel: \t\t" << sInfo.strSensorName << endl;
    cout << "Maximum image width: \t" << sInfo.nMaxWidth << endl;
    cout << "Maximum image height: \t" << sInfo.nMaxHeight << endl << endl << endl;
}
MaxImageSizeX = sInfo.nMaxWidth;
MaxImageSizeY = sInfo.nMaxHeight;
DisplayWidth = MaxImageSizeX;
DisplayHeight = MaxImageSizeY;
int nColorMode = IS_COLORMODE_CBYCRY;
int nBitsPerPixel = 32;
// Get number of available formats and size of list
UINT count;
UINT bytesNeeded = sizeof(IMAGE_FORMAT_LIST);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count));
bytesNeeded += (count - 1) * sizeof(IMAGE_FORMAT_INFO);
void* ptr = malloc(bytesNeeded);
// Create and fill list
IMAGE_FORMAT_LIST* pformatList = (IMAGE_FORMAT_LIST*)ptr;
pformatList->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
pformatList->nNumListElements = count;
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_LIST, pformatList, bytesNeeded);
// Prepare for creating image buffers
char* pMem = NULL;
int memID = 0;
// Set each format and then capture an image
IMAGE_FORMAT_INFO formatInfo;
// Allocate image mem for current format, set format
nRet = is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pMem, &memID);
nRet = is_SetImageMem(hCam, pMem, memID);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatInfo.nFormatID, sizeof(formatInfo.nFormatID));
// Sets the color mode to be used when image data are saved or displayed by the graphics card
is_SetColorMode(hCam, nColorMode);
// allocates an image memory for an image, activates it and sets the way in which the images will be displayed on the screen
int nMemoryId;
is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pcImageMemory, &nMemoryId);
is_SetImageMem(hCam, pcImageMemory, nMemoryId);
is_SetDisplayMode(hCam, IS_SET_DM_DIB);
is_HotPixel(hCam, IS_HOTPIXEL_DISABLE_CORRECTION, NULL, NULL);
IS_RECT AAOI; // IS_RECT type variable for Auto AOI parameters
AAOI.s32X = MaxImageSizeX / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Width = MaxImageSizeX / 3;
AAOI.s32Y = MaxImageSizeY / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Height = MaxImageSizeY / 3;
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
VideoCapture cap; //--- INITIALIZE VIDEOCAPTURE
int deviceID = 0; // 0 = open default camera
int apiID = cv::CAP_ANY; // 0 = autodetect default API
if (cap.open(deviceID, apiID))
{
    cout << "cap opened" << endl;
}
else
{
    cout << "cap not opened" << endl;
}
cout << "Press 1 to capture image" << endl
<< "Press 2 to use (last) captured image" << endl;
cap.read(frame);
From what I know, VideoCapture should be able to obtain the entire image from the camera, right?
I'm honestly just really confused about why VideoCapture cuts off 3/4 of the image, and I would appreciate any help.
Alright, I found out the problem...
Again, I left out too much code in the original post (because there's A LOT of irrelevant code related to USB stuff), so I'll include the most important part I left out here:
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
//// Acquires a single image from the camera
//is_FreezeVideo(hCam, IS_WAIT);
//// Output an image from an image memory in the specified window
//int nRenderMode = IS_RENDER_FIT_TO_WINDOW;
//is_RenderBitmap(hCam, nMemoryId, hWndDisplay, nRenderMode);
is_ExitCamera(hCam); // exit camera so that OpenCV can access as camera parameters have been set
CalibSet CS; // declaring variable 'CS' under the class 'CalibSet'
Mat livemap1, livemap2;
FileStorage tfs(inputCalibFile, FileStorage::READ); // Read the settings
if (!tfs.isOpened())
{
cout << "Could not open the calibration file: \"" << inputCalibFile << "\"" << endl;
return -1;
}
tfs["camera_matrix"] >> CS.CamMat;
tfs["distortion_coefficients"] >> CS.DistCoeff;
tfs["image_width"] >> CS.image.width;
tfs["image_height"] >> CS.image.height;
tfs.release(); // close Settings file
So basically, the class CalibSet holds the values from a .xml file that are used after undistortion calibration.
More about that here: Camera calibration data retrieval
But the issue that prevented cap.set from working was likely these last few lines:
tfs["image_width"] >> CS.image.width; and tfs["image_height"] >> CS.image.height; which took the values in "image_width" and "image_height" and stored them in the respective variables of the class CalibSet.
And guess what... The width and height in the .xml file were 640x480...
I modified that portion of the .xml to the expected 1280x1024, the live feed from the camera was fixed, and I finally got the full image instead of the 1/4 I got before.
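For completeness, a small sketch of the idea (my guess at equivalent code, since the full project setup isn't shown): once the calibration file carries the right size, the capture resolution can also be requested explicitly and verified.
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap(0);
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280);   // full sensor size, not 640x480
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1024);
    cv::Mat frame;
    if (cap.read(frame))
        std::cout << "capturing at " << frame.cols << "x" << frame.rows << std::endl;
    return 0;
}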
I am using C++ and OpenCV 2.3.1 for background subtraction. I have tried many times to change the parameters of MOG2 in order to disable the shadow detection feature, and I have also tried what other people suggest on the internet; however, shadow detection is still enabled.
Could you please tell me how to disable it?
See the sample code and the generated mask below.
//opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG2; //fg mask generated by the MOG2 method
Ptr<BackgroundSubtractor> pMOG2; //MOG Background subtractor
int keyboard; //input from keyboard
//new variables
int history = 1250;
float varThreshold = 16;
bool bShadowDetection = true;
/*
//added to remove the shadow
unsigned char nShadowDetection = 0;
float fTau = 0.5;
//static const unsigned char nShadowDetection =( unsigned char)0;
*/
// Function Headers
void help();
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process images (-img)." << endl
<< "Usage:" << endl
<< "./bs -img <image filename>}" << endl
<< "for example: ./bs -img /data/images/1.png" << endl
<< endl;
}
// morphological operation
void morphOps(Mat &thresh){
    //create the structuring elements used to erode and dilate the image
    //(note: a 1x1 dilate element leaves the image unchanged)
    Mat erodeElement = getStructuringElement( MORPH_RECT, Size(2,2) );
    Mat dilateElement = getStructuringElement( MORPH_RECT, Size(1,1) );
    erode(thresh,thresh,erodeElement);
    erode(thresh,thresh,erodeElement);
    dilate(thresh,thresh,dilateElement);
    dilate(thresh,thresh,dilateElement);
}
// main function
int main(int argc, char* argv[])
{
    //print help information
    help();
    //check for the input parameter correctness
    if(argc != 3) {
        cerr << "Incorrect input list" << endl;
        cerr << "exiting..." << endl;
        return EXIT_FAILURE;
    }
    //create GUI windows
    namedWindow("Frame");
    namedWindow("FG Mask MOG2 ");
    //create Background Subtractor objects
    //pMOG2 = new BackgroundSubtractorMOG2();
    pMOG2 = new BackgroundSubtractorMOG2( history, varThreshold, bShadowDetection);
    //BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
    if(strcmp(argv[1], "-img") == 0) {
        //input data coming from a sequence of images
        processImages(argv[2]);
    }
    else {
        //error in reading input parameters
        cerr << "Please, check the input parameters." << endl;
        cerr << "Exiting..." << endl;
        return EXIT_FAILURE;
    }
    //destroy GUI windows
    destroyAllWindows();
    return EXIT_SUCCESS;
}
//function processImages
void processImages(char* firstFrameFilename) {
    //read the first file of the sequence
    frame = imread(firstFrameFilename);
    if(frame.empty()){
        //error in opening the first image
        cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
        exit(EXIT_FAILURE);
    }
    //current image filename
    string fn(firstFrameFilename);
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
        //update the background model
        pMOG2->operator()(frame, fgMaskMOG2, -1);
        //get the frame number and write it on the current frame
        size_t index = fn.find_last_of("/");
        if(index == string::npos) {
            index = fn.find_last_of("\\");
        }
        size_t index2 = fn.find_last_of(".");
        string prefix = fn.substr(0, index+1);
        string suffix = fn.substr(index2);
        string frameNumberString = fn.substr(index+1, index2-index-1);
        istringstream iss(frameNumberString);
        int frameNumber = 0;
        iss >> frameNumber;
        rectangle(frame, cv::Point(10, 2), cv::Point(100, 20),
                  cv::Scalar(255,255,255), -1);
        putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
        //show the current frame and the fg mask
        imshow("Frame", frame);
        morphOps(fgMaskMOG2);
        imshow("FG Mask MOG2 ", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey(1);
        //search for the next image in the sequence
        ostringstream oss;
        oss << (frameNumber + 1);
        string nextFrameNumberString = oss.str();
        string nextFrameFilename = prefix + nextFrameNumberString + suffix;
        //read the next frame
        frame = imread(nextFrameFilename);
        if(frame.empty()){
            //error in opening the next image in the sequence
            cerr << "Unable to open image frame: " << nextFrameFilename << endl;
            exit(EXIT_FAILURE);
        }
        //update the path of the current frame
        fn.assign(nextFrameFilename);
        // save subtracted images
        string imageToSave = ("output_MOG_" + frameNumberString + ".png");
        bool saved = imwrite("D:\\SO\\temp\\" + imageToSave, fgMaskMOG2);
        if(!saved) {
            cerr << "Unable to save " << imageToSave << endl;
        }
    }
}
Take a look at the documentation.
In your code you have
bool bShadowDetection = true;
change it to
bool bShadowDetection = false;
EDIT:
OpenCV 3's BackgroundSubtractorMOG2 class has a setShadowValue(int value) function to set the gray value used for shadow pixels.
Setting that gray value to zero will remove the shadows from the mask.
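For illustration, a minimal OpenCV 3 sketch (the createBackgroundSubtractorMOG2 factory replaces the 2.x constructor; frame is assumed to be the current input image):
#include <opencv2/video.hpp>

cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
    cv::createBackgroundSubtractorMOG2(/*history=*/1250, /*varThreshold=*/16,
                                       /*detectShadows=*/true);
mog2->setShadowValue(0);      // shadow pixels are written as 0 (background)
// or skip shadow detection altogether:
// mog2->setDetectShadows(false);

cv::Mat fgMask;
mog2->apply(frame, fgMask);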
It depends on what you really want to see. If you want to separate the shadows from your segmentation, keep
bool bShadowDetection = true;
and use
cv::threshold(Mask,Mask,254,255,cv::THRESH_BINARY);
after MOG2->apply(); you'll get exactly the part which is 255 in your image.
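Put together, a minimal sketch of this approach (assuming an OpenCV 3-style mog2 object; in 2.x the equivalent call is pMOG2->operator()(frame, mask)):
// Keep shadow detection on, then binarize: shadow pixels are marked with a
// gray value (127 by default), so thresholding at 254 keeps only definite
// foreground (255) and drops both background and shadows.
cv::Mat mask;
mog2->apply(frame, mask);
cv::threshold(mask, mask, 254, 255, cv::THRESH_BINARY);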
And sorry for reanimating this...
In my current project, when I call VideoCapture::open(camera device index) and the camera is in use by another program, it shows a Video Source dialog and returns true when I select a device that is already in use.
However, in my previous experimental project, when I called VideoCapture::open(camera device index), it didn't show this dialog.
I want to know what causes the Video Source dialog to show and why the program behaves differently from the experimental project.
This is the source code to the experiment project:
int main (int argc, char *argv[])
{
    //vars
    time_duration td, td1;
    ptime nextFrameTimestamp, currentFrameTimestamp, initialLoopTimestamp, finalLoopTimestamp;
    int delayFound = 0;
    int totalDelay = 0;
    // initialize capture on default source
    VideoCapture capture;
    std::cout << "capture.open(0): " << capture.open(0) << std::endl;
    std::cout << "NOOO" << std::endl;
    namedWindow("video", 1);
    // set framerate to record and capture at
    int framerate = 15;
    // Get the properties from the camera
    double width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    double height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    // print camera frame size
    //cout << "Camera properties\n";
    //cout << "width = " << width << endl << "height = " << height << endl;
    // Create a matrix to keep the retrieved frame
    Mat frame;
    // Create the video writer
    VideoWriter video("capture.avi", 0, framerate, cvSize((int)width, (int)height));
    // initialize initial timestamps
    nextFrameTimestamp = microsec_clock::local_time();
    currentFrameTimestamp = nextFrameTimestamp;
    td = (currentFrameTimestamp - nextFrameTimestamp);
    // start thread to begin capture and populate Mat frame
    boost::thread captureThread(captureFunc, &frame, &capture);
    // loop infinitely
    for(bool q = true; q;)
    {
        if(frame.empty()){ continue; }
        //if(cvWaitKey( 5 ) == 'q'){ q=false; }
        // wait until 1second/framerate time has passed after the previous frame write
        while(td.total_microseconds() < 1000000/framerate){
            //determine current elapsed time
            currentFrameTimestamp = microsec_clock::local_time();
            td = (currentFrameTimestamp - nextFrameTimestamp);
            if(cvWaitKey( 5 ) == 'q'){
                std::cout << "B" << std::endl;
                q = false;
                boost::posix_time::time_duration timeout = boost::posix_time::milliseconds(0);
                captureThread.timed_join(timeout);
                break;
            }
        }
        // determine time at start of write
        initialLoopTimestamp = microsec_clock::local_time();
        // Save frame to video
        video << frame;
        imshow("video", frame);
        //write previous and current frame timestamp to console
        cout << nextFrameTimestamp << " " << currentFrameTimestamp << " ";
        // add 1second/framerate time for next loop pause
        nextFrameTimestamp = nextFrameTimestamp + microsec(1000000/framerate);
        // reset time_duration so while loop engages
        td = (currentFrameTimestamp - nextFrameTimestamp);
        //determine and print out delay in ms, should be less than 1000/FPS
        //occasionally, if delay is larger than said value, correction will occur
        //if delay is consistently larger than said value, then the CPU is not powerful
        //enough to capture/decompress/record/compress that fast
        finalLoopTimestamp = microsec_clock::local_time();
        td1 = (finalLoopTimestamp - initialLoopTimestamp);
        delayFound = td1.total_milliseconds();
        cout << delayFound << endl;
        //output will be in the following format:
        //[TIMESTAMP OF PREVIOUS FRAME] [TIMESTAMP OF NEW FRAME] [TIME DELAY OF WRITING]
        if(!q || cvWaitKey( 5 ) == 'q'){
            std::cout << "C" << std::endl;
            q = false;
            boost::posix_time::time_duration timeout = boost::posix_time::milliseconds(0);
            captureThread.timed_join(timeout);
            break;
        }
    }
    // Exit
    return 0;
}
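One thing worth checking (an assumption on my part, not something the post confirms: on Windows the "Video Source" dialog is typically shown by the VFW capture backend, while DirectShow opens the device silently) is to pass an explicit API preference instead of relying on autodetection, so both projects resolve to the same backend:
cv::VideoCapture cap;
// Try DirectShow first; fall back to VFW, which may pop the source dialog.
if (!cap.open(0, cv::CAP_DSHOW))
    cap.open(0, cv::CAP_VFW);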
I have written the code below to display a video in OpenCV. It compiles fine, but when I run it, the window that is supposed to show the video opens, yet it is too small to actually see whether the video is playing. Everything else seems to work: the width, height, and number of frames are printed on the command line as coded. Does anyone know what the problem is? Check it out.
void info()
{
cout << "This program will accept input video with fixed lengths and produce video textures" << endl;
}
int main(int argc, char *argv[])
{
    info();
    if(argc != 2)
    {
        cout << "Please enter more parameters" << endl;
        return -1;
    }
    const string source = argv[1];
    VideoCapture input_vid(source);
    if(!input_vid.isOpened())
    {
        cout << "Error: Could not find input video file " << source << endl;
        return -1;
    }
    //Acquire size of input video
    Size S = Size((int) input_vid.get(CV_CAP_PROP_FRAME_WIDTH),
                  (int) input_vid.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << "Width: = " << S.width << " Height: = " << S.height
         << " Number of frames: " << input_vid.get(CV_CAP_PROP_FRAME_COUNT) << endl;
    const char* PLAY = "Video player";
    namedWindow(PLAY, CV_WINDOW_AUTOSIZE);
    //imshow(PLAY, 100);
    char c;
    c = (char)cvWaitKey(27);
    //if ( c == 27) break;
    return 0;
}
assuming the video is from a webcam:
capture = CaptureFromCAM( 0 );
SetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);
SetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);
this will fix your problem
another simple tweak could be using CV_WINDOW_NORMAL instead of CV_WINDOW_AUTOSIZE:
namedWindow(PLAY, CV_WINDOW_NORMAL);
which lets you resize the window manually
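For what it's worth, the posted code never actually reads or displays a frame (the imshow call is commented out), so the window has nothing to show. A minimal playback loop, reusing input_vid and PLAY from the question:
Mat frame;
while (input_vid.read(frame))            // returns false at the end of the video
{
    imshow(PLAY, frame);
    if ((char)waitKey(33) == 27) break;  // ~30 fps; Esc quits
}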
Next iteration of my question:
Thank you for your inputs; they have helped me understand a little more about frames and the inputSamples value.
I've made modifications to my source code with this new knowledge, but I still have problems, so I might not have fully understood what you meant.
Here is my OpenFile function; sorry for the name, I'll refactor later, when it works =)
//-----------------------------------------------------------------------------
/*
This function opens a file containing the audio binary data.
*///___________________________________________________________________________
const short* OpenFile(const char* fileName, long& fileSize, WavFormat* wav)
{
    // open the file
    ifstream file;
    file.open((char*)fileName, ios::binary | ios::in);
    if (file.good())
    {
        // Read the WAV's header
        wav = CheckWavHeader(file, wav);
        cout << "chunkID: " << wav->chunkID << '\n';
        cout << "chunkSize: " << wav->chunkSize << '\n';
        cout << "format: " << wav->format << '\n';
        cout << "subChunk1ID: " << wav->subChunk1ID << '\n';
        cout << "subChunk1Size: " << wav->subChunk1Size << '\n';
        cout << "audioFormat: " << wav->audioFormat << '\n'; // audioFormat == 1 means 16-bit PCM
        cout << "numChannels: " << wav->numChannels << '\n';
        cout << "sampleRate: " << wav->sampleRate << '\n';
        cout << "byteRate: " << wav->byteRate << '\n';
        cout << "blockAlign: " << wav->blockAlign << '\n';
        cout << "bitsPerSample: " << wav->bitsPerSample << '\n';
        cout << "subChunk2ID: " << wav->subChunk2ID << '\n';
        cout << "subChunk2Size: " << wav->subChunk2Size << '\n';
        // Get the file's size
        file.seekg(0L, ios::end);
        fileSize = ((long)file.tellg() - DATA_POS);
        file.seekg(DATA_POS, ios::beg); // back to the data
        // Read the data into the buffer
        uint nbSamples = fileSize / sizeof(short);
        short* inputArray = new short[nbSamples];
        file.read((char*)inputArray, fileSize);
        // Close the file and return the data
        file.close();
        return (const short*)inputArray;
    }
    else
    {
        exit(-1);
    }
}
I open the file, check its size, create a short buffer, read the WAV's data into it, and finally return it.
In the main, for now, I have commented out the G711 decoder.
When I run the application, faacEncOpen gives me 2048 for inputSamples (which makes sense, since I have 2 channels in the WAV file and FRAME_LEN is 1024).
So, if I understood correctly, 1 frame == 2048 samples for my application. So for each frame I call faacEncEncode, passing tmpInputBuffer, a buffer of the same size as inputSamples, taken from index inputBuffer[i * inputSamples].
//-----------------------------------------------------------------------------
/*
The main entry point of the application.
*///_____________________________________________________________________________
int main()
{
    // Get the file's data
    WavFormat* wav = new WavFormat;
    long fileSize;
    const short* fileInput = OpenFile("audioTest.wav", fileSize, wav);
    // G711 mu-Law decoder
    //MuLawDecoder* decoder = new MuLawDecoder();
    //short* inputBuffer = decoder->MuLawDecode_shortArray((byte*)fileInput, (int)nbChunk);
    short* inputBuffer = (short*)fileInput;
    // Info for FAAC
    ulong sampleRate = wav->sampleRate;
    uint numChannels = wav->numChannels;
    ulong inputSamples;
    ulong maxOutputBytes;
    // Open the encoder and set the configuration
    faacEncHandle hEncoder = faacEncOpen(sampleRate, numChannels, &inputSamples, &maxOutputBytes);
    faacEncConfigurationPtr faacConfig = faacEncGetCurrentConfiguration(hEncoder);
    faacConfig->inputFormat = FAAC_INPUT_16BIT;
    faacConfig->bitRate = 64000;
    int result = faacEncSetConfiguration(hEncoder, faacConfig);
    /* Input buffer and output buffer */
    byte* outputBuffer = new byte[maxOutputBytes];
    int nbBytesWritten = 0;
    Sink* sink = new Sink();
    uint nbFrame = fileSize / inputSamples;
    int32_t* tmpInputBuffer = new int32_t[inputSamples];
    for (uint i = 0; i < nbFrame; i++)
    {
        strncpy((char*)tmpInputBuffer, (const char*)&inputBuffer[i * inputSamples], inputSamples);
        nbBytesWritten = faacEncEncode(hEncoder, tmpInputBuffer, inputSamples, outputBuffer, maxOutputBytes);
        cout << 100.0 * (float)i / nbFrame << "%\t nbBytesWritten = " << nbBytesWritten << "\n";
        if (nbBytesWritten > 0)
        {
            sink->AddAACStream(outputBuffer, nbBytesWritten);
        }
    }
    sink->WriteToFile("output.aac");
    // Close the AAC encoder
    faacEncClose(hEncoder);
    // Delete all the pointers
    delete sink;
    //delete decoder;
    delete[] fileInput;
    //delete[] inputBuffer;
    delete[] outputBuffer;
    delete[] tmpInputBuffer;
    system("pause");
    return 0;
}
When the output data is dumped into an .aac file (as raw AAC), I use the application mp4muxer.exe to create an .mp4 file so I can listen to the final converted sound. But the sound is not good at all...
I'm wondering if there is something I'm not seeing or don't understand.
Thank you in advance for your useful inputs.
Each call to faacEncEncode encodes inputSamples samples, not just one. Your main loop should read that many samples from the WAV file into the input buffer, then call faacEncEncode once for that buffer, and finally write the output buffer to the AAC file.
It's possible that I've misunderstood what you're doing (if so, it would be useful to know: (1) What's the OpenFile function you're calling, and does it (despite its name) actually read the file as well as opening it? (2) How is inputBuffer set up?) but:
faacEncEncode expects to be given a whole frame's worth of samples. A frame is the number of samples you got passed back in inputSamples when you called faacEncOpen. (You can give it less than a whole frame if you've reached the end of the input, of course.)
So you're getting 460 and 539 bytes for each of two frames -- not for 16 bits in each case. And it looks as if your input-data pointers are actually offset by only one sample each time, so you're handing it badly overlapping frames. (And the wrong number of them; nbChunk is not the number of frames you have.)
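To make the fix concrete, here is a sketch of a corrected encode loop (reusing the names from the question: inputBuffer, inputSamples, hEncoder, outputBuffer, maxOutputBytes, sink). The two key changes: copy whole frames with memcpy (strncpy counts bytes, not samples, and stops at the first zero byte), and pass the real sample count for the final partial frame; calling faacEncEncode with a NULL input at the end flushes the encoder's delay line:
ulong samplesLeft = fileSize / sizeof(short);  // total number of 16-bit samples
const short* readPtr = inputBuffer;
short* frameBuf = new short[inputSamples];     // one frame of FAAC_INPUT_16BIT samples
while (samplesLeft > 0)
{
    ulong n = (samplesLeft < inputSamples) ? samplesLeft : inputSamples;
    memcpy(frameBuf, readPtr, n * sizeof(short));
    int written = faacEncEncode(hEncoder, (int32_t*)frameBuf, n,
                                outputBuffer, maxOutputBytes);
    if (written > 0)
        sink->AddAACStream(outputBuffer, written);
    readPtr += n;
    samplesLeft -= n;
}
// flush the frames still buffered inside the encoder
int written;
while ((written = faacEncEncode(hEncoder, NULL, 0, outputBuffer, maxOutputBytes)) > 0)
    sink->AddAACStream(outputBuffer, written);
delete[] frameBuf;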