Blank frames when explicitly copying pixel values in OpenCV - C++

I have been porting some video processing code to C++ using OpenCV 2.4.3. The following test program closely mimics how my code will read each frame from a video, operate on its contents, and then write new frames to a new video file.
Strangely, the output frames are entirely black when the pixels are set individually, but are written correctly when the entire frame is cloned.
In practice, I'd use the two macros to access and assign desired values, but the sequential scan used in the example shows the idea more clearly.
Does anyone know where I'm going wrong?
test.cpp:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
#define RGB_REF(PR,NC,R,C,CH) (*((PR) + ((3*(NC)*(R)+(C))+(CH))))
#define GRAY_REF(PR,NC,R,C) (*((PR) + (NC)*(R)+(C)))
int main(int argc, char* argv[])
{
    string video_path(argv[1]);
    cerr << "Video path is " + video_path + "\n";
    VideoCapture capture(video_path);
    if ( !capture.isOpened() )
    {
        cerr << "Input file could not be opened\n";
        return 1;
    } else
    {
        string output_path(argv[2]);
        VideoWriter output;
        int ex = (int)capture.get(CV_CAP_PROP_FOURCC);
        Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH),
                      (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));
        output.open(output_path,ex,capture.get(CV_CAP_PROP_FPS),S,true);
        if ( !output.isOpened() )
        {
            cerr << "Output file could not be opened\n";
            return 1;
        }
        unsigned int numFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT);
        unsigned int m = (unsigned int) capture.get(CV_CAP_PROP_FRAME_HEIGHT);
        unsigned int n = (unsigned int) capture.get(CV_CAP_PROP_FRAME_WIDTH);
        unsigned char* im = (unsigned char*) malloc(m*n*3*sizeof(unsigned char));
        unsigned char* bw = (unsigned char*) malloc(m*n*3*sizeof(unsigned char));
        Mat frame(m,n,CV_8UC3,im);
        Mat outputFrame(m,n,CV_8UC3,bw);
        for (size_t i=0; i<numFrames; i++)
        {
            capture >> frame;
            for (size_t x=0;x<(3*m*n);x++)
            {
                bw[x] = im[x];
            }
            output << outputFrame; // blank frames
            // output << frame; // works
            // output << (outputFrame = frame); // works
        }
    }
}

When you query a frame from VideoCapture with capture >> frame;, frame is modified: it gets a new data buffer, so im no longer points to the buffer that frame actually uses.
Try
bw[x] = frame.ptr()[x];
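In other words, a minimal sketch of the corrected inner loop (assuming the captured frame stays a continuous 8-bit BGR image of the expected size, as in the question):

capture >> frame;
const unsigned char* src = frame.ptr();    // the buffer frame actually uses after the read
for (size_t x = 0; x < (size_t)(3 * m * n); x++)
{
    bw[x] = src[x];                        // copy into the buffer wrapped by outputFrame
}
output << outputFrame;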

Related

Capturing video with monochrome camera in aruco opencv

I'm currently trying to use a monochrome camera with the aruco and OpenCV libraries in order to speed up the computation and get better marker detection. The problem I am having is that the monochrome feed is tripled on screen when running the aruco_test program, so the resolution is diminished by two thirds and each marker is detected three times instead of once.
I have seen threads discussing similar problems with monochrome cameras in OpenCV. Some answers suggested cropping the image (which fixes the tripling problem but not the reduced resolution), but it all seems to be caused by the conversion from either BGR2GRAY or GRAY2BGR.
Any help on what exactly causes the images to be tripled, and how to bypass that part in either the aruco or OpenCV source code, would be appreciated.
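For reference, the cropping workaround mentioned above might look roughly like the following. This is only a sketch and assumes the three copies sit side by side, so that one copy spans a third of the frame width; the cause of the duplication itself is still the open question.

// hypothetical de-tripling step, applied to each captured frame
cv::Mat oneCopy = TheInputImage(cv::Rect(0, 0, TheInputImage.cols / 3, TheInputImage.rows)).clone();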
INFO :
Driver Info (not using libv4l2):
Driver name : uvcvideo
Card type : oCam-1MGN-U
Bus info : usb-0000:00:1d.0-1.5
Driver version: 3.13.11
Capabilities : 0x84000001
Video Capture
Streaming
Device Capabilities
Device Caps : 0x04000001
Video Capture
Streaming
Priority: 2
Video input : 0 (Camera 1: ok)
Format Video Capture:
Width/Height : 1280/960
Pixel Format : 'GREY'
Field : None
Bytes per Line: 1280
Size Image : 1228800
Colorspace : Unknown (00000000)
Crop Capability Video Capture:
Bounds : Left 0, Top 0, Width 1280, Height 960
Default : Left 0, Top 0, Width 1280, Height 960
Pixel Aspect: 1/1
Streaming Parameters Video Capture:
Capabilities : timeperframe
Frames per second: 30.000 (30/1)
Read buffers : 0
brightness (int) : min=0 max=127 step=1 default=64 value=64
exposure_absolute (int) : min=1 max=625 step=1 default=39 value=39
Using Aruco 2.0.19 and OpenCV 3.2
Since the pixel format is GREY rather than YUYV, I cannot simply take the Y channel from the camera feed.
code executed :
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include "aruco.h"
#include "cvdrawingutils.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace aruco;
MarkerDetector MDetector;
VideoCapture TheVideoCapturer;
vector< Marker > TheMarkers;
Mat TheInputImage, TheInputImageCopy;
CameraParameters TheCameraParameters;
void cvTackBarEvents(int pos, void *);
pair< double, double > AvrgTime(0, 0); // determines the average time required for detection
int iThresParam1, iThresParam2;
int waitTime = 0;
class CmdLineParser {
    int argc;
    char **argv;
public:
    CmdLineParser(int _argc, char **_argv) : argc(_argc), argv(_argv) {}
    // returns true if the parameter is present on the command line
    bool operator[](string param) {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        return (idx != -1);
    }
    // returns the value that follows the parameter, or defvalue if it is absent
    string operator()(string param, string defvalue = "-1") {
        int idx = -1;
        for (int i = 0; i < argc && idx == -1; i++)
            if (string(argv[i]) == param) idx = i;
        if (idx == -1) return defvalue;
        else return (argv[idx + 1]);
    }
};
cv::Mat resize(const cv::Mat &in,int width){
if (in.size().width<=width) return in;
float yf=float( width)/float(in.size().width);
cv::Mat im2;
cv::resize(in,im2,cv::Size(width,float(in.size().height)*yf));
return im2;
}
int main(int argc, char **argv) {
try {
CmdLineParser cml(argc,argv);
if (argc < 2 || cml["-h"]) {
cerr << "Invalid number of arguments" << endl;
cerr << "Usage: (in.avi|live[:idx_cam=0]) [-c camera_params.yml] [-s marker_size_in_meters] [-d dictionary:ARUCO by default] [-h]" << endl;
cerr<<"\tDictionaries: "; for(auto dict:aruco::Dictionary::getDicTypes()) cerr<<dict<<" ";cerr<<endl;
cerr<<"\t Instead of these, you can directly indicate the path to a file with your own generated dictionary"<<endl;
return false;
}
/////////// PARSE ARGUMENTS
string TheInputVideo = argv[1];
// read camera parameters if passed
if (cml["-c"] ) TheCameraParameters.readFromXMLFile(cml("-c"));
float TheMarkerSize = std::stof(cml("-s","-1"));
//aruco::Dictionary::DICT_TYPES TheDictionary= Dictionary::getTypeFromString( cml("-d","ARUCO") );
/////////// OPEN VIDEO
// read from camera or from file
if (TheInputVideo.find("live") != string::npos) {
int vIdx = 0;
// check if the :idx is here
char cad[100];
if (TheInputVideo.find(":") != string::npos) {
std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
}
cout << "Opening camera index " << vIdx << endl;
TheVideoCapturer.open(vIdx);
waitTime = 10;
}
else TheVideoCapturer.open(TheInputVideo);
// check video is open
if (!TheVideoCapturer.isOpened()) throw std::runtime_error("Could not open video");
///// CONFIGURE DATA
// read first image to get the dimensions
TheVideoCapturer >> TheInputImage;
if (TheCameraParameters.isValid())
TheCameraParameters.resize(TheInputImage.size());
MDetector.setDictionary(cml("-d","ARUCO"));//sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
MDetector.setThresholdParams(7, 7);
MDetector.setThresholdParamRange(2, 0);
// MDetector.setCornerRefinementMethod(aruco::MarkerDetector::SUBPIX);
// gui requirements: the trackbars to change these parameters
iThresParam1 = MDetector.getParams()._thresParam1;
iThresParam2 = MDetector.getParams()._thresParam2;
cv::namedWindow("in");
cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
//go!
char key = 0;
int index = 0;
// capture until press ESC or until the end of the video
do {
TheVideoCapturer.retrieve(TheInputImage);
// copy image
double tick = (double)getTickCount(); // for checking the speed
// Detection of markers in the image passed
TheMarkers= MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
// check the speed by calculating the mean speed of all iterations
AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
AvrgTime.second++;
cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::endl;
// print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
cout << TheMarkers[i]<<endl;
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
}
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid() && TheMarkerSize>0)
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
}
// DONE! Easy, right?
// show input with augmented information and the thresholded image
cv::imshow("in", resize(TheInputImageCopy,1280));
cv::imshow("thres", resize(MDetector.getThresholdedImage(),1280));
key = cv::waitKey(waitTime); // wait for key to be pressed
if(key=='s') waitTime= waitTime==0?10:0;
index++; // number of images captured
} while (key != 27 && (TheVideoCapturer.grab() ));
} catch (std::exception &ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
void cvTackBarEvents(int pos, void *) {
(void)(pos);
if (iThresParam1 < 3) iThresParam1 = 3;
if (iThresParam1 % 2 != 1) iThresParam1++;
if (iThresParam1 < 1) iThresParam1 = 1;
MDetector.setThresholdParams(iThresParam1, iThresParam2);
// recompute
MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++)
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid())
for (unsigned int i = 0; i < TheMarkers.size(); i++)
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
cv::imshow("in", resize(TheInputImageCopy,1280));
cv::imshow("thres", resize(MDetector.getThresholdedImage(),1280));
}

Either segmentation fault 11 or (-215) N >= K

I am trying to create a Bag of Visual Words program, but I am running into an issue. Every time I run the program I either get a segmentation fault: 11 error, or, if I change the dictSize variable, I get error: (-215) N >= K in function kmeans. I have tried resizing the images and using different ones, but nothing seems to help. Here is what I have so far:
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <stdio.h>
#include <dirent.h>
#include <string.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv) {
//=================================== LEARN ===================================
struct dirent *de = NULL;
DIR *d = NULL;
d = opendir(argv[1]);
if(d == NULL)
{
perror("Couldn't open directory");
return(2);
}
Mat input;
vector<KeyPoint> keypoints;
Mat descriptor;
Mat featuresUnclustered;
Ptr<DescriptorExtractor> detector = xfeatures2d::SIFT::create();
while((de = readdir(d))){
if ((strcmp(de->d_name,".") != 0) && (strcmp(de->d_name,"..") != 0) && (strcmp(de->d_name,".DS_Store") != 0)) {
char fullPath[] = "./";
strcat(fullPath, argv[1]);
strcat(fullPath, de->d_name);
printf("Current File: %s\n",fullPath);
input = imread(fullPath,CV_LOAD_IMAGE_GRAYSCALE);
cout << "Img size => x: " << input.size().width << ", y: " << input.size().height << endl;
// If the incoming frame is too big, resize it
if (input.size().width > 3000) {
double ratio = (3000.0)/(double)input.size().width;
resize(input, input, cvSize(0, 0), ratio, ratio);
cout << "New size => x: " << input.size().width << ", y: " << input.size().height << endl;
}
detector->detect(input, keypoints);
detector->compute(input, keypoints, descriptor);
featuresUnclustered.push_back(descriptor);
}
}
closedir(d);
int dictSize = 200;
TermCriteria tc(CV_TERMCRIT_ITER,100,0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictSize,tc,retries,flags);
Mat dictionary = bowTrainer.cluster(featuresUnclustered);
FileStorage fs("dict.yml",FileStorage::WRITE);
fs << "vocabulary" << dictionary;
fs.release();
return 0;
}
char fullPath[] = "./";
strcat(fullPath, argv[1]);
strcat(fullPath, de->d_name);
That part of your code is a serious bug (undefined behavior, but seg fault most likely).
strcat does not allocate any extra space for the concatenation. It only overwrites whatever follows the terminating null in the first string.
Your fullPath is allocated with just enough space for the initial 2 characters plus terminating null. Whatever follows that terminating null may be memory belonging to some other part of your program.
If you know the maximum file path length for your OS, you can use the crude correction of putting that max plus 2 as a number (or named constant) between the [] in the declaration of fullPath.
The less crude correction is to compute the required length of the string you want to build and malloc that much space (be sure to count the terminating null), then combine the three strings there.
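For example, since the surrounding code is already C++, one simple alternative to the malloc route (a sketch, keeping the assumption from the question that argv[1] already ends with a path separator) is to build the path with std::string, which grows as needed and cannot overflow:

std::string fullPath = "./";
fullPath += argv[1];                                  // directory passed on the command line
fullPath += de->d_name;                               // current directory entry
printf("Current File: %s\n", fullPath.c_str());
input = imread(fullPath, CV_LOAD_IMAGE_GRAYSCALE);    // cv::imread accepts a std::string path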

Reading and Writing to BMP file

I'm new to C++ and programming. I have the following flawed code to read from a BMP file and write to another BMP file. I did not want to use any external libraries.
I have an 800 KB, 24-bit BMP file, mybmp.bmp. I will try to upload it to Dropbox.
#include <iostream>
#include <conio.h>
#include <fstream>
#include<stdio.h>
#include<stdlib.h>
using namespace std;
unsigned char* editing(char* filename)
{
int i;
int j;
FILE* mybmpfilespointer;
mybmpfilespointer = fopen(filename, "rb");
unsigned char headerinfo[54];
fread(headerinfo, sizeof(unsigned char), 54, mybmpfilespointer); // read the 54-byte header size_t fread ( void * ptr, size_t size, size_t count, FILE * stream );
// extract image height and width from header
int width = *(int*)&headerinfo[18];
int height = *(int*)&headerinfo[22];
int size = 3 * width * height;
unsigned char* imagesdata = new unsigned char[size]; // allocate 3 bytes per pixel
fread(imagesdata, sizeof(unsigned char), size, mybmpfilespointer); // read the rest of the imagesdata at once
// display image height and width from header
cout << " width:" << width << endl;
cout << " height:" << height << endl;
ofstream arrayfile("bmpofstream.bmp"); // File Creation
for(int a = 0; a < 53; a++) //bgr to rgb
{
arrayfile << headerinfo[a];
}
for(int k=0; k<size; k++)
{
arrayfile<<imagesdata[k]<<endl; //Outputs array to file
}
arrayfile.close();
delete[] mybmpfilespointer;
delete[] imagesdata;
fclose(mybmpfilespointer);
return imagesdata;
return headerinfo;
}
int main()
{
FILE* mybmpfilespointer = fopen("mybmp.bmp", "rb");
if (mybmpfilespointer)
{
editing("mybmp.bmp");
}
else
{
cout << "Cant Read File";
}
}
As you can see, I read from mybmp.bmp, which is 819680 bytes, and write it to bmpofstream.bmp as it is.
But somehow the resulting file is about three times the size of mybmp, around 2460826 bytes.
I read the header from the mybmp file as headerinfo, and the data from mybmp as imagesdata.
When I write these arrays to bmpofstream.bmp, the result is a messed-up BMP file.
1) I'm guessing the increase in file size is related to reading individual pixels and writing them three times or something, but I couldn't figure it out. Why do you think this happens?
2) Once I figure out how to read and write this file as it is, I want to modify it, so I might as well ask this now:
I want to modify this image by increasing the value of each pixel by 50 (which I expect to result in a darker image). Can I do this directly as:
for(j = 0; j < size; j++)
{
imagesdata[j]=imagesdata[j]+50;
}
thank you.
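(A side note on the adjustment loop in question 2: imagesdata holds unsigned char values, so adding 50 wraps around for any pixel already above 205. A clamped variant of the same loop, sketched on the question's own buffer, would be:)

for (int j = 0; j < size; j++)
{
    int v = imagesdata[j] + 50;                          // promote to int so the sum cannot wrap
    imagesdata[j] = (unsigned char)(v > 255 ? 255 : v);  // saturate at the maximum byte value
}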
I suggest taking a look at an existing library; even if you want to code it yourself, you may learn a lot. See for example the libbmp source code:
https://code.google.com/p/libbmp/
Try this:
#include <iostream>
#include <conio.h>
#include <fstream>
#include <vector>
#include <iterator>
void editing( char* filename )
{
std::ifstream input( filename, std::ios::binary );
if( !input.is_open() )
{
std::cout << "Can`t open file" << std::endl;
return;
}
// copies all data into buffer
std::vector<char> buffer( ( std::istreambuf_iterator<char>( input ) ), ( std::istreambuf_iterator<char>() ) );
input.close();
std::vector<char> headerinfo;
{
auto it = std::next( buffer.begin(), 54 );
std::move( buffer.begin(), it, std::back_inserter( headerinfo ) );
buffer.erase( buffer.begin(), it );
}
// extract image height and width from header
int width = *reinterpret_cast<int*>( (char*)headerinfo.data() + 18 );
int height = *reinterpret_cast<int*>( (char*)headerinfo.data() + 22 );
int size = 3 * width * height;
std::vector<char> imagesdata;
{
auto it = std::next( buffer.begin(), size );
std::move( buffer.begin(), it, std::back_inserter( imagesdata ) );
buffer.erase( buffer.begin(), it );
}
// display image height and width from header
std::cout << " width:" << width << std::endl;
std::cout << " height:" << height << std::endl;
// paste your code here
// ...
std::ofstream arrayfile( "bmpofstream.bmp", std::ios::binary ); // File Creation (binary mode, so bytes are written unmodified)
std::ostream_iterator<char> output_iterator( arrayfile );
std::copy( headerinfo.begin(), headerinfo.end(), output_iterator ); // write header to file
std::copy( imagesdata.begin(), imagesdata.end(), output_iterator ); // write image data to file
}
int main(int argc, char**argv)
{
editing( argv[1] );
}

Cannot access my webcam - OpenCV, Ubuntu

Here is my code:
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
const int KEY_ENTER = 10;
const int KEY_ESC = 27;
const int KEY_1 = 49;
const int KEY_2 = 50;
const int KEY_3 = 51;
const int KEY_4 = 52;
const int KEY_5 = 53;
const int KEY_6 = 54;
const int DELAY = 30;
const string WIN_NAME = "Camera View";
const string NAME[6] = {"me", "serk", "prot", "vitkt", "st", "tara"};
struct pg
{
string name;
int cnt;
pg(): name(""), cnt (0) {};
pg(string s, int c) : name(s) , cnt(c) {};
};
pg crew[6];
int main()
{
for(int i = 0; i < 6; ++i)
crew[i] = pg(NAME[i], 0);
cv::VideoCapture cam;
cam.open(0);
cv::Mat frame;
pg cur = crew[0];
int c = 0;
for(;cam.isOpened();)
{
try
{
cam >> frame;
cv::imshow(WIN_NAME, frame);
int key = cv::waitKey(DELAY);
cur = (key >= KEY_1 && key <= KEY_6) ? crew[key - KEY_1] : cur;
if(KEY_ENTER == key)
cv::imwrite(cv::format("%s%d.jpg", cur.name.c_str(), cur.cnt++), frame);
if(KEY_ESC == key)
break;
} catch (cv::Exception e)
{
cout << e.err << endl;
}
}
cam.release();
return 0;
}
but I cannot capture video from the camera. =(
I've got Ubuntu 12.04 on my PC,
and I followed every instruction in the Linux install instructions exactly.
I googled my problem and installed additional dependencies,
these:
python-opencv
libhighgui2.3
libhighgui-dev
ffmpeg
libgstreamer0.10-0
libv4l-0
libv4l-dev
libxine2
libunicap2
libdc1394-22
and many others I could find,
but it still doesn't work.
It's ridiculous, but this code works on my laptop with the same distribution of Ubuntu.
I have no compilation errors.
In a terminal,
gstreamer-properties
opens that camera.
Does someone know what to do? Please help me.
I've also noticed that it doesn't even load pictures from a file.
Code example:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace std;
int main()
{
system("clear");
cv::Mat picture;
picture = cv::imread("boobies.jpg");
cout << picture.rows << endl;
cv::imshow("Smile", picture);
char ch;
cin >> ch;
cv::destroyWindow("Smile");
return 0;
}
It doesn't load the picture from the project folder.
You forgot to initialize cam. You must use the constructor that takes an int as parameter.
// the constructor that opens video file
VideoCapture(const string& filename);
// the constructor that starts streaming from the camera
VideoCapture(int device);
Do it like:
cv::VideoCapture cam(0);
cam.open(0);
Also, you could use cvCaptureFromCAM:
CvCapture *capture;
capture = cvCaptureFromCAM( 0 );
This allocates and initializes your capture instance.
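A minimal usage sketch for that C API variant (assuming the legacy highgui C interface that ships with OpenCV 2.x; the window name is arbitrary):

CvCapture *capture = cvCaptureFromCAM( 0 );
if ( capture )
{
    IplImage *raw = cvQueryFrame( capture );   // grab and decode one frame; owned by capture, do not free it
    cv::Mat frame = cv::cvarrToMat( raw );     // wrap as cv::Mat without copying
    cv::imshow( "Camera View", frame );
    cv::waitKey( 0 );
    cvReleaseCapture( &capture );              // release the device
}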
If you are on OpenCV 2.4.6, it has been hotfixed: http://opencv.org/hot-fix-for-opencv-2-4-6.html

Decoding with FFMPEG on Visual Studio 2010

I just started using FFMPEG with C++ and am trying to code an audio decoder, then write the decoded audio into a file.
However, I'm not sure which data to write to the output file. As far as I know from looking at the sample code, it seems to be AVFrame->data[0].
But when I try to print it to the console, I get some random numbers that are different each time I run the program. And when I try to write this AVFrame->data[0] into a file, I keep getting an error.
So my question is: how can I write the decoded audio after I call the function avcodec_decode_audio4?
Below I have attached my code; I pass the argument "C:\02.mp3", which is the path to a valid mp3 file on my PC.
Thank you for your help.
// TestFFMPEG.cpp : Audio Decoder
//
#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <sstream>
extern "C" {
#include <avcodec.h>
#include <avformat.h>
#include <swscale.h>
}
using namespace std;
int main(int argc, char* argv[])
{
int audioStream = -1;
AVCodec *aCodec;
AVPacket avPkt;
AVFrame *decode_frame = avcodec_alloc_frame();
AVCodecContext *aCodecCtxt;
AVFormatContext *pFormatCtxt = NULL;
if(argc != 2) { // Checking whether there is enough argument
return -1;
}
av_register_all(); //Initialize CODEC
avformat_network_init();
av_init_packet (&avPkt);
if (avformat_open_input (&pFormatCtxt, argv[1],NULL,NULL)!= 0 ){ //Opening File
return -2;
}
if(avformat_find_stream_info (pFormatCtxt,NULL) < 0){ //Get Streams Info
return -3;
}
AVStream *stream = NULL;
//av_read_play (pFormatCtxt); //open streams
for (int i = 0; i < pFormatCtxt->nb_streams ; i++) { //Find Audio Stream
if (pFormatCtxt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
audioStream =i;
}
}
aCodecCtxt = pFormatCtxt ->streams [audioStream]->codec; // opening decoder
aCodec = avcodec_find_decoder( pFormatCtxt->streams [audioStream] ->codec->codec_id);
if (!aCodec) {
return -8;
}
if (avcodec_open2(aCodecCtxt,aCodec,NULL)!=0) {
return -9;
}
int cnt = 0;
while(av_read_frame(pFormatCtxt,&avPkt) >= 0 ){
if (avPkt.stream_index == audioStream){
int check = 0;
int result = avcodec_decode_audio4 (aCodecCtxt,decode_frame,&check, &avPkt);
cout << "Decoded : "<< (int) decode_frame->data[0] <<", "<< "Check : " << check << ", Format :" << decode_frame->format <<" " << decode_frame->linesize[0]<< " "<<cnt <<endl;
}
av_free_packet(&avPkt);
cnt++;
}
return aCodec ->id;
}
You're doing it right.
The decoded data is at the pointer decode_frame->data[0]. Its size in bytes is decode_frame->linesize[0], and the number of audio samples is decode_frame->nb_samples.
Thus, you can copy the audio data into your own buffer as follows:
memcpy(OutputBuffer, decode_frame->data[0], decode_frame->linesize[0]);
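Or, since the goal here is writing the decoded audio to a file, a minimal sketch that dumps each decoded frame as raw PCM (an assumption: the codec produces a packed/interleaved sample format; planar formats keep each channel in a separate data[i] plane and would need interleaving first):

FILE *out = fopen("decoded.pcm", "wb");   // hypothetical output path for the raw samples, opened once before the loop
// ... inside the read loop, right after avcodec_decode_audio4:
if (check)                                // got_frame flag: only write complete decoded frames
    fwrite(decode_frame->data[0], 1, decode_frame->linesize[0], out);
// ... after the loop:
fclose(out);

The resulting raw file can then be imported (for example into Audacity) using the sample rate, channel count, and sample format reported by aCodecCtxt.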
You can try using ffms2, which will directly give you decoded audio samples. It internally uses ffmpeg/libav, so you don't have to worry about the decoding stuff.
https://code.google.com/p/ffmpegsource/