FFMPEG: Cannot free AVPacket when decoding an H264 stream? - c++

I'm using FFMPEG to decode H264 stream, my code is below
AVFormatContext *pFormatCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVFrame *pFrame = NULL;
AVPacket packet;
packet.data = NULL;
pFormatCtx = avformat_alloc_context();
avformat_open_input(&pFormatCtx, videoStreamPath, NULL, NULL);
liveHeader.pCodecCtx = pFormatCtx->streams[videoStreamIndex]->codec;
int bytesDecoded = 0;
int frameFinished = 0;
while (true)
{
while (packet.size > 0)
{
// Decode the next chunk of data
bytesDecoded = avcodec_decode_video2(pCodecCtx, pFrame,
&frameFinished, &packet);
// Was there an error?
if (bytesDecoded < 0)
{
printf(strErr, "Error while decoding frame\n");
commonGlobal->WriteRuntimeRecLogs(strErr);
return RS_NOT_OK;
}
packet.size -= bytesDecoded;
packet.data += bytesDecoded;
if (frameFinished)
{
//av_free_packet(&packet); //(free 1)
return RS_OK;
}
// Did we finish the current frame? Then we can return
}
do
{
try
{
int ret = av_read_frame(pFormatCtx, &packet);
if (ret < 0)
{
char strErr[STR_LENGTH_256];
if (ret == AVERROR_EOF || (pFormatCtx->pb && pFormatCtx->pb->eof_reached))
{
sprintf(strErr, "Error end of file line %d", __LINE__);
}
if (pFormatCtx->pb && pFormatCtx->pb->error)
{
sprintf(strErr, "Error end of file line %d", __LINE__);
}
packet.data = NULL;
return RS_NOT_OK;
}
}
catch (...)
{
packet.data = NULL;
return RS_NOT_OK;
}
} while (packet.stream_index != videoStreamIndex);
}
//av_free_packet(&packet); //(free 2)
The problem is that I don't know how to free the packet's memory correctly.
I have tried to free the packet's data by calling av_free_packet(&packet); at one of the two marked places, (free 1) and (free 2). In both cases the application crashed with the message "Heap Corruption...".
If I do not free the packet, a memory leak occurs.
Note that the above code successfully decodes the H264 stream; the problems are only the memory leak, and the crash when I try to free the packet.
Can someone show me the problems in my code?
Many thanks,
T&T

av_free_packet will free your packet's data — the same pointer that was allocated inside av_read_frame. But you changed that pointer with packet.data += bytesDecoded; => crash.
Several advices:
No need to call av_init_packet if the first use of your packet is av_read_frame (it is done inside that function). But if you keep your code as-is, you do need it in order to initialize packet.size to 0 (it is tested, but never initialized, on the first loop iteration).
Call av_free_packet each time you are done with the packet data, only when the decode is successful. In your code, it means you must call it after avcodec_decode_video2, even if the frame is not finished.
Once your packet is decoded (i.e. avcodec_decode_video2 is ok, no matter if frameFinished is true or false), you can free it. There is no need to keep it and advance the data pointer. The process is "read packet, decode it, free it; read next packet, decode it, free it." (Note that this does not apply to audio packets.)
I suggest simplify your main loop by something like (read first, decode after):
while(true)
{
// Step 1: Read packet
while(true)
{
av_read_frame(pFormatCtx, &packet);
// todo: Error handling
if(packet.stream_index != videoStreamIndex)
{
av_free_packet(&packet);
}
else
{
break;
}
}
// Step 2/3: Decode and free
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
av_free_packet(&packet);
// todo: Error handling and checking frameFinished if needed
// Of course, if you need to use the packet now, move the av_free_packet() after this
}

You should initialize your packet before usage as following:
AVPacket packet;
av_init_packet(&packet);
Also, you're not freeing your AVFormatContext instance:
avformat_free_context(pFormatCtx);

Related

Get video from webcam using FFmpeg Libav

I am trying to record webcam video using FFmpeg C libraries (libav), on a Mac computer. I made changes to the transcode.c example so that it opens a device instead of a file. However, for some reason the code only receives a single packet and then closes.
static int open_input_source(const char *dev_name) {
int ret;
unsigned int i;
AVDictionary *p_av_options = NULL;
AVInputFormat *p_av_input_format = av_find_input_format("avfoundation");
av_dict_set(&p_av_options, "framerate", "30", 0);
ifmt_ctx = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, dev_name, p_av_input_format, &p_av_options) < 0)) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i];
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx;
if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(dec);
if (!codec_ctx) {
av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
return AVERROR(ENOMEM);
}
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
"for stream #%u\n", i);
return ret;
}
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
/* Open decoder */
ret = avcodec_open2(codec_ctx, dec, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
stream_ctx[i].dec_ctx = codec_ctx;
stream_ctx[i].dec_frame = av_frame_alloc();
if (!stream_ctx[i].dec_frame)
return AVERROR(ENOMEM);
}
av_dump_format(ifmt_ctx, 0, dev_name, 0);
return 0;
}
I have looked for other code examples but they are all deprecated and no longer compile in updated FFmpeg.
Is there some missing setting in my open_input_source function? Alternatively, is the problem that I used the transcoding example as my basis? Should I try to start from some other example?
In general, is there is a C source code reference which fulfills my requirements?
Thanks
This is a pretty fleshed out example that might meet your requirements:
https://github.com/argvk/ffmpeg-examples/blob/master/dshow_capture_video.c
I don't think you need nearly as much code as is included there, but you might be able to just update line 260 with how long you want it to run (that example is 300 frames) and line 83 to open your webcam (it sounds like you've already successfully done this with ret = avformat_open_input(&ifmt_ctx, dev_name, p_av_input_format, &p_av_options) in your code).
There are lots of other options there which you might want to remove, keep, or change depending on the details which are not provided here. Unfortunately I don't have a simplified code sample I'm able to share on here, but this dshow example is doing everything I expect.
Hope it helps some.

ffmpeg memory leak in the avcodec_open2 method

I've developed an application which handles live video stream. The problem is that it should run as a service and over time I am noticing some memory increase. When I check the application with valgrind - it did not find any leak related issues.
So I've checked it with google profile tools. This is the result (subtracting one of the first dumps from the latest) after an approximately 6-hour run:
30.0 35.7% 35.7% 30.0 35.7% av_malloc
28.9 34.4% 70.2% 28.9 34.4% av_reallocp
24.5 29.2% 99.4% 24.5 29.2% x264_malloc
When I check the memory on the graph I see, that these allocations are related to avcodec_open2. The client code is:
` g_EncoderMutex.lock();
ffmpeg_encoder_start(OutFileName.c_str(), AV_CODEC_ID_H264, m_FPS, width, height);
for (pts = 0; pts < VideoImages.size(); pts++) {
m_frame->pts = pts;
ffmpeg_encoder_encode_frame(VideoImages[pts].RGBimage[0]);
}
ffmpeg_encoder_finish();
g_EncoderMutex.unlock()
The ffmpeg_encoder_start method is:
void VideoEncoder::ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height)
{
int ret;
m_FPS=fps;
AVOutputFormat * fmt = av_guess_format(filename, NULL, NULL);
m_oc = NULL;
avformat_alloc_output_context2(&m_oc, NULL, NULL, filename);
m_stream = avformat_new_stream(m_oc, 0);
AVCodec *codec=NULL;
codec = avcodec_find_encoder(codec_id);
if (!codec)
{
fprintf(stderr, "Codec not found\n");
return; //-1
}
m_c=m_stream->codec;
avcodec_get_context_defaults3(m_c, codec);
m_c->bit_rate = 400000;
m_c->width = width;
m_c->height = height;
m_c->time_base.num = 1;
m_c->time_base.den = m_FPS;
m_c->gop_size = 10;
m_c->max_b_frames = 1;
m_c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(m_c->priv_data, "preset", "ultrafast", 0);
if (m_oc->oformat->flags & AVFMT_GLOBALHEADER)
m_c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2( m_c, codec, NULL );
m_stream->time_base=(AVRational){1, m_FPS};
if (avio_open(&m_oc->pb, filename, AVIO_FLAG_WRITE) < 0)
{
printf( "Could not open '%s'\n", filename);
exit(1);
}
avformat_write_header(m_oc, NULL);
m_frame = av_frame_alloc();
if (!m_frame) {
printf( "Could not allocate video frame\n");
exit(1);
}
m_frame->format = m_c->pix_fmt;
m_frame->width = m_c->width;
m_frame->height = m_c->height;
ret = av_image_alloc(m_frame->data, m_frame->linesize, m_c->width, m_c->height, m_c->pix_fmt, 32);
if (ret < 0) {
printf("Could not allocate raw picture buffer\n");
exit(1);
}
}
The ffmpeg_encoder_encode_frame is:
void VideoEncoder::ffmpeg_encoder_encode_frame(uint8_t *rgb)
{
int ret, got_output;
ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
av_init_packet(&m_pkt);
m_pkt.data = NULL;
m_pkt.size = 0;
ret = avcodec_encode_video2(m_c, &m_pkt, m_frame, &got_output);
if (ret < 0) {
printf("Error encoding frame\n");
exit(1);
}
if (got_output)
{
av_packet_rescale_ts(&m_pkt,
(AVRational){1, m_FPS}, m_stream->time_base);
m_pkt.stream_index = m_stream->index;
int ret = av_interleaved_write_frame(m_oc, &m_pkt);
av_packet_unref(&m_pkt);
}
}
ffmpeg_encoder_finish code is:
void VideoEncoder::ffmpeg_encoder_finish(void)
{
int got_output, ret;
do {
ret = avcodec_encode_video2(m_c, &m_pkt, NULL, &got_output);
if (ret < 0) {
printf( "Error encoding frame\n");
exit(1);
}
if (got_output) {
av_packet_rescale_ts(&m_pkt,
(AVRational){1, m_FPS}, m_stream->time_base);
m_pkt.stream_index = m_stream->index;
int ret = av_interleaved_write_frame(m_oc, &m_pkt);
av_packet_unref(&m_pkt);
}
} while (got_output);
av_write_trailer(m_oc);
avio_closep(&m_oc->pb);
avformat_free_context(m_oc);
av_freep(&m_frame->data[0]);
av_frame_free(&m_frame);
av_packet_unref(&m_pkt);
sws_freeContext(m_sws_context);
}
This code runs multiple times in the loop.
So my question is: what am I doing wrong? Maybe ffmpeg is using some kind of internal buffering? If so, how do I disable it? Such an increase in memory usage is completely unacceptable.
You didn't close encoder context. Add avcodec_close(m_c) to ffmpeg_encoder_finish().
See ffmpeg.org
User is required to call avcodec_close() and avformat_free_context() to clean up the allocation by avformat_new_stream().
Plus I don't see how m_c is allocated. Usually it is allocated with avcodec_alloc_context and must be deallocated with av_free (after closing of course).
Don't use valgrind to check memory leaks for your own projects, use sanitizers, with these you can pin point the source of the leak. Check this out: Multi-Threaded Video Decoder Leaks Memory
Hope that helps.
It's sufficient to call 'avcodec_free_context(m_c)', this procedure calls 'avcodec_close' and also de-allocates 'extradata'(if it's was allocated) and 'subtitle_header' (if it was allocated).

ffmpeg hangs in avcodec_encode_video2

I encode a video stream using ffmpeg. IDE: Qt Creator. OS: Windows 7. Compiler: MinGW. Codec: H264.
There is a function to which I pass an input frame, and which must return an encoded byte array. Inside this function it hangs when calling avcodec_encode_video2 — not on the first call, but at a random one.
That is, encoding hangs on an arbitrary frame (anywhere from the 100th to the 10000th).
QByteArray VideoEncoder::createFrameVideoFromImage(QImage picture)
{
AVFrame* frame_source = av_frame_alloc();
avpicture_fill((AVPicture*)frame_source, picture.bits(), AV_PIX_FMT_RGB24, this->width_frame, this->height_frame);
AVFrame* frame_dst = av_frame_alloc();
avpicture_fill((AVPicture*)frame_dst, (uint8_t*)this->inbuffer, AV_PIX_FMT_YUV420P, this->width_frame, this->height_frame);
sws_scale(this->convert_rgb_yuv, frame_source->data, frame_source->linesize,
0, this->ctx_codec_in->height, frame_dst->data, frame_dst->linesize);
AVPacket packet;
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
packet.pts = packet.dts = AV_NOPTS_VALUE;
int nOutputSize = 0;
if (avcodec_encode_video2(this->ctx_codec_in, &packet, frame_dst, &nOutputSize) < 0)
{
qDebug() << "VideoEncoder error";
}
this->traffic += packet.size;
QByteArray data_frame = QByteArray((char*)packet.data, packet.size);
av_frame_free(&frame_source);
av_frame_free(&frame_dst);
av_free_packet(&packet);
return data_frame;
}
Thank you in advance!

Problems converting .flv to mp3 using FFmpeg SDK

I'm using the FFmpeg SDK to programmatically convert videos to mp3.
I read the audio frames of the video this way:
while(av_read_frame(pFromCtx, &pkt) >= 0)
{
if(pkt.stream_index == audioStreamIndex)
{
avcodec_get_frame_defaults(frame);
got_frame = 0;
ret = avcodec_decode_audio4(pCodecCtx, frame, &got_frame, &pkt);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding audio frame.\n");
continue;
}
if(got_frame)
{
// Write the decoded audio frame
write_audio_frame(pToCtx, pToCtx->streams[pToCtx->nb_streams-1], frame);
}
}
av_free_packet(&pkt);
}
Decoding the audio from the input video file works fine. The problem occurs when I try to encode a mp3 frame:
static void write_audio_frame(AVFormatContext *oc, AVStream *st, AVFrame *frame)
{
AVCodecContext *enc = st->codec;
AVPacket pkt;
int got_packet = 0;
int ret = 0;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_audio2(enc, &pkt, frame, &got_packet);
if (ret < 0) {
// PROBLEM
fprintf(stderr, "Error encoding audio frame. \n");
exit(1);
}
}
I get the following console output:
[libmp3lame] inadequate AVFrame plane padding
This only happens with .flv files; the code works fine for .mp4 files. Any clue what the error message means?
Thanks
The source code containing the error message is here: http://ffmpeg.org/doxygen/trunk/libmp3lame_8c-source.html. The relevant source says:
if (frame->linesize[0] < 4 * FFALIGN(frame->nb_samples, 8)) {
av_log(avctx, AV_LOG_ERROR, "inadequate AVFrame plane padding\n");
return AVERROR(EINVAL);
}
FFALIGN is defined as
#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))

How do I dump the buffer when encoding H264 with FFMPEG?

I'm using a c++ library to write images captured from a webcam to an libx264 encoded mp4 file.
The encoding is working properly but when it starts it writes 40 frames to the buffer. When I close the file these frames aren't flushed so about 6 seconds of video are left unwritten (cam is about 6fps).
So i'm calling:
out_size = libffmpeg::avcodec_encode_video( codecContext, data->VideoOutputBuffer,data->VideoOutputBufferSize, data->VideoFrame );
// if zero size, it means the image was buffered
if ( out_size > 0 )
{
//... write to file
}
I can't see a way of accessing the images that are left in the buffer. Any ideas?
I've got this working using the following code to flush the buffer. Seems that I was searching for the wrong term - should have been "delayed frames"...
void VideoFileWriter::Flush(void)
{
if ( data != nullptr )
{
int out_size = 0;
int ret = 0;
libffmpeg::AVCodecContext* c = data->VideoStream->codec;
/* get the delayed frames */
while (1) {
libffmpeg::AVPacket packet;
libffmpeg::av_init_packet(&packet);
out_size = libffmpeg::avcodec_encode_video(c, data->VideoOutputBuffer, data->VideoOutputBufferSize, NULL);
if (out_size < 0) {
//fprintf(stderr, "Error encoding delayed frame %d\n", out_size);
break;
}
if (out_size == 0) {
break;
}
if (c->coded_frame->pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(c->coded_frame->pts,
c->time_base,
data->VideoStream->time_base);
//fprintf(stderr, "Video Frame PTS: %d\n", (int)packet.pts);
} else {
//fprintf(stderr, "Video Frame PTS: not set\n");
}
if (c->coded_frame->key_frame) {
packet.flags |= AV_PKT_FLAG_KEY;
}
packet.stream_index = data->VideoStream->index;
packet.data = data->VideoOutputBuffer;
packet.size = out_size;
ret = libffmpeg::av_interleaved_write_frame( data->FormatContext, &packet );
if (ret != 0) {
//fprintf(stderr, "Error writing delayed frame %d\n", ret);
break;
}
}
libffmpeg::avcodec_flush_buffers(data->VideoStream->codec);
}
}
Here is a tutorial regarding ffmpeg with avcodec, stating that avcodec uses some internal buffers which need to be flushed. There is also some code showing how flushing of these buffers is done ("Flushing our buffers").