MP4 Created Using FFmpeg API Can't Be Played in Media Players - c++

I've been struggling with this issue for days. There are similar issues posted here and around the web, but none of the solutions seem to work for me; perhaps they are outdated.
Here is the current iteration of code I'm using to generate the MP4 file.
It generates a simple two-second .mp4 file that fails to play in any player I've tried. If I run that .mp4 file back through the FFmpeg command line, it generates a perfectly playable movie, so the data is there.
Also, if you modify the output file name in this code from .mp4 to .avi, the code generates a playable AVI file. So whatever the problem is, it's tied to the H.264 format.
I'm sure I'm missing something simple, but for the life of me, I can't figure out what that is.
Any help would be greatly appreciated!
Here is a link to the VC++ project: MovieMaker.zip
MovieMaker.h
#pragma once
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
}
class FMovieMaker
{
public:
~FMovieMaker();
bool Initialize(const char* FileName, int Width = 1920, int Height = 1080, int FPS = 30, int BitRate = 2000);
bool RecordFrame(uint8_t* BGRAData);
bool Finalize();
bool IsInitialized() const { return bInitialized; }
int GetWidth() const { return CodecContext ? CodecContext->width : 0; }
int GetHeight() const { return CodecContext ? CodecContext->height : 0; }
private:
bool EncodeFrame(bool bFinalize);
void Log(const char* fmt, ...);
AVOutputFormat* OutputFormat = nullptr;
AVFormatContext* FormatContext = nullptr;
AVCodecContext* CodecContext = nullptr;
AVFrame* Frame = nullptr;
SwsContext* ColorConverter = nullptr;
int64_t RecordedFrames = 0;
bool bInitialized = false;
};
MovieMaker.cpp
#include "MovieMaker.h"
FMovieMaker::~FMovieMaker()
{
if (IsInitialized())
Finalize();
}
bool FMovieMaker::Initialize(const char* FileName, int Width /*= 1920*/, int Height /*= 1080*/, int FPS /*= 30*/, int BitRate /*= 2000*/)
{
OutputFormat = av_guess_format(nullptr, FileName, nullptr);
if (!OutputFormat)
{
Log("Couldn't guess the output format from the filename: %s", FileName);
return false;
}
AVCodecID CodecID = OutputFormat->video_codec;
if (CodecID == AV_CODEC_ID_NONE)
{
Log("Could not determine a codec to use");
return false;
}
/* allocate the output media context */
int ErrorCode = avformat_alloc_output_context2(&FormatContext, OutputFormat, nullptr, FileName);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to allocate format context: %s", Error);
return false;
}
else if (!FormatContext)
{
Log("Failed to get format from filename: %s", FileName);
return false;
}
/* find the video encoder */
const AVCodec* Codec = avcodec_find_encoder(CodecID);
if (!Codec)
{
Log("Codec '%d' not found", CodecID);
return false;
}
/* create the video stream */
AVStream* Stream = avformat_new_stream(FormatContext, Codec);
if (!Stream)
{
Log("Failed to allocate stream");
return false;
}
/* create the codec context */
CodecContext = avcodec_alloc_context3(Codec);
if (!CodecContext)
{
Log("Could not allocate video codec context");
return false;
}
Stream->codecpar->codec_id = OutputFormat->video_codec;
Stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
Stream->codecpar->width = Width;
Stream->codecpar->height = Height;
Stream->codecpar->format = AV_PIX_FMT_YUV420P;
Stream->codecpar->bit_rate = (int64_t)BitRate * 1000;
avcodec_parameters_to_context(CodecContext, Stream->codecpar);
CodecContext->time_base = { 1, FPS };
CodecContext->max_b_frames = 2;
CodecContext->gop_size = 12;
CodecContext->framerate = { FPS, 1 };
if (Stream->codecpar->codec_id == AV_CODEC_ID_H264)
av_opt_set(CodecContext, "preset", "medium", 0);
else if (Stream->codecpar->codec_id == AV_CODEC_ID_H265)
av_opt_set(CodecContext, "preset", "medium", 0);
avcodec_parameters_from_context(Stream->codecpar, CodecContext);
if (FormatContext->oformat->flags & AVFMT_GLOBALHEADER)
CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if ((ErrorCode = avcodec_open2(CodecContext, Codec, NULL)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to open codec: %s", Error);
return false;
}
if (!(OutputFormat->flags & AVFMT_NOFILE))
{
if ((ErrorCode = avio_open(&FormatContext->pb, FileName, AVIO_FLAG_WRITE)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to open file: %s", Error);
return false;
}
}
Stream->time_base = CodecContext->time_base;
if ((ErrorCode = avformat_write_header(FormatContext, NULL)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to write header: %s", Error);
return false;
}
CodecContext->time_base = Stream->time_base;
av_dump_format(FormatContext, 0, FileName, 1);
// create the frame
{
Frame = av_frame_alloc();
if (!Frame)
{
Log("Could not allocate video frame");
return false;
}
Frame->format = CodecContext->pix_fmt;
Frame->width = CodecContext->width;
Frame->height = CodecContext->height;
ErrorCode = av_frame_get_buffer(Frame, 32);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Could not allocate the video frame data: %s", Error);
return false;
}
}
// create a color converter
{
ColorConverter = sws_getContext(CodecContext->width, CodecContext->height, AV_PIX_FMT_BGRA,
CodecContext->width, CodecContext->height, AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
if (!ColorConverter)
{
Log("Could not allocate color converter");
return false;
}
}
bInitialized = true;
return true;
}
bool FMovieMaker::RecordFrame(uint8_t* BGRAData)
{
if (!bInitialized)
{
Log("Cannot record frames on an uninitialized Video Recorder");
return false;
}
/*make sure the frame data is writable */
int ErrorCode = av_frame_make_writable(Frame);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Could not make the frame writable: %s", Error);
return false;
}
/* convert the bgra bitmap data into yuv frame data */
int inLinesize[1] = { 4 * CodecContext->width }; // BGRA stride: 4 bytes per pixel
sws_scale(ColorConverter, &BGRAData, inLinesize, 0, CodecContext->height, Frame->data, Frame->linesize);
//Frame->pts = RecordedFrames++;
Frame->pts = CodecContext->time_base.den / CodecContext->time_base.num * CodecContext->framerate.den / CodecContext->framerate.num * (RecordedFrames++);
//The following assumes that codecContext->time_base = (AVRational){1, 1};
//Frame->pts = frameduration * (RecordedFrames++) * Stream->time_base.den / (Stream->time_base.num * fps);
//Frame->pts += av_rescale_q(1, CodecContext->time_base, Stream->time_base);
return EncodeFrame(false);
}
bool FMovieMaker::EncodeFrame(bool bFinalize)
{
/* send the frame to the encoder */
int ErrorCode = avcodec_send_frame(CodecContext, bFinalize ? nullptr : Frame);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Error sending a frame for encoding: %s", Error);
return false;
}
AVPacket Packet;
av_init_packet(&Packet);
Packet.data = NULL;
Packet.size = 0;
Packet.flags |= AV_PKT_FLAG_KEY;
Packet.pts = Frame->pts;
if (avcodec_receive_packet(CodecContext, &Packet) == 0)
{
//std::cout << "pkt key: " << (Packet.flags & AV_PKT_FLAG_KEY) << " " << Packet.size << " " << (counter++) << std::endl;
uint8_t* size = ((uint8_t*)Packet.data);
//std::cout << "first: " << (int)size[0] << " " << (int)size[1] << " " << (int)size[2] << " " << (int)size[3] << " " << (int)size[4] << " " << (int)size[5] << " " << (int)size[6] << " " << (int)size[7] << std::endl;
av_interleaved_write_frame(FormatContext, &Packet);
av_packet_unref(&Packet);
}
return true;
}
bool FMovieMaker::Finalize()
{
if (!bInitialized)
{
Log("Cannot finalize uninitialized Video Recorder");
return false;
}
//DELAYED FRAMES
AVPacket Packet;
av_init_packet(&Packet);
Packet.data = NULL;
Packet.size = 0;
for (;;)
{
avcodec_send_frame(CodecContext, NULL);
if (avcodec_receive_packet(CodecContext, &Packet) == 0)
{
av_interleaved_write_frame(FormatContext, &Packet);
av_packet_unref(&Packet);
}
else
break;
}
av_write_trailer(FormatContext);
if (!(OutputFormat->flags & AVFMT_NOFILE))
{
int ErrorCode = avio_close(FormatContext->pb);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to close file: %s", Error);
}
}
if (Frame)
{
av_frame_free(&Frame);
Frame = nullptr;
}
if (CodecContext)
{
avcodec_free_context(&CodecContext);
CodecContext = nullptr;
}
if (FormatContext)
{
avformat_free_context(FormatContext);
FormatContext = nullptr;
}
if (ColorConverter)
{
sws_freeContext(ColorConverter);
ColorConverter = nullptr;
}
bInitialized = false;
return true;
}
void FMovieMaker::Log(const char* fmt, ...)
{
va_list args;
fprintf(stderr, "LOG: ");
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fprintf(stderr, "\n");
}
Main.cpp
#include "MovieMaker.h"
uint8_t FtoB(float x)
{
if (x <= 0.0f)
return 0;
if (x >= 1.0f)
return 255;
else
return (uint8_t)(x * 255.0f);
}
void SetPixelColor(float X, float Y, float Width, float Height, float t, uint8_t* BGRA)
{
t += 12.0f; // more interesting colors at this time
float P[2] = { 0.1f * X - 25.0f, 0.1f * Y - 25.0f };
float V = sqrtf(P[0] * P[0] + P[1] * P[1]);
BGRA[0] = FtoB(sinf(V + t / 0.78f));
BGRA[1] = FtoB(sinf(V + t / 10.0f));
BGRA[2] = FtoB(sinf(V + t / 36e2f));
BGRA[3] = 255;
}
int main()
{
FMovieMaker MovieMaker;
const char* FileName = "C:\\ffmpeg\\MyMovieMakerMovie.mp4";
int Width = 640;
int Height = 480;
int FPS = 30;
int BitRateKBS = 2000;
if (MovieMaker.Initialize(FileName, Width, Height, FPS, BitRateKBS))
{
int Size = Width * 4 * Height;
uint8_t* BGRAData = new uint8_t[Size];
memset(BGRAData, 255, Size);
for (float Frame = 0; Frame < 60; Frame++)
{
// fill the image data with something interesting
for (float Y = 0; Y < Height; Y++)
{
for (float X = 0; X < Width; X++)
{
SetPixelColor(X, Y, (float)Width, (float)Height, Frame / (float)FPS, &BGRAData[(int)(Y * Width + X) * 4]);
}
}
if (!MovieMaker.RecordFrame(BGRAData))
break;
}
delete[] BGRAData;
MovieMaker.Finalize();
}
}
If I include the lines that add the AV_CODEC_FLAG_GLOBAL_HEADER flag, as shown above, I get all sorts of issues in the output of ffprobe MyMovieMakerMovie.mp4.
C:\ffmpeg>ffprobe MyMovieMakerMovie.mp4
ffprobe version 4.2.2 Copyright (c) 2007-2019 the FFmpeg developers
built with gcc 9.2.1 (GCC) 20200122
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
libavutil 56. 31.100 / 56. 31.100
libavcodec 58. 54.100 / 58. 54.100
libavformat 58. 29.100 / 58. 29.100
libavdevice 58. 8.100 / 58. 8.100
libavfilter 7. 57.100 / 7. 57.100
libswscale 5. 5.100 / 5. 5.100
libswresample 3. 5.100 / 3. 5.100
libpostproc 55. 5.100 / 55. 5.100
[h264 @ 000001d44b795b00] non-existing PPS 0 referenced
[h264 @ 000001d44b795b00] decode_slice_header error
[h264 @ 000001d44b795b00] no frame!
...
[h264 @ 000001d44b795b00] non-existing PPS 0 referenced
[h264 @ 000001d44b795b00] decode_slice_header error
[h264 @ 000001d44b795b00] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 @ 000001d44b783880] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 @ 000001d44b783880] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 640x480, 20528 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'MyMovieMakerMovie.mp4':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2avc1mp41
encoder : Lavf58.29.100
Duration: 00:00:01.97, start: 0.000000, bitrate: 20529 kb/s
Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 640x480, 20528 kb/s, 30.51 fps, 30 tbr, 15360 tbn, 30720 tbc (default)
Metadata:
handler_name : VideoHandler
Without the AV_CODEC_FLAG_GLOBAL_HEADER flag, I get clean output from ffprobe, but the video still doesn't play. Notice that it thinks the frame rate is 30.51 fps; I'm not sure why.
C:\ffmpeg>ffprobe MyMovieMakerMovie.mp4
ffprobe version 4.2.2 Copyright (c) 2007-2019 the FFmpeg developers
built with gcc 9.2.1 (GCC) 20200122
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
libavutil 56. 31.100 / 56. 31.100
libavcodec 58. 54.100 / 58. 54.100
libavformat 58. 29.100 / 58. 29.100
libavdevice 58. 8.100 / 58. 8.100
libavfilter 7. 57.100 / 7. 57.100
libswscale 5. 5.100 / 5. 5.100
libswresample 3. 5.100 / 3. 5.100
libpostproc 55. 5.100 / 55. 5.100
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'MyMovieMakerMovie.mp4':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2avc1mp41
encoder : Lavf58.29.100
Duration: 00:00:01.97, start: 0.000000, bitrate: 20530 kb/s
Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 640x480, 20528 kb/s, 30.51 fps, 30 tbr, 15360 tbn, 60 tbc (default)
Metadata:
handler_name : VideoHandler
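For reference, the muxing example that ships with FFmpeg orders these calls differently from my code above: the global-header flag is set before avcodec_open2(), and the stream parameters are copied from the codec context after the encoder is opened, so that the H.264 extradata (SPS/PPS) actually reaches the MP4 muxer. A minimal sketch of that ordering, reusing the names from my code (untested, so treat it as an assumption, not a confirmed fix):
// Set the flag BEFORE opening the encoder so x264 emits global SPS/PPS.
if (FormatContext->oformat->flags & AVFMT_GLOBALHEADER)
    CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (avcodec_open2(CodecContext, Codec, nullptr) < 0)
    return false;
// Copy AFTER opening: this propagates CodecContext->extradata into
// Stream->codecpar, which the MP4 muxer writes into the avc1/avcC box
// that ffprobe's "non-existing PPS 0" complaint points at.
avcodec_parameters_from_context(Stream->codecpar, CodecContext);

// Per frame: count pts in codec time_base units...
Frame->pts = RecordedFrames++;
// ...and rescale each packet into the stream time base before writing,
// since avformat_write_header() may change Stream->time_base (e.g. to
// 1/15360 for MP4), which may also explain the odd 30.51 fps reading.
av_packet_rescale_ts(&Packet, CodecContext->time_base, Stream->time_base);
Packet.stream_index = Stream->index;
av_interleaved_write_frame(FormatContext, &Packet);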

Related

c++ - using FFmpeg encode and UDP with a Webcam

I'm trying to get frames from a webcam using OpenCV, encode them with FFmpeg and send them over UDP.
I previously did a similar project that saved the packets to a video file instead of sending them over UDP.
My code is:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define WIDTH 640
#define HEIGHT 480
#define CODEC_ID AV_CODEC_ID_H264
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P
static AVFrame *frame, *pFrameBGR;
int main(int argc, char **argv)
{
VideoCapture cap(0);
const char *url = "udp://127.0.0.1:8080";
AVFormatContext *formatContext;
AVStream *stream;
AVCodec *codec;
AVCodecContext *c;
AVDictionary *opts = NULL;
int ret, got_packet;
if (!cap.isOpened())
{
return -1;
}
av_log_set_level(AV_LOG_TRACE);
av_register_all();
avformat_network_init();
avformat_alloc_output_context2(&formatContext, NULL, "h264", url);
if (!formatContext)
{
av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
}
codec = avcodec_find_encoder(CODEC_ID);
if (!codec)
{
av_log(NULL, AV_LOG_ERROR, "Could not find encoder.\n");
}
stream = avformat_new_stream(formatContext, codec);
c = avcodec_alloc_context3(codec);
stream->id = formatContext->nb_streams - 1;
stream->time_base = (AVRational){1, 25};
c->codec_id = CODEC_ID;
c->bit_rate = 400000;
c->width = WIDTH;
c->height = HEIGHT;
c->time_base = stream->time_base;
c->gop_size = 12;
c->pix_fmt = STREAM_PIX_FMT;
if (formatContext->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_dict_set(&opts, "preset", "fast", 0);
av_dict_set(&opts, "tune", "zerolatency", 0);
ret = avcodec_open2(c, codec, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}
pFrameBGR = av_frame_alloc();
if (!pFrameBGR)
{
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}
frame = av_frame_alloc();
if (!frame)
{
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = avcodec_parameters_from_context(stream->codecpar, c);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}
av_dump_format(formatContext, 0, url, 1);
ret = avformat_write_header(formatContext, NULL);
if (ret != 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to connect to '%s'.\n", url);
}
Mat image(Size(HEIGHT, WIDTH), CV_8UC3);
SwsContext *swsctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
int frame_pts = 0;
while (1)
{
cap >> image;
int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV * sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameBGR, image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT);
avpicture_fill((AVPicture *)frame, bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT);
sws_scale(swsctx, (uint8_t const *const *)pFrameBGR->data, pFrameBGR->linesize, 0, HEIGHT, frame->data, frame->linesize);
AVPacket pkt = {0};
av_init_packet(&pkt);
frame->pts = frame_pts;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error encoding frame\n");
}
if (got_packet)
{
pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, c->time_base, stream->time_base);
pkt.stream_index = stream->index;
return av_interleaved_write_frame(formatContext, &pkt);
cout << "Seguro que si" << endl;
}
frame_pts++;
}
avcodec_free_context(&c);
av_frame_free(&frame);
avformat_free_context(formatContext);
return 0;
}
The code compiles, but it returns a segmentation fault in the function av_interleaved_write_frame(). I've tried several implementations and several codecs (in this case I'm using libopenh264, but mpeg2video returns the same segmentation fault). I also tried av_write_frame(), but it returns the same error.
As I said before, I only want to grab frames from a webcam connected via USB, encode them to H264 and send the packets over UDP to another PC.
My console log when I run the executable is:
[100%] Built target display
[OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::SetOption():ENCODER_OPTION_TRACE_CALLBACK callback = 0x7f0c302a87c0.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::InitEncoder(), openh264 codec version = 5a5c4f1
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:iUsageType = 0,iPicWidth= 640;iPicHeight= 480;iTargetBitrate= 400000;iMaxBitrate= 400000;iRCMode= 0;iPaddingFlag= 0;iTemporalLayerNum= 1;iSpatialLayerNum= 1;fFrameRate= 25.000000f;uiIntraPeriod= 12;eSpsPpsIdStrategy = 0;bPrefixNalAddingCtrl = 0;bSimulcastAVC=0;bEnableDenoise= 0;bEnableBackgroundDetection= 1;bEnableSceneChangeDetect = 1;bEnableAdaptiveQuant= 1;bEnableFrameSkip= 0;bEnableLongTermReference= 0;iLtrMarkPeriod= 30, bIsLosslessLink=0;iComplexityMode = 0;iNumRefFrame = 1;iEntropyCodingModeFlag = 0;uiMaxNalSize = 0;iLTRRefNum = 0;iMultipleThreadIdc = 1;iLoopFilterDisableIdc = 0 (offset(alpha/beta): 0,0;iComplexityMode = 0,iMaxQp = 51;iMinQp = 0)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:sSpatialLayers[0]: .iVideoWidth= 640; .iVideoHeight= 480; .fFrameRate= 25.000000f; .iSpatialBitrate= 400000; .iMaxSpatialBitrate= 400000; .sSliceArgument.uiSliceMode= 1; .sSliceArgument.iSliceNum= 0; .sSliceArgument.uiSliceSizeConstraint= 1500;uiProfileIdc = 66;uiLevelIdc = 41
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:SliceArgumentValidationFixedSliceMode(), unsupported setting with Resolution and uiSliceNum combination under RC on! So uiSliceNum is changed to 6!
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:Setting MaxSpatialBitrate (400000) the same at SpatialBitrate (400000) will make the actual bit rate lower than SpatialBitrate
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:bEnableFrameSkip = 0,bitrate can't be controlled for RC_QUALITY_MODE,RC_BITRATE_MODE and RC_TIMESTAMP_MODE without enabling skip frame.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:Change QP Range from(0,51) to (12,42)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WELS CPU features/capacities (0x4007fe3f) detected: HTT: Y, MMX: Y, MMXEX: Y, SSE: Y, SSE2: Y, SSE3: Y, SSSE3: Y, SSE4.1: Y, SSE4.2: Y, AVX: Y, FMA: Y, X87-FPU: Y, 3DNOW: N, 3DNOWEX: N, ALTIVEC: N, CMOV: Y, MOVBE: Y, AES: Y, NUMBER OF LOGIC PROCESSORS ON CHIP: 8, CPU CACHE LINE SIZE (BYTES): 64
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt() exit, overall memory usage: 4542878 bytes
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt(), pCtx= 0x0x245a400.
Output #0, h264, to 'udp://192.168.100.39:8080':
Stream #0:0, 0, 1/25: Video: h264 (libopenh264), 1 reference frame, yuv420p, 640x480 (0x0), 0/1, q=2-31, 400 kb/s, 25 tbn
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:RcUpdateIntraComplexity iFrameDqBits = 385808,iQStep= 2016,iIntraCmplx = 777788928
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:[Rc]Layer 0: Frame timestamp = 0, Frame type = 2, encoding_qp = 30, average qp = 30, max qp = 33, min qp = 27, index = 0, iTid = 0, used = 385808, bitsperframe = 16000, target = 64000, remainingbits = -257808, skipbuffersize = 200000
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerNum = 2,iFrameSize = 48252
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 0,iNalType = 0,iNalCount = 2, first Nal Length=18,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 1,iNalType = 1,iNalCount = 6, first Nal Length=6057,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] 6 slices
./scriptBuild.sh: line 20: 10625 Segmentation fault (core dumped) ./display
As you can see, FFmpeg uses libopenh264 and configures it correctly. However, no matter what I do, it always returns the same segmentation fault...
I've used commands like this:
ffmpeg -s 640x480 -f video4linux2 -i /dev/video0 -r 30 -vcodec libopenh264 -an -f h264 udp://127.0.0.1:8080
And it works perfectly, but I need to process the frames before sending them. That's why I'm trying to use the libraries.
My FFmpeg version is:
ffmpeg version 3.3.6 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.8 (Ubuntu 4.8.4-2ubuntu1~14.04.3)
configuration: --disable-yasm --enable-shared --enable-libopenh264 --cc='gcc -fPIC'
libavutil 55. 58.100 / 55. 58.100
libavcodec 57. 89.100 / 57. 89.100
libavformat 57. 71.100 / 57. 71.100
libavdevice 57. 6.100 / 57. 6.100
libavfilter 6. 82.100 / 6. 82.100
libswscale 4. 6.100 / 4. 6.100
libswresample 2. 7.100 / 2. 7.100
I tried to get more information about the error using gdb, but it didn't give me any debugging info.
How can I solve this problem? I don't know what else to try...
Thank you!
avpicture_fill() is deprecated; I think this is the source of the error. Try av_image_fill_arrays() instead.
An example call should be:
av_image_fill_arrays(pFrameBGR->data, /* destination */
pFrameBGR->linesize, /* destination */
image.data, /* source */
AV_PIX_FMT_BGR24, /* source format */
WIDTH, HEIGHT, 1); /* source w+h & alignment */
Hope that helps.
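For completeness, here is a hedged sketch of what both deprecated avpicture_fill() calls in the question might become with av_image_fill_arrays(), reusing the question's variables (pFrameBGR, frame, image, bufferYUV); it is an illustration, not something tested against the asker's setup:
// Point pFrameBGR's planes at the OpenCV BGR pixels...
av_image_fill_arrays(pFrameBGR->data, pFrameBGR->linesize,
                     image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, 1);
// ...and point frame's planes at the av_malloc'd YUV buffer.
av_image_fill_arrays(frame->data, frame->linesize,
                     bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
// Neither call allocates pixel memory; image.data and bufferYUV must
// stay alive for as long as the frames reference them.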

Libav AVFrame to Opencv Mat to AVPacket conversion

I am new to libav and I am writing video manipulation software that uses OpenCV as its heart. Briefly, what I do is:
1- read a video packet
2- decode the packet into an AVFrame
3- convert the AVFrame to a cv::Mat
4- manipulate the Mat
5- convert the cv::Mat back into an AVFrame
6- encode the AVFrame into an AVPacket
7- write the packet
8- goto 1
I read the dranger tutorial at http://dranger.com/ffmpeg/tutorial01.html and I also used the decoding_encoding example. I can read the video, extract video frames and convert them to cv::Mat. My problem starts with converting the cv::Mat back to an AVFrame and encoding it into an AVPacket.
Would you please help me with this?
Here is my code:
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
int videoStream=-1;
int audioStream=-1;
int frameFinished;
int numBytes;
uint8_t *buffer = NULL;
struct SwsContext *sws_ctx = NULL;
FrameManipulation *mal_frame;
const char *in_filename, *out_filename;
int ret, i;
if (argc < 3) {
printf("usage: %s input output\n"
"API example program to remux a media file with libavformat and libavcodec.\n"
"The output format is guessed according to the file extension.\n"
"\n", argv[0]);
return 1;
}
in_filename = argv[1];
out_filename = argv[2];
av_register_all();
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
videoStream < 0) {
videoStream=i;
}
if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
audioStream < 0) {
audioStream=i;
}
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
pCodec=avcodec_find_decoder(ifmt_ctx->streams[videoStream]->codec->codec_id);
pCodecCtx = avcodec_alloc_context3(pCodec);
if(avcodec_copy_context(pCodecCtx, ifmt_ctx->streams[videoStream]->codec) != 0) {
fprintf(stderr, "Couldn't copy codec context");
return -1; // Error copying codec context
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=av_frame_alloc();
// Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, ifmt_ctx->streams[videoStream]->codec->width,
ifmt_ctx->streams[videoStream]->codec->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24,
ifmt_ctx->streams[videoStream]->codec->width, ifmt_ctx->streams[videoStream]->codec->height);
av_dump_format(ofmt_ctx, 0, out_filename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24,
ifmt_ctx->streams[videoStream]->codec->width,
ifmt_ctx->streams[videoStream]->codec->height);
// initialize SWS context for software scaling
sws_ctx = sws_getContext(
ifmt_ctx->streams[videoStream]->codec->width,
ifmt_ctx->streams[videoStream]->codec->height,
ifmt_ctx->streams[videoStream]->codec->pix_fmt,
ifmt_ctx->streams[videoStream]->codec->width,
ifmt_ctx->streams[videoStream]->codec->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL
);
// Loop through packets
while (1) {
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if(pkt.stream_index==videoStream)
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &pkt);
if(frameFinished) {
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
cv::Mat img= mal_frame->process(
pFrameRGB,pFrame->width,pFrame->height);
/* My problem is Here ------------*/
avpicture_fill((AVPicture*)pFrameRGB,
img.data,
PIX_FMT_BGR24,
outStream->codec->width,
outStream->codec->height);
pFrameRGB->width = ifmt_ctx->streams[videoStream]->codec->width;
pFrameRGB->height = ifmt_ctx->streams[videoStream]->codec->height;
avcodec_encode_video2(ifmt_ctx->streams[videoStream]->codec ,
&pkt , pFrameRGB , &gotPacket);
/*
I get this error
[swscaler @ 0x14b58a0] bad src image pointers
[swscaler @ 0x14b58a0] bad src image pointers
*/
/* My Problem Ends here ---------- */
}
if (ret < 0)
break;
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
//log_packet(ifmt_ctx, &pkt, "in");
/* copy packet */
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
AV_ROUND_NEAR_INF);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(ofmt_ctx, &pkt, "out");
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_free_packet(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avformat_close_input(&ifmt_ctx);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
return 1;
}
return 0;
}
When I run this code, I get an unknown fatal error in this part:
/* My problem is Here ------------*/
avpicture_fill((AVPicture*)pFrameRGB,
img.data,
PIX_FMT_BGR24,
outStream->codec->width,
outStream->codec->height);
pFrameRGB->width = ifmt_ctx->streams[videoStream]->codec->width;
pFrameRGB->height = ifmt_ctx->streams[videoStream]->codec->height;
avcodec_encode_video2(ifmt_ctx->streams[videoStream]->codec ,
&pkt , pFrameRGB , &gotPacket);
/*
I get this error
[swscaler @ 0x14b58a0] bad src image pointers
[swscaler @ 0x14b58a0] bad src image pointers
*/
/* My Problem Ends here ---------- */
Here is where I want to convert the cv::Mat back to an AVFrame and encode it into an AVPacket. I appreciate your help.
After reading some examples, reading source code and some help people offered, I managed to make the code run. I mixed up the transcoding and encoding examples. Here is my code; these are the highlights:
1- libswscale should be used to convert an AVFrame with the required pixel format into an OpenCV Mat. To do so, we define:
struct SwsContext *sws_ctx = NULL;
sws_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL
);
To convert the OpenCV Mat back to an AVFrame, one should again use swscale, translating the OpenCV BGR frame format to YUV. So I do this:
struct SwsContext *sws_ctx_bgr_yuv = NULL;
sws_ctx_bgr_yuv = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
AV_PIX_FMT_BGR24,
pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt //AV_PIX_FMT_YUV420p
,0,0,NULL,NULL);
And here is the frame reading/decoding/encoding loop:
while (1) {
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
if(stream_index==video_index){
sws_scale(sws_ctx, (uint8_t const * const *)frame->data,
frame->linesize, 0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
/*------------------------------------------------------------------------
/* Frame converts to opencv Mat
/*------------------------------------------------------------------------*/
cv::Mat img(frame->height,frame->width,CV_8UC3,pFrameRGB->data[0]);
img=manipulate_image(img); //this is opencv Mat, do whatever you want, but don't change its dimensions and format
//manipulate_function can be considered as as simple as blurring
const int stride[] = {img.step[0] };
/* opencv Mat converts back to AVFrame */
sws_scale(sws_ctx_bgr_yuv, &img.data, stride, 0, img.rows, frame->data, frame->linesize);
}
frame->pts = av_frame_get_best_effort_timestamp(frame);
/* AVFrame re-encodes to AVPacket and will be sent to encoder */
ret = filter_encode_write_frame(frame, stream_index);
av_frame_free(&frame);
if (ret < 0)
goto end;
} else {
av_frame_free(&frame);
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_free_packet(&packet);
}
Here is another way to convert between cv::Mat and AVFrame using C++, based on some code I found and debugged. Please note it's written specifically for 8-bit, 3-channel images, but that can be changed by swapping AV_PIX_FMT_BGR24 in both functions.
Hope this helps.
AVFrame cvmat_to_avframe(cv::Mat* frame)
{
AVFrame dst;
cv::Size frameSize = frame->size();
AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_RAWVIDEO);
AVFormatContext* outContainer = avformat_alloc_context();
AVStream *outStream = avformat_new_stream(outContainer, encoder);
avcodec_get_context_defaults3(outStream->codec, encoder);
outStream->codec->pix_fmt = AV_PIX_FMT_BGR24;
outStream->codec->width = frame->cols;
outStream->codec->height = frame->rows;
avpicture_fill((AVPicture*)&dst, frame->data, AV_PIX_FMT_BGR24, outStream->codec->width, outStream->codec->height);
dst.width = frameSize.width;
dst.height = frameSize.height;
return dst;
}
cv::Mat avframe_to_cvmat(AVFrame *frame)
{
AVFrame dst;
cv::Mat m;
memset(&dst, 0, sizeof(dst));
int w = frame->width, h = frame->height;
m = cv::Mat(h, w, CV_8UC3);
dst.data[0] = (uint8_t *)m.data;
avpicture_fill( (AVPicture *)&dst, dst.data[0], AV_PIX_FMT_BGR24, w, h);
struct SwsContext *convert_ctx=NULL;
enum AVPixelFormat src_pixfmt = AV_PIX_FMT_BGR24;
enum AVPixelFormat dst_pixfmt = AV_PIX_FMT_BGR24;
convert_ctx = sws_getContext(w, h, src_pixfmt, w, h, dst_pixfmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(convert_ctx, frame->data, frame->linesize, 0, h,
dst.data, dst.linesize);
sws_freeContext(convert_ctx);
return m;
}
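A hedged usage sketch (my addition, not from the original answer) of a round trip through the two helpers above; note that cvmat_to_avframe() allocates a fresh AVFormatContext and AVStream on every call and never frees them, so real code should hoist or release those:
// Assumes 'decoded' is an AVFrame already in 8-bit BGR24 format.
cv::Mat m = avframe_to_cvmat(decoded); // AVFrame -> 8-bit BGR cv::Mat
cv::blur(m, m, cv::Size(5, 5));        // any OpenCV processing
AVFrame f = cvmat_to_avframe(&m);      // cv::Mat -> BGR AVFrame
// f.data[] points into m.data, so m must outlive f.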

Audio/Video encoding with ffmpeg

I am trying to create an avi file with encoded video and audio, using ffmpeg.
First, I create the file:
#define BITRATE 10000000
#define GOP 300
#define FPS 60
#define VIDEOTYPE "avi"
if (!encoder_->createFile(QFileInfo(*(videoFile_.data())).absoluteFilePath(), targetRect.width(), targetRect.height(), BITRATE*(1000 / FPS), GOP, 1000))
The buffers are initialized as:
audio_outbuf_size = 44100 * 0.005 * 16; //5ms of audio should be encoded, each time this function is called
audio_outbuf = new uint8_t[audio_outbuf_size];
outbuf_size = getWidth()*getHeight() * 3;
outbuf = new uint8_t[outbuf_size];
Then add audio and video streams (audio: CODEC_ID_PCM_S16LE, 16000 kb/s and 44100 Hz; video: PIX_FMT_YUV420P):
void MediaMuxer::addAudioStream(QString fileName, ffmpeg::CodecID codec_id)
{
// Add the audio stream
ffmpeg::AVCodec *encoder = avcodec_find_encoder(codec_id);
pAudioStream_ = ffmpeg::av_new_stream(pOutputFormatCtx_, 0);
if (!pAudioStream_) {
printf("Could not allocate stream\n");
return;
}
pAudioCodecCtx_ = pAudioStream_->codec;
pAudioCodecCtx_->codec_id = codec_id;
pAudioCodecCtx_->codec_type = ffmpeg::AVMEDIA_TYPE_AUDIO;
pAudioCodecCtx_->sample_fmt = ffmpeg::AV_SAMPLE_FMT_S16;
pAudioCodecCtx_->sample_fmt = encoder->sample_fmts[0];
pAudioCodecCtx_->bit_rate = 16000;
//pAudioCodecCtx_->bit_rate = 64000;
pAudioCodecCtx_->sample_rate = N;
pAudioCodecCtx_->channels = 1;
pAudioCodecCtx_->time_base.den = FPS;
pAudioCodecCtx_->time_base.num = 1;
avcodec_thread_init(pAudioCodecCtx_, 10);
// some formats want stream headers to be separate
if (pOutputFormatCtx_->oformat->flags & AVFMT_GLOBALHEADER)
pAudioCodecCtx_->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (av_set_parameters(pOutputFormatCtx_, NULL) < 0)
{
printf("Invalid output format parameters\n");
return;
}
//ffmpeg::dump_format(pOutputFormatCtx_, 0, fileName.toStdString().c_str(), 1);
// open_video
// find the audio encoder
pAudioCodec_ = avcodec_find_encoder(pAudioCodecCtx_->codec_id);
if (!pAudioCodec_)
{
printf("codec not found\n");
return;
}
// open the codec
if (avcodec_open(pAudioCodecCtx_, pAudioCodec_) < 0)
{
printf("could not open codec\n");
return;
}
// Allocate memory for output
if (!initAudioOutputBuf())
{
printf("Can't allocate memory for audio output bitstream\n");
return;
}
// Allocate the audio frame
if (!initAudioFrame())
{
printf("Can't init audio frame\n");
return;
}
if (url_fopen(&pOutputFormatCtx_->pb, fileName.toStdString().c_str(), URL_WRONLY) < 0)
{
printf("Could not open '%s'\n", fileName.toStdString().c_str());
return;
}
av_write_header(pOutputFormatCtx_);
}
void MediaMuxer::addVideoStream(QString fileName)
{
// Add the video stream
pVideoStream_ = ffmpeg::av_new_stream(pOutputFormatCtx_, 0);
if (!pVideoStream_)
{
printf("Could not allocate stream\n");
return;
}
pVideoCodecCtx_ = pVideoStream_->codec;
pVideoCodecCtx_->codec_id = pOutputFormat_->video_codec;
pVideoCodecCtx_->codec_type = ffmpeg::AVMEDIA_TYPE_VIDEO;
pVideoCodecCtx_->bit_rate = Bitrate;
pVideoCodecCtx_->width = getWidth();
pVideoCodecCtx_->height = getHeight();
pVideoCodecCtx_->time_base.den = FPS;
pVideoCodecCtx_->time_base.num = 1;
pVideoCodecCtx_->gop_size = Gop;
pVideoCodecCtx_->pix_fmt = ffmpeg::PIX_FMT_YUV420P;
avcodec_thread_init(pVideoCodecCtx_, 10);
// some formats want stream headers to be separate
if (pOutputFormatCtx_->oformat->flags & AVFMT_GLOBALHEADER)
pVideoCodecCtx_->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (av_set_parameters(pOutputFormatCtx_, NULL) < 0)
{
printf("Invalid output format parameters\n");
return;
}
//ffmpeg::dump_format(pOutputFormatCtx_, 0, fileName.toStdString().c_str(), 1);
// open_video
// find the video encoder
pVideoCodec_ = avcodec_find_encoder(pVideoCodecCtx_->codec_id);
if (!pVideoCodec_)
{
printf("codec not found\n");
return;
}
// open the codec
if (avcodec_open(pVideoCodecCtx_, pVideoCodec_) < 0)
{
printf("could not open codec\n");
return;
}
// Allocate memory for output
if (!initOutputBuf())
{
printf("Can't allocate memory for output bitstream\n");
return;
}
// Allocate the YUV frame
if (!initFrame())
{
printf("Can't init frame\n");
return;
}
if (url_fopen(&pOutputFormatCtx_->pb, fileName.toStdString().c_str(), URL_WRONLY) < 0)
{
printf("Could not open '%s'\n", fileName.toStdString().c_str());
return;
}
av_write_header(pOutputFormatCtx_);
}
Finally, I alternately call encodeVideo()/encodeAudio() to encode video and PCM audio frames at specific recording times (pts):
int MediaMuxer::encodeVideo(const QImage &img, unsigned pts)
{
convertImage_sws(img); // SWS conversion
pVideoCodecCtx_->coded_frame->pts = pts; // Set the time stamp
int out_size = ffmpeg::avcodec_encode_video(pVideoCodecCtx_, outbuf, outbuf_size, ppicture);
pVideoCodecCtx_->coded_frame->pts = pts; // Set the time stamp
if (out_size > 0)
{
ffmpeg::av_init_packet(&pkt);
if (pVideoCodecCtx_->coded_frame->pts != (0x8000000000000000LL))
pkt.pts = av_rescale_q(pVideoCodecCtx_->coded_frame->pts, pVideoCodecCtx_->time_base, pVideoStream_->time_base);
if (pVideoCodecCtx_->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pVideoStream_->index;
pkt.data = outbuf;
pkt.size = out_size;
int ret = ffmpeg::av_interleaved_write_frame(pOutputFormatCtx_, &pkt);
if (ret<0)
return -1;
}
return out_size;
}
int MediaMuxer::encodeAudio(unsigned pts)
{
pAudioCodecCtx_->coded_frame->pts = pts; // Set the time stamp
// simple sound encoding
int16_t samples[220] = { 0 }; // buffer
int n; // buffer index
double Fs = 44100.0; // sampling frequency
// Generate audio data
for (n = 0; n < 220; ++n) //220 samples (44100*.005sec as the interval between 2 video frames is 10ms)
samples[n] = 16383.0 * sin(n*1000.0*2.0*M_PI / Fs); //sine wav
int out_size = ffmpeg::avcodec_encode_audio(pAudioCodecCtx_, audio_outbuf, audio_outbuf_size, (const short*)samples);
pAudioCodecCtx_->coded_frame->pts = pts; // Set the time stamp
if (out_size>0)
{
// Packet
ffmpeg::AVPacket pkt = { 0 };
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
if (pAudioCodecCtx_->coded_frame->pts != (0x8000000000000000LL))
pkt.pts = av_rescale_q(pAudioCodecCtx_->coded_frame->pts, pAudioCodecCtx_->time_base, pAudioStream_->time_base);
if (pAudioCodecCtx_->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pAudioStream_->index;
pkt.data = audio_outbuf;
pkt.size = out_size;
int ret = av_interleaved_write_frame(pOutputFormatCtx_, &pkt);
if (ret<0)
return -1;
av_free_packet(&pkt);
}
//end simple sound encoding
return pkt.size;
}
The result is a nice video with some audio behind it (either a regular beeping sound at regular intervals that ends way earlier than the video, or a continuous longer sound that also lasts shorter than the video).
I want to generate a beeping sound each time encodeAudio() is called - at non-regular intervals. I have tried modifying the sampling rate, the buffer size, the pkt size and the number of samples, but without any success. I also tried setting the pts at different times, but it did not get me where I want to be. Could someone please help?
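One hedged observation (my addition, not from the original post): the code sets pAudioCodecCtx_->time_base to {1, FPS}, but the usual convention for audio encoders is {1, sample_rate}, with the pts advancing by the number of samples encoded rather than by a video-frame counter. A sketch under that assumption, reusing the names above:
// Assumed convention: one pts tick == one audio sample.
pAudioCodecCtx_->time_base.num = 1;
pAudioCodecCtx_->time_base.den = 44100;
// In encodeAudio(), advance by the samples actually written:
static int64_t audioSamples = 0;
pAudioCodecCtx_->coded_frame->pts = audioSamples;
audioSamples += 220; // 220 samples = 5 ms at 44100 Hz
// The existing av_rescale_q() call then maps sample counts into
// pAudioStream_->time_base, which should keep the beeps aligned
// with the video timeline.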

libavcodec: ffprobe on file encoded with FFV1 codec reports "read_quant_table error"

I'm using the following code to encode a series of frames into an mkv or avi file with FFV1 encoding:
HRESULT Session::createContext(LPCSTR filename, UINT width, UINT height, UINT fps_num, UINT fps_den) {
LOG("Exporting to file: ", filename);
AVCodecID codecId = AV_CODEC_ID_FFV1;
this->pixelFormat = AV_PIX_FMT_YUV420P;
this->codec = avcodec_find_encoder(codecId);
RET_IF_NULL(this->codec, "Could not create codec", E_FAIL);
this->oformat = av_guess_format(NULL, filename, NULL);
RET_IF_NULL(this->oformat, "Could not create format", E_FAIL);
this->oformat->video_codec = codecId;
this->width = width;
this->height = height;
this->codecContext = avcodec_alloc_context3(this->codec);
RET_IF_NULL(this->codecContext, "Could not allocate context for the codec", E_FAIL);
this->codecContext->codec = this->codec;
this->codecContext->codec_id = codecId;
this->codecContext->pix_fmt = pixelFormat;
this->codecContext->width = this->width;
this->codecContext->height = this->height;
this->codecContext->time_base.num = fps_den;
this->codecContext->time_base.den = fps_num;
this->codecContext->gop_size = 1;
RET_IF_FAILED_AV(avformat_alloc_output_context2(&fmtContext, this->oformat, NULL, NULL), "Could not allocate format context", E_FAIL);
RET_IF_NULL(this->fmtContext, "Could not allocate format context", E_FAIL);
this->fmtContext->oformat = this->oformat;
this->fmtContext->video_codec_id = codecId;
this->stream = avformat_new_stream(this->fmtContext, this->codec);
RET_IF_NULL(this->stream, "Could not create new stream", E_FAIL);
this->stream->time_base = this->codecContext->time_base;
RET_IF_FAILED_AV(avcodec_parameters_from_context(this->stream->codecpar, this->codecContext), "Could not convert AVCodecContext to AVParameters", E_FAIL);
if (this->fmtContext->oformat->flags & AVFMT_GLOBALHEADER)
{
this->codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
av_opt_set_int(this->codecContext->priv_data, "coder", 0, 0);
av_opt_set_int(this->codecContext->priv_data, "context", 1, 0);
av_opt_set_int(this->codecContext->priv_data, "slicecrc", 1, 0);
//av_opt_set_int(this->codecContext->priv_data, "slicecrc", 1, 0);
//av_opt_set_int(this->codecContext->priv_data, "pix_fmt", pixelFormat, 0);
RET_IF_FAILED_AV(avcodec_open2(this->codecContext, this->codec, NULL), "Could not open codec", E_FAIL);
RET_IF_FAILED_AV(avio_open(&this->fmtContext->pb, filename, AVIO_FLAG_WRITE), "Could not open output file", E_FAIL);
RET_IF_NULL(this->fmtContext->pb, "Could not open output file", E_FAIL);
RET_IF_FAILED_AV(avformat_write_header(this->fmtContext, NULL), "Could not write header", E_FAIL);
frame = av_frame_alloc();
RET_IF_NULL(frame, "Could not allocate frame", E_FAIL);
frame->format = this->codecContext->pix_fmt;
frame->width = width;
frame->height = height;
return S_OK;
}
HRESULT Session::writeFrame(IMFSample * pSample) {
IMFMediaBuffer *mediaBuffer = NULL;
BYTE *pDataNV12 = NULL;
DWORD length;
RET_IF_FAILED(pSample->ConvertToContiguousBuffer(&mediaBuffer), "Could not convert IMFSample to contagiuous buffer", E_FAIL);
RET_IF_FAILED(mediaBuffer->GetCurrentLength(&length), "Could not get buffer length", E_FAIL);
RET_IF_FAILED(mediaBuffer->Lock(&pDataNV12, NULL, NULL), "Could not lock the buffer", E_FAIL);
BYTE *pDataYUV420P = new BYTE[length];
this->convertNV12toYUV420P(pDataNV12, pDataYUV420P, this->width, this->height);
RET_IF_FAILED(av_image_fill_arrays(frame->data, frame->linesize, pDataYUV420P, pixelFormat, this->width, this->height, 1), "Could not fill the frame with data from the buffer", E_FAIL);
LOG_IF_FAILED(mediaBuffer->Unlock(), "Could not unlock the buffer");
frame->pts = av_rescale_q(this->pts++, this->codecContext->time_base, this->stream->time_base);
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
RET_IF_FAILED_AV(avcodec_send_frame(this->codecContext, frame), "Could not send the frame to the encoder", E_FAIL);
delete[] pDataYUV420P;
if (SUCCEEDED(avcodec_receive_packet(this->codecContext, &pkt))) {
RET_IF_FAILED_AV(av_interleaved_write_frame(this->fmtContext, &pkt), "Could not write the received packet.", E_FAIL);
}
av_packet_unref(&pkt);
return S_OK;
}
HRESULT Session::endSession() {
LOG("Ending session...");
LOG("Closing files...")
LOG_IF_FAILED_AV(av_write_trailer(this->fmtContext), "Could not finalize the output file.");
LOG_IF_FAILED_AV(avio_close(this->fmtContext->pb), "Could not close the output file.");
LOG_IF_FAILED_AV(avcodec_close(this->codecContext), "Could not close the codec.");
av_free(this->codecContext);
LOG("Done.")
return S_OK;
}
The problem is that the generated file is not playable in either VLC or MPC-HC. However, MPC-HC reports following info in file properties:
General
Unique ID : 202978442142665779317960161865934977227 (0x98B439D9BE859109BD5EC00A62A238CB)
Complete name : T:\Test.mkv
Format : Matroska
Format version : Version 4 / Version 2
File size : 24.6 MiB
Duration : 147ms
Overall bit rate : 1 401 Mbps
Writing application : Lavf57.57.100
Writing library : Lavf57.57.100
Video
ID : 1
Format : FFV1
Format version : Version 0
Codec ID : V_MS/VFW/FOURCC / FFV1
Duration : 147ms
Width : 1 280 pixels
Height : 720 pixels
Display aspect ratio : 16:9
Frame rate mode : Constant
Frame rate : 1 000.000 fps
Color space : YUV
Chroma subsampling : 4:2:0
Bit depth : 8 bits
Compression mode : Lossless
Default : Yes
Forced : No
DURATION : 00:00:00.147000000
coder_type : Golomb Rice
Something to note is that it reports 1000 fps, which is weird since I've set AVCodecContext::time_base in the code.
UPDATE 1:
I managed to set the correct fps by setting the stream's time_base property:
this->stream->time_base.den = fps_num;
this->stream->time_base.num = fps_den;
VLC plays the output file, but it shows the VLC logo instead of the video, as if there were no video stream in the file.
UPDATE 2:
Cleaned up the code. Now if I set codecId = AV_CODEC_ID_MPEG2VIDEO, the output file is valid and plays in both VLC and MPC-HC. Using ffprobe on the file with FFV1 encoding yields the following result:
C:\root\apps\ffmpeg>ffprobe.exe t:\test.avi
ffprobe version 3.2 Copyright (c) 2007-2016 the FFmpeg developers
built with gcc 5.4.0 (GCC)
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --disable-w32threads --enable-dxva2 --enable-libmfx --enable-nvenc --enable-avisynth --enable-bzlib --enable-libebur128 --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-decklink --enable-zlib
libavutil 55. 34.100 / 55. 34.100
libavcodec 57. 64.100 / 57. 64.100
libavformat 57. 56.100 / 57. 56.100
libavdevice 57. 1.100 / 57. 1.100
libavfilter 6. 65.100 / 6. 65.100
libswscale 4. 2.100 / 4. 2.100
libswresample 2. 3.100 / 2. 3.100
libpostproc 54. 1.100 / 54. 1.100
[ffv1 @ 00000000006b83a0] read_quant_table error
Input #0, avi, from 't:\test.avi':
Metadata:
encoder : Lavf57.56.100
Duration: 00:00:04.94, start: 0.000000, bitrate: 107005 kb/s
Stream #0:0: Video: ffv1 (FFV1 / 0x31564646), yuv420p, 1280x720, 107717 kb/s, 29.97 fps, 29.97 tbr, 29.97 tbn, 29.97 tbc
I managed to create a valid output file using libavcodec 2.8.6. I was trying to use the latest 3.x version, but the API seems to be unstable.
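A hedged aside (my addition, not from the original post): the code above calls avcodec_parameters_from_context() before avcodec_open2(), and FFV1 only generates its global header (the extradata that read_quant_table parses) when the encoder is opened. If that is the cause, copying the parameters after opening should carry the quant tables to the muxer:
RET_IF_FAILED_AV(avcodec_open2(this->codecContext, this->codec, NULL), "Could not open codec", E_FAIL);
// Copy AFTER open so codecContext->extradata (FFV1's global header,
// including the quant tables) is populated and lands in codecpar.
RET_IF_FAILED_AV(avcodec_parameters_from_context(this->stream->codecpar, this->codecContext), "Could not convert AVCodecContext to AVParameters", E_FAIL);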

ffmpeg Bmp to yuv : Crash at sws_scale

The context:
I have a continuous succession of bitmaps and I want to encode them into a lightweight video format.
I use ffmpeg version 2.8.3 (the build here), under Qt 5, the Qt IDE, and msvc2013 for win32.
The problem:
My code crashes at sws_scale() (and sometimes at avcodec_encode_video2()). When I explore the stack, the crash occurs in sws_getCachedContext(). (I can only see the stack with these ffmpeg builds.)
I only use these ffmpeg libraries (from the Qt .pro file) :
LIBS += -lavcodec -lavformat -lswscale -lavutil
It's swscale that crashes. And this is the code:
void newVideo ()
{
ULONG_PTR gdiplusToken;
GdiplusStartupInput gdiplusStartupInput;
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
initBitmap (); //init bmp
int screenWidth = bmp.bmiHeader.biWidth;
int screenHeight = bmp.bmiHeader.biHeight;
AVCodec * codec;
AVCodecContext * c = NULL;
uint8_t * outbuf;
int i, out_size, outbuf_size;
avcodec_register_all();
qDebug () << "Video encoding\n";
// Find the mpeg1 video encoder
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec)
{
qDebug () << "Codec not found\n";
avcodec_close(c);
av_free(c);
return;
}
else
qDebug () << "H264 codec found\n";
c = avcodec_alloc_context3(codec);
c->bit_rate = 1000000;
c->width = 800; // resolution must be a multiple of two (1280x720),(1900x1080),(720x480)
c->height = 600;
c->time_base.num = 1; // framerate numerator
c->time_base.den = 25; // framerate denominator
c->gop_size = 30; // emit one intra frame every ten frames
c->max_b_frames = 1; // maximum number of b-frames between non b-frames
c->pix_fmt = AV_PIX_FMT_YUV420P; //Converstion RGB to YUV ?
c->codec_id = AV_CODEC_ID_H264;
struct SwsContext* fooContext = sws_getContext(screenWidth, screenHeight,
AV_PIX_FMT_RGB32,
c->width, c->height,
AV_PIX_FMT_YUV420P,
SWS_FAST_BILINEAR,
NULL, NULL, NULL);
// Open the encoder
if (avcodec_open2(c, codec, NULL) < 0)
{
qDebug () << "Could not open codec\n";
avcodec_close(c);
av_free(c);
return;
}
else qDebug () << "H264 codec opened\n";
outbuf_size = 100000 + c->width*c->height*(32>>3);//*(32>>3); // alloc image and output buffer
outbuf = static_cast<uint8_t *>(malloc(outbuf_size));
qDebug() << "Setting buffer size to: " << outbuf_size << "\n";
FILE* f = fopen("TEST.mpg","wb");
if(!f) qDebug() << "x - Cannot open video file for writing\n";
else qDebug() << "Opened video file for writing\n";
// encode 5 seconds of video
for (i = 0; i < STREAM_FRAME_RATE*STREAM_DURATION; i++) //the stop condition i < 5.0*5
{
qDebug () << "i = " << i;
fflush(stdout);
HBITMAP hBmp;
if (GetScreen(hBmp) == -1) return;
BYTE * pPixels;// = new BYTE [bmp.bmiHeader.biSizeImage];
pPixels = getPixels (hBmp);
DeleteObject (hBmp);
int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, c->width, c->height);
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));
if(!outbuffer) // check if(outbuf) instead
{
qDebug () << "Bytes cannot be allocated";
return;
}
AVFrame* inpic = avcodec_alloc_frame(); //av_frame_alloc () ?
AVFrame* outpic = avcodec_alloc_frame();
outpic->pts = (int64_t)((float)i * (1000.0/((float)(c->time_base.den))) * 90);
if (avpicture_fill((AVPicture*) inpic, (uint8_t*) pPixels, AV_PIX_FMT_RGB32,
screenWidth, screenHeight) < 0)
qDebug () << "avpicture_fill Fill picture with image failed"; //Fill picture with image
if(avpicture_fill((AVPicture*) outpic, outbuffer, AV_PIX_FMT_YUV420P,
c->width, c->height) < 0)
qDebug () << "avpicture_fill failed";
if (av_image_alloc(outpic->data, outpic->linesize, c->width, c->height,
c->pix_fmt, 1) < 0)
qDebug () << "av_image_alloc failed";
inpic->data[0] += inpic->linesize[0]*(screenHeight - 1); // Flipping frame
inpic->linesize[0] = -inpic->linesize[0]; // Flipping frame
////////////////////////////HERE THE BUG////////////////////////////////
sws_scale(fooContext,
inpic->data, inpic->linesize,
0, c->height,
outpic->data, outpic->linesize); //HERE THE BUG
av_free_packet((AVPacket *)outbuf);
// encode the image
out_size = avcodec_encode_video2 (c, (AVPacket *) outbuf,
(AVFrame *) outbuf_size, (int *) outpic);
///////////////////////THE CODE DONT GO BEYOND/////////////////////////////////
qDebug () << "Encoding frame" << i <<" (size=" << out_size <<"\n";
fwrite(outbuf, 1, out_size, f);
delete [] pPixels;
av_free(outbuffer);
av_free(inpic);
av_freep(outpic);
}
// get the delayed frames
for(; out_size; i++)
{
fflush(stdout);
out_size = avcodec_encode_video2 (c, (AVPacket *) outbuf,
(AVFrame *) outbuf_size, NULL);
qDebug () << "Writing frame" << i <<" (size=" << out_size <<"\n";
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
avcodec_close(c);
free(outbuf);
av_free(c);
qDebug () << "Closed codec and Freed\n";
}
And the output:
Video encoding
H264 codec found
H264 codec opened
Setting buffer size to: 2020000
Opened video file for writing
i = 0
**CRASH**
I thought that my bitmap wasn't good, so I crafted a bitmap just for testing; the code was:
uint8_t* pPixels = new uint8_t[Width * 3 * Height];
int x = 50;
for(unsigned int i = 0; i < Width * 3 * Height; i = i + 3) // fill the test frame with a color derived from x
{
pPixels [i] = x % 255; //R
pPixels [i + 1] = (x) % 255; //G
pPixels [i + 2] = (255 - x) % 255; //B
}
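// note: this test buffer is 3 bytes per pixel, but the avpicture_fill() calls
// above use AV_PIX_FMT_RGB32 (4 bytes per pixel) -- feeding it through the
// same path would under-allocate the source by a quarter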
However, the crash continues, which suggests that the bitmap (pPixels) is not the problem.
Does anyone know why I get this bug? Maybe I'm not setting a parameter correctly? Or using a deprecated FFmpeg function? etc.
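For reference on the deprecation point: avcodec_alloc_frame() and the avpicture_*() helpers are indeed deprecated in newer FFmpeg releases; av_frame_alloc() plus av_frame_get_buffer() is the modern path. A minimal sketch, assuming the same codec context c as above:
AVFrame* frame = av_frame_alloc(); // modern replacement for avcodec_alloc_frame()
frame->format = c->pix_fmt;
frame->width  = c->width;
frame->height = c->height;
// av_frame_get_buffer() allocates suitably aligned data[]/linesize[] planes in one call
if (av_frame_get_buffer(frame, 32) < 0)
    qDebug() << "could not allocate frame buffers";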
EDIT 1 27/12/15
Thanks to Ronald S. Bultje, sws_scale() no longer crashes with this code; however, it now returns an error: bad dst image pointers. My code:
//DESTINATION FRAME
if (avpicture_alloc ((AVPicture*) dst_frame, AV_PIX_FMT_YUV420P, c->width, c->height) < 0)
{
qDebug () << "# avpicture_alloc failed";
return;
}
if(avpicture_fill((AVPicture*) dst_frame, NULL, AV_PIX_FMT_YUV420P,
c->width, c->height) < 0)
qDebug () << "avpicture_fill failed";
avcodec_align_dimensions2 (c, &c->width, &c->height, dst_frame->linesize);
//SOURCE FRAME
if (avpicture_fill((AVPicture*) src_frame, (uint8_t *) pPixels, AV_PIX_FMT_RGB32,
tmp_screenWidth, tmp_screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
avcodec_align_dimensions2 (c, &tmp_screenWidth, &tmp_screenHeight, src_frame->linesize);
struct SwsContext* conversionContext = sws_getContext(tmp_screenWidth,tmp_screenHeight,AV_PIX_FMT_RGB32,c->width, c->height, AV_PIX_FMT_YUV420P,SWS_FAST_BILINEAR, NULL, NULL, NULL);
int output_Height = sws_scale(conversionContext,
src_frame->data, src_frame->linesize,
0, tmp_screenHeight,
dst_frame->data, dst_frame->linesize); //return 0 -> bad dst image pointers error
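One detail worth noting in this snippet: calling avpicture_fill() with a NULL buffer overwrites the data[] pointers that avpicture_alloc() just set up with NULL-based offsets, which is a plausible source of the bad dst image pointers error. A minimal sketch of the destination setup without the destructive fill, otherwise keeping the same calls:
avcodec_align_dimensions2(c, &c->width, &c->height, dst_frame->linesize);
if (avpicture_alloc((AVPicture*) dst_frame, AV_PIX_FMT_YUV420P, c->width, c->height) < 0)
{
    qDebug() << "# avpicture_alloc failed";
    return;
}
// no avpicture_fill(..., NULL, ...) here: it would replace the freshly
// allocated plane pointers with NULL-based ones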
EDIT 2 28/12/15
I have tried to follow Ronald S. Bultje's suggestion, and now I get a bad src image pointers error. I have investigated and worked for many hours, but I cannot find a solution. Here is the new snippet:
AVFrame* src_frame = av_frame_alloc ();
AVFrame* dst_frame = av_frame_alloc ();
AVFrame* tmp_src_frame = av_frame_alloc ();
/*........I do not use them until this snippet..........*/
//DESTINATION
//avpicture_free ((AVPicture*)dst_frame);
avcodec_align_dimensions2 (c, &c->width, &c->height, dst_frame->linesize);
if (avpicture_alloc ((AVPicture*) dst_frame, AV_PIX_FMT_YUV420P, c->width, c->height) < 0)
{
qDebug () << "# avpicture_alloc failed";
return;
}
//SOURCE
//stride = src_frame->linesize [0] = ((((screenWidth * bitPerPixel) + 31) & ~31) >> 3); do I need to do that ?
//== stride - I have gotten this formula from : https://msdn.microsoft.com/en-us/library/windows/desktop/dd318229(v=vs.85).aspx
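// worked example of that formula, with bitPerPixel = 32 and (judging from the
// linesize value below) screenWidth = 5440:
// ((5440*32 + 31) & ~31) >> 3 = 174080 >> 3 = 21760 bytes per row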
if (avpicture_fill((AVPicture*) src_frame, (uint8_t *) pPixels, AV_PIX_FMT_RGB32,
screenWidth, screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
//linesize [0] == 21760 like commented stride
//Source TO TMP Source
avcodec_align_dimensions2 (c, &tmp_screenWidth, &tmp_screenHeight, tmp_src_frame->linesize);
if (avpicture_fill((AVPicture*) tmp_src_frame, NULL, AV_PIX_FMT_RGB32,
tmp_screenWidth, tmp_screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
av_picture_copy ((AVPicture*) tmp_src_frame, (const AVPicture*) src_frame, AV_PIX_FMT_RGB32,
screenWidth, screenHeight);
struct SwsContext* conversionContext = sws_getContext(tmp_screenWidth, tmp_screenHeight,
AV_PIX_FMT_RGB32,
c->width, c->height,
AV_PIX_FMT_YUV420P,
SWS_FAST_BILINEAR,
NULL, NULL, NULL);
int output_Height = sws_scale(conversionContext,
tmp_src_frame->data, tmp_src_frame->linesize,
0, tmp_screenHeight,
dst_frame->data, dst_frame->linesize);
//ffmpeg error = bad src image pointers
// output_Height == 0
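This repeats the EDIT 1 problem, but on the source side: avpicture_fill() with a NULL buffer leaves tmp_src_frame->data[] as NULL-based offsets, so av_picture_copy() and sws_scale() receive invalid source planes. A minimal fix sketch, keeping the same aligned tmp dimensions, would give the temporary frame real memory before copying into it:
avcodec_align_dimensions2(c, &tmp_screenWidth, &tmp_screenHeight, tmp_src_frame->linesize);
// allocate real, aligned memory instead of filling with NULL
if (avpicture_alloc((AVPicture*) tmp_src_frame, AV_PIX_FMT_RGB32,
                    tmp_screenWidth, tmp_screenHeight) < 0)
{
    qDebug() << "# avpicture_alloc failed";
    return;
}
// copy with the *source* dimensions -- av_picture_copy() does not rescale
av_picture_copy((AVPicture*) tmp_src_frame, (const AVPicture*) src_frame,
                AV_PIX_FMT_RGB32, screenWidth, screenHeight);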
EDIT 3
For the temporary picture, I call avcodec_align_dimensions2(), then avpicture_alloc() to allocate the memory, and avpicture_fill() to fill the picture pointers. Below is the updated code:
//DESTINATION
//avpicture_free ((AVPicture*)dst_frame);
avcodec_align_dimensions2 (c, &c->width, &c->height, dst_frame->linesize);
if (avpicture_alloc ((AVPicture*) dst_frame, AV_PIX_FMT_YUV420P, c->width, c->height) < 0)
{
qDebug () << "# avpicture_alloc failed";
return;
}
//SOURCE
//src_frame->linesize [0] = ((((screenWidth * bpp) + 31) & ~31) >> 3);
//src_frame->linesize [0] = stride;
if (avpicture_fill((AVPicture*) src_frame, (uint8_t *) pPixels, AV_PIX_FMT_RGB32,
screenWidth, screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
//Source TO TMP Source
avcodec_align_dimensions2 (c, &tmp_screenWidth, &tmp_screenHeight, tmp_src_frame->linesize);
if (avpicture_alloc ((AVPicture*) tmp_src_frame, AV_PIX_FMT_RGB32, tmp_screenWidth, tmp_screenHeight) < 0)
{
qDebug () << "# avpicture_alloc failed";
return;
}
int outbuf_size = tmp_screenWidth*tmp_screenHeight*4;// alloc image and output buffer
outbuf = static_cast<uint8_t *>(malloc(outbuf_size));
if (avpicture_fill((AVPicture*) tmp_src_frame, outbuf, AV_PIX_FMT_RGB32,
tmp_screenWidth, tmp_screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
av_picture_copy ((AVPicture*) tmp_src_frame, (const AVPicture*) src_frame, AV_PIX_FMT_RGB32,
tmp_screenWidth, tmp_screenHeight);
struct SwsContext* conversionContext = sws_getContext(tmp_screenWidth, tmp_screenHeight,
AV_PIX_FMT_RGB32,
c->width, c->height,
AV_PIX_FMT_YUV420P,
SWS_FAST_BILINEAR,
NULL, NULL, NULL);
int output_Height = sws_scale(conversionContext,
tmp_src_frame->data, tmp_src_frame->linesize,
0, tmp_screenHeight,
dst_frame->data, dst_frame->linesize);
The call stack is as follows: av_picture_copy() is called, then av_image_copy(), then _VEC_memcpy(), then fastcopy_I(), and then the crash... Could the problem be the dimensions (tmp_screenWidth/Height)? Can av_picture_copy() copy a picture P1 with dimensions W1xH1 into a picture P2 with dimensions W2xH2?
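On that last question: no. av_picture_copy() does not rescale; it copies exactly the width x height that is passed, reading that many pixels per row from both pictures. Passing the aligned tmp_screenWidth/tmp_screenHeight therefore reads past the end of src_frame, which only holds screenWidth x screenHeight pixels, and that would match the crash inside the memcpy. A sketch of the safe call, under that reading:
// copy only the region the source actually owns; the (larger) aligned
// destination simply keeps its padding untouched
av_picture_copy((AVPicture*) tmp_src_frame, (const AVPicture*) src_frame,
                AV_PIX_FMT_RGB32, screenWidth, screenHeight);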
EDIT 4
Crash at av_picture_copy(), which calls _aligned_malloc(), then av_image_copy(), _VEC_memcpy() and fastcopy_I():
//SOURCE
if (avpicture_fill((AVPicture*) src_frame, (uint8_t *) pPixels, AV_PIX_FMT_RGB32,
screenWidth, screenHeight) < 0)
qDebug () << "# avpicture_fill Fill picture with image failed"; //Fill picture with image
//Source TO TMP Source
avcodec_align_dimensions2 (c, &tmp_screenWidth, &tmp_screenHeight, tmp_src_frame->linesize);
if (avpicture_alloc ((AVPicture*) tmp_src_frame, AV_PIX_FMT_RGB32, tmp_screenWidth, tmp_screenHeight) < 0)
{
qDebug () << "# avpicture_alloc failed";
return;
}
av_picture_copy ((AVPicture*) tmp_src_frame, (const AVPicture*) src_frame, AV_PIX_FMT_RGB32,
tmp_screenWidth, tmp_screenHeight);
You're using avpicture_fill(), which is implemented like this:
int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
enum AVPixelFormat pix_fmt, int width, int height)
{
return av_image_fill_arrays(picture->data, picture->linesize,
ptr, pix_fmt, width, height, 1);
}
Note the last argument to av_image_fill_arrays(), align=1. This means buffer lines will be unaligned. Unfortunately, and this isn't clear at all in the documentation, most FFmpeg functions require buffer lines to be aligned to a power-of-two that allows SSE2 or AVX2 optimizations, e.g. align=32. See the second bullet point in this response on how to do that programmatically.
As an aside: in your test code you're using new (instead of av_malloc()) to allocate memory, and the pointer returned by new is also not guaranteed to be aligned to 32 bytes.
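A minimal sketch of such an aligned allocation (data/linesize/ret are illustrative local names, and pPixels is assumed to hold screenWidth x screenHeight RGB32 pixels with no row padding):
uint8_t* data[4];
int linesize[4];
// align = 32 so the SSE2/AVX2 code paths inside FFmpeg stay in bounds
int ret = av_image_alloc(data, linesize, screenWidth, screenHeight,
                         AV_PIX_FMT_RGB32, 32);
if (ret < 0)
    return; // allocation failed
// copy the capture row by row, honoring the (possibly padded) linesize
for (int y = 0; y < screenHeight; y++)
    memcpy(data[0] + y * linesize[0],
           pPixels + y * screenWidth * 4, screenWidth * 4);
// ... pass data/linesize to sws_scale() as the source planes, then:
av_freep(&data[0]);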