Using FFmpeg to encode webcam frames and send them over UDP in C++

I'm trying to get frames from a webcam using OpenCV, encode them with FFmpeg and send them over UDP.
I previously did a similar project that, instead of sending the packets over UDP, saved them to a video file.
My code is:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define WIDTH 640
#define HEIGHT 480
#define CODEC_ID AV_CODEC_ID_H264
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P
static AVFrame *frame, *pFrameBGR;
int main(int argc, char **argv)
{
VideoCapture cap(0);
const char *url = "udp://127.0.0.1:8080";
AVFormatContext *formatContext;
AVStream *stream;
AVCodec *codec;
AVCodecContext *c;
AVDictionary *opts = NULL;
int ret, got_packet;
if (!cap.isOpened())
{
return -1;
}
av_log_set_level(AV_LOG_TRACE);
av_register_all();
avformat_network_init();
avformat_alloc_output_context2(&formatContext, NULL, "h264", url);
if (!formatContext)
{
av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
}
codec = avcodec_find_encoder(CODEC_ID);
if (!codec)
{
av_log(NULL, AV_LOG_ERROR, "Could not find encoder.\n");
}
stream = avformat_new_stream(formatContext, codec);
c = avcodec_alloc_context3(codec);
stream->id = formatContext->nb_streams - 1;
stream->time_base = (AVRational){1, 25};
c->codec_id = CODEC_ID;
c->bit_rate = 400000;
c->width = WIDTH;
c->height = HEIGHT;
c->time_base = stream->time_base;
c->gop_size = 12;
c->pix_fmt = STREAM_PIX_FMT;
if (formatContext->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_dict_set(&opts, "preset", "fast", 0);
av_dict_set(&opts, "tune", "zerolatency", 0);
ret = avcodec_open2(c, codec, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}
pFrameBGR = av_frame_alloc();
if (!pFrameBGR)
{
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}
frame = av_frame_alloc();
if (!frame)
{
av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = avcodec_parameters_from_context(stream->codecpar, c);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}
av_dump_format(formatContext, 0, url, 1);
ret = avformat_write_header(formatContext, NULL);
if (ret != 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to connect to '%s'.\n", url);
}
Mat image(Size(HEIGHT, WIDTH), CV_8UC3);
SwsContext *swsctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
int frame_pts = 0;
while (1)
{
cap >> image;
int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV * sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameBGR, image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT);
avpicture_fill((AVPicture *)frame, bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT);
sws_scale(swsctx, (uint8_t const *const *)pFrameBGR->data, pFrameBGR->linesize, 0, HEIGHT, frame->data, frame->linesize);
AVPacket pkt = {0};
av_init_packet(&pkt);
frame->pts = frame_pts;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error encoding frame\n");
}
if (got_packet)
{
pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, c->time_base, stream->time_base);
pkt.stream_index = stream->index;
return av_interleaved_write_frame(formatContext, &pkt);
cout << "Seguro que si" << endl;
}
frame_pts++;
}
avcodec_free_context(&c);
av_frame_free(&frame);
avformat_free_context(formatContext);
return 0;
}
The code compiles, but it crashes with a segmentation fault in av_interleaved_write_frame(). I've tried several implementations and several codecs (in this case I'm using libopenh264, but mpeg2video gives the same segmentation fault). I also tried av_write_frame(), but it fails with the same error.
As I said before, I only want to grab frames from a webcam connected via USB, encode them to H.264 and send the packets over UDP to another PC.
This is the console log when I run the executable:
[100%] Built target display
[OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::SetOption():ENCODER_OPTION_TRACE_CALLBACK callback = 0x7f0c302a87c0.
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::InitEncoder(), openh264 codec version = 5a5c4f1
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:iUsageType = 0,iPicWidth= 640;iPicHeight= 480;iTargetBitrate= 400000;iMaxBitrate= 400000;iRCMode= 0;iPaddingFlag= 0;iTemporalLayerNum= 1;iSpatialLayerNum= 1;fFrameRate= 25.000000f;uiIntraPeriod= 12;eSpsPpsIdStrategy = 0;bPrefixNalAddingCtrl = 0;bSimulcastAVC=0;bEnableDenoise= 0;bEnableBackgroundDetection= 1;bEnableSceneChangeDetect = 1;bEnableAdaptiveQuant= 1;bEnableFrameSkip= 0;bEnableLongTermReference= 0;iLtrMarkPeriod= 30, bIsLosslessLink=0;iComplexityMode = 0;iNumRefFrame = 1;iEntropyCodingModeFlag = 0;uiMaxNalSize = 0;iLTRRefNum = 0;iMultipleThreadIdc = 1;iLoopFilterDisableIdc = 0 (offset(alpha/beta): 0,0;iComplexityMode = 0,iMaxQp = 51;iMinQp = 0)
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:sSpatialLayers[0]: .iVideoWidth= 640; .iVideoHeight= 480; .fFrameRate= 25.000000f; .iSpatialBitrate= 400000; .iMaxSpatialBitrate= 400000; .sSliceArgument.uiSliceMode= 1; .sSliceArgument.iSliceNum= 0; .sSliceArgument.uiSliceSizeConstraint= 1500;uiProfileIdc = 66;uiLevelIdc = 41
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:SliceArgumentValidationFixedSliceMode(), unsupported setting with Resolution and uiSliceNum combination under RC on! So uiSliceNum is changed to 6!
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:Setting MaxSpatialBitrate (400000) the same at SpatialBitrate (400000) will make the actual bit rate lower than SpatialBitrate
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:bEnableFrameSkip = 0,bitrate can't be controlled for RC_QUALITY_MODE,RC_BITRATE_MODE and RC_TIMESTAMP_MODE without enabling skip frame.
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:Change QP Range from(0,51) to (12,42)
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WELS CPU features/capacities (0x4007fe3f) detected: HTT: Y, MMX: Y, MMXEX: Y, SSE: Y, SSE2: Y, SSE3: Y, SSSE3: Y, SSE4.1: Y, SSE4.2: Y, AVX: Y, FMA: Y, X87-FPU: Y, 3DNOW: N, 3DNOWEX: N, ALTIVEC: N, CMOV: Y, MOVBE: Y, AES: Y, NUMBER OF LOGIC PROCESSORS ON CHIP: 8, CPU CACHE LINE SIZE (BYTES): 64
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt() exit, overall memory usage: 4542878 bytes
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt(), pCtx= 0x0x245a400.
Output #0, h264, to 'udp://192.168.100.39:8080':
Stream #0:0, 0, 1/25: Video: h264 (libopenh264), 1 reference frame, yuv420p, 640x480 (0x0), 0/1, q=2-31, 400 kb/s, 25 tbn
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:RcUpdateIntraComplexity iFrameDqBits = 385808,iQStep= 2016,iIntraCmplx = 777788928
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:[Rc]Layer 0: Frame timestamp = 0, Frame type = 2, encoding_qp = 30, average qp = 30, max qp = 33, min qp = 27, index = 0, iTid = 0, used = 385808, bitsperframe = 16000, target = 64000, remainingbits = -257808, skipbuffersize = 200000
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerNum = 2,iFrameSize = 48252
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 0,iNalType = 0,iNalCount = 2, first Nal Length=18,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 # 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 1,iNalType = 1,iNalCount = 6, first Nal Length=6057,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 # 0x244aa00] 6 slices
./scriptBuild.sh: line 20: 10625 Segmentation fault (core dumped) ./display
As you can see, FFmpeg uses libopenh264 and configures it correctly. However, no matter what I try, it always ends with the same segmentation fault...
I've used commands like this:
ffmpeg -s 640x480 -f video4linux2 -i /dev/video0 -r 30 -vcodec libopenh264 -an -f h264 udp://127.0.0.1:8080
And it works perfectly, but I need to process the frames before sending them. That's why I'm trying to use the libraries directly.
My FFmpeg version is:
ffmpeg version 3.3.6 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.8 (Ubuntu 4.8.4-2ubuntu1~14.04.3)
configuration: --disable-yasm --enable-shared --enable-libopenh264 --cc='gcc -fPIC'
libavutil 55. 58.100 / 55. 58.100
libavcodec 57. 89.100 / 57. 89.100
libavformat 57. 71.100 / 57. 71.100
libavdevice 57. 6.100 / 57. 6.100
libavfilter 6. 82.100 / 6. 82.100
libswscale 4. 6.100 / 4. 6.100
libswresample 2. 7.100 / 2. 7.100
I tried to get more information about the error using gdb, but it didn't give me any useful debugging info.
How can I solve this problem? I don't know what else I can try...
Thank you!

avpicture_fill() is deprecated. I think this is the source of the error. Try av_image_fill_arrays() instead.
An example call should look like this:
av_image_fill_arrays(pFrameBGR->data, /* destination */
pFrameBGR->linesize, /* destination */
image.data, /* source */
AV_PIX_FMT_BGR24, /* source */
WIDTH, HEIGHT, 1); /* source w+h & alignment */
Hope that helps.
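To make that concrete, here is a minimal, untested sketch of how the per-frame conversion in the question's loop might look with the non-deprecated calls. It reuses the question's globals and macros (frame, pFrameBGR, swsctx, image, cap, WIDTH, HEIGHT, STREAM_PIX_FMT) and allocates the YUV buffer once instead of on every iteration:

// Allocate the destination buffer once, before the capture loop.
int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV);
while (true)
{
    cap >> image;
    // Wrap the OpenCV BGR data and the YUV buffer (no pixel copy happens here).
    av_image_fill_arrays(pFrameBGR->data, pFrameBGR->linesize,
                         image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, 1);
    av_image_fill_arrays(frame->data, frame->linesize,
                         bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
    // Convert BGR24 -> YUV420P into the encoder frame.
    sws_scale(swsctx, pFrameBGR->data, pFrameBGR->linesize,
              0, HEIGHT, frame->data, frame->linesize);
    // ... set frame->pts, encode and write the packet as before ...
}
av_free(bufferYUV);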

Related

MP4 Created Using FFmpeg API Can't Be Played in Media Players

I've been struggling with this issue for days. There are similar issues posted here and around the web, but none of the solutions seem to work for me; perhaps they are outdated.
Here is the current iteration of the code I'm using to generate the MP4 file.
It generates a simple 2-second .mp4 file that fails to play in any player I've tried. If I run that .mp4 file back through the FFmpeg command line, it generates a perfectly playable movie out of it, so the data is there.
Also, if you change the output file name in this code from .mp4 to .avi, it generates a playable AVI file too. So whatever the problem is, it is tied to the H.264 format.
I'm sure I'm missing something simple, but for the life of me, I can't figure out what that is.
Any help would be greatly appreciated!
Here is a link to the VC++ project. MovieMaker.zip
MovieMaker.h
#pragma once
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
}
class FMovieMaker
{
public:
~FMovieMaker();
bool Initialize(const char* FileName, int Width = 1920, int Height = 1080, int FPS = 30, int BitRate = 2000);
bool RecordFrame(uint8_t* BGRAData);
bool Finalize();
bool IsInitialized() const { return bInitialized; }
int GetWidth() const { return CodecContext ? CodecContext->width : 0; }
int GetHeight() const { return CodecContext ? CodecContext->height : 0; }
private:
bool EncodeFrame(bool bFinalize);
void Log(const char* fmt, ...);
AVOutputFormat* OutputFormat = nullptr;
AVFormatContext* FormatContext = nullptr;
AVCodecContext* CodecContext = nullptr;
AVFrame* Frame = nullptr;
SwsContext* ColorConverter = nullptr;
int64_t RecordedFrames = 0;
bool bInitialized = false;
};
MovieMaker.cpp
#include "MovieMaker.h"
FMovieMaker::~FMovieMaker()
{
if (IsInitialized())
Finalize();
}
bool FMovieMaker::Initialize(const char* FileName, int Width /*= 1920*/, int Height /*= 1080*/, int FPS /*= 30*/, int BitRate /*= 2000*/)
{
OutputFormat = av_guess_format(nullptr, FileName, nullptr);
if (!OutputFormat)
{
Log("Couldn't guess the output format from the filename: %s", FileName);
return false;
}
AVCodecID CodecID = OutputFormat->video_codec;
if (CodecID == AV_CODEC_ID_NONE)
{
Log("Could not determine a codec to use");
return false;
}
/* allocate the output media context */
int ErrorCode = avformat_alloc_output_context2(&FormatContext, OutputFormat, nullptr, FileName);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to allocate format context: %s", Error);
return false;
}
else if (!FormatContext)
{
Log("Failed to get format from filename: %s", FileName);
return false;
}
/* find the video encoder */
const AVCodec* Codec = avcodec_find_encoder(CodecID);
if (!Codec)
{
Log("Codec '%d' not found", CodecID);
return false;
}
/* create the video stream */
AVStream* Stream = avformat_new_stream(FormatContext, Codec);
if (!Stream)
{
Log("Failed to allocate stream");
return false;
}
/* create the codec context */
CodecContext = avcodec_alloc_context3(Codec);
if (!CodecContext)
{
Log("Could not allocate video codec context");
return false;
}
Stream->codecpar->codec_id = OutputFormat->video_codec;
Stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
Stream->codecpar->width = Width;
Stream->codecpar->height = Height;
Stream->codecpar->format = AV_PIX_FMT_YUV420P;
Stream->codecpar->bit_rate = (int64_t)BitRate * 1000;
avcodec_parameters_to_context(CodecContext, Stream->codecpar);
CodecContext->time_base = { 1, FPS };
CodecContext->max_b_frames = 2;
CodecContext->gop_size = 12;
CodecContext->framerate = { FPS, 1 };
if (Stream->codecpar->codec_id == AV_CODEC_ID_H264)
av_opt_set(CodecContext, "preset", "medium", 0);
else if (Stream->codecpar->codec_id == AV_CODEC_ID_H265)
av_opt_set(CodecContext, "preset", "medium", 0);
avcodec_parameters_from_context(Stream->codecpar, CodecContext);
if (FormatContext->oformat->flags & AVFMT_GLOBALHEADER)
CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if ((ErrorCode = avcodec_open2(CodecContext, Codec, NULL)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to open codec: %s", Error);
return false;
}
if (!(OutputFormat->flags & AVFMT_NOFILE))
{
if ((ErrorCode = avio_open(&FormatContext->pb, FileName, AVIO_FLAG_WRITE)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to open file: %s", Error);
return false;
}
}
Stream->time_base = CodecContext->time_base;
if ((ErrorCode = avformat_write_header(FormatContext, NULL)) < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to write header: %s", Error);
return false;
}
CodecContext->time_base = Stream->time_base;
av_dump_format(FormatContext, 0, FileName, 1);
// create the frame
{
Frame = av_frame_alloc();
if (!Frame)
{
Log("Could not allocate video frame");
return false;
}
Frame->format = CodecContext->pix_fmt;
Frame->width = CodecContext->width;
Frame->height = CodecContext->height;
ErrorCode = av_frame_get_buffer(Frame, 32);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Could not allocate the video frame data: %s", Error);
return false;
}
}
// create a color converter
{
ColorConverter = sws_getContext(CodecContext->width, CodecContext->height, AV_PIX_FMT_BGRA,
CodecContext->width, CodecContext->height, AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
if (!ColorConverter)
{
Log("Could not allocate color converter");
return false;
}
}
bInitialized = true;
return true;
}
bool FMovieMaker::RecordFrame(uint8_t* BGRAData)
{
if (!bInitialized)
{
Log("Cannot record frames on an uninitialized Video Recorder");
return false;
}
/*make sure the frame data is writable */
int ErrorCode = av_frame_make_writable(Frame);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Could not make the frame writable: %s", Error);
return false;
}
/* convert the bgra bitmap data into yuv frame data */
int inLinesize[1] = { 4 * CodecContext->width }; // RGB stride
sws_scale(ColorConverter, &BGRAData, inLinesize, 0, CodecContext->height, Frame->data, Frame->linesize);
//Frame->pts = RecordedFrames++;
Frame->pts = CodecContext->time_base.den / CodecContext->time_base.num * CodecContext->framerate.den / CodecContext->framerate.num * (RecordedFrames++);
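// (time_base.den/num is ticks per second and framerate.den/num is seconds per frame,
// so their product is the number of ticks per frame; multiplied by the frame index
// this gives the pts in CodecContext->time_base units.)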
//The following assumes that codecContext->time_base = (AVRational){1, 1};
//Frame->pts = frameduration * (RecordedFrames++) * Stream->time_base.den / (Stream->time_base.num * fps);
//Frame->pts += av_rescale_q(1, CodecContext->time_base, Stream->time_base);
return EncodeFrame(false);
}
bool FMovieMaker::EncodeFrame(bool bFinalize)
{
/* send the frame to the encoder */
int ErrorCode = avcodec_send_frame(CodecContext, bFinalize ? nullptr : Frame);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Error sending a frame for encoding: %s", Error);
return false;
}
AVPacket Packet;
av_init_packet(&Packet);
Packet.data = NULL;
Packet.size = 0;
Packet.flags |= AV_PKT_FLAG_KEY;
Packet.pts = Frame->pts;
if (avcodec_receive_packet(CodecContext, &Packet) == 0)
{
//std::cout << "pkt key: " << (Packet.flags & AV_PKT_FLAG_KEY) << " " << Packet.size << " " << (counter++) << std::endl;
uint8_t* size = ((uint8_t*)Packet.data);
//std::cout << "first: " << (int)size[0] << " " << (int)size[1] << " " << (int)size[2] << " " << (int)size[3] << " " << (int)size[4] << " " << (int)size[5] << " " << (int)size[6] << " " << (int)size[7] << std::endl;
av_interleaved_write_frame(FormatContext, &Packet);
av_packet_unref(&Packet);
}
return true;
}
bool FMovieMaker::Finalize()
{
if (!bInitialized)
{
Log("Cannot finalize uninitialized Video Recorder");
return false;
}
//DELAYED FRAMES
AVPacket Packet;
av_init_packet(&Packet);
Packet.data = NULL;
Packet.size = 0;
for (;;)
{
avcodec_send_frame(CodecContext, NULL);
if (avcodec_receive_packet(CodecContext, &Packet) == 0)
{
av_interleaved_write_frame(FormatContext, &Packet);
av_packet_unref(&Packet);
}
else
break;
}
av_write_trailer(FormatContext);
if (!(OutputFormat->flags & AVFMT_NOFILE))
{
int ErrorCode = avio_close(FormatContext->pb);
if (ErrorCode < 0)
{
char Error[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(Error, AV_ERROR_MAX_STRING_SIZE, ErrorCode);
Log("Failed to close file: %s", Error);
}
}
if (Frame)
{
av_frame_free(&Frame);
Frame = nullptr;
}
if (CodecContext)
{
avcodec_free_context(&CodecContext);
CodecContext = nullptr;
}
if (FormatContext)
{
avformat_free_context(FormatContext);
FormatContext = nullptr;
}
if (ColorConverter)
{
sws_freeContext(ColorConverter);
ColorConverter = nullptr;
}
bInitialized = false;
return true;
}
void FMovieMaker::Log(const char* fmt, ...)
{
va_list args;
fprintf(stderr, "LOG: ");
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fprintf(stderr, "\n");
}
Main.cpp
#include "MovieMaker.h"
uint8_t FtoB(float x)
{
if (x <= 0.0f)
return 0;
if (x >= 1.0f)
return 255;
else
return (uint8_t)(x * 255.0f);
}
void SetPixelColor(float X, float Y, float Width, float Height, float t, uint8_t* BGRA)
{
t += 12.0f; // more interesting colors at this time
float P[2] = { 0.1f * X - 25.0f, 0.1f * Y - 25.0f };
float V = sqrtf(P[0] * P[0] + P[1] * P[1]);
BGRA[0] = FtoB(sinf(V + t / 0.78f));
BGRA[1] = FtoB(sinf(V + t / 10.0f));
BGRA[2] = FtoB(sinf(V + t / 36e2f));
BGRA[3] = 255;
}
int main()
{
FMovieMaker MovieMaker;
const char* FileName = "C:\\ffmpeg\\MyMovieMakerMovie.mp4";
int Width = 640;
int Height = 480;
int FPS = 30;
int BitRateKBS = 2000;
if (MovieMaker.Initialize(FileName, Width, Height, FPS, BitRateKBS))
{
int Size = Width * 4 * Height;
uint8_t* BGRAData = new uint8_t[Size];
memset(BGRAData, 255, Size);
for (float Frame = 0; Frame < 60; Frame++)
{
// fill the image data with something interesting
for (float Y = 0; Y < Height; Y++)
{
for (float X = 0; X < Width; X++)
{
SetPixelColor(X, Y, (float)Width, (float)Height, Frame / (float)FPS, &BGRAData[(int)(Y * Width + X) * 4]);
}
}
if (!MovieMaker.RecordFrame(BGRAData))
break;
}
delete[] BGRAData;
MovieMaker.Finalize();
}
}
If I include the lines that add the AV_CODEC_FLAG_GLOBAL_HEADER flag, as shown above, I get all sorts of issues in the output of ffprobe MyMovieMakerMovie.mp4.
C:\ffmpeg>ffprobe MyMovieMakerMovie.mp4
ffprobe version 4.2.2 Copyright (c) 2007-2019 the FFmpeg developers
built with gcc 9.2.1 (GCC) 20200122
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
libavutil 56. 31.100 / 56. 31.100
libavcodec 58. 54.100 / 58. 54.100
libavformat 58. 29.100 / 58. 29.100
libavdevice 58. 8.100 / 58. 8.100
libavfilter 7. 57.100 / 7. 57.100
libswscale 5. 5.100 / 5. 5.100
libswresample 3. 5.100 / 3. 5.100
libpostproc 55. 5.100 / 55. 5.100
[h264 # 000001d44b795b00] non-existing PPS 0 referenced
[h264 # 000001d44b795b00] decode_slice_header error
[h264 # 000001d44b795b00] no frame!
...
[h264 # 000001d44b795b00] non-existing PPS 0 referenced
[h264 # 000001d44b795b00] decode_slice_header error
[h264 # 000001d44b795b00] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 # 000001d44b783880] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 # 000001d44b783880] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 640x480, 20528 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'MyMovieMakerMovie.mp4':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2avc1mp41
encoder : Lavf58.29.100
Duration: 00:00:01.97, start: 0.000000, bitrate: 20529 kb/s
Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 640x480, 20528 kb/s, 30.51 fps, 30 tbr, 15360 tbn, 30720 tbc (default)
Metadata:
handler_name : VideoHandler
Without the AV_CODEC_FLAG_GLOBAL_HEADER flag, I get a clean output from ffprobe, but the video still doesn't play. Notice that it reports the frame rate as 30.51 fps; I'm not sure why.
C:\ffmpeg>ffprobe MyMovieMakerMovie.mp4
ffprobe version 4.2.2 Copyright (c) 2007-2019 the FFmpeg developers
built with gcc 9.2.1 (GCC) 20200122
configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
libavutil 56. 31.100 / 56. 31.100
libavcodec 58. 54.100 / 58. 54.100
libavformat 58. 29.100 / 58. 29.100
libavdevice 58. 8.100 / 58. 8.100
libavfilter 7. 57.100 / 7. 57.100
libswscale 5. 5.100 / 5. 5.100
libswresample 3. 5.100 / 3. 5.100
libpostproc 55. 5.100 / 55. 5.100
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'MyMovieMakerMovie.mp4':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2avc1mp41
encoder : Lavf58.29.100
Duration: 00:00:01.97, start: 0.000000, bitrate: 20530 kb/s
Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 640x480, 20528 kb/s, 30.51 fps, 30 tbr, 15360 tbn, 60 tbc (default)
Metadata:
handler_name : VideoHandler
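A note on that 30.51 fps figure, as an educated guess based only on the numbers above: ffprobe most likely just divides the frame count by the container duration. The demo encodes 60 frames at 30 fps, but the reported duration (00:00:01.97) only runs up to the last frame's timestamp, roughly 59/30 ≈ 1.967 s, and 60 frames / 1.967 s ≈ 30.51 fps. So the odd number looks like an artifact of how the average frame rate is computed rather than evidence of a wrong time base by itself.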

avcodec_open2 error -542398533 : "Generic error in an external library"

I am encountering an error when trying to open the codec with avcodec_open2(). The same code works without any problems if I specify avi instead of h264 in the av_guess_format() call.
I don't know what to make of it. Has anyone else encountered a similar problem?
The library I'm using is ffmpeg-20160219-git-98a0053-win32-dev. I would really appreciate it if you could help me out of this confusion.
This is my console output:
Video encoding
[libx264 # 01383460] broken ffmpeg default settings detected
[libx264 # 01383460] use an encoding preset (e.g. -vpre medium)
[libx264 # 01383460] preset usage: -vpre <speed> -vpre <profile>
[libx264 # 01383460] speed presets are listed in x264 --help
[libx264 # 01383460] profile is optional; x264 defaults to high
Cannot open video codec, -542398533
This is the code that I'm working with:
// Video encoding sample
AVCodec *codec = NULL;
AVCodecContext *codecCtx= NULL;
AVFormatContext *pFormatCtx = NULL;
AVOutputFormat *pOutFormat = NULL;
AVStream * pVideoStream = NULL;;
AVFrame *picture = NULL;;
int i, x, y, ret;
printf("Video encoding\n");
// Register all formats and codecs
av_register_all();
// guess format from file extension
pOutFormat = av_guess_format("h264", NULL, NULL);
if (NULL==pOutFormat){
cerr << "Could not guess output format" << endl;
return -1;
}
// allocate context
pFormatCtx = avformat_alloc_context();
pFormatCtx->oformat = pOutFormat;
memcpy(pFormatCtx->filename,filename,
min(strlen(filename), sizeof(pFormatCtx->filename)));
// Add stream to pFormatCtx
pVideoStream = avformat_new_stream(pFormatCtx, 0);
if (!pVideoStream)
{
printf("Cannot add new video stream\n");
return -1;
}
// Set stream's codec context
codecCtx = pVideoStream->codec;
codecCtx->codec_id = (AVCodecID)pOutFormat->video_codec;
codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
codecCtx->frame_number = 0;
// Put sample parameters.
codecCtx->bit_rate = 2000000;
// Resolution must be a multiple of two.
codecCtx->width = 320;
codecCtx->height = 240;
codecCtx->time_base.den = 10;
codecCtx->time_base.num = 1;
pVideoStream->time_base.den = 10;
pVideoStream->time_base.num = 1;
codecCtx->gop_size = 12; // emit one intra frame every twelve frames at most
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
if (codecCtx->codec_id == AV_CODEC_ID_H264)
{
// Just for testing, we also add B frames
codecCtx->mb_decision = 2;
}
// Some formats want stream headers to be separate.
if(pFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
{
codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if(codecCtx->codec_id == AV_CODEC_ID_H264)
av_opt_set(codecCtx->priv_data, "preset", "slow", 0);
// Open the codec.
codec = avcodec_find_encoder(codecCtx->codec_id);
if (codec == NULL) {
fprintf(stderr, "Codec not found\n");
return -1;
}
ret = avcodec_open2(codecCtx, codec, NULL); // returns -542398533 here
if (ret < 0)
{
printf("Cannot open video codec, %d\n",ret);
return -1;
}
Your problem is this line:
codecCtx = pVideoStream->codec;
This AVCodecContext was allocated with global defaults, which x264 rejects because they are not optimal. Instead, allocate it with avcodec_alloc_context3(), which will set x264-specific defaults. At the end of your encoding, don't forget to call avcodec_free_context() on the returned pointer.
You should also pass the codec to avformat_new_stream(), e.g. (using the output format's default video codec, since codecCtx does not exist yet at that point):
codec = avcodec_find_encoder(pOutFormat->video_codec);
pVideoStream = avformat_new_stream(pFormatCtx, codec);
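Putting both suggestions together, a rough, untested sketch of the stream/codec setup might look like the following. It reuses the question's pOutFormat and pFormatCtx, and assumes a build new enough to have AVCodecParameters (FFmpeg 3.1+) so the parameters can be copied back to the stream; the essential change is allocating the context with avcodec_alloc_context3() instead of using pVideoStream->codec:

AVCodec* codec = avcodec_find_encoder(pOutFormat->video_codec);
AVStream* pVideoStream = avformat_new_stream(pFormatCtx, codec);
AVCodecContext* codecCtx = avcodec_alloc_context3(codec); // codec-specific defaults

codecCtx->codec_id = codec->id;
codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
codecCtx->bit_rate = 2000000;
codecCtx->width = 320;   // must be a multiple of two
codecCtx->height = 240;
codecCtx->time_base = { 1, 10 };
codecCtx->gop_size = 12;
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pVideoStream->time_base = codecCtx->time_base;

if (pFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
    codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (codecCtx->codec_id == AV_CODEC_ID_H264)
    av_opt_set(codecCtx->priv_data, "preset", "slow", 0);

if (avcodec_open2(codecCtx, codec, NULL) < 0) { /* handle the error */ }
avcodec_parameters_from_context(pVideoStream->codecpar, codecCtx);
// ... encode ...
avcodec_free_context(&codecCtx); // when finished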

Can't open encoder when using libavcodec

I'm using libavcodec, version 9.7, to write a simple demo, almost exactly like the official example.
However, I can't open the encoder. Also, av_opt_set(context->priv_data, "preset", "slow", 0) always leads to a crash.
This is my code:
// other code...
int ret = 0;
avcodec_register_all();
AVCodec* codec = NULL;
AVCodecContext* context = NULL;
AVFrame* frame = NULL;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if(!codec){
qDebug()<<"cannot find encoder";
return;
}
qDebug()<<"encoder found";
context = avcodec_alloc_context3(codec);
if(!context){
qDebug()<<"cannot alloc context";
return;
}
qDebug()<<"context allocted";
context->bit_rate = 400000;
/* resolution must be a multiple of two */
context->width = 352;
context->height = 288;
/* frames per second */
context->time_base= (AVRational){1,25};
context->gop_size = 10; /* emit one intra frame every ten frames */
context->max_b_frames=1;
context->pix_fmt = AV_PIX_FMT_YUV420P;
qDebug()<<"context init";
// av_opt_set(context->priv_data, "preset", "slow", 0); // this will crash
AVDictionary *d = NULL;
av_dict_set(&d, "preset", "ultrafast",0); // this won't
ret = avcodec_open2(context, codec, &d);
if ( ret < 0) {
qDebug()<<"cannot open codec"<<ret;
return;
}
qDebug()<<"codec open";
// other code...
This outputs:
encoder found
context allocted
context init
cannot open codec -22
[libx264 # 0340B340] [IMGUTILS # 0028FC34] Picture size 0x10 is invalid
[libx264 # 0340B340] ignoring invalid width/height values
[libx264 # 0340B340] Specified pix_fmt is not supported
I don't think the width/height or the pixel format are actually invalid. I have no idea what's wrong here.
Any help, please?
This turned out to be an issue with the libav build, though I haven't checked its issue tracker. The code runs fine when I use another daily build, 20131101.
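As an aside, passing the private options through an AVDictionary as in the question also lets you check whether the encoder actually consumed them: entries left in the dictionary after avcodec_open2() are the ones that were rejected. A small sketch of that check, reusing the question's context, codec and qDebug():

AVDictionary *d = NULL;
av_dict_set(&d, "preset", "ultrafast", 0);
int ret = avcodec_open2(context, codec, &d);
if (ret < 0)
    qDebug() << "cannot open codec" << ret;
// Any entries still present were not recognized/consumed by the encoder.
AVDictionaryEntry *e = NULL;
while ((e = av_dict_get(d, "", e, AV_DICT_IGNORE_SUFFIX)))
    qDebug() << "option not consumed:" << e->key << "=" << e->value;
av_dict_free(&d);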

Encoding a screenshot into a video using FFMPEG

I'm trying to grab the pixels from the screen and encode the screenshot into a video using FFmpeg. I've seen a couple of examples, but they either assume you already have the pixel data or use image file input. It seems like whether or not I use sws_scale() (which is included in the examples I've seen), and whether I cast to an HBITMAP or an RGBQUAD*, it tells me the image source data is bad and encodes a blank image rather than the screenshot. Is there something I'm missing here?
AVCodec* codec;
AVCodecContext* c = NULL;
AVFrame* inpic;
uint8_t* outbuf, *picture_buf;
int i, out_size, size, outbuf_size;
HBITMAP hBmp;
//int x,y;
avcodec_register_all();
printf("Video encoding\n");
// Find the mpeg1 video encoder
codec = avcodec_find_encoder(CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
else printf("H264 codec found\n");
c = avcodec_alloc_context3(codec);
inpic = avcodec_alloc_frame();
c->bit_rate = 400000;
c->width = screenWidth; // resolution must be a multiple of two
c->height = screenHeight;
c->time_base.num = 1;
c->time_base.den = 25;
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
c->codec_id = CODEC_ID_H264;
//c->codec_type = AVMEDIA_TYPE_VIDEO;
//av_opt_set(c->priv_data, "preset", "slow", 0);
//printf("Setting presets to slow for performance\n");
// Open the encoder
if (avcodec_open2(c, codec,NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
else printf("H264 codec opened\n");
outbuf_size = 100000 + 12*c->width*c->height; // alloc image and output buffer
//outbuf_size = 100000;
outbuf = static_cast<uint8_t *>(malloc(outbuf_size));
size = c->width * c->height;
picture_buf = static_cast<uint8_t*>(malloc((size*3)/2));
printf("Setting buffer size to: %d\n",outbuf_size);
FILE* f = fopen("example.mpg","wb");
if(!f) printf("x - Cannot open video file for writing\n");
else printf("Opened video file for writing\n");
/*inpic->data[0] = picture_buf;
inpic->data[1] = inpic->data[0] + size;
inpic->data[2] = inpic->data[1] + size / 4;
inpic->linesize[0] = c->width;
inpic->linesize[1] = c->width / 2;
inpic->linesize[2] = c->width / 2;*/
//int x,y;
// encode 1 second of video
for(i=0;i<c->time_base.den;i++) {
fflush(stdout);
HWND hDesktopWnd = GetDesktopWindow();
HDC hDesktopDC = GetDC(hDesktopWnd);
HDC hCaptureDC = CreateCompatibleDC(hDesktopDC);
hBmp = CreateCompatibleBitmap(GetDC(0), screenWidth, screenHeight);
SelectObject(hCaptureDC, hBmp);
BitBlt(hCaptureDC, 0, 0, screenWidth, screenHeight, hDesktopDC, 0, 0, SRCCOPY|CAPTUREBLT);
BITMAPINFO bmi = {0};
bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
bmi.bmiHeader.biWidth = screenWidth;
bmi.bmiHeader.biHeight = screenHeight;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
RGBQUAD *pPixels = new RGBQUAD[screenWidth*screenHeight];
GetDIBits(hCaptureDC,hBmp,0,screenHeight,pPixels,&bmi,DIB_RGB_COLORS);
inpic->pts = (float) i * (1000.0/(float)(c->time_base.den))*90;
avpicture_fill((AVPicture*)inpic, (uint8_t*)pPixels, PIX_FMT_BGR32, c->width, c->height); // Fill picture with image
av_image_alloc(inpic->data, inpic->linesize, c->width, c->height, c->pix_fmt, 1);
//printf("Allocated frame\n");
//SaveBMPFile(L"screenshot.bmp",hBmp,hDc,screenWidth,screenHeight);
ReleaseDC(hDesktopWnd,hDesktopDC);
DeleteDC(hCaptureDC);
DeleteObject(hBmp);
// encode the image
out_size = avcodec_encode_video(c, outbuf, outbuf_size, inpic);
printf("Encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
// get the delayed frames
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
printf("Writing frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(picture_buf);
free(outbuf);
avcodec_close(c);
av_free(c);
av_free(inpic);
printf("Closed codec and Freed\n");
I managed to get it working after quite a bit of trial and error. I was allocating the image after filling it, which was the first problem. The second problem was that the screenshot was not being created properly, and it should have been cast to a COLORREF* rather than an RGBQUAD.
AVCodec* codec;
AVCodecContext* c = NULL;
uint8_t* outbuf;
int i, out_size, outbuf_size;
avcodec_register_all(); // mandatory to register ffmpeg functions
printf("Video encoding\n");
codec = avcodec_find_encoder(CODEC_ID_H264); // finding the H264 encoder
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
else printf("H264 codec found\n");
c = avcodec_alloc_context3(codec);
c->bit_rate = 400000;
c->width = 1280; // resolution must be a multiple of two (1280x720),(1900x1080),(720x480)
c->height = 720;
c->time_base.num = 1; // framerate numerator
c->time_base.den = 25; // framerate denominator
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames = 1; // maximum number of b-frames between non b-frames
c->keyint_min = 1; // minimum GOP size
c->i_quant_factor = (float)0.71; // qscale factor between P and I frames
c->b_frame_strategy = 20; ///// find out exactly what this does
c->qcompress = (float)0.6; ///// find out exactly what this does
c->qmin = 20; // minimum quantizer
c->qmax = 51; // maximum quantizer
c->max_qdiff = 4; // maximum quantizer difference between frames
c->refs = 4; // number of reference frames
c->trellis = 1; // trellis RD Quantization
c->pix_fmt = PIX_FMT_YUV420P; // universal pixel format for video encoding
c->codec_id = CODEC_ID_H264;
c->codec_type = AVMEDIA_TYPE_VIDEO;
if (avcodec_open2(c, codec,NULL) < 0) {
fprintf(stderr, "Could not open codec\n"); // opening the codec
exit(1);
}
else printf("H264 codec opened\n");
outbuf_size = 100000 + c->width*c->height*(32>>3); // allocate output buffer
outbuf = static_cast<uint8_t *>(malloc(outbuf_size));
printf("Setting buffer size to: %d\n",outbuf_size);
FILE* f = fopen("example.mpg","wb"); // opening video file for writing
if(!f) printf("x - Cannot open video file for writing\n");
else printf("Opened video file for writing\n");
// encode video
for(i=0;i<STREAM_FRAME_RATE*STREAM_DURATION;i++) {
fflush(stdout);
screenCap(); // taking screenshot
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height); // allocating outbuffer
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));
AVFrame* inpic = avcodec_alloc_frame(); // mandatory frame allocation
AVFrame* outpic = avcodec_alloc_frame();
outpic->pts = (int64_t)((float)i * (1000.0/((float)(c->time_base.den))) * 90); // setting frame pts
avpicture_fill((AVPicture*)inpic, (uint8_t*)pPixels, PIX_FMT_RGB32, c->width, c->height); // fill image with input screenshot
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height); // clear output picture for buffer copy
av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
inpic->data[0] += inpic->linesize[0]*(screenHeight-1); // flipping frame
inpic->linesize[0] = -inpic->linesize[0]; // flipping frame
struct SwsContext* fooContext = sws_getContext(screenWidth, screenHeight, PIX_FMT_RGB32, c->width, c->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(fooContext, inpic->data, inpic->linesize, 0, c->height, outpic->data, outpic->linesize); // converting frame size and format
out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic); // encoding video
printf("Encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
delete [] pPixels; // freeing memory
av_free(outbuffer);
av_free(inpic);
av_free(outpic);
}
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL); // encode the delayed frames
printf("Writing frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
outbuf[0] = 0x00;
outbuf[1] = 0x00; // add sequence end code to have a real mpeg file
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
avcodec_close(c); // freeing memory
free(outbuf);
av_free(c);
printf("Closed codec and Freed\n");

FFMpeg encoded video will only play in FFPlay

I've been debugging my program for a couple of weeks now, with the output video only showing a blank screen (I was testing with VLC, WMP and WMP Classic). I happened to try FFplay, and lo and behold, the video plays perfectly. I've read that this is usually caused by an incorrect pixel format, and that switching to PIX_FMT_YUV420P will make it work universally... but I'm already using that pixel format in the encoding process. Is there anything else that could be causing this?
AVCodec* codec;
AVCodecContext* c = NULL;
uint8_t* outbuf;
int i, out_size, outbuf_size;
avcodec_register_all();
printf("Video encoding\n");
// Find the mpeg1 video encoder
codec = avcodec_find_encoder(CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
else printf("H264 codec found\n");
c = avcodec_alloc_context3(codec);
c->bit_rate = 400000;
c->width = 1920; // resolution must be a multiple of two (1280x720),(1900x1080),(720x480)
c->height = 1200;
c->time_base.num = 1; // framerate numerator
c->time_base.den = 25; // framerate denominator
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames = 1; // maximum number of b-frames between non b-frames
//c->keyint_min = 1; // minimum GOP size
//c->i_quant_factor = (float)0.71; // qscale factor between P and I frames
//c->b_frame_strategy = 20;
//c->qcompress = (float)0.6;
//c->qmin = 20; // minimum quantizer
//c->qmax = 51; // maximum quantizer
//c->max_qdiff = 4; // maximum quantizer difference between frames
//c->refs = 4; // number of reference frames
//c->trellis = 1; // trellis RD Quantization
c->pix_fmt = PIX_FMT_YUV420P;
c->codec_id = CODEC_ID_H264;
//c->codec_type = AVMEDIA_TYPE_VIDEO;
// Open the encoder
if (avcodec_open2(c, codec,NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
else printf("H264 codec opened\n");
outbuf_size = 100000 + c->width*c->height*(32>>3);//*(32>>3); // alloc image and output buffer
outbuf = static_cast<uint8_t *>(malloc(outbuf_size));
printf("Setting buffer size to: %d\n",outbuf_size);
FILE* f = fopen("example.mpg","wb");
if(!f) printf("x - Cannot open video file for writing\n");
else printf("Opened video file for writing\n");
// encode 5 seconds of video
for(i=0;i<STREAM_FRAME_RATE*STREAM_DURATION;i++) {
fflush(stdout);
screenCap();
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));
AVFrame* inpic = avcodec_alloc_frame();
AVFrame* outpic = avcodec_alloc_frame();
outpic->pts = (int64_t)((float)i * (1000.0/((float)(c->time_base.den))) * 90);
avpicture_fill((AVPicture*)inpic, (uint8_t*)pPixels, PIX_FMT_RGB32, c->width, c->height); // Fill picture with image
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);
av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
inpic->data[0] += inpic->linesize[0]*(screenHeight-1); // Flipping frame
inpic->linesize[0] = -inpic->linesize[0]; // Flipping frame
struct SwsContext* fooContext = sws_getContext(screenWidth, screenHeight, PIX_FMT_RGB32, c->width, c->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(fooContext, inpic->data, inpic->linesize, 0, c->height, outpic->data, outpic->linesize);
// encode the image
out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
printf("Encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
delete [] pPixels;
av_free(outbuffer);
av_free(inpic);
av_free(outpic);
}
// get the delayed frames
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
printf("Writing frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
avcodec_close(c);
free(outbuf);
av_free(c);
printf("Closed codec and Freed\n");
Try saving your file as example.h264, instead of example.mpg.
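That is presumably because the code above never writes a container at all: the encoder output is dumped straight to disk with fwrite(), so the file is a raw H.264 elementary stream, not an MPEG program stream or MP4 despite the .mpg name. FFplay probes the bitstream and plays it anyway, while most players rely on the extension/container, so renaming the file to .h264 gives them the hint. If you need a file that plays everywhere, wrap the stream in a real container afterwards, for example with something like:
ffmpeg -framerate 25 -i example.h264 -c copy example.mp4
(Illustrative command only; adjust the frame rate to match your encoder settings.)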