FFmpeg H264: encode each single image - C++

I currently encode a QImage from RGB888 to H264, but I want to encode each image by itself (even if this is not the perfect way).
I am able to encode the image, but I have to send the same image 46 times before a packet comes out, and I don't know what I am doing wrong (probably a wrong encoder configuration, but I cannot find the issue there).
Afterwards I decode the image and convert it back to a QImage. I do this only for testing some other code.
avcodec_register_all();
AVCodec *nVidiaCodec = avcodec_find_encoder_by_name("h264_nvenc");
if (!nVidiaCodec)
{
return false;
}
AVCodecContext* av_codec_context_ = NULL;
av_codec_context_ = avcodec_alloc_context3(nVidiaCodec);
if (!av_codec_context_)
{
return false;
}
av_codec_context_->width = dst->width;
av_codec_context_->height = dst->height;
av_codec_context_->pix_fmt = AV_PIX_FMT_YUV420P;
av_codec_context_->gop_size = 1;
av_codec_context_->keyint_min = 0;
av_codec_context_->scenechange_threshold = 0;
av_codec_context_->bit_rate = 8000000;
av_codec_context_->time_base.den = 1;
av_codec_context_->time_base.num = 1;
av_codec_context_->refs = 0;
av_codec_context_->qmin = 1;
av_codec_context_->qmax = 1;
av_codec_context_->b_frame_strategy = 0;
av_codec_context_->max_b_frames = 0;
av_codec_context_->thread_count = 1;
av_opt_set(av_codec_context_, "preset", "slow", 0);
av_opt_set(av_codec_context_, "tune", "zerolatency", 0);
int ret = avcodec_open2(av_codec_context_, nVidiaCodec, NULL);
if (0 > ret)
{
return false;
}
AVFrame *picture = av_frame_alloc();
picture->format = AV_PIX_FMT_RGB24;
picture->width = dst->width;
picture->height = dst->height;
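// NOTE: avpicture_fill() is deprecated; av_image_fill_arrays() is the modern
// replacement for wrapping an existing pixel buffer in an AVFrame.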
ret = avpicture_fill((AVPicture *)picture, imgSrc.bits(), AV_PIX_FMT_RGB24, dst->width, dst->height);
if (0 > ret)
{
return false;
}
AVFrame *tmp_picture = av_frame_alloc();
tmp_picture->format = AV_PIX_FMT_YUV420P;
tmp_picture->width = dst->width;
tmp_picture->height = dst->height;
ret = av_frame_get_buffer(tmp_picture, 32);
SwsContext *img_convert_ctx = sws_getContext(av_codec_context_->width, av_codec_context_->height, AV_PIX_FMT_RGB24, av_codec_context_->width, av_codec_context_->height, av_codec_context_->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
if (!img_convert_ctx)
{
return false;
}
ret = sws_scale(img_convert_ctx, picture->data, picture->linesize, 0, av_codec_context_->height, tmp_picture->data, tmp_picture->linesize);
if (0 > ret)
{
return false;
}
ret = avcodec_send_frame(av_codec_context_, tmp_picture);
if (0 > ret)
{
return false;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
do
{
ret = avcodec_receive_packet(av_codec_context_, &pkt);
if (ret == 0)
{
break;
}
else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
{
return false;
}
else if (ret == AVERROR(EAGAIN))
{
ret = avcodec_send_frame(av_codec_context_, tmp_picture);
if (0 > ret)
{
return false;
}
}
} while (ret == 0);
// this do/while loop runs 46 times before I get the packet, but I want to get the packet on the first call
It would be very nice if you could help me.
Thanks, guys.

I assume you just want to encode a single frame. You need to flush the encoder after you have sent your single uncompressed frame, by sending NULL instead of a valid frame.
int result = 0;
// encoder init
// send one uncompressed frame
result = avcodec_send_frame(av_codec_context_, tmp_picture);
if (result < 0) return false;
// send NULL to indicate flushing
result = avcodec_send_frame(av_codec_context_, NULL);
if (result < 0) return false;
while (result != AVERROR_EOF)
{
result = avcodec_receive_packet(av_codec_context_, &pkt);
if (result == 0)
{
// you have your encoded frame; do something with it, then av_packet_unref(&pkt)
}
else if (result != AVERROR_EOF)
{
break; // a real error; EAGAIN cannot occur once flushing has started
}
}
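Putting the whole pattern together, here is a minimal self-contained sketch of the send-one-frame-then-flush sequence (the function name encode_single_frame and the std::vector output are illustrative, not from the question's code; the opened encoder context and the filled YUV frame are assumed to be set up as in the question):
#include <cstdint>
#include <vector>
extern "C"
{
#include "libavcodec/avcodec.h"
}
// Encode exactly one frame and collect the resulting packet bytes.
static bool encode_single_frame(AVCodecContext* enc_ctx, AVFrame* yuv_frame, std::vector<uint8_t>& out)
{
    AVPacket* pkt = av_packet_alloc();
    if (!pkt)
        return false;
    // One real frame, then NULL to switch the encoder into draining mode.
    if (avcodec_send_frame(enc_ctx, yuv_frame) < 0 || avcodec_send_frame(enc_ctx, NULL) < 0)
    {
        av_packet_free(&pkt);
        return false;
    }
    int ret = 0;
    while (ret != AVERROR_EOF)
    {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == 0)
        {
            out.insert(out.end(), pkt->data, pkt->data + pkt->size);
            av_packet_unref(pkt);
        }
        else if (ret != AVERROR_EOF)
        {
            // While draining, EAGAIN no longer occurs, so anything other than EOF is a real error.
            av_packet_free(&pkt);
            return false;
        }
    }
    av_packet_free(&pkt);
    return true;
}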

Related

Adding unregistered SEI data to every frame (ffmpeg / C++ / Windows)

I am working with FFmpeg 5.2, using it with C++ in Visual Studio. What I need to do is add an unregistered SEI message (type 5) to every frame of a stream. For that, I demux an MP4 container, take the video stream, decode every packet to get a frame, add the SEI message to every frame, then encode and remux a new video stream (video only) and save the new stream to a separate container.
To add the SEI data I use this specific code:
const char* sideDataMsg = "139FB1A9446A4DEC8CBF65B1E12D2CFDHola";
size_t sideDataSize = strlen(sideDataMsg); // sizeof(sideDataMsg) would measure the pointer, not the string
AVBufferRef* sideDataBuffer = av_buffer_alloc(sideDataSize);
memcpy(sideDataBuffer->data, sideDataMsg, sideDataSize); // copy into the buffer; reassigning ->data would leak the allocation
AVFrameSideData* sideData = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_SEI_UNREGISTERED, sideDataBuffer);
Regarding the format of sideDataMsg, I have tried several approaches, including setting it to "139FB1A9-446A-4DEC-8CBF65B1E12D2CFD+Hola!", which is indicated to be the required format in the H.264 specs (a 16-byte UUID followed by the payload). In memory I can see the SEI data being attached to every frame, but the resulting stream/container does not show the expected data. Below is my entire code; it is mostly taken/adapted from the doc/examples folder of the FFmpeg library.
BTW: I also tried setting AVCodecContext->export_side_data to different bit values (0 to 0xFF), understanding that this can tell the encoder to export the SEI data on every frame to be encoded, but no luck.
I appreciate in advance any help from you!
// FfmpegTests.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#pragma warning(disable : 4996)
extern "C"
{
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavutil/opt.h"
#include "libavutil/avutil.h"
#include "libavutil/error.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libswscale/swscale.h"
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#include <cstdio>
#include <iostream>
#include <chrono>
#include <thread>
static AVFormatContext* fmt_ctx;
static AVCodecContext* dec_ctx;
AVFilterGraph* filter_graph;
AVFilterContext* buffersrc_ctx;
AVFilterContext* buffersink_ctx;
static int video_stream_index = -1;
const char* filter_descr = "scale=78:24,transpose=cclock";
static int64_t last_pts = AV_NOPTS_VALUE;
// FOR SEI NAL INSERTION
const AVOutputFormat* ofmt = NULL;
AVFormatContext* ofmt_ctx = NULL;
int stream_index = 0;
int* stream_mapping = NULL;
int stream_mapping_size = 0;
int FRAMES_COUNT = 0;
const AVCodec* codec_enc;
AVCodecContext* c = NULL;
static int open_input_file(const char* filename)
{
const AVCodec* dec;
int ret;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the video stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
return ret;
}
video_stream_index = ret;
/* create decoding context */
dec_ctx = avcodec_alloc_context3(dec);
if (!dec_ctx)
return AVERROR(ENOMEM);
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
FRAMES_COUNT = fmt_ctx->streams[video_stream_index]->nb_frames;
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char* filters_descr)
{
char args[512];
int ret = 0;
const AVFilter* buffersrc = avfilter_get_by_name("buffer");
const AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
time_base.num, time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static void display_frame(const AVFrame* frame, AVRational time_base)
{
int x, y;
uint8_t* p0, * p;
int64_t delay;
if (frame->pts != AV_NOPTS_VALUE) {
if (last_pts != AV_NOPTS_VALUE) {
/* sleep roughly the right amount of time;
* usleep is in microseconds, just like AV_TIME_BASE. */
AVRational timeBaseQ;
timeBaseQ.num = 1;
timeBaseQ.den = AV_TIME_BASE;
delay = av_rescale_q(frame->pts - last_pts, time_base, timeBaseQ);
if (delay > 0 && delay < 1000000)
std::this_thread::sleep_for(std::chrono::microseconds(delay));
}
last_pts = frame->pts;
}
/* Trivial ASCII grayscale display. */
p0 = frame->data[0];
puts("\033c");
for (y = 0; y < frame->height; y++) {
p = p0;
for (x = 0; x < frame->width; x++)
putchar(" .-+#"[*(p++) / 52]);
putchar('\n');
p0 += frame->linesize[0];
}
fflush(stdout);
}
int save_frame_as_jpeg(AVCodecContext* pCodecCtx, AVFrame* pFrame, int FrameNo) {
int ret = 0;
const AVCodec* jpegCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
if (!jpegCodec) {
return -1;
}
AVCodecContext* jpegContext = avcodec_alloc_context3(jpegCodec);
if (!jpegContext) {
return -1;
}
jpegContext->pix_fmt = pCodecCtx->pix_fmt;
jpegContext->height = pFrame->height;
jpegContext->width = pFrame->width;
jpegContext->time_base = AVRational{ 1,10 };
jpegContext->strict_std_compliance = FF_COMPLIANCE_UNOFFICIAL;
ret = avcodec_open2(jpegContext, jpegCodec, NULL);
if (ret < 0) {
return ret;
}
FILE* JPEGFile;
char JPEGFName[256];
AVPacket packet;
packet.data = NULL;
packet.size = 0;
av_init_packet(&packet);
int gotFrame;
ret = avcodec_send_frame(jpegContext, pFrame);
if (ret < 0) {
return ret;
}
ret = avcodec_receive_packet(jpegContext, &packet);
if (ret < 0) {
return ret;
}
sprintf(JPEGFName, "c:\\folder\\dvr-%06d.jpg", FrameNo);
JPEGFile = fopen(JPEGFName, "wb");
fwrite(packet.data, 1, packet.size, JPEGFile);
fclose(JPEGFile);
av_packet_unref(&packet);
avcodec_close(jpegContext);
return 0;
}
int initialize_output_stream(AVFormatContext* input_fctx, const char* out_filename) {
int ret = 0;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
return -1;
}
stream_mapping_size = input_fctx->nb_streams;
stream_mapping = (int*)av_calloc(stream_mapping_size, sizeof(*stream_mapping));
if (!stream_mapping) {
ret = AVERROR(ENOMEM);
return -1;
}
for (int i = 0; i < input_fctx->nb_streams; i++) {
AVStream* out_stream;
AVStream* in_stream = input_fctx->streams[i];
AVCodecParameters* in_codecpar = in_stream->codecpar;
if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
stream_mapping[i] = -1;
continue;
}
stream_mapping[i] = stream_index++;
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
return ret;
}
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
if (ret < 0) {
fprintf(stderr, "Failed to copy codec parameters\n");
return -1;
}
out_stream->codecpar->codec_tag = 0;
}
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
return -1;
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
return -1;
}
// ENCODER
codec_enc = avcodec_find_encoder_by_name("libx264");
if (!codec_enc) {
fprintf(stderr, "Codec '%s' not found\n", "libx264");
return -1;
}
c = avcodec_alloc_context3(codec_enc);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
c->bit_rate = dec_ctx->bit_rate;
c->width = dec_ctx->width;
c->height = dec_ctx->height;
c->time_base = dec_ctx->time_base;
c->framerate = dec_ctx->framerate;
c->gop_size = dec_ctx->gop_size;
c->max_b_frames = dec_ctx->max_b_frames;
c->pix_fmt = dec_ctx->pix_fmt;
c->time_base = AVRational{ 1,1 };
c->export_side_data = 255;
if (codec_enc->id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
ret = avcodec_open2(c, codec_enc, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open codec\n");
return ret;
}
return 0;
}
int add_frame_output_stream(AVFrame* frame) {
int ret;
AVPacket* pkt;
pkt = av_packet_alloc();
ret = avcodec_send_frame(c, frame);
if (ret < 0) {
fprintf(stderr, "Error sending a frame for decoding\n");
return ret;
}
while (ret >= 0) {
ret = avcodec_receive_packet(c, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return 0;
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
return -1;
}
pkt->stream_index = stream_mapping[pkt->stream_index];
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
av_packet_unref(pkt);
}
return 0;
}
int main(int argc, char** argv)
{
AVFrame* frame;
AVFrame* filt_frame;
AVPacket* packet;
int ret, count = 0;
// FOR SEI NAL INSERTION
const char* out_filename;
if (argc < 2) {
fprintf(stderr, "Usage: %s file\n", argv[0]);
exit(1);
}
frame = av_frame_alloc();
filt_frame = av_frame_alloc();
packet = av_packet_alloc();
if (!frame || !filt_frame || !packet) {
fprintf(stderr, "Could not allocate frame or packet\n");
exit(1);
}
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
out_filename = argv[2];
initialize_output_stream(fmt_ctx, out_filename);
while (count < FRAMES_COUNT)
{
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
break;
if (packet->stream_index == video_stream_index) {
ret = avcodec_send_packet(dec_ctx, packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
break;
}
while (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
}
else if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
goto end;
}
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
// display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
/* ret = save_frame_as_jpeg(dec_ctx, frame, dec_ctx->frame_number);
if (ret < 0)
goto end; */
//2. Add metadata to frames SEI
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
const char* sideDataMsg = "139FB1A9446A4DEC8CBF65B1E12D2CFDHola";
size_t sideDataSize = strlen(sideDataMsg); // not sizeof(), which would measure the pointer
AVBufferRef* sideDataBuffer = av_buffer_alloc(sideDataSize);
memcpy(sideDataBuffer->data, sideDataMsg, sideDataSize); // copy instead of reassigning ->data
AVFrameSideData* sideData = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_SEI_UNREGISTERED, sideDataBuffer);
ret = add_frame_output_stream(frame);
if (ret < 0)
goto end;
}
av_frame_unref(frame);
count++;
}
}
av_packet_unref(packet);
}
av_write_trailer(ofmt_ctx);
end:
avfilter_graph_free(&filter_graph);
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
av_packet_free(&packet);
if (ret < 0 && ret != AVERROR_EOF) {
char errBuf[AV_ERROR_MAX_STRING_SIZE]{ 0 };
int res = av_strerror(ret, errBuf, AV_ERROR_MAX_STRING_SIZE);
fprintf(stderr, "Error: %s\n", errBuf);
exit(1);
}
exit(0);
}
Well, I came up with this solution from a friend; I just had to add:
av_opt_set_int(c->priv_data, "udu_sei", 1, 0);
in the function initialize_output_stream, after all parameters are set for the AVCodecContext (c) that is used for encoding the output stream.
Hope this helps someone!
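For context, the option goes on the encoder's private data before avcodec_open2() is called; a minimal sketch of the relevant lines in initialize_output_stream:
// ... c->bit_rate, c->width, c->height, c->time_base, c->pix_fmt already set ...
av_opt_set_int(c->priv_data, "udu_sei", 1, 0); // libx264 wrapper option: emit user-data-unregistered SEI from frame side data
ret = avcodec_open2(c, codec_enc, NULL);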

Capture and encode desktop with libav in real time not giving correct images

As part of a larger project I want to be able to capture and encode the desktop frame by frame in real time. I have the following test code to reproduce the issue shown in the screenshot:
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <string.h>
#include <math.h>
extern "C"
{
#include "libavdevice/avdevice.h"
#include "libavutil/channel_layout.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
/* 5 seconds stream duration */
#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
int videoStreamIndx;
int framerate = 30;
int width = 1920;
int height = 1080;
int encPacketCounter;
AVFormatContext* ifmtCtx;
AVCodecContext* avcodecContx;
AVFormatContext* ofmtCtx;
AVStream* videoStream;
AVCodecContext* avCntxOut;
AVPacket* avPkt;
AVFrame* avFrame;
AVFrame* outFrame;
SwsContext* swsCtx;
std::ofstream fs;
AVDictionary* ConfigureScreenCapture()
{
AVDictionary* options = NULL;
//Try adding "-rtbufsize 100M" as in https://stackoverflow.com/questions/6766333/capture-windows-screen-with-ffmpeg
av_dict_set(&options, "rtbufsize", "100M", 0);
av_dict_set(&options, "framerate", std::to_string(framerate).c_str(), 0);
char buffer[16];
sprintf(buffer, "%dx%d", width, height);
av_dict_set(&options, "video_size", buffer, 0);
return options;
}
AVCodecParameters* ConfigureAvCodec()
{
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
av_codec_par_out->width = width;
av_codec_par_out->height = height;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264; //AV_CODEC_ID_MPEG4; //Try H.264 instead of MPEG4
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
return av_codec_par_out;
}
int GetVideoStreamIndex()
{
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmtCtx, NULL);
/* find the first video stream index . Also there is an API available to do the below operations */
for (int i = 0; i < (int)ifmtCtx->nb_streams; i++) // find video stream position/index.
{
if (ifmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1)
{
}
return VideoStreamIndx;
}
void CreateFrames(AVCodecParameters* av_codec_par_in, AVCodecParameters* av_codec_par_out)
{
avFrame = av_frame_alloc();
avFrame->width = avcodecContx->width;
avFrame->height = avcodecContx->height;
avFrame->format = av_codec_par_in->format;
av_frame_get_buffer(avFrame, 0);
outFrame = av_frame_alloc();
outFrame->width = avCntxOut->width;
outFrame->height = avCntxOut->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(outFrame, 0);
}
bool Init()
{
AVCodecParameters* avCodecParOut = ConfigureAvCodec();
AVDictionary* options = ConfigureScreenCapture();
AVInputFormat* ifmt = av_find_input_format("gdigrab");
auto ifmtCtxLocal = avformat_alloc_context();
if (avformat_open_input(&ifmtCtxLocal, "desktop", ifmt, &options) < 0)
{
return false;
}
ifmtCtx = ifmtCtxLocal;
videoStreamIndx = GetVideoStreamIndex();
AVCodecParameters* avCodecParIn = avcodec_parameters_alloc();
avCodecParIn = ifmtCtx->streams[videoStreamIndx]->codecpar;
AVCodec* avCodec = avcodec_find_decoder(avCodecParIn->codec_id);
if (avCodec == NULL)
{
return false;
}
avcodecContx = avcodec_alloc_context3(avCodec);
if (avcodec_parameters_to_context(avcodecContx, avCodecParIn) < 0)
{
return false;
}
//av_dict_set
int value = avcodec_open2(avcodecContx, avCodec, NULL); //Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
return false;
}
AVOutputFormat* ofmt = av_guess_format("h264", NULL, NULL);
if (ofmt == NULL)
{
return false;
}
auto ofmtCtxLocal = avformat_alloc_context();
avformat_alloc_output_context2(&ofmtCtxLocal, ofmt, NULL, NULL);
if (ofmtCtxLocal == NULL)
{
return false;
}
ofmtCtx = ofmtCtxLocal;
AVCodec* avCodecOut = avcodec_find_encoder(avCodecParOut->codec_id);
if (avCodecOut == NULL)
{
return false;
}
videoStream = avformat_new_stream(ofmtCtx, avCodecOut);
if (videoStream == NULL)
{
return false;
}
avCntxOut = avcodec_alloc_context3(avCodecOut);
if (avCntxOut == NULL)
{
return false;
}
if (avcodec_parameters_copy(videoStream->codecpar, avCodecParOut) < 0)
{
return false;
}
if (avcodec_parameters_to_context(avCntxOut, avCodecParOut) < 0)
{
return false;
}
avCntxOut->gop_size = 30; //3; //Use I-Frame frame every 30 frames.
avCntxOut->max_b_frames = 0;
avCntxOut->time_base.num = 1;
avCntxOut->time_base.den = framerate;
//avio_open(&ofmtCtx->pb, "", AVIO_FLAG_READ_WRITE);
if (avformat_write_header(ofmtCtx, NULL) < 0)
{
return false;
}
value = avcodec_open2(avCntxOut, avCodecOut, NULL); //Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
return false;
}
if (avcodecContx->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(avCntxOut->priv_data, "preset", "ultrafast", 0);
av_opt_set(avCntxOut->priv_data, "zerolatency", "1", 0);
av_opt_set(avCntxOut->priv_data, "tune", "ull", 0);
}
if ((ofmtCtx->oformat->flags & AVFMT_GLOBALHEADER) != 0)
{
avCntxOut->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
CreateFrames(avCodecParIn, avCodecParOut);
swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0)
{
return false;
}
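// NOTE: the sws_alloc_context()/sws_init_context() pair above is redundant;
// sws_getContext() below allocates and initializes a fresh context, so the
// first allocation is simply leaked.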
swsCtx = sws_getContext(avcodecContx->width, avcodecContx->height, avcodecContx->pix_fmt,
avCntxOut->width, avCntxOut->height, avCntxOut->pix_fmt, SWS_FAST_BILINEAR,
NULL, NULL, NULL);
if (swsCtx == NULL)
{
return false;
}
return true;
}
void Encode(AVCodecContext* enc_ctx, AVFrame* frame, AVPacket* pkt)
{
int ret;
/* send the frame to the encoder */
ret = avcodec_send_frame(enc_ctx, frame);
if (ret < 0)
{
return;
}
while (ret >= 0)
{
ret = avcodec_receive_packet(enc_ctx, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
if (ret < 0)
{
return;
}
fs.write((char*)pkt->data, pkt->size);
av_packet_unref(pkt);
}
}
void EncodeFrames(int noFrames)
{
int frameCount = 0;
avPkt = av_packet_alloc();
AVPacket* outPacket = av_packet_alloc();
encPacketCounter = 0;
while (av_read_frame(ifmtCtx, avPkt) >= 0)
{
if (frameCount++ == noFrames)
break;
if (avPkt->stream_index != videoStreamIndx) continue;
avcodec_send_packet(avcodecContx, avPkt);
if (avcodec_receive_frame(avcodecContx, avFrame) >= 0) // Frame successfully decoded :)
{
outPacket->data = NULL; // packet data will be allocated by the encoder
outPacket->size = 0;
outPacket->pts = av_rescale_q(encPacketCounter, avCntxOut->time_base, videoStream->time_base);
outPacket->dts = av_rescale_q(encPacketCounter, avCntxOut->time_base, videoStream->time_base); // dts == pts (no B-frames are used)
outPacket->duration = av_rescale_q(1, avCntxOut->time_base, videoStream->time_base);
outFrame->pts = av_rescale_q(encPacketCounter, avCntxOut->time_base, videoStream->time_base);
outFrame->pkt_duration = av_rescale_q(encPacketCounter, avCntxOut->time_base, videoStream->time_base);
encPacketCounter++;
int sts = sws_scale(swsCtx,
avFrame->data, avFrame->linesize, 0, avFrame->height,
outFrame->data, outFrame->linesize);
/* make sure the frame data is writable */
auto ret = av_frame_make_writable(outFrame);
if (ret < 0)
break;
Encode(avCntxOut, outFrame, outPacket);
}
av_frame_unref(avFrame);
av_packet_unref(avPkt);
}
}
void Dispose()
{
fs.close();
auto ifmtCtxLocal = ifmtCtx;
avformat_close_input(&ifmtCtx);
avformat_free_context(ifmtCtx);
avcodec_free_context(&avcodecContx);
}
int main(int argc, char** argv)
{
avdevice_register_all();
fs.open("out.h264");
if (Init())
{
EncodeFrames(300);
}
else
{
std::cout << "Failed to Init \n";
}
Dispose();
return 0;
}
As far as I can tell, the setup of the encoding process is correct, as it is largely unchanged from the working example in the official documentation: https://libav.org/documentation/doxygen/master/encode__video_8c_source.html
However, there is limited documentation on desktop capture online, so I am not sure whether I have set that up correctly.
We have to open out.h264 as a binary file.
Replace fs.open("out.h264"); with fs.open("out.h264", std::ios::binary);.
The default file type on Windows is a "text file".
That means each \n is converted to \r\n when writing, and the encoded stream gets "messed up".
It took me quite a long time to figure out the problem...
There is another small issue:
there is a missing loop at the end that flushes the remaining encoded packets.
We can use ffprobe to count the number of encoded frames:
ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -print_format csv out.h264
The result is 263 instead of 300.
The solution is to add the following loop at the end of the EncodeFrames(int noFrames) function:
int ret = 0;
avcodec_send_frame(avCntxOut, NULL);
do
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(avCntxOut, outPacket);
if (!ret)
{
fs.write((char*)outPacket->data, outPacket->size);
}
} while (!ret);
This is not a direct solution to the problem, but it may help:
AVDictionary * pDic = NULL;
av_dict_set(&pDic, "tune", "zerolatency", 0);
avcodec_open2(avCntxOut, avCodecOut, &pDic);

Why does an image decoded by ffmpeg have a border?

After decoding some videos from an iPhone, I find the image has a strange border on the right.
It is not black but some random pixels (width: 4).
Why do I get the border?
How can I cut this border off or turn it black?
Most videos don't have this problem.
Here is the sample code; it loads a video from memory and decodes it:
int VideoParseFFmpeg::read_packet(void *opaque, uint8_t *buf, int buf_size) {
struct buffer_data *bd = (struct buffer_data *) opaque;
buf_size = FFMIN(buf_size, bd->size_);
if (!buf_size)
return AVERROR_EOF;
memcpy(buf, bd->ptr_, buf_size);
bd->ptr_ += buf_size;
bd->size_ -= buf_size;
return buf_size;
}
int VideoParseFFmpeg::LoadContent(const std::string &video_content) {
int ret = 0;
...
video_size_ = video_content.size();
avio_ctx_buffer_size_ = video_size_ + AV_INPUT_BUFFER_PADDING_SIZE;
avio_ctx_buffer_ = static_cast<uint8_t *>(av_malloc(avio_ctx_buffer_size_));
bd_.ptr_ = (uint8_t *)(video_content.c_str());
bd_.size_ = video_content.size();
av_register_all();
input_ctx_ = avformat_alloc_context();
avio_ctx_ = avio_alloc_context(avio_ctx_buffer_,
avio_ctx_buffer_size_,
0,
&bd_,
&read_packet,
NULL,
NULL);
if ((ret = av_probe_input_buffer(avio_ctx_, &in_fmt_, "", NULL, 0, 0)) < 0) {
LOGGER_WARN(Log::GetLog(), "fail to prob input, err [{}]", AVERROR(ret));
return -1;
}
input_ctx_->pb = avio_ctx_;
/* open the input file */
if ((ret = avformat_open_input(&input_ctx_, "", in_fmt_, NULL)) != 0) {
LOGGER_WARN(Log::GetLog(), "fail to open input, err [{}]", AVERROR(ret));
return -1;
}
if ((ret = avformat_find_stream_info(input_ctx_, NULL)) < 0) {
LOGGER_WARN(Log::GetLog(),
"fail to find input stream information, err[{}]",
AVERROR(ret));
return -1;
}
/* find the video stream information */
if ((ret = av_find_best_stream(input_ctx_, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder_, 0)) < 0) {
LOGGER_WARN(Log::GetLog(), "fail to find a video stream from input, err[{}]", ret);
return -1;
}
video_stream_idx_ = ret;
if (!(decoder_ctx_ = avcodec_alloc_context3(decoder_))) {
LOGGER_WARN(Log::GetLog(), "fail to alloc avcodec context");
return -1;
}
video_stream_ = input_ctx_->streams[video_stream_idx_];
if ((ret = avcodec_parameters_to_context(decoder_ctx_, video_stream_->codecpar)) < 0) {
LOGGER_WARN(Log::GetLog(), "fail to convert parameters to context, err [{}]", ret);
return -1;
}
if ((ret = avcodec_open2(decoder_ctx_, decoder_, NULL)) < 0) {
LOGGER_WARN(Log::GetLog(), "fail to open decodec, err[{}]", ret);
return -1;
}
return 0;
}
void VideoParseFFmpeg::GetFrames(std::vector<cv::Mat> &frames) {
while (1) {
...
decode_write(decoder_ctx_, &packet_, &buffer, frames);
...
}
//flush
...
}
cv::Mat VideoParseFFmpeg::avFrame2Mat(AVFrame *pAvFrame,
AVCodecContext *pCodecCtx,
AVPixelFormat src_fmt) {
AVFrame *pFrameBGR = av_frame_alloc(); // holds the converted BGR data after decoding
int w = pAvFrame->width;
int h = pAvFrame->height;
auto size = av_image_get_buffer_size(AV_PIX_FMT_BGR24, w, h, 1);
cv::Mat out(h, w, CV_8UC3);
int ret = av_image_fill_arrays(pFrameBGR->data,
pFrameBGR->linesize,
out.data,
AV_PIX_FMT_BGR24,
w,
h,
1);
img_convert_ctx_ = sws_getCachedContext(img_convert_ctx_,
w,
h,
ConvertDeprecatedFormat(src_fmt),
w,
h,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL, NULL, NULL);
sws_scale(img_convert_ctx_,
(const uint8_t *const *) pAvFrame->data,
pAvFrame->linesize,
0,
pCodecCtx->height,
pFrameBGR->data,
pFrameBGR->linesize);
av_frame_free(&pFrameBGR); // use av_frame_free() rather than av_free() for AVFrames
return out;
}
int VideoParseFFmpeg::decode_write(AVCodecContext *avctx,
AVPacket *packet,
uint8_t **buffer,
std::vector<cv::Mat> &p_mat_out) {
AVFrame *frame = NULL, *sw_frame = NULL;
AVFrame *tmp_frame = NULL;
int ret = 0;
ret = avcodec_send_packet(avctx, packet);
...
while (1) {
auto clear = [&frame, &sw_frame, this] {
if (frame != NULL)
av_frame_free(&frame);
if (sw_frame != NULL)
av_frame_free(&sw_frame);
av_packet_unref(&packet_);
};
...
ret = avcodec_receive_frame(avctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
clear();
return 0;
} else if (ret < 0) {
LOGGER_WARN(Log::GetLog(), "error while decoding, err[{}]", AVERROR(ret));
clear();
return ret;
}
tmp_frame = frame;
p_mat_out.push_back(avFrame2Mat(tmp_frame,
avctx,
(AVPixelFormat) tmp_frame->format));
clear();
}
}
After replacing lib tcmalloc with a newer version, the border turns into a black line (it used to be a random line).
There is image content in the area of the border in the original image; how can I fix it?
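No definitive cause is established above, but if the extra columns come from the stream's coded (macroblock-aligned) width rather than its display width, a pragmatic workaround is to blank or crop those columns in the cv::Mat returned by avFrame2Mat. A sketch, with the 4-pixel width taken from the question:
const int border = 4; // artifact width observed above
out.colRange(out.cols - border, out.cols).setTo(cv::Scalar(0, 0, 0)); // turn the border black
// ...or remove it entirely:
cv::Mat cropped = out(cv::Rect(0, 0, out.cols - border, out.rows)).clone();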

Add actual timestamp to mp4 using ffmpeg

I'm using ffmpeg to write an h264 stream to a mp4 file.
Everything is working, but now I need to embed into this file the actual timestamp in milliseconds of each frame.
Is it possible?
This is my code:
void mp4_file_create(mp4_par * par, t_image * img_in)
{
AVCodec * codec = NULL;
AVCodecContext * cc_in;
AVFormatContext * av_fmt_ctx_out;
AVStream * av_stream;
AVPacket av_pkt;
AVFormatContext * ifmt_ctx;
unsigned long long last_frame_ts_utc;
unsigned long long last_frame_ts_absolute;
unsigned long long last_pts;
t_mp4_dict_metadata metadata;
char file_name[1024];
char TSstrdate[128];
av_register_all();
cc_in = NULL;
av_stream = NULL;
if (avformat_alloc_output_context2(&av_fmt_ctx_out, NULL, NULL, file_name) < 0) {
trace_error("avformat_alloc_output_context2 failed");
goto FnExit;
}
/* find the H264 RAW encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
int ret;
AVStream *in_stream = NULL;
if (av_fmt_ctx_in == NULL)
{
trace_error("av_fmt_ctx_in is NULL");
goto FnExit;
}
in_stream = av_fmt_ctx_in->streams[0];
in_stream->codec->width = par->width;
in_stream->codec->height = par->height;
in_stream->codec->coded_width = par->width;
in_stream->codec->coded_height = par->height;
in_stream->codec->bit_rate = 1024;
in_stream->codec->flags = CODEC_FLAG_GLOBAL_HEADER;
in_stream->codec->time_base.num = 1;
in_stream->codec->time_base.den = par->frame_rate;
in_stream->codec->gop_size = par->gop;
in_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
av_stream = avformat_new_stream(av_fmt_ctx_out, in_stream->codec->codec);
if (!av_stream) {
trace_error("Failed allocating output stream");
goto FnExit;
}
ret = avcodec_copy_context(av_stream->codec, in_stream->codec);
if (ret != 0) {
goto FnExit;
}
}
else {
int ret;
av_stream = avformat_new_stream(av_fmt_ctx_out, NULL);
if (!av_stream) {
goto FnExit;
}
cc_in = avcodec_alloc_context3(codec);
if (cc_in == NULL) {
goto FnExit;
}
cc_in->width = par->width;
cc_in->height = par->height;
cc_in->bit_rate = 1024;
cc_in->flags = CODEC_FLAG_GLOBAL_HEADER;
cc_in->time_base.num = 1;
cc_in->time_base.den = par->frame_rate;
cc_in->gop_size = par->gop;
cc_in->pix_fmt = AV_PIX_FMT_YUVJ420P;
cc_in->extradata = (unsigned char*)av_mallocz(sizeof(sample_spspps));
cc_in->extradata_size = sizeof(sample_spspps);
memcpy(cc_in->extradata, sample_spspps, cc_in->extradata_size);
ret = avcodec_copy_context(av_stream->codec, cc_in);
if (ret != 0) {
goto FnExit;
}
}
av_stream->codec->codec_tag = 0;
if (av_fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
av_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (!(av_fmt_ctx_out->flags & AVFMT_NOFILE)) {
int ret = avio_open(&av_fmt_ctx_out->pb, file_name, AVIO_FLAG_READ_WRITE);
if (ret < 0) {
trace_error("Could not open output file '%s'", file_name);
goto FnExit;
}
}
av_fmt_ctx_out->streams[0]->time_base.num = 1;
av_fmt_ctx_out->streams[0]->time_base.den = par->frame_rate;
av_fmt_ctx_out->streams[0]->codec->time_base.num = 1;
av_fmt_ctx_out->streams[0]->codec->time_base.den = par->frame_rate;
AVRational fps;
fps.num = par->frame_rate; // r_frame_rate is a rate (frames per second), not a time base
fps.den = 1;
av_stream_set_r_frame_rate(av_fmt_ctx_out->streams[0], fps);
av_fmt_ctx_out->streams[0]->first_dts = AV_TIME_BASE;
av_dict_set(&pMetaData, "title", par.guid_video_function, 0);
av_dict_set(&pMetaData, "artist", "Test Artist", 0);
av_dict_set(&pMetaData, "date", TSstrdate, 0);
av_fmt_ctx_out->metadata = pMetaData;
if (avformat_write_header(av_fmt_ctx_out, NULL) < 0) {
goto FnExit;
}
//............. Now for each frame_rate........
av_init_packet(&av_pkt);
if (first_frame)
{
av_pkt.pts = 0;
av_pkt.dts = 0;
}
else
{
av_pkt.pts = last_pts + (long long int)((img_in->timestamp_absolute - last_frame_ts_absolute) * (unsigned long long)av_stream->time_base.den / 1000000ULL);
av_pkt.dts = av_pkt.pts; // same expression as pts; no B-frames in this stream
}
av_pkt.duration = 0;
av_pkt.pos = -1;
last_frame_ts_utc = img_in->timestamp_utc.t;
last_frame_ts_absolute = img_in->timestamp_absolute.t;
last_pts = av_pkt.pts;
if (img_in->type == keyframe)
{
av_pkt.flags |= AV_PKT_FLAG_KEY;
}
av_pkt.data = img_in->ptr;
av_pkt.size = img_in->size;
av_pkt.stream_index = av_stream->index;
int ret = av_interleaved_write_frame(av_fmt_ctx_out, &av_pkt);
if (ret != 0) {
char strE[256];
av_strerror(ret, strE, sizeof(strE));
trace_error("av_write_frame returns %d - %s", ret, strE);
return;
}
//........then I will close the file
FnExit:
if (av_fmt_ctx_out && av_fmt_ctx_out->pb != NULL) {
if (av_write_trailer(av_fmt_ctx_out) != 0) {
trace_error("av_write_trailer Error!");
}
}
if (ifmt_ctx)
avformat_close_input(&ifmt_ctx);
avio_closep(&av_fmt_ctx_out->pb);
avcodec_close(av_stream->codec);
avformat_free_context(av_fmt_ctx_out);
}
How can I modify it in order to embed the actual timestamp of each frame?
I tried adding the actual timestamp to the first frame's pts instead of setting it to zero, but it didn't work.
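One building block that replaces the hand-written arithmetic above is av_rescale_q(), which converts a value between two time bases; a wall-clock delta in milliseconds can be turned into a pts in the stream's time base like this (a sketch; first_ms and now_ms are assumed to come from the capture source):
int64_t first_ms = 0; // wall-clock time of the first frame, in milliseconds
int64_t now_ms = 0;   // wall-clock time of the current frame, in milliseconds
av_pkt.pts = av_rescale_q(now_ms - first_ms, AVRational{ 1, 1000 }, av_stream->time_base);
av_pkt.dts = av_pkt.pts; // the stream has no B-frames, so dts can equal pts
The absolute start time can then be recorded once for the whole file, e.g. in the muxer's creation_time metadata entry, since MP4 has no standard per-frame wall-clock field.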

FFMPEG (C++) convert & compress a single image out of buffer

I try to encode (with compression) and decode (without compression) an image with FFmpeg. But when I want to get the sent image back with avcodec_receive_packet, I only get the error AVERROR(EAGAIN).
It doesn't matter what I change ... the outcome is always AVERROR(EAGAIN). Is it maybe a problem of sending just one single frame to the encoder? And if so, how do I fix it?
Code (only relevant stuff shown):
avcodec_register_all();
/* ------ init codec ------------------*/
AVCodec *codec;
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec)
{
print("compressH264, could not find decoder:\"AV_CODEC_ID_H264\"!!!");
return false;
}
AVCodec *nVidiaCodec = avcodec_find_encoder_by_name("h264_nvenc");
if (!nVidiaCodec)
{
print("err");
}
/* ------ ------------ ------------------*/
/* ------ init context ------------------*/
AVCodecContext* av_codec_context_ = NULL;
av_codec_context_ = avcodec_alloc_context3(nVidiaCodec);
if (!av_codec_context_)
{
print("compressH264, avcodec_alloc_context3 failed!!!");
return false;
}
int w = imgSrc.width();
int h = imgSrc.height();
if ((w % 2) != 0)
{
++w;
}
if ((h % 2) != 0)
{
++h;
}
av_codec_context_->width = w;
av_codec_context_->height = h;
av_codec_context_->pix_fmt = AV_PIX_FMT_YUV420P;
av_codec_context_->gop_size = 1;
av_codec_context_->max_b_frames = 1;
av_codec_context_->bit_rate = 400000;
av_codec_context_->time_base.den = 1;
av_codec_context_->time_base.num = 1;
av_opt_set(av_codec_context_->priv_data, "preset", "slow", 0);
int ret = avcodec_open2(av_codec_context_, nVidiaCodec, NULL);
if (0 > ret)
{
print("compressH264, could not open codec context for decoder:\"AV_CODEC_ID_H264\"!!!");
return false;
}
AVFrame *picture = av_frame_alloc();
picture->format = AV_PIX_FMT_RGB24;
picture->width = w;
picture->height = h;
ret = avpicture_fill((AVPicture *)picture, imgSrc.bits(), AV_PIX_FMT_RGB24, w, h);
if (0 > ret)
{
print("compressH264, avpicture_fill - failed!!!");
return false;
}
AVFrame *tmp_picture = av_frame_alloc();
tmp_picture->format = AV_PIX_FMT_YUV420P;
tmp_picture->width = w;
tmp_picture->height = h;
ret = av_frame_get_buffer(tmp_picture, 32);
SwsContext *img_convert_ctx = sws_getContext(av_codec_context_->width, av_codec_context_->height, AV_PIX_FMT_RGB24, av_codec_context_->width, av_codec_context_->height, av_codec_context_->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
ret = sws_scale(img_convert_ctx, picture->data, picture->linesize, 0, av_codec_context_->height, tmp_picture->data, tmp_picture->linesize);
int h264Size = avpicture_get_size(AV_PIX_FMT_YUV420P, w, h);
ret = avcodec_send_frame(av_codec_context_, tmp_picture);
if (0 > ret)
{
char err[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret);
print("compressH264, avcodec_send_frame: %s", err);
}
AVPacket *pkt = av_packet_alloc();
while (ret >= 0)
{
ret = avcodec_receive_packet(av_codec_context_, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
break;
}
else if (ret < 0)
{
fprintf(stderr, "Error during encoding\n");
exit(1);
}
av_packet_unref(pkt);
}
print("success");
Everything works well until:
- avcodec_receive_packet ... I get the error AVERROR(EAGAIN) every time.
I can only start decoding once I have the compressed image.
Thanks for your help, guys.
Edit:
If I now run the following code, I get a packet and ret == 0, but I have to send the same image 46 times ... this makes no sense to me.
do
{
ret = avcodec_receive_packet(av_codec_context_, &pkt);
if (ret == 0)
{
break;
}
else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
{
coutF("error");
}
else if (ret == AVERROR(EAGAIN))
{
ret = avcodec_send_frame(av_codec_context_, tmp_picture);
if (0 > ret)
{
char err[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret);
coutFRed("compressH264, avcodec_send_frame: %s", err);
}
coutF("cnt:%d", ++cnt);
}
} while (ret == 0);
Edit:
Good morning,
after more investigation, I found the issue: I have to send the same frame many times because of the keyframe handling in H264. The question now is whether it is possible to strip that standard H264 behavior from the encoder and just let FFmpeg convert one single frame.
I am not sure, but following an ffmpeg example it seems that this just means it is done and you should return, like they do in this code snippet:
/* if no more frames for output - returns AVERROR(EAGAIN)
* if flushed and no more frames for output - returns AVERROR_EOF
* rewrite retcode to 0 to show it as normal procedure completion
*/
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
In their comments they seem to imply that it signals "normal procedure completion".
The answer to this issue is the following:
do
{
ret = avcodec_receive_packet(av_codec_context_, &pkt);
if (ret == 0)
{
break;
}
else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
{
return false;
}
else if (ret == AVERROR(EAGAIN))
{
ret = avcodec_send_frame(av_codec_context_, NULL);
if (0 > ret)
{
return false;
}
}
} while (ret == 0);
The NULL frame flushes the encoder, and we are able to get the encoded packet.
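One caveat to add: after draining with the NULL frame the encoder stays in EOF state, so to encode the next single image with the same context it has to be reset first. A sketch based on the documented API (encoders only support flushing if they declare AV_CODEC_CAP_ENCODER_FLUSH; otherwise the context must be torn down and reopened):
if (av_codec_context_->codec->capabilities & AV_CODEC_CAP_ENCODER_FLUSH)
{
    avcodec_flush_buffers(av_codec_context_); // leave draining mode, keep the context
}
else
{
    avcodec_free_context(&av_codec_context_); // no flush support: free it...
    // ...and repeat the avcodec_alloc_context3()/avcodec_open2() setup for the next image
}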