I'm trying to create an mp4 video file with FFmpeg and C++, but the result is a broken file (the Windows player shows "Can't play ... 0xc00d36c4"). If I create an .h264 file instead, it can be played with 'ffplay' and successfully converted to mp4 via the command line.
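A plain stream copy is enough for that conversion, e.g. (assuming the raw file is named tmp.h264):
ffmpeg -i tmp.h264 -c copy tmp.mp4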
My code:
int main() {
char *filename = "tmp.mp4";
AVOutputFormat *fmt;
AVFormatContext *fctx;
AVCodecContext *cctx;
AVStream *st;
av_register_all();
avcodec_register_all();
//auto detect the output format from the name
fmt = av_guess_format(NULL, filename, NULL);
if (!fmt) {
cout << "Error av_guess_format()" << endl; system("pause"); exit(1);
}
if (avformat_alloc_output_context2(&fctx, fmt, NULL, filename) < 0) {
cout << "Error avformat_alloc_output_context2()" << endl; system("pause"); exit(1);
}
//stream creation + parameters
st = avformat_new_stream(fctx, 0);
if (!st) {
cout << "Error avformat_new_stream()" << endl; system("pause"); exit(1);
}
st->codecpar->codec_id = fmt->video_codec;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->width = 352;
st->codecpar->height = 288;
st->time_base.num = 1;
st->time_base.den = 25;
AVCodec *pCodec = avcodec_find_encoder(st->codecpar->codec_id);
if (!pCodec) {
cout << "Error avcodec_find_encoder()" << endl; system("pause"); exit(1);
}
cctx = avcodec_alloc_context3(pCodec);
if (!cctx) {
cout << "Error avcodec_alloc_context3()" << endl; system("pause"); exit(1);
}
avcodec_parameters_to_context(cctx, st->codecpar);
cctx->bit_rate = 400000;
cctx->width = 352;
cctx->height = 288;
cctx->time_base.num = 1;
cctx->time_base.den = 25;
cctx->gop_size = 12;
cctx->pix_fmt = AV_PIX_FMT_YUV420P;
if (st->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx->priv_data, "preset", "ultrafast", 0);
}
if (fctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avcodec_parameters_from_context(st->codecpar, cctx);
av_dump_format(fctx, 0, filename, 1);
//OPEN FILE + WRITE HEADER
if (avcodec_open2(cctx, pCodec, NULL) < 0) {
cout << "Error avcodec_open2()" << endl; system("pause"); exit(1);
}
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fctx->pb, filename, AVIO_FLAG_WRITE) < 0) {
cout << "Error avio_open()" << endl; system("pause"); exit(1);
}
}
if (avformat_write_header(fctx, NULL) < 0) {
cout << "Error avformat_write_header()" << endl; system("pause"); exit(1);
}
//CREATE DUMMY VIDEO
AVFrame *frame = av_frame_alloc();
frame->format = cctx->pix_fmt;
frame->width = cctx->width;
frame->height = cctx->height;
av_image_alloc(frame->data, frame->linesize, cctx->width, cctx->height, cctx->pix_fmt, 32);
AVPacket pkt;
double video_pts = 0;
for (int i = 0; i < 50; i++) {
video_pts = (double)cctx->time_base.num / cctx->time_base.den * 90 * i;
for (int y = 0; y < cctx->height; y++) {
for (int x = 0; x < cctx->width; x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
if (y < cctx->height / 2 && x < cctx->width / 2) {
/* Cb and Cr */
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
}
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.pts = frame->pts = video_pts;
pkt.data = NULL;
pkt.size = 0;
pkt.stream_index = st->index;
if (avcodec_send_frame(cctx, frame) < 0) {
cout << "Error avcodec_send_frame()" << endl; system("pause"); exit(1);
}
if (avcodec_receive_packet(cctx, &pkt) == 0) {
//cout << "Write frame " << to_string((int) pkt.pts) << endl;
av_interleaved_write_frame(fctx, &pkt);
av_packet_unref(&pkt);
}
}
//DELAYED FRAMES
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
//cout << "-Write frame " << to_string((int)pkt.pts) << endl;
av_interleaved_write_frame(fctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
//FINISH
av_write_trailer(fctx);
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_close(fctx->pb) < 0) {
cout << "Error avio_close()" << endl; system("pause"); exit(1);
}
}
av_frame_free(&frame);
avcodec_free_context(&cctx);
avformat_free_context(fctx);
system("pause");
return 0;
}
Output of program:
Output #0, mp4, to 'tmp.mp4':
Stream #0:0: Video: h264, yuv420p, 352x288, q=2-31, 400 kb/s, 25 tbn
[libx264 # 0000021c4a995ba0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 # 0000021c4a995ba0] profile Constrained Baseline, level 2.0
[libx264 # 0000021c4a995ba0] 264 - core 152 r2851 ba24899 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=0 ref=1 deblock=0:0:0 analyse=0:0 me=dia subme=0 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=12 keyint_min=1 scenecut=0 intra_refresh=0 rc=abr mbtree=0 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=0
[libx264 # 0000021c4a995ba0] frame I:5 Avg QP: 7.03 size: 9318
[libx264 # 0000021c4a995ba0] frame P:45 Avg QP: 4.53 size: 4258
[libx264 # 0000021c4a995ba0] mb I I16..4: 100.0% 0.0% 0.0%
[libx264 # 0000021c4a995ba0] mb P I16..4: 0.0% 0.0% 0.0% P16..4: 100.0% 0.0% 0.0% 0.0% 0.0% skip: 0.0%
[libx264 # 0000021c4a995ba0] final ratefactor: 9.11
[libx264 # 0000021c4a995ba0] coded y,uvDC,uvAC intra: 18.9% 21.8% 14.5% inter: 7.8% 100.0% 15.5%
[libx264 # 0000021c4a995ba0] i16 v,h,dc,p: 4% 5% 5% 86%
[libx264 # 0000021c4a995ba0] i8c dc,h,v,p: 2% 9% 6% 82%
[libx264 # 0000021c4a995ba0] kb/s:264.68
If I try to play the mp4 file with 'ffplay', it prints:
[mov,mp4,m4a,3gp,3g2,mj2 # 00000000026bf900] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 138953 kb/s): unspecified pixel format
[h264 # 00000000006c6ae0] non-existing PPS 0 referenced
[h264 # 00000000006c6ae0] decode_slice_header error
[h264 # 00000000006c6ae0] no frame!
I've spent a lot of time trying to find the issue without success. What could be the reason for it?
Thanks for any help!
The current solution is wrong. It's a workaround at best. It encodes the video into h264 first and then remuxes it into mp4. This is not necessary.
The true solution is removing:
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
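(Note: the other answers below keep this flag but call avcodec_parameters_from_context() only after avcodec_open2(), which also gets the generated extradata into the stream parameters; either way the mp4 muxer ends up with the SPS/PPS it needs.)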
This is my full solution:
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
AVFrame* videoFrame = nullptr;
AVCodecContext* cctx = nullptr;
SwsContext* swsCtx = nullptr;
int frameCounter = 0;
AVFormatContext* ofctx = nullptr;
AVOutputFormat* oformat = nullptr;
int fps = 30;
int width = 1920;
int height = 1080;
int bitrate = 2000;
static void pushFrame(uint8_t* data) {
int err;
if (!videoFrame) {
videoFrame = av_frame_alloc();
videoFrame->format = AV_PIX_FMT_YUV420P;
videoFrame->width = cctx->width;
videoFrame->height = cctx->height;
if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
std::cout << "Failed to allocate picture" << err << std::endl;
return;
}
}
if (!swsCtx) {
swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGB24, cctx->width,
cctx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
}
int inLinesize[1] = { 3 * cctx->width };
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t* const*)&data, inLinesize, 0, cctx->height,
videoFrame->data, videoFrame->linesize);
videoFrame->pts = (1.0 / 30.0) * 90000 * (frameCounter++);
std::cout << videoFrame->pts << " " << cctx->time_base.num << " " <<
cctx->time_base.den << " " << frameCounter << std::endl;
if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
std::cout << "Failed to send frame" << err << std::endl;
return;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
pkt.flags |= AV_PKT_FLAG_KEY;
if (avcodec_receive_packet(cctx, &pkt) == 0) {
static int counter = 0;
if (counter == 0) {
FILE* fp = fopen("dump_first_frame1.dat", "wb");
fwrite(pkt.data, pkt.size, 1, fp);
fclose(fp);
}
std::cout << "pkt key: " << (pkt.flags & AV_PKT_FLAG_KEY) << " " <<
pkt.size << " " << (counter++) << std::endl;
uint8_t* size = ((uint8_t*)pkt.data);
std::cout << "first: " << (int)size[0] << " " << (int)size[1] <<
" " << (int)size[2] << " " << (int)size[3] << " " << (int)size[4] <<
" " << (int)size[5] << " " << (int)size[6] << " " << (int)size[7] <<
std::endl;
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
}
static void finish() {
//DELAYED FRAMES
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
av_write_trailer(ofctx);
if (!(oformat->flags & AVFMT_NOFILE)) {
int err = avio_close(ofctx->pb);
if (err < 0) {
std::cout << "Failed to close file" << err << std::endl;
}
}
}
static void free() {
if (videoFrame) {
av_frame_free(&videoFrame);
}
if (cctx) {
avcodec_free_context(&cctx);
}
if (ofctx) {
avformat_free_context(ofctx);
}
if (swsCtx) {
sws_freeContext(swsCtx);
}
}
int main(int argc, char* argv[])
{
av_register_all();
avcodec_register_all();
oformat = av_guess_format(nullptr, "test.mp4", nullptr);
if (!oformat)
{
std::cout << "can't create output format" << std::endl;
return -1;
}
//oformat->video_codec = AV_CODEC_ID_H265;
int err = avformat_alloc_output_context2(&ofctx, oformat, nullptr, "test.mp4");
if (err)
{
std::cout << "can't create output context" << std::endl;
return -1;
}
AVCodec* codec = nullptr;
codec = avcodec_find_encoder(oformat->video_codec);
if (!codec)
{
std::cout << "can't create codec" << std::endl;
return -1;
}
AVStream* stream = avformat_new_stream(ofctx, codec);
if (!stream)
{
std::cout << "can't find format" << std::endl;
return -1;
}
cctx = avcodec_alloc_context3(codec);
if (!cctx)
{
std::cout << "can't create codec context" << std::endl;
return -1;
}
stream->codecpar->codec_id = oformat->video_codec;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = width;
stream->codecpar->height = height;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->codecpar->bit_rate = bitrate * 1000;
avcodec_parameters_to_context(cctx, stream->codecpar);
cctx->time_base = (AVRational){ 1, 1 };
cctx->max_b_frames = 2;
cctx->gop_size = 12;
cctx->framerate = (AVRational){ fps, 1 };
//must remove the following
//cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx, "preset", "ultrafast", 0);
}
else if (stream->codecpar->codec_id == AV_CODEC_ID_H265)
{
av_opt_set(cctx, "preset", "ultrafast", 0);
}
avcodec_parameters_from_context(stream->codecpar, cctx);
if ((err = avcodec_open2(cctx, codec, NULL)) < 0) {
std::cout << "Failed to open codec" << err << std::endl;
return -1;
}
if (!(oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofctx->pb, "test.mp4", AVIO_FLAG_WRITE)) < 0) {
std::cout << "Failed to open file" << err << std::endl;
return -1;
}
}
if ((err = avformat_write_header(ofctx, NULL)) < 0) {
std::cout << "Failed to write header" << err << std::endl;
return -1;
}
av_dump_format(ofctx, 0, "test.mp4", 1);
uint8_t* frameraw = new uint8_t[1920 * 1080 * 4];
memset(frameraw, 222, 1920 * 1080 * 4);
for (int i = 0;i < 60;++i) {
pushFrame(frameraw);
}
delete[] frameraw;
finish();
free();
return 0;
}
Here is how I debugged the issue: I started with the questioner's solution and compared the packets generated by direct encoding with those generated by remuxing.
During the first encoding, the first packet is missing a header; when remuxing, the first packet has one.
I then noticed AV_CODEC_FLAG_GLOBAL_HEADER. Judging by its name, it seems to write a global header and strip the header from each packet. So I removed it, and everything worked. I have detailed my discovery here.
code is here with makefile: https://github.com/shi-yan/videosamples
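As a minimal sketch of that comparison (my illustration, assuming a freshly encoded AVPacket), dump the leading bytes of the first packet; with Annex B output a keyframe begins with a start code and an SPS NAL, while with AV_CODEC_FLAG_GLOBAL_HEADER set those parameter sets live in extradata instead:

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

// Print the first bytes of an encoded packet. An Annex B keyframe
// starts 00 00 00 01 67 ... (start code + SPS); with global headers
// enabled the packet begins with a plain slice NAL instead.
static void inspectPacket(const AVPacket *pkt) {
    for (int i = 0; i < 8 && i < pkt->size; ++i)
        std::printf("%02x ", pkt->data[i]);
    std::printf("\n");
}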
Working solution:
#include "VideoCapture.h"
#define VIDEO_TMP_FILE "tmp.h264"
#define FINAL_FILE_NAME "record.mp4"
using namespace std;
void VideoCapture::Init(int width, int height, int fpsrate, int bitrate) {
fps = fpsrate;
int err;
if (!(oformat = av_guess_format(NULL, VIDEO_TMP_FILE, NULL))) {
Debug("Failed to define output format", 0);
return;
}
if ((err = avformat_alloc_output_context2(&ofctx, oformat, NULL, VIDEO_TMP_FILE)) < 0) {
Debug("Failed to allocate output context", err);
Free();
return;
}
if (!(codec = avcodec_find_encoder(oformat->video_codec))) {
Debug("Failed to find encoder", 0);
Free();
return;
}
if (!(videoStream = avformat_new_stream(ofctx, codec))) {
Debug("Failed to create new stream", 0);
Free();
return;
}
if (!(cctx = avcodec_alloc_context3(codec))) {
Debug("Failed to allocate codec context", 0);
Free();
return;
}
videoStream->codecpar->codec_id = oformat->video_codec;
videoStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
videoStream->codecpar->width = width;
videoStream->codecpar->height = height;
videoStream->codecpar->format = AV_PIX_FMT_YUV420P;
videoStream->codecpar->bit_rate = bitrate * 1000;
videoStream->time_base = { 1, fps };
avcodec_parameters_to_context(cctx, videoStream->codecpar);
cctx->time_base = { 1, fps };
cctx->max_b_frames = 2;
cctx->gop_size = 12;
if (videoStream->codecpar->codec_id == AV_CODEC_ID_H264) {
av_opt_set(cctx, "preset", "ultrafast", 0);
}
if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avcodec_parameters_from_context(videoStream->codecpar, cctx);
if ((err = avcodec_open2(cctx, codec, NULL)) < 0) {
Debug("Failed to open codec", err);
Free();
return;
}
if (!(oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofctx->pb, VIDEO_TMP_FILE, AVIO_FLAG_WRITE)) < 0) {
Debug("Failed to open file", err);
Free();
return;
}
}
if ((err = avformat_write_header(ofctx, NULL)) < 0) {
Debug("Failed to write header", err);
Free();
return;
}
av_dump_format(ofctx, 0, VIDEO_TMP_FILE, 1);
}
void VideoCapture::AddFrame(uint8_t *data) {
int err;
if (!videoFrame) {
videoFrame = av_frame_alloc();
videoFrame->format = AV_PIX_FMT_YUV420P;
videoFrame->width = cctx->width;
videoFrame->height = cctx->height;
if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
Debug("Failed to allocate picture", err);
return;
}
}
if (!swsCtx) {
swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGB24, cctx->width, cctx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, 0, 0, 0);
}
int inLinesize[1] = { 3 * cctx->width };
// From RGB to YUV
sws_scale(swsCtx, (const uint8_t * const *)&data, inLinesize, 0, cctx->height, videoFrame->data, videoFrame->linesize);
videoFrame->pts = frameCounter++;
if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
Debug("Failed to send frame", err);
return;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (avcodec_receive_packet(cctx, &pkt) == 0) {
pkt.flags |= AV_PKT_FLAG_KEY;
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
}
void VideoCapture::Finish() {
//DELAYED FRAMES
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
for (;;) {
avcodec_send_frame(cctx, NULL);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
av_write_trailer(ofctx);
if (!(oformat->flags & AVFMT_NOFILE)) {
int err = avio_close(ofctx->pb);
if (err < 0) {
Debug("Failed to close file", err);
}
}
Free();
Remux();
}
void VideoCapture::Free() {
if (videoFrame) {
av_frame_free(&videoFrame);
}
if (cctx) {
avcodec_free_context(&cctx);
}
if (ofctx) {
avformat_free_context(ofctx);
}
if (swsCtx) {
sws_freeContext(swsCtx);
}
}
void VideoCapture::Remux() {
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
int err;
if ((err = avformat_open_input(&ifmt_ctx, VIDEO_TMP_FILE, 0, 0)) < 0) {
Debug("Failed to open input file for remuxing", err);
goto end;
}
if ((err = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
Debug("Failed to retrieve input stream information", err);
goto end;
}
if ((err = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, FINAL_FILE_NAME))) {
Debug("Failed to allocate output context", err);
goto end;
}
AVStream *inVideoStream = ifmt_ctx->streams[0];
AVStream *outVideoStream = avformat_new_stream(ofmt_ctx, NULL);
if (!outVideoStream) {
Debug("Failed to allocate output video stream", 0);
goto end;
}
outVideoStream->time_base = { 1, fps };
avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
outVideoStream->codecpar->codec_tag = 0;
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
if ((err = avio_open(&ofmt_ctx->pb, FINAL_FILE_NAME, AVIO_FLAG_WRITE)) < 0) {
Debug("Failed to open output file", err);
goto end;
}
}
if ((err = avformat_write_header(ofmt_ctx, 0)) < 0) {
Debug("Failed to write header to output file", err);
goto end;
}
AVPacket videoPkt;
int ts = 0;
while (true) {
if ((err = av_read_frame(ifmt_ctx, &videoPkt)) < 0) {
break;
}
videoPkt.stream_index = outVideoStream->index;
videoPkt.pts = ts;
videoPkt.dts = ts;
videoPkt.duration = av_rescale_q(videoPkt.duration, inVideoStream->time_base, outVideoStream->time_base);
ts += videoPkt.duration;
videoPkt.pos = -1;
if ((err = av_interleaved_write_frame(ofmt_ctx, &videoPkt)) < 0) {
Debug("Failed to mux packet", err);
av_packet_unref(&videoPkt);
break;
}
av_packet_unref(&videoPkt);
}
av_write_trailer(ofmt_ctx);
end:
if (ifmt_ctx) {
avformat_close_input(&ifmt_ctx);
}
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
avio_closep(&ofmt_ctx->pb);
}
if (ofmt_ctx) {
avformat_free_context(ofmt_ctx);
}
}
Header file:
#define VIDEOCAPTURE_API __declspec(dllexport)
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <cstring>
#include <math.h>
#include <string.h>
#include <algorithm>
#include <string>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavcodec/avfft.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
// libav resample
#include <libavutil/opt.h>
#include <libavutil/common.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/file.h>
// hwaccel
#include "libavcodec/vdpau.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vdpau.h"
// lib swresample
#include <libswscale/swscale.h>
std::ofstream logFile;
void Log(std::string str) {
logFile.open("Logs.txt", std::ofstream::app);
logFile.write(str.c_str(), str.size());
logFile.close();
}
typedef void(*FuncPtr)(const char *);
FuncPtr ExtDebug;
char errbuf[32];
void Debug(std::string str, int err) {
Log(str + " " + std::to_string(err));
if (err < 0) {
av_strerror(err, errbuf, sizeof(errbuf));
str += errbuf;
}
Log(str);
ExtDebug(str.c_str());
}
void avlog_cb(void *, int level, const char * fmt, va_list vargs) {
static char message[8192];
vsnprintf_s(message, sizeof(message), fmt, vargs);
Log(message);
}
class VideoCapture {
public:
VideoCapture() {
oformat = NULL;
ofctx = NULL;
videoStream = NULL;
videoFrame = NULL;
swsCtx = NULL;
frameCounter = 0;
// Initialize libavcodec
av_register_all();
av_log_set_callback(avlog_cb);
}
~VideoCapture() {
Free();
}
void Init(int width, int height, int fpsrate, int bitrate);
void AddFrame(uint8_t *data);
void Finish();
private:
AVOutputFormat *oformat;
AVFormatContext *ofctx;
AVStream *videoStream;
AVFrame *videoFrame;
AVCodec *codec;
AVCodecContext *cctx;
SwsContext *swsCtx;
int frameCounter;
int fps;
void Free();
void Remux();
};
VIDEOCAPTURE_API VideoCapture* Init(int width, int height, int fps, int bitrate) {
VideoCapture *vc = new VideoCapture();
vc->Init(width, height, fps, bitrate);
return vc;
};
VIDEOCAPTURE_API void AddFrame(uint8_t *data, VideoCapture *vc) {
vc->AddFrame(data);
}
VIDEOCAPTURE_API void Finish(VideoCapture *vc) {
vc->Finish();
}
VIDEOCAPTURE_API void SetDebug(FuncPtr fp) {
ExtDebug = fp;
};
}
Another fix is to reorder the calls in the question's code so that the stream parameters are copied only after the encoder has been opened:
if (fctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
//avcodec_parameters_from_context(st->codecpar, cctx); //cut this line
av_dump_format(fctx, 0, filename, 1);
//OPEN FILE + WRITE HEADER
if (avcodec_open2(cctx, pCodec, NULL) < 0) {
cout << "Error avcodec_open2()" << endl; system("pause"); exit(1);
}
avcodec_parameters_from_context(st->codecpar, cctx); //move to here
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fctx->pb, filename, AVIO_FLAG_WRITE) < 0) {
cout << "Error avio_open()" << endl; system("pause"); exit(1);
}
}
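The reason the ordering matters: avcodec_open2() is what fills in cctx->extradata (the H.264 SPS/PPS) when AV_CODEC_FLAG_GLOBAL_HEADER is set, so copying the parameters into st->codecpar before opening the encoder leaves codecpar->extradata empty, and the mp4 muxer cannot write a valid avcC box; hence ffplay's "non-existing PPS 0 referenced" error.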
I had exactly the same problem (error 0xc00d36c4). This topic and its replies helped me solve it. The solution was adding just this code:
if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
You need to populate the codec context extradata and extradata_size fields.
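A minimal sketch of that, reusing the names from the question's code (cctx, st, pCodec): open the encoder first so it generates the extradata, then copy the context into the stream parameters:

// Opening the encoder fills cctx->extradata / cctx->extradata_size
// with the SPS/PPS when AV_CODEC_FLAG_GLOBAL_HEADER is set.
if (avcodec_open2(cctx, pCodec, NULL) < 0) {
    // handle the error
}
// avcodec_parameters_from_context() copies extradata (among other
// fields) into the stream's codecpar, which the mp4 muxer needs to
// write the avcC box.
avcodec_parameters_from_context(st->codecpar, cctx);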
Related
I want to create a screen recording app on Windows using flutter-desktop. So I created a plugin for it and implemented the recording with FFmpeg in C++, and I can successfully record my screen. But when I record for a long time, my app freezes, even though my memory and disk are still sufficient.
I record for a duration that I set myself, and when the recording is finished, my app returns to normal.
I don't know whether the issue is in my C++ code or in Flutter.
Here is my C++ code for recording:
const char* out_filename = "new_out.mp4";
avdevice_register_all();
const AVOutputFormat* ofmt = NULL;
const AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
const AVCodec* av_codec;
AVStream* video_stream = NULL;
av_codec_par_out->height = 1280;
av_codec_par_out->width = 1640;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264;
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
// av_codec_par_out->sample_aspect_ratio.den = 3;
// av_codec_par_out->sample_aspect_ratio.num = 4;
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", 0);
av_dict_set(&options, "offset_x", "20", 0);
av_dict_set(&options, "offset_y", "40", 0);
av_dict_set(&options, "video_size", "640x480", 0);
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0) {
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) {
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1) {
cout << "\nunable to find the video stream index. (-1)";
exit(1);
}
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL) {
cout << "\nunable to find the decoder";
exit(1);
}
avcodec_contx = avcodec_alloc_context3(av_codec);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0) {
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);
if (value < 0) {
cout << "\nunable to open the av codec";
exit(1);
}
value = 0;
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt) {
cout << "\nerror in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx) {
cout << "\nerror in allocating av format output context";
exit(1);
}
const AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL) {
cout << "\nunable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream) {
cout << "\nerror in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out) {
cout << "\nerror in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0) {
cout << "\nCodec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0) {
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
av_cntx_out->gop_size = 30;
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = 30;
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0) {
cout << "\nunable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264) {
av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
}
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0) {
cout << "\nerror in writing the header context";
exit(1);
}
AVPacket* av_pkt = av_packet_alloc();
//av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
memset(av_pkt, 0, sizeof(AVPacket)); //???
AVFrame* av_frame = av_frame_alloc();
if (!av_frame) {
cout << "\nunable to release the avframe resources";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame) {
cout << "\nunable to release the avframe resources for outframe";
exit(1);
}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0) {
cout << "\nUnable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL) {
cout << "\n Cannot allocate SWC Context";
exit(1);
}
int ii = 0;
int enc_packet_counter = 0;
int no_frames = 500; // for duration
int frameFinished;
AVPacket* outPacket = av_packet_alloc();
isRecording = true;
while (av_read_frame(ifmt_ctx, av_pkt) >= 0) {
// while(true) {
cout << "\nwew 1 av_read_frame";
if (ii++ == no_frames)
break;
// if(!isRecording) break;
if (av_pkt->stream_index == VideoStreamIndx){
int ret = avcodec_send_packet(avcodec_contx, av_pkt);
if (ret < 0) {
printf("Error while sending packet");
}
frameFinished = true;
int response = 0;
response = avcodec_receive_frame(avcodec_contx, av_frame);
if (response < 0) {
printf("Error while receiving frame from decoder");
frameFinished = false;
}
if (frameFinished){
memset(outPacket, 0, sizeof(AVPacket));
outPacket->data = NULL;
outPacket->size = 0;
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
if (outPacket->dts != AV_NOPTS_VALUE)
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
enc_packet_counter++;
int sts = sws_scale(swsCtx, //struct SwsContext *c,
av_frame->data, //const uint8_t *const srcSlice[],
av_frame->linesize, //const int srcStride[],
0, //int srcSliceY,
av_frame->height, //int srcSliceH,
outFrame->data, //uint8_t *const dst[],
outFrame->linesize); //const int dstStride[]);
if (sts < 0) {
printf("Error while executing sws_scale");
}
do {
cout << "\nwew 1 avcodec_send_frame";
if (ret == AVERROR(EAGAIN)) {
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (ret) break; // deal with error
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
}
else if (ret != 0) {
char str2[] = "";
cout << "\nError :" << av_make_error_string(str2, sizeof(str2), ret);
// return -1;
}
ret = avcodec_send_frame(av_cntx_out, outFrame);
} while (ret);
} // frameFinished
}
// av_packet_unref(&av_pkt);
// av_packet_free(&av_pkt);
// av_packet_unref(&outPacket);
// av_packet_free(&outPacket);
}
int ret = 0;
avcodec_send_frame(av_cntx_out, NULL);
do {
cout << "\nwew 1 av_write_frame";
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (!ret)
{
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
enc_packet_counter++;
}
} while (!ret);
value = av_write_trailer(ofmt_ctx);
if (value < 0) {
cout << "\nerror in writing av trailer";
exit(1);
}
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx) {
cout << "\nfile closed successfully";
}
else {
cout << "\nunable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx) {
cout << "\navformat free successfully";
}
else {
cout << "\nunable to free avformat context";
exit(1);
}
avcodec_free_context(&av_cntx_out);
if (!av_cntx_out) {
cout << "\navcodec free successfully";
}
else {
cout << "\nunable to free avcodec context";
exit(1);
}
Here is my Flutter plugin:
class MethodChannelScreenrecorder extends ScreenrecorderPlatform {
static const MethodChannel _channel =
MethodChannel('screenrecorder');
@override
Future<bool> startRecordScreen(String path, int windowID) async {
final bool start = await _channel
.invokeMethod('startRecordScreen', {"path": path, "windowID": windowID});
return start;
}
@override
Future<bool> get stopRecordScreen async {
final bool path = await _channel.invokeMethod('stopRecordScreen');
return path;
}
}
class Screenrecorder {
static Future<bool> startRecordScreen({String path = "", int windowID = 0}) async{
final bool start = await ScreenrecorderPlatform.instance.startRecordScreen(path, windowID);
return start;
}
static Future<bool> get stopRecordScreen async {
final bool path = await ScreenrecorderPlatform.instance.stopRecordScreen;
return path;
}
}
I'm trying to encode a video (mov) from PNG images with an alpha channel in FFmpeg.
Using this command:
ffmpeg -i %d.png -r 25 -vcodec png -b:v 2500K out.mov -y
It works well.
Now I want to create the video from PNG images with an alpha channel in C++ code:
#include "VideoCapture.h"
#define FINAL_FILE_NAME "record.mov"
#define VIDEO_TMP_FILE "tmp.avi"
using namespace std;
FILE *fp_write;
static int write_packet(void *opaque, uint8_t *buf, size_t buf_size)
{
struct buffer_data *bd = (struct buffer_data *)opaque;
printf("ptr :%p size:%zu\n", bd->ptr, bd->size);
memcpy(bd->ptr + bd->size, buf, buf_size);
bd->size = buf_size + bd->size;
return 1;
}
void VideoCapture::Init(string filename, int width, int height, int fpsrate, int bitrate) {
fps = fpsrate;
int err;
uint8_t *outbuffer=nullptr;
outbuffer=(uint8_t*)av_malloc(32768);
bd = (struct buffer_data*)malloc(sizeof(struct buffer_data));
bd->ptr = (uint8_t*)av_malloc(1000000000);
bd->size = 0;
avio_out =avio_alloc_context(outbuffer, 32768,1,bd,nullptr,write_packet,nullptr);
if (!(oformat = av_guess_format("mov", nullptr, nullptr))) {
cout << "Failed to define output format"<< endl;
return;
}
oformat->video_codec = AV_CODEC_ID_PNG;
cout << "oformat->video_codec " << oformat->video_codec << endl;
if ((err = avformat_alloc_output_context2(&ofctx, oformat, nullptr, nullptr)) < 0) {
cout <<"Failed to allocate output context"<< endl;
//Free();
return;
}
cout << "oformat->video_codec " << oformat->video_codec << endl;
if (!(codec = avcodec_find_encoder(oformat->video_codec))) {
cout <<"Failed to find encoder"<< endl;
//Free();
return;
}
if (!(videoStream = avformat_new_stream(ofctx, codec))) {
cout <<"Failed to create new stream"<< endl;
//Free();
return;
}
if (!(cctx = avcodec_alloc_context3(codec))) {
cout <<"Failed to allocate codec context"<< endl;
//Free();
return;
}
videoStream->codecpar->codec_id = oformat->video_codec;
videoStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
videoStream->codecpar->width = width;
videoStream->codecpar->height = height;
videoStream->codecpar->format = AV_PIX_FMT_RGBA;
videoStream->codecpar->bit_rate = bitrate * 1000;
videoStream->time_base = { 1, fps };
avcodec_parameters_to_context(cctx, videoStream->codecpar);
cctx->time_base = { 1, fps };
cctx->max_b_frames = 2;
cctx->gop_size = 12;
if (videoStream->codecpar->codec_id == AV_CODEC_ID_PNG) {
//av_opt_set(cctx, "preset", "ultrafast", 0);
}
if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avcodec_parameters_from_context(videoStream->codecpar, cctx);
if ((err = avcodec_open2(cctx, codec, nullptr)) < 0) {
cout <<"Failed to open codec"<< endl;
Free();
return;
}
ofctx->pb = avio_out;
// not sure what this is for
ofctx->flags=AVFMT_FLAG_CUSTOM_IO;
if ((err = avformat_write_header(ofctx, nullptr)) < 0) {
cout <<"Failed to write header"<< endl;
Free();
return;
}
//av_dump_format(ofctx, 0, VIDEO_TMP_FILE, 1);
cout << "init com" << endl;
}
void VideoCapture::AddFrame(uint8_t *data) {
int err;
if (!videoFrame) {
videoFrame = av_frame_alloc();
videoFrame->format = AV_PIX_FMT_RGBA;
videoFrame->width = cctx->width;
videoFrame->height = cctx->height;
if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
cout <<"Failed to allocate picture"<< endl;
return;
}
}
cout << "finish" << endl;
if (!swsCtx) {
swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGBA, cctx->width, cctx->height, AV_PIX_FMT_RGBA, SWS_BICUBIC, 0, 0, 0);
}
int inLinesize[1] = { 4 * cctx->width};
sws_scale(swsCtx, (const uint8_t * const *)&data, inLinesize, 0, cctx->height, videoFrame->data, videoFrame->linesize);
videoFrame->pts = (1.0 / 30.0) * 90000 * (frameCounter++);
if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
cout <<"Failed to send frame"<< endl;
return;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
if (avcodec_receive_packet(cctx, &pkt) == 0) {
pkt.flags |= AV_PKT_FLAG_KEY;
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
}
void VideoCapture::Finish() {
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
for (;;) {
avcodec_send_frame(cctx, nullptr);
if (avcodec_receive_packet(cctx, &pkt) == 0) {
av_interleaved_write_frame(ofctx, &pkt);
av_packet_unref(&pkt);
}
else {
break;
}
}
av_write_trailer(ofctx);
/*
if (!(oformat->flags & AVFMT_NOFILE)) {
int err = avio_close(ofctx->pb);
if (err < 0) {
cout <<"Failed to close file"<< endl;
}
}
*/
fp_write = fopen(VIDEO_TMP_FILE, "wb");
if (!feof(fp_write)) {
int true_size = fwrite(bd->ptr, 1, bd->size, fp_write);
std::cout << true_size << std::endl;
}
fcloseall();
//Remux();
//Free();
}
void VideoCapture::Free() {
if (videoFrame) {
//std::cout << "videoFrame " << std::endl;
av_frame_free(&videoFrame);
}
if (cctx) {
//std::cout << "cctx" << std::endl;
avcodec_free_context(&cctx);
}
if (ofctx) {
//std::cout << "ofctx" << ofctx << std::endl;
avformat_free_context(ofctx);
}
if (swsCtx) {
//std::cout << "swsCtx" << std::endl;
sws_freeContext(swsCtx);
}
/*
if (bd->ptr != (void*)0)
{
free(bd->ptr);
}
free(bd);*/
}
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
struct buffer_data *bd = (struct buffer_data *)opaque;
buf_size = FFMIN(buf_size, bd->size);
if(buf_size == 0) return -1;
//printf("read ptr:%p size:%zu\n", bd->ptr, bd->size);
/* copy internal buffer data to buf */
memcpy(buf, bd->ptr, buf_size);
bd->ptr += buf_size;
bd->size -= buf_size;
return buf_size;
}
void VideoCapture::Remux() {
AVFormatContext *ifmt_ctx = nullptr, *ofmt_ctx = nullptr;
int err;
unsigned char* inbuffer=nullptr;
inbuffer=(unsigned char*)av_malloc(32768);
ifmt_ctx = avformat_alloc_context();
AVIOContext *avio_in =avio_alloc_context(inbuffer, 32768 ,0,bd,read_packet,nullptr,nullptr);
ifmt_ctx->pb=avio_in;
if (!(oformat = av_guess_format(nullptr, nullptr, "h264"))) {
cout << "Failed to define output format";
return;
}
if ((err = avformat_open_input(&ifmt_ctx, "nullptr", 0, 0)) < 0) {
cout <<"Failed to open input file for remuxing"<< endl;
}
if ((err = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
cout <<"Failed to retrieve input stream information"<< endl;
}
if ((err = avformat_alloc_output_context2(&ofmt_ctx, oformat, nullptr, nullptr))) {
cout <<"Failed to allocate output context"<< endl;
}
AVStream *inVideoStream = ifmt_ctx->streams[0];
AVStream *outVideoStream = avformat_new_stream(ofmt_ctx, nullptr);
if (!outVideoStream) {
cout <<"Failed to allocate output video stream" << endl;
}
outVideoStream->time_base = { 1, fps };
avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
outVideoStream->codecpar->codec_tag = 0;
uint8_t *outbuffer=nullptr;
outbuffer=(uint8_t*)av_malloc(32768);
res_video = (struct buffer_data*)malloc(sizeof(struct buffer_data));
res_video->ptr = (uint8_t*)av_malloc(100000000);
res_video->size = 0;
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
avio_out =avio_alloc_context(outbuffer, 32768,1, res_video, nullptr, write_packet, nullptr);
ofmt_ctx->pb = avio_out;
}
AVDictionary* opts = nullptr;
av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
if ((err = avformat_write_header(ofmt_ctx, &opts)) < 0) {
cout <<"Failed to write header to output file"<< endl;
}
AVPacket videoPkt;
int ts = 0;
while (true) {
if ((err = av_read_frame(ifmt_ctx, &videoPkt)) < 0) {
break;
}
videoPkt.stream_index = outVideoStream->index;
videoPkt.pts = ts;
videoPkt.dts = ts;
videoPkt.duration = av_rescale_q(videoPkt.duration, inVideoStream->time_base, outVideoStream->time_base);
ts += videoPkt.duration;
videoPkt.pos = -1;
if ((err = av_interleaved_write_frame(ofmt_ctx, &videoPkt)) < 0) {
cout <<"Failed to mux packet"<< endl;
av_packet_unref(&videoPkt);
break;
}
av_packet_unref(&videoPkt);
}
av_write_trailer(ofmt_ctx);
cout << "res_video->size " << res_video->size << endl;
fp_write=fopen(FINAL_FILE_NAME,"wb");
if(!feof(fp_write)){
int true_size=fwrite(res_video->ptr,1, res_video->size,fp_write);
std::cout << true_size << std::endl;
}
fcloseall();
}
Header file:
#ifndef _RENDER_H_
#define _RENDER_H_
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <cstring>
#include <math.h>
#include <string.h>
#include <algorithm>
#include <string>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavcodec/avfft.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
//#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/opt.h>
#include <libavutil/common.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/file.h>
//#include "libavcodec/vdpau.h"
#include "libavutil/hwcontext.h"
//#include "libavutil/hwcontext_vdpau.h"
#include <libswscale/swscale.h>
class VideoCapture {
public:
VideoCapture() {
oformat = nullptr;
ofctx = nullptr;
videoStream = nullptr;
videoFrame = nullptr;
swsCtx = nullptr;
frameCounter = 0;
av_register_all();
//av_log_set_callback(avlog_cb);
}
~VideoCapture() {
//Free();
}
void Init(std::string name, int width, int height, int fpsrate, int bitrate);
void AddFrame(uint8_t *data);
void Finish();
private:
AVOutputFormat *oformat;
AVFormatContext *ofctx;
AVIOContext *avio_out;
AVStream *videoStream;
AVFrame *videoFrame;
AVCodec *codec;
AVCodecContext *cctx;
struct buffer_data *bd;
struct buffer_data* res_video;
SwsContext *swsCtx;
//FILE *fp_write;
char* filename;
//int buf_len;
int frameCounter;
int fps;
void Free();
void Remux();
};
struct buffer_data {
uint8_t *ptr;
size_t size; ///< size left in the buffer
};
}
#endif
It fails with this error:
[mov # 0x29bf0c0] muxer does not support non seekable output
Failed to write header
How can I solve this? Maybe the mov muxer does support the png encoder; ffmpeg's muxer info for mov shows:
Default video codec: h264
How can I translate the command line into C++ code?
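A note on the header error (my observation, not part of the original question): the mov muxer normally needs seekable output so it can write the moov atom at the end, which a write-only custom AVIOContext cannot provide. The Remux() function above already works around this with fragmented output; the same option could be passed when writing the header in Init(), e.g.:

// Hypothetical sketch: fragmented mov/mp4 writes self-contained
// fragments and never seeks back, so it works with a non-seekable
// custom AVIOContext.
AVDictionary *opts = nullptr;
av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
if (avformat_write_header(ofctx, &opts) < 0) {
    // handle the error
}
av_dict_free(&opts);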
Well, since the FFmpeg documentation and code examples are absolute garbage, I guess my only choice is to come here and ask.
What I'm trying to do is simply record audio from a microphone and write it to a file. I initialize my input and output formats, get an audio packet, decode it, resample, encode, and write. But every time I try to play the audio, there's only a stub of data. It seems that for some reason it writes only a start packet, which is still very strange, and let me explain why:
if((response = swr_config_frame(resampleContext, audioOutputFrame, frame)) < 0) qDebug() << "can't configure frame!" << av_make_error(response);
if((response = swr_convert_frame(resampleContext, audioOutputFrame, frame)) < 0) qDebug() << "can't resample frame!" << av_make_error(response);
Here's the code I'm using to resample. My frame has data, but swr_convert_frame writes empty data to audioOutputFrame.
How do I fix that? FFmpeg is literally driving me crazy.
Here's the full code of my class
VideoReader.h
#ifndef VIDEOREADER_H
#define VIDEOREADER_H
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include "libavutil/audio_fifo.h"
#include "libavformat/avio.h"
#include "libswresample/swresample.h"
#include <inttypes.h>
}
#include <QString>
#include <QElapsedTimer>
class VideoReader
{
public:
VideoReader();
bool open(const char* filename);
bool fillFrame();
bool readFrame(uint8_t *&frameData);
void close();
int width, height;
private:
bool configInput();
bool configOutput(const char *filename);
bool configResampler();
bool encode(AVFrame *frame, AVCodecContext *encoderContext, AVPacket *outputPacket, int streamIndex, QString type);
int audioStreamIndex = -1;
int videoStreamIndex = -1;
int64_t videoStartPts = 0;
int64_t audioStartPts = 0;
AVFormatContext* inputFormatContext = nullptr;
AVFormatContext* outputFormatContext = nullptr;
AVCodecContext* videoDecoderContext = nullptr;
AVCodecContext* videoEncoderContext = nullptr;
AVCodecContext* audioDecoderContext = nullptr;
AVCodecContext* audioEncoderContext = nullptr;
AVFrame* videoInputFrame = nullptr;
AVFrame* audioInputFrame = nullptr;
AVFrame* videoOutputFrame = nullptr;
AVFrame* audioOutputFrame = nullptr;
AVPacket* inputPacket = nullptr;
AVPacket* videoOutputPacket = nullptr;
AVPacket* audioOutputPacket = nullptr;
SwsContext* innerScaleContext = nullptr;
SwsContext* outerScaleContext = nullptr;
SwrContext *resampleContext = nullptr;
};
#endif // VIDEOREADER_H
VideoReader.cpp
#include "VideoReader.h"
#include <QDebug>
static const char* av_make_error(int errnum)
{
static char str[AV_ERROR_MAX_STRING_SIZE];
memset(str, 0, sizeof(str));
return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}
VideoReader::VideoReader()
{
}
bool VideoReader::open(const char *filename)
{
if(!configInput()) return false;
if(!configOutput(filename)) return false;
if(!configResampler()) return false;
return true;
}
bool VideoReader::fillFrame()
{
auto convertToYUV = [=](AVFrame* frame)
{
int response = 0;
if((response = sws_scale(outerScaleContext, frame->data, frame->linesize, 0, videoEncoderContext->height, videoOutputFrame->data, videoOutputFrame->linesize)) < 0) qDebug() << "can't rescale" << av_make_error(response);
};
auto convertAudio = [this](AVFrame* frame)
{
int response = 0;
auto& out = audioOutputFrame;
qDebug() << out->linesize[0] << out->nb_samples;
if((response = swr_convert_frame(resampleContext, audioOutputFrame, frame)) < 0) qDebug() << "can't resample frame!" << av_make_error(response);
qDebug() << "poop";
};
auto decodeEncode = [=](AVPacket* inputPacket, AVFrame* inputFrame, AVCodecContext* decoderContext,
AVPacket* outputPacket, AVFrame* outputFrame, AVCodecContext* encoderContext,
std::function<void (AVFrame*)> convertFunc,
int streamIndex, int64_t startPts, QString type)
{
int response = avcodec_send_packet(decoderContext, inputPacket);
if(response < 0) { qDebug() << "failed to send" << type << "packet!" << av_make_error(response); return false; }
response = avcodec_receive_frame(decoderContext, inputFrame);
if(response == AVERROR(EAGAIN) || response == AVERROR_EOF) { av_packet_unref(inputPacket); return false; }
else if (response < 0) { qDebug() << "failed to decode" << type << "frame!" << response << av_make_error(response); return false; }
if(encoderContext)
{
outputFrame->pts = inputPacket->pts - startPts;
convertFunc(inputFrame);
if(!encode(outputFrame, encoderContext, outputPacket, streamIndex, type)) return false;
}
av_packet_unref(inputPacket);
return true;
};
while(av_read_frame(inputFormatContext, inputPacket) >= 0) //actually read packet
{
if(inputPacket->stream_index == videoStreamIndex)
{
if(!videoStartPts) videoStartPts = inputPacket->pts;
if(decodeEncode(inputPacket, videoInputFrame, videoDecoderContext, videoOutputPacket, videoOutputFrame, videoEncoderContext, convertToYUV, videoStreamIndex, videoStartPts, "video")) break;
}
else if(inputPacket->stream_index == audioStreamIndex)
{
if(!audioStartPts) audioStartPts = inputPacket->pts;
if(decodeEncode(inputPacket, audioInputFrame, audioDecoderContext, audioOutputPacket, audioOutputFrame, audioEncoderContext, convertAudio, audioStreamIndex, audioStartPts, "audio")) break;
}
}
return true;
}
bool VideoReader::readFrame(uint8_t *&frameData)
{
if(!fillFrame()) { qDebug() << "readFrame method failed!"; return false; };
const int bytesPerPixel = 4;
uint8_t* destination[bytesPerPixel] = {frameData, NULL, NULL, NULL};
int destinationLinesize[bytesPerPixel] = { videoInputFrame->width * bytesPerPixel, 0, 0, 0};
sws_scale(innerScaleContext, videoInputFrame->data, videoInputFrame->linesize, 0, videoInputFrame->height, destination, destinationLinesize);
return true;
}
void VideoReader::close()
{
encode(NULL, videoEncoderContext, videoOutputPacket, videoStreamIndex, "video");
encode(NULL, audioEncoderContext, audioOutputPacket, audioStreamIndex, "audio");
if(av_write_trailer(outputFormatContext) < 0) { qDebug() << "failed to write trailer"; };
avformat_close_input(&outputFormatContext);
avformat_free_context(outputFormatContext);
avformat_close_input(&inputFormatContext);
avformat_free_context(inputFormatContext);
av_frame_free(&videoInputFrame);
av_frame_free(&audioInputFrame);
av_frame_free(&videoOutputFrame);
av_frame_free(&audioOutputFrame);
av_packet_free(&inputPacket);
av_packet_free(&videoOutputPacket);
av_packet_free(&audioOutputPacket);
avcodec_free_context(&videoDecoderContext);
avcodec_free_context(&videoEncoderContext);
avcodec_free_context(&audioDecoderContext);
avcodec_free_context(&audioEncoderContext);
sws_freeContext(innerScaleContext);
sws_freeContext(outerScaleContext);
swr_free(&resampleContext);
}
bool VideoReader::configInput()
{
avdevice_register_all();
inputFormatContext = avformat_alloc_context();
if(!inputFormatContext) { qDebug() << "can't create context!"; return false; }
const char* inputFormatName = "dshow";/*"gdigrab"*/
AVInputFormat* inputFormat = av_find_input_format(inputFormatName);
if(!inputFormat){ qDebug() << "Can't find" << inputFormatName; return false; }
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", 0);
av_dict_set(&options, "video_size", "1920x1080", 0);
if(avformat_open_input(&inputFormatContext, "video=HD USB Camera:audio=Microphone (High Definition Audio Device)" /*"desktop"*/, inputFormat, &options) != 0) { qDebug() << "can't open video file!"; return false; }
AVCodecParameters* videoCodecParams = nullptr;
AVCodecParameters* audioCodecParams = nullptr;
AVCodec* videoDecoder = nullptr;
AVCodec* audioDecoder = nullptr;
for (uint i = 0; i < inputFormatContext->nb_streams; ++i)
{
auto stream = inputFormatContext->streams[i];
auto codecParams = stream->codecpar;
if(codecParams->codec_type == AVMEDIA_TYPE_AUDIO) { audioStreamIndex = i; audioDecoder = avcodec_find_decoder(codecParams->codec_id); audioCodecParams = codecParams; }
if(codecParams->codec_type == AVMEDIA_TYPE_VIDEO) { videoStreamIndex = i; videoDecoder = avcodec_find_decoder(codecParams->codec_id); videoCodecParams = codecParams; }
if(audioStreamIndex != -1 && videoStreamIndex != -1) break;
}
if(audioStreamIndex == -1) { qDebug() << "failed to find audio stream inside file"; return false; }
if(videoStreamIndex == -1) { qDebug() << "failed to find video stream inside file"; return false; }
auto configureCodecContext = [=](AVCodecContext*& context, AVCodec* decoder, AVCodecParameters* params, AVFrame*& frame, QString type)
{
context = avcodec_alloc_context3(decoder);
if(!context) { qDebug() << "failed to create" << type << "decoder context!"; return false; }
if(avcodec_parameters_to_context(context, params) < 0) { qDebug() << "can't initialize input" << type << "decoder context"; return false; }
if(avcodec_open2(context, decoder, NULL) < 0) { qDebug() << "can't open" << type << "decoder"; return false; }
frame = av_frame_alloc();
if(!frame) { qDebug() << "can't allocate" << type << "frame"; return false; }
return true;
};
if(!configureCodecContext(videoDecoderContext, videoDecoder, videoCodecParams, videoInputFrame, "video")) return false;
if(!configureCodecContext(audioDecoderContext, audioDecoder, audioCodecParams, audioInputFrame, "audio")) return false;
audioDecoderContext->channel_layout = AV_CH_LAYOUT_STEREO;
audioInputFrame->channel_layout = audioDecoderContext->channel_layout;
inputPacket = av_packet_alloc();
if(!inputPacket) { qDebug() << "can't allocate input packet!"; return false; }
//first frame, needed fo initialization
if(!fillFrame()) { qDebug() << "Failed to fill frame on init!"; return false; };
width = videoDecoderContext->width;
height = videoDecoderContext->height;
innerScaleContext = sws_getContext(width, height, videoDecoderContext->pix_fmt,
width, height, AV_PIX_FMT_RGB0,
SWS_FAST_BILINEAR,
NULL,
NULL,
NULL);
outerScaleContext = sws_getContext(width, height, videoDecoderContext->pix_fmt,
width, height, AV_PIX_FMT_YUV420P,
SWS_FAST_BILINEAR,
NULL,
NULL,
NULL);
if(!innerScaleContext) { qDebug() << "failed to initialize scaler context"; return false; }
return true;
}
bool VideoReader::configOutput(const char *filename)
{
avformat_alloc_output_context2(&outputFormatContext, NULL, NULL, filename);
if(!outputFormatContext) { qDebug() << "failed to create output context"; return false; }
AVOutputFormat* outputFormat = outputFormatContext->oformat;
auto prepareOutputContext = [=](AVCodecContext*& encoderContext,
std::function<void (AVCodecContext*, AVCodec*)> configureContextFunc,
std::function<void (AVFrame*)> configureFrameFunc,
AVCodecID codecId, AVFrame*& frame, AVPacket*& packet, QString type)
{
auto stream = avformat_new_stream(outputFormatContext, NULL);
if(!stream) { qDebug() << "failed to allocate output" << type << "stream"; return false; }
AVCodec* encoder = avcodec_find_encoder(codecId);
if(!encoder) { qDebug() << "failed to find" << type << "encoder!"; return false; }
encoderContext = avcodec_alloc_context3(encoder);
if(!encoderContext) { qDebug() << "failed to create video encoder context!"; return false; }
configureContextFunc(encoderContext, encoder);
int result = avcodec_open2(encoderContext, encoder, NULL);
if(result < 0) { qDebug() << "failed to open audio encoder" << av_make_error(result); return false; }
if(avcodec_parameters_from_context(stream->codecpar, encoderContext) < 0) { qDebug() << "failed to copy parameters to audio output stream"; return false; }
packet = av_packet_alloc();
if(!packet) {qDebug() << "failed allocate output" << type << "packet"; return false;}
frame = av_frame_alloc();
if(!frame) { qDebug() << "can't allocate output" << type << "frame"; return false; }
configureFrameFunc(frame);
av_frame_get_buffer(frame, 0);
return true;
};
auto configureAudioFrame = [=](AVFrame* frame)
{
frame->nb_samples = audioEncoderContext->frame_size;
frame->format = audioEncoderContext->sample_fmt;
frame->sample_rate = audioEncoderContext->sample_rate;
frame->channel_layout = av_get_default_channel_layout(audioDecoderContext->channels);
};
auto configureAudioEncoderContext = [=](AVCodecContext* encoderContext, AVCodec* encoder)
{
encoderContext->bit_rate = 64000;
encoderContext->sample_fmt = encoder->sample_fmts[0];
encoderContext->sample_rate = 44100;
encoderContext->codec_type = AVMEDIA_TYPE_AUDIO;
encoderContext->channel_layout = AV_CH_LAYOUT_STEREO;
encoderContext->channels = av_get_channel_layout_nb_channels(encoderContext->channel_layout);
};
auto configureVideoFrame = [=](AVFrame* frame)
{
frame->format = videoEncoderContext->pix_fmt;
frame->width = videoEncoderContext->width;
frame->height = videoEncoderContext->height;
};
auto configureVideoEncoderContext = [=](AVCodecContext* encoderContext, AVCodec* encoder)
{
encoderContext->width = videoDecoderContext->width;
encoderContext->height = videoDecoderContext->height;
encoderContext->pix_fmt = encoder->pix_fmts[0];
encoderContext->gop_size = 10;
encoderContext->max_b_frames = 1;
encoderContext->framerate = AVRational{30, 1};
encoderContext->time_base = AVRational{1, 30};
av_opt_set(encoderContext->priv_data, "preset", "ultrafast", 0);
av_opt_set(encoderContext->priv_data, "tune", "zerolatency", 0);
};
if(!prepareOutputContext(videoEncoderContext, configureVideoEncoderContext, configureVideoFrame, outputFormat->video_codec, videoOutputFrame, videoOutputPacket, "video")) return false;
if(!prepareOutputContext(audioEncoderContext, configureAudioEncoderContext, configureAudioFrame, outputFormat->audio_codec, audioOutputFrame, audioOutputPacket, "audio")) return false;
if(outputFormat->flags & AVFMT_GLOBALHEADER) outputFormat->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
int result = 0;
if(!(outputFormat->flags & AVFMT_NOFILE))
if((result = avio_open(&outputFormatContext->pb, filename, AVIO_FLAG_WRITE)) < 0)
{ qDebug() << "failed to open file" << av_make_error(result); return false; }
result = avformat_write_header(outputFormatContext, NULL);
if(result < 0) {qDebug() << "failed to write header!" << av_make_error(result); return false; }
return true;
}
bool VideoReader::configResampler()
{
resampleContext = swr_alloc_set_opts(NULL,
av_get_default_channel_layout(audioEncoderContext->channels),
audioEncoderContext->sample_fmt,
audioEncoderContext->sample_rate,
av_get_default_channel_layout(audioDecoderContext->channels),
audioDecoderContext->sample_fmt,
audioDecoderContext->sample_rate,
0, NULL);
if (!resampleContext) { qDebug() << "Could not allocate resample context"; return false; }
int error;
if ((error = swr_init(resampleContext)) < 0) { qDebug() << "Could not open resample context"; swr_free(&resampleContext); return false; }
return true;
}
bool VideoReader::encode(AVFrame* frame, AVCodecContext* encoderContext, AVPacket* outputPacket, int streamIndex, QString type)
{
int response;
response = avcodec_send_frame(encoderContext, frame);
if(response < 0) { qDebug() << "failed to send" << type << "frame" << av_make_error(response); return false; }
while(response >= 0)
{
response = avcodec_receive_packet(encoderContext, outputPacket);
if(response == AVERROR(EAGAIN) || response == AVERROR_EOF) { av_packet_unref(outputPacket); continue; }
else if (response < 0) { qDebug() << "failed to encode" << type << "frame!" << response << av_make_error(response); return false; }
outputPacket->stream_index = streamIndex;
AVStream *inStream = inputFormatContext->streams[streamIndex];
AVStream *outStream = outputFormatContext->streams[streamIndex];
av_packet_rescale_ts(outputPacket, inStream->time_base, outStream->time_base);
if((response = av_interleaved_write_frame(outputFormatContext, outputPacket)) != 0) { qDebug() << "Failed to write" << type << "packet!" << av_make_error(response); av_packet_unref(outputPacket); return false; }
av_packet_unref(outputPacket);
}
return true;
}
I could try to write a shorter example if needed.
As far as I know, there are several circumstances in which swr_convert_frame might write nothing:
You did not initialize your output frame correctly. If so, check out the following snippet:
audioFrame = av_frame_alloc();
if (audioFrame == NULL) {
// error handling
}
audioFrame->format = /* the sample format you'd like to use */;
audioFrame->channel_layout = audioCodecContext->channel_layout;
audioFrame->nb_samples = audioCodecContext->frame_size;
if (av_frame_get_buffer(audioFrame, 0) < 0) {
// error handling
}
The samples in your input frame are not enough to produce a complete output frame. If so, you need swr_get_delay:
if (swr_convert(swrContext, audioFrame->data,
audioFrame->nb_samples,
(uint8_t const**)frame->data, frame->nb_samples) < 0) {
// handle error
}
// do stuff with your audioFrame
...
while (swr_get_delay(swrContext, audioCodecContext->sample_rate)
> audioFrame->nb_samples) {
if (swr_convert(swrContext, audioFrame->data,
audioFrame->nb_samples, NULL, 0) < 0) {
// handle error
}
// do stuff with your audioFrame
}
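The idea of the loop is to keep draining the samples swr has buffered internally until less than one full output frame's worth of delay remains.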
Anyway, more information, at least a minimal reproducible example, should be provided for further diagnosis.
I agree that the libav documentation is poor; it used to drive me crazy too. But cursing the authors of libav won't help, and open-source contributors do not owe you anything.
I use FFmpeg to record video from an RTSP stream. My code gets the current time, creates a folder in the format year/month/day/hour/minute, and saves the video to that folder.
When a new minute arrives, I create a new folder based on the new minute and restart the recording into that folder, roughly like the sketch below.
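For reference, a simplified stand-in for what my videoRecorder class does to build the per-minute path (currentMinutePath is an illustrative name):
#include <ctime>
#include <string>
std::string currentMinutePath() {
char buf[64];
std::time_t now = std::time(nullptr);
// one folder per minute, e.g. "2015/06/03/14/07"
std::strftime(buf, sizeof(buf), "%Y/%m/%d/%H/%M", std::localtime(&now));
return std::string(buf);
}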
Basically it works, but each new video's timestamps continue from where the previous video ended. For example:
video1: 00:00 -> 00:55
video2: 00:56 -> ...
I want every video's timestamps to start from 00:00. Can I do that?
Here is my code:
ffmpeg.h
class CtFfmpeg {
public:
CtFfmpeg();
~CtFfmpeg();
void init();
int getInput();
int getOutputName(const char *filename);
int release();
int ret;
AVFormatContext *ifmt_ctx, *ofmt_ctx;
AVStream *in_stream, *out_stream;
AVPacket pkt;
char *in_filename;
char *out_filename;
private:
int setOutput(const char *outfilename);
AVOutputFormat *ofmt;
};
ffmpeg.cpp
#include "ctffmpeg.h"
CtFfmpeg::CtFfmpeg() {
in_filename = new char [1024];
out_filename = new char [1024];
}
CtFfmpeg::~CtFfmpeg() {
delete [] in_filename;
delete [] out_filename;
}
void CtFfmpeg::init() {
avcodec_register_all();
av_register_all();
avformat_network_init();
pkt = { 0 };
av_init_packet(&pkt);
ofmt = NULL;
ifmt_ctx = NULL;
ofmt_ctx = NULL;
return;
}
int CtFfmpeg::release() {
av_write_trailer(ofmt_ctx);
avcodec_close(out_stream->codec);
// avcodec_close(in_stream->codec);
// avformat_close_input(&ifmt_ctx);
/* close output */
if (!(ofmt->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
av_free_packet(&pkt);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred\n");
return 1;
}
return 0;
}
int CtFfmpeg::getInput() {
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
release();
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
release();
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
return ret;
}
int CtFfmpeg::setOutput(const char *outfilename) {
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outfilename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
release();
}
ofmt = ofmt_ctx->oformat;
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
in_stream = ifmt_ctx->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
release();
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
release();
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
} // for
av_dump_format(ofmt_ctx, 0, outfilename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, outfilename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", outfilename);
release();
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
release();
}
return ret;
}
int CtFfmpeg::getOutputName(const char *filename){
snprintf(out_filename, 1024, "%s", filename); // sprintf(out_filename, filename) treated the name as a format string
return setOutput(out_filename);
}
main.cpp
#include "ctfolder.h"
#include "ctffmpeg.h"
#include <cstring>
CtFfmpeg * ff;
int main(int argc, char** argv) {
if (argc < 2) {
printf("usage: %s <RTSP link> \n", argv[0]);
return 1;
}
ff = new CtFfmpeg();
strncpy(ff->in_filename, argv[1], 1023); // RTSP input link; copy into the buffer owned by CtFfmpeg instead of leaking it and later deleting argv[1]
ff->init();
ff->getInput();
string filename;
videoRecorder obj;
int start, now;
start = obj.get_current_min();
if(obj.create_folder(0755))
cout << "Cannot create folder, maybe it already exists" << endl;
else
cout << "Folder created successfully" << endl;
int skip = 0;
while(1){
filename = obj.update_filename();
ff->getOutputName(filename.c_str());
while((now = obj.get_current_min()) == start) {
ff->ret = av_read_frame(ff->ifmt_ctx, &(ff->pkt));
skip++;
if(skip==1)
continue;
if(skip>2)
skip=2;
if (ff->ret < 0)
continue;
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;
ff->ret = av_interleaved_write_frame(ff->ofmt_ctx, &(ff->pkt));
if (ff->ret < 0) {
fprintf(stderr, "Error muxing packet\n");
continue;
}
av_free_packet(&(ff->pkt));
}
ff->release();
cout << "New minute!" << endl;
if(obj.create_folder(0755))
cout << "Cannot create folder, something's wrong" << endl;
else
cout << "Folder created successfully" << endl;
start = now;
}
return 0;
}
You need to shift your recorded packets' pts so that each file starts at 0.
while(<some condition>)
{
//...
int64_t pts_offset = AV_NOPTS_VALUE ;
while((now = obj.get_current_min()) == start)
{
//...
ff->pkt.pts = ...
//...
if( pts_offset == AV_NOPTS_VALUE )
{
pts_offset = ff->pkt.pts ;
}
ff->pkt.pts -= pts_offset ;
// ...
}
}
I tried to build your code with Alexander Chernin's suggestion added, but I ran into a muxer error.
When you decrease the recorded packet's pts, its value drops below the packet's dts. In avcodec.h, above the declaration of pts, I found this comment:
pts MUST be larger or equal to dts as presentation cannot happen before decompression.
I solved the error by decreasing the recorded packet's dts as well:
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
if (pts_offset == AV_NOPTS_VALUE) {
pts_offset = ff->pkt.pts;
}
ff->pkt.pts -= pts_offset;
ff->pkt.dts -= pts_offset; // shift dts by the same amount, after rescaling, so pts >= dts still holds
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;
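Since the same offset is subtracted from pts and dts after both are rescaled to the output time base, the ordering between them is preserved and the constraint quoted above stays satisfied.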
I am having issues encoding screen captures into an H.264 file for viewing. The program below is cobbled together from examples here and here. The first example uses an older version of the FFmpeg API, so I tried to update it for use in my program. The file is created and has something written to it, but when I view it, the encoded images are all distorted. I am able to run the video encoding example from the FFmpeg API successfully. This is my first time posting, so if I missed anything please let me know.
I appreciate any assistance that is given.
My program:
#include <Windows.h>
#include <string>
#include <sstream>
#include <time.h>
#include <iostream>
#include <dwmapi.h>
extern "C"{
#include <stdint.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
}
using namespace std;
void ScreenShot(const char* BmpName, uint8_t *frame)
{
HWND DesktopHwnd = GetDesktopWindow();
RECT DesktopParams;
HDC DevC = GetDC(DesktopHwnd);
GetWindowRect(DesktopHwnd,&DesktopParams);
DWORD Width = DesktopParams.right - DesktopParams.left;
DWORD Height = DesktopParams.bottom - DesktopParams.top;
DWORD FileSize = sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER)+(sizeof(RGBTRIPLE)+1*(Width*Height*4));
char *BmpFileData = (char*)GlobalAlloc(0x0040,FileSize);
PBITMAPFILEHEADER BFileHeader = (PBITMAPFILEHEADER)BmpFileData;
PBITMAPINFOHEADER BInfoHeader = (PBITMAPINFOHEADER)&BmpFileData[sizeof(BITMAPFILEHEADER)];
BFileHeader->bfType = 0x4D42; // BM
BFileHeader->bfSize = FileSize; // total file size, not just the size of the file header
BFileHeader->bfOffBits = sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER);
BInfoHeader->biSize = sizeof(BITMAPINFOHEADER);
BInfoHeader->biPlanes = 1;
BInfoHeader->biBitCount = 32;
BInfoHeader->biCompression = BI_RGB;
BInfoHeader->biHeight = Height;
BInfoHeader->biWidth = Width;
RGBTRIPLE *Image = (RGBTRIPLE*)&BmpFileData[sizeof(BITMAPFILEHEADER)+sizeof(BITMAPINFOHEADER)];
RGBTRIPLE color;
//pPixels = (RGBQUAD **)new RGBQUAD[sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)];
int start = clock();
HDC CaptureDC = CreateCompatibleDC(DevC);
HBITMAP CaptureBitmap = CreateCompatibleBitmap(DevC,Width,Height);
SelectObject(CaptureDC,CaptureBitmap);
BitBlt(CaptureDC,0,0,Width,Height,DevC,0,0,SRCCOPY|CAPTUREBLT);
GetDIBits(CaptureDC,CaptureBitmap,0,Height,frame,(LPBITMAPINFO)BInfoHeader, DIB_RGB_COLORS);
int end = clock();
cout << "it took " << end - start << " to capture a frame" << endl;
DWORD Junk;
HANDLE FH = CreateFileA(BmpName,GENERIC_WRITE,FILE_SHARE_WRITE,0,CREATE_ALWAYS,0,0);
WriteFile(FH,BmpFileData,FileSize,&Junk,0);
CloseHandle(FH);
GlobalFree(BmpFileData);
}
void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
FILE *f;
AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
printf("Encode video file %s\n", filename);
/* find the h264 video encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
cin.get();
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
cin.get();
exit(1);
}
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base.num=1;
c->time_base.den = 25;
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames=1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if(codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* the image can be allocated by any means and av_image_alloc() is
just the most convenient way if av_malloc() is to be used */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
/* encode 10 seconds of video (250 frames at 25 fps) */
for(i=0;i<250;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
/* prepare a dummy image */
/* Y */
for(y=0;y<c->height;y++) {
for(x=0;x<c->width;x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for(y=0;y<c->height/2;y++) {
for(x=0;x<c->width/2;x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
printf("\n");
}
void write_video_frame()
{
}
int lineSizeOfFrame(int width)
{
return (width*24 + 31)/32 * 4; // 24bpp DIB rows are padded to 4-byte boundaries
}
int getScreenshotWithCursor(uint8_t* frame)
{
int successful = 0;
HDC screen, bitmapDC;
HBITMAP screen_bitmap;
screen = GetDC(NULL);
RECT DesktopParams;
HWND desktop = GetDesktopWindow();
GetWindowRect(desktop, &DesktopParams);
int width = DesktopParams.right;
int height = DesktopParams.bottom;
bitmapDC = CreateCompatibleDC(screen);
screen_bitmap = CreateCompatibleBitmap(screen, width, height);
SelectObject(bitmapDC, screen_bitmap);
if (BitBlt(bitmapDC, 0, 0, width, height, screen, 0, 0, SRCCOPY))
{
int pos_x, pos_y;
HICON hcur;
ICONINFO icon_info;
CURSORINFO cursor_info;
cursor_info.cbSize = sizeof(CURSORINFO);
if (GetCursorInfo(&cursor_info))
{
if (cursor_info.flags == CURSOR_SHOWING)
{
hcur = CopyIcon(cursor_info.hCursor);
if (GetIconInfo(hcur, &icon_info))
{
pos_x = cursor_info.ptScreenPos.x - icon_info.xHotspot;
pos_y = cursor_info.ptScreenPos.y - icon_info.yHotspot;
DrawIcon(bitmapDC, pos_x, pos_y, hcur);
if (icon_info.hbmColor) DeleteObject(icon_info.hbmColor);
if (icon_info.hbmMask) DeleteObject(icon_info.hbmMask);
}
}
}
int header_size = sizeof(BITMAPINFOHEADER) + 256*sizeof(RGBQUAD);
size_t line_size = lineSizeOfFrame(width);
PBITMAPINFO lpbi = (PBITMAPINFO) malloc(header_size);
lpbi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER); // biSize is the size of the info header alone, not header + palette
lpbi->bmiHeader.biWidth = width;
lpbi->bmiHeader.biHeight = height;
lpbi->bmiHeader.biPlanes = 1;
lpbi->bmiHeader.biBitCount = 24;
lpbi->bmiHeader.biCompression = BI_RGB;
lpbi->bmiHeader.biSizeImage = height*line_size;
lpbi->bmiHeader.biXPelsPerMeter = 0;
lpbi->bmiHeader.biYPelsPerMeter = 0;
lpbi->bmiHeader.biClrUsed = 0;
lpbi->bmiHeader.biClrImportant = 0;
if (GetDIBits(bitmapDC, screen_bitmap, 0, height, (LPVOID)frame, lpbi, DIB_RGB_COLORS))
{
int i;
uint8_t *buf_begin = frame;
uint8_t *buf_end = frame + line_size*(lpbi->bmiHeader.biHeight - 1);
void *temp = malloc(line_size);
for (i = 0; i < lpbi->bmiHeader.biHeight / 2; ++i)
{
memcpy(temp, buf_begin, line_size);
memcpy(buf_begin, buf_end, line_size);
memcpy(buf_end, temp, line_size);
buf_begin += line_size;
buf_end -= line_size;
}
cout << *buf_begin << endl;
free(temp);
successful = 1;
}
free(lpbi);
}
DeleteObject(screen_bitmap);
DeleteDC(bitmapDC);
ReleaseDC(NULL, screen);
return successful;
}
int main()
{
RECT DesktopParams;
HWND desktop = GetDesktopWindow();
GetWindowRect(desktop, &DesktopParams);
int width = DesktopParams.right;
int height = DesktopParams.bottom;
uint8_t *frame = (uint8_t *)malloc(lineSizeOfFrame(width) * height); // room for a 24bpp DIB; width * height alone is far too small
AVCodec *codec;
AVCodecContext *codecContext = NULL;
AVPacket packet;
FILE *f;
AVFrame *pictureYUV = NULL;
AVFrame *pictureRGB;
avcodec_register_all();
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if(!codec)
{
cout << "codec not found!" << endl;
cin.get();
return 1;
}
else
{
cout << "codec h264 found!" << endl;
}
codecContext = avcodec_alloc_context3(codec);
codecContext->bit_rate = width * height * 4;
codecContext->width = width;
codecContext->height = height;
codecContext->time_base.num = 1;
codecContext->time_base.den = 250;
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->keyint_min = 1;
codecContext->i_quant_factor = (float)0.71; // qscale factor between P and I frames
codecContext->b_frame_strategy = 20; ///// find out exactly what this does
codecContext->qcompress = (float)0.6; ///// find out exactly what this does
codecContext->qmin = 20; // minimum quantizer
codecContext->qmax = 51; // maximum quantizer
codecContext->max_qdiff = 4; // maximum quantizer difference between frames
codecContext->refs = 4; // number of reference frames
codecContext->trellis = 1;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
codecContext->codec_id = AV_CODEC_ID_H264;
codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
if(avcodec_open2(codecContext, codec, NULL) < 0)
{
cout << "couldn't open codec" << endl;
cin.get();
return 1;
}
else
{
cout << "opened h264 codec!" << endl;
cin.get();
}
f = fopen("test.h264", "wb");
if(!f)
{
cout << "Unable to open file" << endl;
return 1;
}
// GetDIBits was asked for 24-bit data, so the source format is BGR24, not RGB32
struct SwsContext *img_convert_ctx = sws_getContext(codecContext->width, codecContext->height, PIX_FMT_BGR24, codecContext->width,
codecContext->height, codecContext->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
int got_output = 0, i = 0;
uint8_t encode[] = { 0, 0, 1, 0xb7 }; // MPEG sequence end code carried over from the example; harmless for raw H.264
try
{
for(i = 0; i < codecContext->time_base.den; i++)
{
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
pictureRGB = av_frame_alloc();
pictureYUV = av_frame_alloc();
getScreenshotWithCursor(frame);
//ScreenShot("example.bmp", frame);
int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, codecContext->width, codecContext->height); // allocating outbuffer
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));
avpicture_fill((AVPicture*)pictureRGB, frame, PIX_FMT_BGR24, codecContext->width, codecContext->height); // wrap the 24bpp screenshot; a DIB row is 4-byte aligned, which equals width*3 only when that product is a multiple of 4
avpicture_fill((AVPicture*)pictureYUV, outbuffer, PIX_FMT_YUV420P, codecContext->width, codecContext->height); // outbuffer backs pictureYUV, so no extra av_image_alloc is needed
sws_scale(img_convert_ctx, pictureRGB->data, pictureRGB->linesize, 0, codecContext->height, pictureYUV->data, pictureYUV->linesize);
pictureYUV->pts = i;
avcodec_encode_video2(codecContext, &packet, pictureYUV, &got_output);
if(got_output)
{
printf("Write frame %3d (size=%5d)\n", i, packet.size);
fwrite(packet.data, 1, packet.size, f);
av_free_packet(&packet);
}
av_freep(&outbuffer); // free the per-iteration buffers, otherwise every frame leaks
av_frame_free(&pictureRGB);
av_frame_free(&pictureYUV);
}
for(got_output = 1; got_output; i++)
{
fflush(stdout);
avcodec_encode_video2(codecContext, &packet, NULL, &got_output);
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, packet.size);
fwrite(packet.data, 1, packet.size, f);
av_free_packet(&packet);
}
}
}
catch(std::exception ex)
{
cout << ex.what() << endl;
}
avcodec_close(codecContext);
av_free(codecContext);
fwrite(encode, 1, sizeof(encode), f);
fclose(f);
cin.get();
return 0;
}