I'm trying to record the whole desktop stream with FFmpeg on Windows.
I found a working example here. The problem is that some of the functions are deprecated, so I tried to replace them with the updated ones.
But there are still some problems: at runtime I get "has triggered a breakpoint." and also "not able to read the location."
The bigger problem is that I don't know whether this is the right way to do this at all.
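As far as I understand it, the modern pattern drops the register calls entirely (av_register_all() and avcodec_register_all() are no-ops since FFmpeg 4.0 and removed in 5.0) and builds the decoder context from codecpar instead of reading stream->codec. A minimal sketch using the variable names from my code below (error checks omitted):
AVCodecParameters *par = pAVFormatContext->streams[VideoStreamIndx]->codecpar;
const AVCodec *dec = avcodec_find_decoder(par->codec_id); // find decoder by id
AVCodecContext *decCtx = avcodec_alloc_context3(dec);     // allocate a fresh context
avcodec_parameters_to_context(decCtx, par);               // copy the stream parameters in
avcodec_open2(decCtx, dec, NULL);                         // then open as usual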
My code looks like this:
using namespace std;
/* initialize the resources*/
Recorder::Recorder()
{
av_register_all();
avcodec_register_all();
avdevice_register_all();
cout<<"\nall required functions are registered successfully";
}
/* uninitialize the resources */
Recorder::~Recorder()
{
avformat_close_input(&pAVFormatContext);
if( !pAVFormatContext )
{
cout<<"\nfile closed sucessfully";
}
else
{
cout<<"\nunable to close the file";
exit(1);
}
avformat_free_context(pAVFormatContext);
if( !pAVFormatContext )
{
cout<<"\navformat free successfully";
}
else
{
cout<<"\nunable to free avformat context";
exit(1);
}
}
/* establishing the connection between camera or screen through its respective folder */
int Recorder::openCamera()
{
value = 0;
options = NULL;
pAVFormatContext = NULL;
pAVFormatContext = avformat_alloc_context();//Allocate an AVFormatContext.
openScreen(pAVFormatContext);
/* set frame per second */
value = av_dict_set( &options,"framerate","30",0 );
if(value < 0)
{
cout<<"\nerror in setting dictionary value";
exit(1);
}
value = av_dict_set( &options, "preset", "medium", 0 );
if(value < 0)
{
cout<<"\nerror in setting preset values";
exit(1);
}
// value = avformat_find_stream_info(pAVFormatContext,NULL);
if(value < 0)
{
cout<<"\nunable to find the stream information";
exit(1);
}
VideoStreamIndx = -1;
/* find the first video stream index . Also there is an API available to do the below operations */
for(int i = 0; i < pAVFormatContext->nb_streams; i++ ) // find video stream position/index.
{
if( pAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO )
{
VideoStreamIndx = i;
break;
}
}
if( VideoStreamIndx == -1)
{
cout<<"\nunable to find the video stream index. (-1)";
exit(1);
}
// get the codec context of the video stream (deprecated direct access via stream->codec)
pAVCodecContext = pAVFormatContext->streams[VideoStreamIndx]->codec;
pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
if( pAVCodec == NULL )
{
cout<<"\nunable to find the decoder";
exit(1);
}
value = avcodec_open2(pAVCodecContext , pAVCodec , NULL);//Initialize the AVCodecContext to use the given AVCodec.
if( value < 0 )
{
cout<<"\nunable to open the av codec";
exit(1);
}
}
/* initialize the video output file and its properties */
int Recorder::init_outputfile()
{
outAVFormatContext = NULL;
value = 0;
output_file = "output.mp4";
avformat_alloc_output_context2(&outAVFormatContext, NULL, NULL, output_file);
if (!outAVFormatContext)
{
cout<<"\nerror in allocating av format output context";
exit(1);
}
/* Returns the output format in the list of registered output formats which best matches the provided parameters, or returns NULL if there is no match. */
output_format = av_guess_format(NULL, output_file ,NULL);
if( !output_format )
{
cout<<"\nerror in guessing the video format. try with correct format";
exit(1);
}
video_st = avformat_new_stream(outAVFormatContext ,NULL);
if( !video_st )
{
cout<<"\nerror in creating a av format new stream";
exit(1);
}
if (codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outAVCodecContext->priv_data, "preset", "slow", 0);
}
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if (!outAVCodec)
{
cout << "\nerror in finding the av codecs. try again with correct codec";
exit(1);
}
outAVCodecContext = avcodec_alloc_context3(outAVCodec);
if( !outAVCodecContext )
{
cout<<"\nerror in allocating the codec contexts";
exit(1);
}
/* set property of the video file */
outAVCodecContext = video_st->codec;
outAVCodecContext->codec_id = AV_CODEC_ID_MPEG4;// AV_CODEC_ID_MPEG4; // AV_CODEC_ID_H264 // AV_CODEC_ID_MPEG1VIDEO
outAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outAVCodecContext->bit_rate = 400000; // 2500000
outAVCodecContext->width = 1920;
outAVCodecContext->height = 1080;
outAVCodecContext->gop_size = 3;
outAVCodecContext->max_b_frames = 2;
outAVCodecContext->time_base.num = 1;
outAVCodecContext->time_base.den = 30; //15fps
/* Some container formats (like MP4) require global headers to be present
Mark the encoder so that it behaves accordingly. */
if ( outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
{
outAVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
value = avcodec_open2(outAVCodecContext, outAVCodec, NULL);
if( value < 0)
{
cout<<"\nerror in opening the avcodec";
exit(1);
}
/* create empty video file */
if ( !(outAVFormatContext->flags & AVFMT_NOFILE) )
{
if( avio_open2(&outAVFormatContext->pb , output_file , AVIO_FLAG_WRITE ,NULL, NULL) < 0 )
{
cout<<"\nerror in creating the video file";
exit(1);
}
}
if(!outAVFormatContext->nb_streams)
{
cout<<"\noutput file dose not contain any stream";
exit(1);
}
/* imp: mp4 container or some advanced container file required header information*/
value = avformat_write_header(outAVFormatContext , &options);
if(value < 0)
{
cout<<"\nerror in writing the header context";
exit(1);
}
/*
// uncomment here to view the complete video file informations
cout<<"\n\nOutput file information :\n\n";
av_dump_format(outAVFormatContext , 0 ,output_file ,1);
*/
}
int Recorder::stop() {
threading = false;
demux->join();
rescale->join();
mux->join();
return 0;
}
int Recorder::start() {
initVideoThreads();
return 0;
}
int Recorder::initVideoThreads() {
demux = new thread(&Recorder::demuxVideoStream, this, pAVCodecContext, pAVFormatContext, VideoStreamIndx);
rescale = new thread(&Recorder::rescaleVideoStream, this, pAVCodecContext, outAVCodecContext);
mux = new thread(&Recorder::encodeVideoStream, this, outAVCodecContext);
return 0;
}
void Recorder::demuxVideoStream(AVCodecContext* codecContext, AVFormatContext* formatContext, int streamIndex)
{
// init packet
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
int ctr = 0;
while (threading)
{
if (av_read_frame(formatContext, packet) < 0) {
exit(1);
}
if (packet->stream_index == streamIndex)
{
int return_value; // = 0;
ctr++;
do
{
return_value = avcodec_send_packet(codecContext, packet);
} while (return_value == AVERROR(EAGAIN) && threading);
//int i = avcodec_send_packet(codecContext, packet);
if (return_value < 0 && threading) { // call Decoder
cout << "unable to decode video";
exit(1);
}
}
}
avcodec_send_packet(codecContext, NULL); // flush decoder
// return 0;
}
void Recorder::rescaleVideoStream(AVCodecContext* inCodecContext, AVCodecContext* outCodecContext)
{
bool closing = false;
AVFrame* inFrame = av_frame_alloc();
if (!inFrame)
{
cout << "\nunable to release the avframe resources";
exit(1);
}
int nbytes = av_image_get_buffer_size(outAVCodecContext->pix_fmt, outAVCodecContext->width, outAVCodecContext->height, 32);
uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
if (video_outbuf == NULL)
{
cout << "\nunable to allocate memory";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame)
{
cout << "\nunable to release the avframe resources for outframe";
exit(1);
}
// Setup the data pointers and linesizes based on the specified image parameters and the provided array.
int value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, AV_PIX_FMT_YUV420P, outAVCodecContext->width, outAVCodecContext->height, 1); // returns : the size in bytes required for src
if (value < 0)
{
cout << "\nerror in filling image array";
}
int ctr = 0;
while (threading || !closing) {
int value = avcodec_receive_frame(inCodecContext, inFrame);
if (value == 0) {
ctr++;
SwsContext* swsCtx_ = sws_getContext(inCodecContext->width,
inCodecContext->height,
inCodecContext->pix_fmt,
outAVCodecContext->width,
outAVCodecContext->height,
outAVCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
sws_scale(swsCtx_, inFrame->data, inFrame->linesize, 0, inCodecContext->height, outFrame->data, outFrame->linesize);
int return_value;
do
{
return_value = avcodec_send_frame(outCodecContext, outFrame);
} while (return_value == AVERROR(EAGAIN) && threading);
}
closing = (value == AVERROR_EOF);
}
avcodec_send_frame(outCodecContext, NULL);
// av_free(video_outbuf);
// return 0;
}
void Recorder::encodeVideoStream(AVCodecContext* codecContext)
{
bool closing = true;
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
int ctr = 0;
while (threading || !closing) {
packet->data = NULL; // packet data will be allocated by the encoder
packet->size = 0;
ctr++;
int value = avcodec_receive_packet(codecContext, packet);
if (value == 0) {
if (packet->pts != AV_NOPTS_VALUE)
packet->pts = av_rescale_q(packet->pts, video_st->codec->time_base, video_st->time_base);
if (packet->dts != AV_NOPTS_VALUE)
packet->dts = av_rescale_q(packet->dts, video_st->codec->time_base, video_st->time_base);
//printf("Write frame %3d (size= %2d)\n", j++, packet->size / 1000);
if (av_write_frame(outAVFormatContext, packet) != 0)
{
cout << "\nerror in writing video frame";
}
}
closing = (value == AVERROR_EOF);
}
value = av_write_trailer(outAVFormatContext);
if (value < 0)
{
cout << "\nerror in writing av trailer";
exit(1);
}
// av_free(packet);
// return 0;
}
int Recorder::openScreen(AVFormatContext* pFormatCtx) {
/*
X11 video input device.
To enable this input device during configuration you need libxcb installed on your system. It will be automatically detected during configuration.
This device allows one to capture a region of an X11 display.
refer : https://www.ffmpeg.org/ffmpeg-devices.html#x11grab
*/
/* current below is for screen recording. to connect with camera use v4l2 as a input parameter for av_find_input_format */
pAVInputFormat = av_find_input_format("gdigrab");
//value = avformat_open_input(&pAVFormatContext, ":0.0+10,250", pAVInputFormat, NULL);
value = avformat_open_input(&pAVFormatContext, "desktop", pAVInputFormat, NULL);
if (value != 0)
{
cout << "\nerror in opening input device";
exit(1);
}
return 0;
}
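One more thing I am unsure about: my demux, rescale and mux threads call avcodec_send_packet and avcodec_receive_frame on the same AVCodecContext from different threads, and as far as I know a codec context is not safe for concurrent use, so each shared context probably needs a lock. A sketch of what I mean (one mutex per shared context; the mutex name is made up):
#include <mutex>

std::mutex decoderMutex; // guards the shared decoder context

// in demuxVideoStream:
{
    std::lock_guard<std::mutex> lock(decoderMutex);
    return_value = avcodec_send_packet(codecContext, packet);
}

// in rescaleVideoStream:
{
    std::lock_guard<std::mutex> lock(decoderMutex);
    value = avcodec_receive_frame(inCodecContext, inFrame);
}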
Related
I want to create a screen recording app for Windows using Flutter desktop.
So I wrote a plugin for it: the recording is done with FFmpeg in C++, and I can successfully record my screen. But when I record for a long time, my app freezes, even though there is still plenty of memory and disk space.
I record with a duration that I set myself, and when the recording finishes, my app returns to normal.
I don't know whether the issue is in my C++ code or in Flutter.
Here is my C++ code for recording:
const char* out_filename = "new_out.mp4";
avdevice_register_all();
const AVOutputFormat* ofmt = NULL;
const AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
const AVCodec* av_codec;
AVStream* video_stream = NULL;
av_codec_par_out->height = 1280;
av_codec_par_out->width = 1640;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264;
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
// av_codec_par_out->sample_aspect_ratio.den = 3;
// av_codec_par_out->sample_aspect_ratio.num = 4;
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", 0);
av_dict_set(&options, "offset_x", "20", 0);
av_dict_set(&options, "offset_y", "40", 0);
av_dict_set(&options, "video_size", "640x480", 0);
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0) {
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) {
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1) {
cout << "\nunable to find the video stream index. (-1)";
exit(1);
}
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL) {
cout << "\nunable to find the decoder";
exit(1);
}
avcodec_contx = avcodec_alloc_context3(av_codec);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0) {
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);
if (value < 0) {
cout << "\nunable to open the av codec";
exit(1);
}
value = 0;
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt) {
cout << "\nerror in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx) {
cout << "\nerror in allocating av format output context";
exit(1);
}
const AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL) {
cout << "\nunable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream) {
cout << "\nerror in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out) {
cout << "\nerror in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0) {
cout << "\nCodec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0) {
cout << "\nerror in converting the codec contexts";
exit(1);
}
//av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
av_cntx_out->gop_size = 30;
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = 30;
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0) {
cout << "\nunable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264) {
av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
}
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0) {
cout << "\nerror in writing the header context";
exit(1);
}
AVPacket* av_pkt = av_packet_alloc();
//av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
memset(av_pkt, 0, sizeof(AVPacket)); //???
AVFrame* av_frame = av_frame_alloc();
if (!av_frame) {
cout << "\nunable to release the avframe resources";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame) {
cout << "\nunable to release the avframe resources for outframe";
exit(1);
}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0) {
cout << "\nUnable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL) {
cout << "\n Cannot allocate SWC Context";
exit(1);
}
int ii = 0;
int enc_packet_counter = 0;
int no_frames = 500; // for duration
int frameFinished;
AVPacket* outPacket = av_packet_alloc();
isRecording = true;
while (av_read_frame(ifmt_ctx, av_pkt) >= 0) {
// while(true) {
cout << "\nwew 1 av_read_frame";
if (ii++ == no_frames)
break;
// if(!isRecording) break;
if (av_pkt->stream_index == VideoStreamIndx){
int ret = avcodec_send_packet(avcodec_contx, av_pkt);
if (ret < 0) {
printf("Error while sending packet");
}
frameFinished = true;
int response = 0;
response = avcodec_receive_frame(avcodec_contx, av_frame);
if (response < 0) {
printf("Error while receiving frame from decoder");
frameFinished = false;
}
if (frameFinished){
memset(outPacket, 0, sizeof(AVPacket));
outPacket->data = NULL;
outPacket->size = 0;
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
if (outPacket->dts != AV_NOPTS_VALUE)
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
enc_packet_counter++;
int sts = sws_scale(swsCtx, //struct SwsContext *c,
av_frame->data, //const uint8_t *const srcSlice[],
av_frame->linesize, //const int srcStride[],
0, //int srcSliceY,
av_frame->height, //int srcSliceH,
outFrame->data, //uint8_t *const dst[],
outFrame->linesize); //const int dstStride[]);
if (sts < 0) {
printf("Error while executing sws_scale");
}
do {
cout << "\nwew 1 avcodec_send_frame";
if (ret == AVERROR(EAGAIN)) {
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (ret) break; // deal with error
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
}
else if (ret != 0) {
char str2[] = "";
cout << "\nError :" << av_make_error_string(str2, sizeof(str2), ret);
// return -1;
}
ret = avcodec_send_frame(av_cntx_out, outFrame);
} while (ret);
} // frameFinished
}
// av_packet_unref(&av_pkt);
// av_packet_free(&av_pkt);
// av_packet_unref(&outPacket);
// av_packet_free(&outPacket);
}
int ret = 0;
avcodec_send_frame(av_cntx_out, NULL);
do {
cout << "\nwew 1 av_write_frame";
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (!ret)
{
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
enc_packet_counter++;
}
} while (!ret);
value = av_write_trailer(ofmt_ctx);
if (value < 0) {
cout << "\nerror in writing av trailer";
exit(1);
}
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx) {
cout << "\nfile closed successfully";
}
else {
cout << "\nunable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx) {
cout << "\navformat free successfully";
}
else {
cout << "\nunable to free avformat context";
exit(1);
}
avcodec_free_context(&av_cntx_out);
if (!av_cntx_out) {
cout << "\navcodec free successfully";
}
else {
cout << "\nunable to free avcodec context";
exit(1);
}
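One thing I suspect: this loop currently runs synchronously inside the method-channel handler, so if that handler executes on the platform (UI) thread, the app would appear frozen for exactly the recording duration. A sketch of moving the loop onto a worker thread (startRecording/stopRecording and the record() wrapper are hypothetical names; isRecording is the flag from the code above):
#include <thread>

std::thread recorderThread;

void startRecording() {
    isRecording = true;
    recorderThread = std::thread([] {
        record(); // hypothetical wrapper around the capture loop above
    });
    // return to Flutter immediately; the capture runs in the background
}

void stopRecording() {
    isRecording = false; // the loop checks this flag and exits
    if (recorderThread.joinable())
        recorderThread.join();
}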
Here is my Flutter plugin:
class MethodChannelScreenrecorder extends ScreenrecorderPlatform {
static const MethodChannel _channel =
MethodChannel('screenrecorder');
@override
Future<bool> startRecordScreen(String path, int windowID) async {
final bool start = await _channel
.invokeMethod('startRecordScreen', {"path": path, "windowID": windowID});
return start;
}
@override
Future<bool> get stopRecordScreen async {
final bool path = await _channel.invokeMethod('stopRecordScreen');
return path;
}
}
class Screenrecorder {
static Future<bool> startRecordScreen({String path = "", int windowID = 0}) async{
final bool start = await ScreenrecorderPlatform.instance.startRecordScreen(path, windowID);
return start;
}
static Future<bool> get stopRecordScreen async {
final bool path = await ScreenrecorderPlatform.instance.stopRecordScreen;
return path;
}
}
I'm trying to write a function to cut videos/audio with the FFmpeg C APIs in C++. I started with the remuxing.c example from the FFmpeg GitHub repository and tried to apply the same changes mentioned in this question, but I'm getting a blank screen at the beginning of the output, equal in length to the duration I want to cut. This is the function I came up with (differences from the remuxing example are noted with // <- HERE):
int cut_video(const char *in_filename, const char *out_filename, double from_seconds, double end_seconds) {
const AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket *pkt = NULL;
int ret, i;
int stream_index = 0;
int *stream_mapping = NULL;
int stream_mapping_size = 0;
pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
return 1;
}
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
ret = av_seek_frame(ifmt_ctx, -1, from_seconds * AV_TIME_BASE, AVSEEK_FLAG_ANY); // <- HERE
if (ret < 0) { // <- HERE
fprintf(stderr, "Error seek\n"); // <- HERE
goto end; // <- HERE
} // <- HERE
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
stream_mapping_size = ifmt_ctx->nb_streams;
stream_mapping = (int *)av_calloc(stream_mapping_size, sizeof(*stream_mapping));
if (!stream_mapping) {
ret = AVERROR(ENOMEM);
goto end;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *out_stream;
AVStream *in_stream = ifmt_ctx->streams[i];
AVCodecParameters *in_codecpar = in_stream->codecpar;
if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO && in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
stream_mapping[i] = -1;
continue;
}
stream_mapping[i] = stream_index++;
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
if (ret < 0) {
fprintf(stderr, "Failed to copy codec parameters\n");
goto end;
}
out_stream->codecpar->codec_tag = 0;
}
av_dump_format(ofmt_ctx, 0, out_filename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
while (1) {
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, pkt);
if (ret < 0) break;
in_stream = ifmt_ctx->streams[pkt->stream_index];
if (pkt->stream_index >= stream_mapping_size || stream_mapping[pkt->stream_index] < 0 ||
av_q2d(in_stream->time_base) * pkt->pts > end_seconds) { // <- HERE
av_packet_unref(pkt);
continue;
}
pkt->stream_index = stream_mapping[pkt->stream_index];
out_stream = ofmt_ctx->streams[pkt->stream_index];
log_packet(ifmt_ctx, pkt, "in");
/* copy packet */
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
pkt->pos = -1;
log_packet(ofmt_ctx, pkt, "out");
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
* its contents and resets pkt), so that no unreferencing is necessary.
* This would be different if one used av_write_frame(). */
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
}
av_write_trailer(ofmt_ctx);
end:
av_packet_free(&pkt);
avformat_close_input(&ifmt_ctx);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
av_freep(&stream_mapping);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}
And here is how I call it:
cut_video("/Users/aliosm/Desktop/1.mp4", "/Users/aliosm/Desktop/2.mp4", 10, 40);
I searched a lot on Google and didn't find anything useful related to this specific use case. Do you have any ideas?
Finally, I was able to do it with help from the #ffmpeg channel community on Libera.Chat IRC. The final code:
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/timestamp.h>
}
/**
 * @brief Print the information of the passed packet.
 *
 * @fn logPacket
 * @param avFormatContext AVFormatContext of the given packet.
 * @param avPacket AVPacket to log.
 * @param tag String to tag the log output.
 */
void logPacket(const AVFormatContext *avFormatContext, const AVPacket *avPacket, const QString tag) {
AVRational *timeBase = &avFormatContext->streams[avPacket->stream_index]->time_base;
qDebug() << QString("%1: pts:%2 pts_time:%3 dts:%4 dts_time:%5 duration:%6 duration_time:%7 stream_index:%8")
.arg(tag)
.arg(av_ts2str(avPacket->pts))
.arg(av_ts2timestr(avPacket->pts, timeBase))
.arg(av_ts2str(avPacket->dts))
.arg(av_ts2timestr(avPacket->dts, timeBase))
.arg(av_ts2str(avPacket->duration))
.arg(av_ts2timestr(avPacket->duration, timeBase))
.arg(avPacket->stream_index);
}
/**
 * @brief Cut the file at the given input file path based on the start and end seconds, and write the cut file to the
 * given output file path.
 *
 * @fn cutFile
 * @param inputFilePath Input file path to be cut.
 * @param startSeconds Cutting start time in seconds.
 * @param endSeconds Cutting end time in seconds.
 * @param outputFilePath Output file path to write the new cut file.
 *
 * @details This function takes an input file path and cuts it based on the given start and end seconds. The cut
 * file is then written to the given output file path.
 *
 * @return True if the cutting operation finished successfully, false otherwise.
 */
bool cutFile(const QString& inputFilePath, const long long& startSeconds, const long long& endSeconds,
const QString& outputFilePath) {
int operationResult;
AVPacket* avPacket = NULL;
AVFormatContext* avInputFormatContext = NULL;
AVFormatContext* avOutputFormatContext = NULL;
avPacket = av_packet_alloc();
if (!avPacket) {
qCritical("Failed to allocate AVPacket.");
return false;
}
try {
operationResult = avformat_open_input(&avInputFormatContext, inputFilePath.toStdString().c_str(), 0, 0);
if (operationResult < 0) {
throw std::runtime_error(QString("Failed to open the input file '%1'.").arg(inputFilePath).toStdString().c_str());
}
operationResult = avformat_find_stream_info(avInputFormatContext, 0);
if (operationResult < 0) {
throw std::runtime_error(QString("Failed to retrieve the input stream information.").toStdString().c_str());
}
avformat_alloc_output_context2(&avOutputFormatContext, NULL, NULL, outputFilePath.toStdString().c_str());
if (!avOutputFormatContext) {
operationResult = AVERROR_UNKNOWN;
throw std::runtime_error(QString("Failed to create the output context.").toStdString().c_str());
}
int streamIndex = 0;
int streamMapping[avInputFormatContext->nb_streams];
int streamRescaledStartSeconds[avInputFormatContext->nb_streams];
int streamRescaledEndSeconds[avInputFormatContext->nb_streams];
// Copy streams from the input file to the output file.
for (int i = 0; i < avInputFormatContext->nb_streams; i++) {
AVStream* outStream;
AVStream* inStream = avInputFormatContext->streams[i];
streamRescaledStartSeconds[i] = av_rescale_q(startSeconds * AV_TIME_BASE, AV_TIME_BASE_Q, inStream->time_base);
streamRescaledEndSeconds[i] = av_rescale_q(endSeconds * AV_TIME_BASE, AV_TIME_BASE_Q, inStream->time_base);
if (inStream->codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
inStream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
inStream->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
streamMapping[i] = -1;
continue;
}
streamMapping[i] = streamIndex++;
outStream = avformat_new_stream(avOutputFormatContext, NULL);
if (!outStream) {
operationResult = AVERROR_UNKNOWN;
throw std::runtime_error(QString("Failed to allocate the output stream.").toStdString().c_str());
}
operationResult = avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
if (operationResult < 0) {
throw std::runtime_error(
QString("Failed to copy codec parameters from input stream to output stream.").toStdString().c_str());
}
outStream->codecpar->codec_tag = 0;
}
if (!(avOutputFormatContext->oformat->flags & AVFMT_NOFILE)) {
operationResult = avio_open(&avOutputFormatContext->pb, outputFilePath.toStdString().c_str(), AVIO_FLAG_WRITE);
if (operationResult < 0) {
throw std::runtime_error(
QString("Failed to open the output file '%1'.").arg(outputFilePath).toStdString().c_str());
}
}
operationResult = avformat_write_header(avOutputFormatContext, NULL);
if (operationResult < 0) {
throw std::runtime_error(QString("Error occurred when opening output file.").toStdString().c_str());
}
operationResult = avformat_seek_file(avInputFormatContext, -1, INT64_MIN, startSeconds * AV_TIME_BASE,
startSeconds * AV_TIME_BASE, 0);
if (operationResult < 0) {
throw std::runtime_error(
QString("Failed to seek the input file to the targeted start position.").toStdString().c_str());
}
while (true) {
operationResult = av_read_frame(avInputFormatContext, avPacket);
if (operationResult < 0) break;
// Skip packets from unknown streams and packets after the end cut position.
if (avPacket->stream_index >= avInputFormatContext->nb_streams || streamMapping[avPacket->stream_index] < 0 ||
avPacket->pts > streamRescaledEndSeconds[avPacket->stream_index]) {
av_packet_unref(avPacket);
continue;
}
avPacket->stream_index = streamMapping[avPacket->stream_index];
logPacket(avInputFormatContext, avPacket, "in");
// Shift the packet to its new position by subtracting the rescaled start seconds.
avPacket->pts -= streamRescaledStartSeconds[avPacket->stream_index];
avPacket->dts -= streamRescaledStartSeconds[avPacket->stream_index];
av_packet_rescale_ts(avPacket, avInputFormatContext->streams[avPacket->stream_index]->time_base,
avOutputFormatContext->streams[avPacket->stream_index]->time_base);
avPacket->pos = -1;
logPacket(avOutputFormatContext, avPacket, "out");
operationResult = av_interleaved_write_frame(avOutputFormatContext, avPacket);
if (operationResult < 0) {
throw std::runtime_error(QString("Failed to mux the packet.").toStdString().c_str());
}
}
av_write_trailer(avOutputFormatContext);
} catch (std::runtime_error e) {
qCritical("%s", e.what());
}
av_packet_free(&avPacket);
avformat_close_input(&avInputFormatContext);
if (avOutputFormatContext && !(avOutputFormatContext->oformat->flags & AVFMT_NOFILE))
avio_closep(&avOutputFormatContext->pb);
avformat_free_context(avOutputFormatContext);
if (operationResult < 0 && operationResult != AVERROR_EOF) {
qCritical("%s", QString("Error occurred: %1.").arg(av_err2str(operationResult)).toStdString().c_str());
return false;
}
return true;
}
The code is written in C++ and uses some Qt-related classes; you can remove them and use the code in plain C++ projects.
I tried my best to make it readable, and I hope it is good and helpful.
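Since logPacket is the only Qt-heavy part, here is what a plain C++ version might look like. Note that the av_ts2str/av_ts2timestr macros use C compound literals and may not compile as C++, so this sketch calls the underlying av_ts_make_* helpers with local buffers instead (it relies on the same extern "C" includes as above, plus <cstdio>):
#include <cstdio>

void logPacket(const AVFormatContext *avFormatContext, const AVPacket *avPacket, const char *tag) {
    AVRational *timeBase = &avFormatContext->streams[avPacket->stream_index]->time_base;
    char pts[AV_TS_MAX_STRING_SIZE], ptsTime[AV_TS_MAX_STRING_SIZE];
    char dts[AV_TS_MAX_STRING_SIZE], dtsTime[AV_TS_MAX_STRING_SIZE];
    char dur[AV_TS_MAX_STRING_SIZE], durTime[AV_TS_MAX_STRING_SIZE];
    fprintf(stderr, "%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
            tag,
            av_ts_make_string(pts, avPacket->pts), av_ts_make_time_string(ptsTime, avPacket->pts, timeBase),
            av_ts_make_string(dts, avPacket->dts), av_ts_make_time_string(dtsTime, avPacket->dts, timeBase),
            av_ts_make_string(dur, avPacket->duration), av_ts_make_time_string(durTime, avPacket->duration, timeBase),
            avPacket->stream_index);
}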
Update 1: I updated the code to fix a bug in it.
Update 2: I updated the code to do some refactoring.
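For completeness, a call mirroring the example paths from the question:
cutFile("/Users/aliosm/Desktop/1.mp4", 10, 40, "/Users/aliosm/Desktop/2.mp4");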
I have a set of tiny cameras which stream H264-encoded video over TCP. I need to connect to them on demand, based on user actions in the browser, and display the live stream.
I've been searching all over the Internet for how this can be achieved, but without success.
The closest I got was writing a small program using libav and C++ that connects to the video stream and saves it as Motion JPEG, then using mjpg_streamer to display the result as a live stream. But this solution is overly complicated, and my program crashes with errors like:
Failed to decode av_out_packet: Operation now in progress
Or
Failed to read av_frame
Here's the piece of code I use to decode the stream.
void decode_stream(const char * address, int threadIdx, const char * output_dir) {
std::cout << "Started decoding thread ID: " << std::this_thread::get_id() << " TID: " << threadIdx << std::endl;
AVFormatContext *av_format_ctx = avformat_alloc_context();
// register timeout callback
auto * ith = new ffmpeg_interrupt_handler(default_timeout * 10);
av_format_ctx->interrupt_callback.opaque = (void *)ith;
av_format_ctx->interrupt_callback.callback = &ffmpeg_interrupt_handler::check_interrupt;
AVInputFormat *av_input_fmt = av_find_input_format("h264");
if (avformat_open_input(&av_format_ctx, address, av_input_fmt, nullptr) != 0) {
avformat_close_input(&av_format_ctx);
perror("Could not open input context");
exit(EXIT_FAILURE);
}
int video_stream_index = -1;
AVCodec* av_codec;
AVCodecParameters * av_codec_params;
//find valid video stream
for (int i = 0; i < av_format_ctx->nb_streams; ++i) {
av_codec_params = av_format_ctx->streams[i]->codecpar;
av_codec = avcodec_find_decoder(av_codec_params->codec_id);
if (!av_codec) {
perror("Could not find coded decoder");
continue;
}
if (av_codec_params->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
if (video_stream_index == -1) {
perror("Could find valid video stream.");
exit(EXIT_FAILURE);
}
//allocate codec context
AVCodecContext * av_codec_ctx = avcodec_alloc_context3(av_codec);
if (!av_codec_ctx) {
perror("Could not create AVCodec Context\n");
exit(EXIT_FAILURE);
}
if (avcodec_parameters_to_context(av_codec_ctx, av_codec_params) < 0) {
perror("Could not initialize AVCodec Context\n");
exit(EXIT_FAILURE);
}
if (avcodec_open2(av_codec_ctx, av_codec, nullptr) < 0) {
perror("Could not open AVCodec\n");
exit(EXIT_FAILURE);
}
AVFrame* av_frame = av_frame_alloc();
if (!av_frame) {
perror("Could not allocate AVFrame");
exit(EXIT_FAILURE);
}
AVPacket *av_packet = av_packet_alloc();
if (!av_packet) {
perror("Could not allocate AVFrame");
exit(EXIT_FAILURE);
}
AVCodec *av_out_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
if (!av_out_codec) {
perror("Could not find MJPEG codec");
exit(EXIT_FAILURE);
}
AVCodecContext *av_out_codec_ctx = avcodec_alloc_context3(av_out_codec);
if (!av_out_codec_ctx) {
perror("Could not allocate output context");
exit(EXIT_FAILURE);
}
av_out_codec_ctx->width = 1280;
av_out_codec_ctx->height = 720;
av_out_codec_ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
av_out_codec_ctx->time_base = (AVRational){5, AVFMT_VARIABLE_FPS};
if (avcodec_open2(av_out_codec_ctx, av_out_codec, nullptr) < 0) {
perror("Could not open output codec");
exit(EXIT_FAILURE);
}
AVPacket *av_out_packet = av_packet_alloc();
std::string output_filename = output_dir;
if (! fs::exists(output_dir)) {
fs::create_directory(output_dir);
} else if ( fs::exists(output_dir) && ! fs::is_directory(output_dir)) {
perror("Target output is not a directory!");
exit(EXIT_FAILURE);
}
std::string output_final_dir = output_dir;
output_final_dir += "stream_" + std::to_string(threadIdx);
if (! fs::exists(output_final_dir)) {
fs::create_directory(output_final_dir);
}
output_filename += "stream_" + std::to_string(threadIdx) + "/stream_" + std::to_string(threadIdx) + ".jpg";
int response;
FILE *JPEGFile = nullptr;
ith->reset(default_timeout);
while (av_read_frame(av_format_ctx, av_packet) >= 0) {
if (av_packet->stream_index == video_stream_index) {
response = avcodec_send_packet(av_codec_ctx, av_packet);
if (response < 0) {
perror("Failed to decode av_out_packet");
exit(EXIT_FAILURE);
}
response = avcodec_receive_frame(av_codec_ctx, av_frame);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
continue;
} else if (response < 0) {
perror("Failed to decode av_out_packet");
exit(EXIT_FAILURE);
}
if (av_frame->format != AV_PIX_FMT_YUV420P) {
printf("Generated file may not be a grayscale\n");
}
// send frame to encode into out format
avcodec_send_frame(av_out_codec_ctx, av_frame);
// receive encoded out data
avcodec_receive_packet(av_out_codec_ctx, av_out_packet);
// open output
JPEGFile = fopen(output_filename.c_str(), "wb");
if (JPEGFile == nullptr) {
perror("Could not open output file");
break;
}
// write to output
fwrite(av_out_packet->data, 1, av_out_packet->size, JPEGFile);
// close output
if (! fclose(JPEGFile)) {
JPEGFile = nullptr;
}
// unref out packet
av_packet_unref(av_out_packet);
av_packet_unref(av_packet);
// reset packet timeout
ith->reset(default_timeout);
}
}
if (JPEGFile != nullptr) {
fclose(JPEGFile);
JPEGFile = nullptr;
}
std::cout << "Exiting thread: " << threadIdx << std::endl;
should_stop_thread[threadIdx] = true;
av_packet_free(&av_out_packet);
avcodec_close(av_out_codec_ctx);
av_frame_free(&av_frame);
av_packet_free(&av_packet);
avformat_close_input(&av_format_ctx);
avformat_free_context(av_format_ctx);
avcodec_free_context(&av_codec_ctx);
}
Anyway, if there is a simpler solution that I am missing, I am open to it. The delay between the real stream and the displayed video is critical for me and cannot be more than one second.
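A side note on the error text: perror() prints the current errno, which the libav calls do not set, so messages like "Operation now in progress" are probably leftovers from unrelated socket calls rather than the real failure. A sketch of printing the actual libav error instead (same call site as in the code above):
int response = avcodec_send_packet(av_codec_ctx, av_packet);
if (response < 0) {
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(response, errbuf, sizeof(errbuf)); // fill errbuf with the libav error string
    fprintf(stderr, "Failed to send packet to decoder: %s\n", errbuf);
    exit(EXIT_FAILURE);
}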
I use FFmpeg to record video from an RTSP stream. My code gets the current time, creates a folder with the format year/month/day/hour/minute, and saves the video to that folder.
When a new minute arrives, I create a new folder based on the new minute and run the recording again into the new folder.
Basically it works, but the next video's start time continues from the end of the previous video. For example:
video1: 00:00 -> 00:55
video2: 00:56 -> ...
I want every video to start from 00:00. Can I do that?
Here is my code:
ffmpeg.h
class CtFfmpeg {
public:
CtFfmpeg();
~CtFfmpeg();
void init();
int getInput();
int getOutputName(const char *filename);
int release();
int ret;
AVFormatContext *ifmt_ctx, *ofmt_ctx;
AVStream *in_stream, *out_stream;
AVPacket pkt;
const char *in_filename;
char *out_filename;
private:
int setOutput(const char *outfilename);
AVOutputFormat *ofmt;
};
ffmpeg.cpp
#include "ctffmpeg.h"
CtFfmpeg::CtFfmpeg() {
in_filename = new char [1024];
out_filename = new char [1024];
}
CtFfmpeg::~CtFfmpeg() {
delete [] in_filename;
delete [] out_filename;
}
void CtFfmpeg::init() {
avcodec_register_all();
av_register_all();
avformat_network_init();
pkt = { 0 };
av_init_packet(&pkt);
ofmt = NULL;
ifmt_ctx = NULL;
ofmt_ctx = NULL;
return;
}
int CtFfmpeg::release() {
av_write_trailer(ofmt_ctx);
avcodec_close(out_stream->codec);
// avcodec_close(in_stream->codec);
// avformat_close_input(&ifmt_ctx);
/* close output */
if (!(ofmt->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
av_free_packet(&pkt);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred\n");
return 1;
}
}
int CtFfmpeg::getInput() {
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
release();
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
release();
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
}
int CtFfmpeg::setOutput(const char *outfilename) {
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outfilename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
release();
}
ofmt = ofmt_ctx->oformat;
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
in_stream = ifmt_ctx->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
release();
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
release();
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
} // for
av_dump_format(ofmt_ctx, 0, outfilename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, outfilename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", outfilename);
release();
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
release();
}
}
int CtFfmpeg::getOutputName(const char *filename){
sprintf(out_filename, "%s", filename);
setOutput(out_filename);
}
main.cpp
#include "ctfolder.h"
#include "ctffmpeg.h"
CtFfmpeg * ff;
int main(int argc, char** argv) {
if (argc < 2) {
printf("usage: %s <RTSP link> \n", argv[0]);
return 1;
}
ff = new CtFfmpeg();
ff->in_filename = argv[1]; //RTSP input link
ff->init();
ff->getInput();
string filename;
videoRecorder obj;
int start, now;
start = obj.get_current_min();
if(obj.create_folder(0755))
cout << "Cannot create folder, maybe it already exists" << endl;
else
cout << "Create folder succesfully" << endl;
int skip = 0;
while(1){
filename = obj.update_filename();
ff->getOutputName(filename.c_str());
while((now = obj.get_current_min()) == start) {
ff->ret = av_read_frame(ff->ifmt_ctx, &(ff->pkt));
skip++;
if(skip==1)
continue;
if(skip>2)
skip=2;
if (ff->ret < 0)
continue;
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;
ff->ret = av_interleaved_write_frame(ff->ofmt_ctx, &(ff->pkt));
if (ff->ret < 0) {
fprintf(stderr, "Error muxing packet\n");
continue;
}
av_free_packet(&(ff->pkt));
}
ff->release();
cout << "New minute!" << endl;
if(obj.create_folder(0755))
cout << "Cannot create folder, something's wrong" << endl;
else
cout << "Create folder succesfully" << endl;
start = now;
}
return 0;
}
You need to shift your recording packet's pts to 0.
while(<some condition>)
{
//...
int64_t pts_offset = AV_NOPTS_VALUE ;
while((now = obj.get_current_min()) == start)
{
//...
ff.pkt.pts = ...
//...
if( pts_offset == AV_NOPTS_VALUE )
{
pts_offset = ff.pkt.pts ;
}
ff.pkt.pts -= pts_offset ;
// ...
}
}
I tried to build your code and add Alexander Chernin's suggestion to it, but I ran into a muxer error!
When you decrease the recording packet's pts, its value goes lower than the recording packet's dts. In avcodec.h, above the declaration of pts, I found this comment:
pts MUST be larger or equal to dts as presentation cannot happen before decompression.
I solved this error by decreasing the recording packet's dts as well:
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
if (pts_offset == AV_NOPTS_VALUE) {
pts_offset = ff->pkt.pts;
}
ff->pkt.pts -= pts_offset;
ff->pkt.dts -= pts_offset;
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base,ff->out_stream->time_base,(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration,ff->in_stream->time_base,ff->out_stream->time_base);
ff->pkt.pos = -1;
I have a streaming device that streams MPEG-TS video and audio. I am trying to capture those streams and save them to multiple .ts files using FFmpeg's HLS support.
I have been successful in capturing and saving the streams to a single .ts file. It seems that when I make the output file a .m3u8, ffmpeg automatically chooses the HLS muxer, but then I get a floating point exception.
Here is my simple code...
static int ipcam_streaming_main_configure_input_parameters(const char *p_ifilename, AVFormatContext **ppx_ifmt_ctx)
{
AVStream *px_istream = NULL;
AVCodecContext *px_icodec_ctx = NULL;
int ret = -1;
unsigned int i = 0;
enum AVCodecID input_codec_id = AV_CODEC_ID_NONE;
AVCodec *p_decoder = NULL;
if (avformat_open_input(ppx_ifmt_ctx, p_ifilename, NULL, NULL) < 0)
{
printf("%s(): avformat_open_input() failed\n", __FUNCTION__);
}
else if (avformat_find_stream_info(*ppx_ifmt_ctx, NULL) < 0)
{
printf("%s(): avformat_find_stream_info() failed\n", __FUNCTION__);
}
else
{
/* find the input streams to be remuxed */
for (i = 0; i < (*ppx_ifmt_ctx)->nb_streams; i++)
{
/* get the stream, codec context for the stream */
px_istream = (*ppx_ifmt_ctx)->streams[i];
px_icodec_ctx = px_istream->codec;
if ((AVMEDIA_TYPE_VIDEO == px_icodec_ctx->codec_type)
|| (AVMEDIA_TYPE_AUDIO == px_icodec_ctx->codec_type))
{
/* get the codec_id for the audio/video stream */
input_codec_id = px_icodec_ctx->codec_id;
/* get the decoder for the input codec id */
p_decoder = avcodec_find_decoder(px_icodec_ctx->codec_id);
/* Open decoder for the input codec audio/video */
ret = avcodec_open2(px_icodec_ctx,
p_decoder,
NULL);
if (ret < 0)
{
printf("%s(): avcodec_open2() failed\n", __FUNCTION__);
}
else
{
printf("Input stream type <%d> with codec_id <%d> found and decoder opened\n", px_icodec_ctx->codec_type, input_codec_id);
}
}
}
}
/* dump the data into stdout */
av_dump_format(*ppx_ifmt_ctx, 0, p_ifilename, 0);
return ret;
}
static int ipcam_streaming_main_configure_output_parameters(const char *p_ofilename,
AVFormatContext *px_ifmt_ctx,
AVFormatContext **ppx_ofmt_ctx)
{
AVStream *px_ostream = NULL;
AVStream *px_istream = NULL;
AVCodecContext *px_dec_ctx = NULL;
AVCodecContext *px_enc_ctx = NULL;
int ret = -1;
unsigned int i = 0;
if ((NULL == p_ofilename) || (NULL == px_ifmt_ctx) || (NULL == ppx_ofmt_ctx))
{
printf("%s(): NULL arg(s) <%p, %p, %p>", __FUNCTION__, p_ofilename, px_ifmt_ctx, ppx_ofmt_ctx);
return -1;
}
/* remove the output file if already exists */
remove(p_ofilename);
/* allocate the output format context */
if (avformat_alloc_output_context2(ppx_ofmt_ctx, NULL, NULL, p_ofilename) < 0)
{
printf("%s(): avformat_alloc_output_context2() failed\n", __FUNCTION__);
}
else
{
for (i = 0; i < px_ifmt_ctx->nb_streams; i++)
{
if ((AVMEDIA_TYPE_VIDEO == px_ifmt_ctx->streams[i]->codec->codec_type)
|| (AVMEDIA_TYPE_AUDIO == px_ifmt_ctx->streams[i]->codec->codec_type))
{
printf("Stream <%d> is type <%d>: Adding to output stream\n", i, px_ifmt_ctx->streams[i]->codec->codec_type);
/* create a new output stream */
px_ostream = avformat_new_stream(*ppx_ofmt_ctx, NULL);
if (NULL == px_ostream)
{
printf("%s(): avformat_new_stream() failed\n", __FUNCTION__);
}
else
{
px_istream = px_ifmt_ctx->streams[i];
px_dec_ctx = px_istream->codec;
px_enc_ctx = px_ostream->codec;
/* Since, we do not need to encode the video stream, it is just remuxing
just copying the input codec context to output is sufficient */
ret = avcodec_copy_context((*ppx_ofmt_ctx)->streams[i]->codec,
px_ifmt_ctx->streams[i]->codec);
if ((*ppx_ofmt_ctx)->oformat->flags & AVFMT_GLOBALHEADER)
{
px_enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
}
}
else
{
printf("Stream <%d> is Unknown: Ignore it \n", i);
}
}
/* dump the output media file into stdout */
av_dump_format(*ppx_ofmt_ctx, 0, p_ofilename, 1);
if (0 == ((*ppx_ofmt_ctx)->oformat->flags & AVFMT_NOFILE))
{
/* open the output media file so that we can write the data into it */
ret = avio_open(&(*ppx_ofmt_ctx)->pb, p_ofilename, AVIO_FLAG_WRITE);
if (ret < 0)
{
printf("%s(): avio_open() failed\n", __FUNCTION__);
}
else
{
/* init muxer, write output file header */
ret = avformat_write_header((*ppx_ofmt_ctx), NULL);
if (ret < 0)
{
printf("%s(): avformat_write_header() failed\n", __FUNCTION__);
}
}
}
}
return ret;
}
int main(int argnum, char **argv)
{
AVFormatContext *px_ifmt_ctx = NULL;
AVFormatContext *px_ofmt_ctx = NULL;
AVPacket packet = {0};
enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
unsigned int stream_index = -1;
unsigned int i = 0;
int ret = -1;
if (argnum != 3)
{
printf("Please enough number of parameters\n");
return -1;
}
/* register all the services requred */
av_register_all();
avformat_network_init();
if (0 != ipcam_streaming_main_configure_input_parameters(argv[1],
&px_ifmt_ctx))
{
printf("%s(): ipcam_streaming_main_configure_iput_parameters() failed\n", __FUNCTION__);
}
else if (0 != ipcam_streaming_main_configure_output_parameters(argv[2],
px_ifmt_ctx,
&px_ofmt_ctx))
{
printf("%s(): ipcam_streaming_main_configure_output_parameters() failed\n", __FUNCTION__);
}
else
{
printf("Input and output configuration done successfully: Now reading packets\n");
while (true)
{
if ((ret = av_read_frame(px_ifmt_ctx, &packet)) < 0)
{
printf("av_read_frame() failed with error <%d>: Exit\n", ret);
break;
}
/* get the stream index and codec type of the packet read */
stream_index = packet.stream_index;
type = px_ifmt_ctx->streams[stream_index]->codec->codec_type;
/* remux only if the type is video, otherwise ignore it */
if ((AVMEDIA_TYPE_VIDEO == type)
|| (AVMEDIA_TYPE_AUDIO == type))
{
printf("Remuxing the stream type <%d>, frame with stream index <%d>\n", type, stream_index);
/* remux this frame without reencoding */
packet.dts = av_rescale_q_rnd(packet.dts,
px_ifmt_ctx->streams[stream_index]->time_base,
px_ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd(packet.pts,
px_ifmt_ctx->streams[stream_index]->time_base,
px_ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
/* write the frame into the output media file */
ret = av_write_frame(px_ofmt_ctx, &packet);
if (ret < 0)
{
printf("Ignoring video packet stream index <%d>\n", packet.stream_index);
}
/* free the packet for next use */
av_free_packet(&packet);
}
else
{
printf("Ignoring stream index <%d>, type <%d>\n", packet.stream_index, type);
}
}
}
/* write the trailer */
av_write_trailer(px_ofmt_ctx);
av_free_packet(&packet);
for (i = 0; i < px_ifmt_ctx->nb_streams; i++)
{
/* close the input codec that has been opened */
avcodec_close(px_ifmt_ctx->streams[i]->codec);
if ((NULL != px_ofmt_ctx) && (px_ofmt_ctx->nb_streams > i) &&
(NULL != px_ofmt_ctx->streams[i]) && ( NULL != px_ofmt_ctx->streams[i]->codec))
{
/* close the output code */
avcodec_close(px_ofmt_ctx->streams[i]->codec);
}
}
/* close the input */
avformat_close_input(&px_ifmt_ctx);
if ((NULL != px_ofmt_ctx) && (0 == (px_ofmt_ctx->oformat->flags & AVFMT_NOFILE)))
{
/* close the output context */
avio_close(px_ofmt_ctx->pb);
}
/* free the output context */
avformat_free_context(px_ofmt_ctx);
return ret;
}
So, if I pass the output filename as a .m3u8 file, it gives a floating point exception.
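My best guess so far is that the crash comes from the output streams never getting a valid time base before avformat_write_header, since the HLS muxer cuts segments based on packet timestamps; I also plan to pass the HLS segmenting options explicitly. A sketch of what I mean (the option values are illustrative, not tested):
/* Inside the stream-creation loop, before the header is written: */
px_ostream->time_base = px_istream->time_base; // give the muxer a valid time base

/* And pass explicit HLS segmenting options when writing the header: */
AVDictionary *opts = NULL;
av_dict_set(&opts, "hls_time", "10", 0);     // target segment duration in seconds
av_dict_set(&opts, "hls_list_size", "0", 0); // keep every segment in the playlist
ret = avformat_write_header((*ppx_ofmt_ctx), &opts);
av_dict_free(&opts);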