My ultimate goal is to split multi-channel WAV files into single mono ones; after a few days of experiments my plan is this sequence:
Decode audio file into a frame.
Convert the interleaved frame into a planar one (in order to separate the data buffer into multiple per-channel buffers).
Grab the planar frame buffers and encode each of them into a new file.
So far I'm stuck trying to convert a WAV file from interleaved to planar, and rewrite the WAV file.
Edit:
I've turned on Guard Malloc and apparently the error is within the convert function.
Here's the code:
AVCodecContext* initializeAndOpenCodecContext(AVFormatContext* formatContext, AVStream* stream){
// grab our stream, most audio files only have one anyway
const AVCodec* decoder = avcodec_find_decoder(stream->codecpar->codec_id);
if (!decoder){
std::cout << "no decoder, can't go ahead!\n";
return nullptr;
}
AVCodecContext* codecContext = avcodec_alloc_context3(decoder);
avcodec_parameters_to_context(codecContext, stream->codecpar);
int err = avcodec_open2(codecContext, decoder, nullptr);
if (err < 0){
std::cout << "couldn't open codex!\n";
}
return codecContext;
}
void initialiseResampler(SwrContext* resampler, AVFrame* inputFrame, AVFrame* outputFrame){
av_opt_set_chlayout(resampler, "in_channel_layout", &inputFrame->ch_layout, 0);
av_opt_set_chlayout(resampler, "out_channel_layout", &outputFrame->ch_layout, 0);
av_opt_set_int(resampler, "in_sample_fmt", inputFrame->format, 0);
av_opt_set_int(resampler, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
av_opt_set_int(resampler, "in_sample_rate", inputFrame->sample_rate, 0);
av_opt_set_int(resampler, "out_sample_rate", outputFrame->sample_rate, 0);
}
AVFrame* initialisePlanarFrame(AVFrame* frameToInit, AVFrame* inputFrame){
//AVFrame *planar_frame = av_frame_alloc();
frameToInit->nb_samples = inputFrame->nb_samples;
frameToInit->ch_layout = inputFrame->ch_layout;
frameToInit->format = AV_SAMPLE_FMT_FLTP;
frameToInit->sample_rate = inputFrame->sample_rate;
return frameToInit;
}
int main() {
AVCodecContext *codingContext= NULL;
const AVCodec *codec;
codec = avcodec_find_encoder(AV_CODEC_ID_PCM_F32LE);
codingContext = avcodec_alloc_context3(codec);
codingContext->bit_rate = 16000;
codingContext->sample_fmt = AV_SAMPLE_FMT_FLT;
codingContext->sample_rate = 48000;
codingContext->ch_layout.nb_channels = 2;
codingContext->ch_layout.order = (AVChannelOrder)0;
uint8_t **buffer_ = NULL;
AVFrame* planar_frame = NULL;
// open input
AVFormatContext* formatContext = nullptr;
int err = avformat_open_input(&formatContext, "/Users/tonytorm/Desktop/drum kits/DECAP - Drums That Knock Vol. 9/Kicks/Brash Full Metal Kick.wav", nullptr, nullptr);
if (err < 0){
fprintf(stderr, "Unable to open file!\n");
return -1;
}
// find audio stream
err = avformat_find_stream_info(formatContext, nullptr);
if (err < 0){
fprintf(stderr, "Unable to retrieve stream info!\n");
return -1;
}
int index = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
if (index < 0){
std::cout<< "coudn't find audio stream in this file" << '\n';
}
AVStream* stream = formatContext->streams[index];
auto fileName = "/Users/tonytorm/Desktop/newFile.wav";
FILE* newFile = fopen(fileName, "w+");
// find right codec and open it
if (auto openCodecContext = initializeAndOpenCodecContext(formatContext, stream)){
AVPacket* packet = av_packet_alloc();
AVFrame* frame = av_frame_alloc();
AVFrame* planar_frame = av_frame_alloc();
SwrContext *avr = swr_alloc(); //audio resampling context
AVChannelLayout monoChannelLayout{(AVChannelOrder)0};
monoChannelLayout.nb_channels = 2;
while (!av_read_frame(formatContext, packet)){
if (packet->stream_index != stream->index) continue; // we only care about audio
int ret = avcodec_send_packet(openCodecContext, packet);
if ( ret < 0) {
if (ret != AVERROR(EAGAIN)){ // if error is actual error not EAGAIN
std::cout << "can't do shit\n";
return -1;
}
}
while (avcodec_receive_frame(openCodecContext, frame) == 0){
initialisePlanarFrame(planar_frame, frame);
int buffer_size_in = av_samples_get_buffer_size(nullptr,
frame->ch_layout.nb_channels,
frame->nb_samples,
(AVSampleFormat)frame->format,
0);
int buffer_size_out = buffer_size_in/frame->ch_layout.nb_channels;
//planar_frame->linesize[0] = buffer_size_out;
int ret = av_samples_alloc(planar_frame->data,
NULL,
planar_frame->ch_layout.nb_channels,
planar_frame->nb_samples,
AV_SAMPLE_FMT_FLTP,
0);
initialiseResampler(avr, frame, planar_frame);
if (swr_init(avr) < 0) {
fprintf(stderr, "Failed to initialize the resampling context\n");
}
if (ret < 0){
char error_message[AV_ERROR_MAX_STRING_SIZE];
av_strerror(ret, error_message, AV_ERROR_MAX_STRING_SIZE);
fprintf(stderr, "Error allocating sample buffer: %s\n", error_message);
return -1;
}
int samples_converted = swr_convert(avr,
planar_frame->data,
buffer_size_out,
(const uint8_t **)frame->data,
buffer_size_in);
if (samples_converted < 0) {
// handle error
std::cout << "error in conversion\n";
return -1;
}
if (avcodec_open2(codingContext, codec, NULL) < 0) {
std::cout << "can't encode!\n";
return -1;
}
AVPacket* nu_packet = av_packet_alloc();
int copy;
while ((copy = avcodec_send_frame(codingContext, planar_frame)) != 0){
if (copy == AVERROR(EAGAIN) || copy == AVERROR_EOF){
std::cout << "can't encode file\n";
return -1;
}
if (avcodec_receive_packet(codingContext, nu_packet) >=0){
fwrite(nu_packet->data, 4, nu_packet->size, newFile);
//av_write_frame(avc, nu_packet);
}
}
av_freep(planar_frame->data);
av_frame_unref(frame);
av_frame_unref(planar_frame);
}
// av_packet_free(&packet);
// av_packet_free(&nu_packet);
}
swr_free(&avr);
avcodec_free_context(&codingContext);
}
fclose(newFile);
}
I know I should write a header to the new WAV file, but for now I'm just trying to write the raw audio data. I always get the same error, but in different parts of the code (randomly); sometimes the code even runs to completion, writing the raw audio data but filling it with some rubbish as well. Sometimes I end up with a data file three times the size of the original one, sometimes with a slightly smaller file (I guess the raw audio without the headers); the results are basically random.
Here are some of the functions that trigger the error:
av_samples_alloc()   // (this is the most common one)
swr_convert()
av_freep()
the error is:
main(64155,0x101b5d5c0) malloc: Incorrect checksum for freed object 0x106802600: probably modified after being freed.
Corrupt value: 0x0
main(64155,0x101b5d5c0) malloc: *** set a breakpoint in malloc_error_break to debug
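For reference, swr_convert() and av_samples_alloc() count in samples per channel, not bytes, so passing buffer_size_in/buffer_size_out (which are byte sizes) makes swr_convert() write far past the allocated planes, which would explain the heap corruption. A minimal sketch of the conversion call, keeping the variable names from the code above:
// swr_convert() takes counts/capacities in samples per channel, not bytes
int samples_converted = swr_convert(avr,
                                    planar_frame->data,        // output planes
                                    planar_frame->nb_samples,  // output capacity, in samples
                                    (const uint8_t **)frame->data,
                                    frame->nb_samples);        // input, in samples
Similarly, fwrite(nu_packet->data, 4, nu_packet->size, newFile) writes nu_packet->size elements of 4 bytes each; an element size of 1 is what is wanted there.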
Related
After experimenting with the examples in the FFmpeg documentation, I was finally able to create a short program that extracts every nth frame from a video. However, the output files that it produces are huge, at over 15 MB per image. How can I change this to produce lower-quality images?
The result I am trying to get is done easily on the command line with:
ffmpeg -i [input video] -vf "select=not(mod(n\,10))" -fps_mode vfr img_%03d.jpg
For a video with about 500 frames, this creates 50 images that are only about 800 KB each; how would I be able to mimic this in my program?
My code consists of opening the input file, decoding the packets, then saving the frames:
#include <cstdio>
#include <cstdlib>
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
static int video_stream_index = -1;
// OPEN THE INPUT FILE
static int open_input_file(const char *filename) {
// INIT VARS AND FFMPEG OBJECTS
int ret;
const AVCodec *dec;
// OPEN INPUT FILE
if((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
printf("ERROR: failed to open input file\n");
return ret;
}
// FIND STREAM INFO BASED ON INPUT FILE
if((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
printf("ERROR: failed to find stream information\n");
return ret;
}
// FIND THE BEST VIDEO STREAM FOR THE INPUT FILE
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if(ret < 0) {
printf("ERROR: failed to find a video stream in the input file\n");
return ret;
}
video_stream_index = ret;
// ALLOCATE THE DECODING CONTEXT FOR THE INPUT FILE
dec_ctx = avcodec_alloc_context3(dec);
if(!dec_ctx) {
printf("ERROR: failed to allocate decoding context\n");
// CAN NOT ALLOCATE MEMORY ERROR
return AVERROR(ENOMEM);
}
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
// INIT THE VIDEO DECODER
if((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
printf("ERROR: failed to open video decoder\n");
return ret;
}
return 0;
}
// SAVE THE FILE
static void save(unsigned char *buf, int wrap, int x_size, int y_size, char *file_name) {
// INIT THE EMPTY FILE
FILE *file;
// OPEN AND WRITE THE IMAGE FILE
file = fopen(file_name, "wb");
fprintf(file, "P6\n%d %d\n%d\n", x_size, y_size, 255);
for(int i = 0; i < y_size; i++) {
fwrite(buf + i * wrap, 1, x_size * 3, file);
}
fclose(file);
}
// DECODE FRAME AND CONVERT IT TO AN RGB IMAGE
static void decode(AVCodecContext *cxt, AVFrame *frame, AVPacket *pkt,
const char *out_file_name, const char *file_ext, int mod=1) {
// INIT A BLANK CHAR TO HOLD THE FILE NAME AND AN EMPTY INT TO HOLD FUNCTION RETURN VALUES
char buf[1024];
int ret;
// SEND PACKET TO DECODER
ret = avcodec_send_packet(cxt, pkt);
if(ret < 0) {
printf("ERROR: error sending packet for decoding\n");
exit(1);
}
// CREATE A SCALAR CONTEXT FOR CONVERSION
SwsContext *sws_ctx = sws_getContext(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, dec_ctx->width,
dec_ctx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
// CREATE A NEW RGB FRAME FOR CONVERSION
AVFrame* rgb_frame = av_frame_alloc();
rgb_frame->format = AV_PIX_FMT_RGB24;
rgb_frame->width = dec_ctx->width;
rgb_frame->height = dec_ctx->height;
// ALLOCATE A NEW BUFFER FOR THE RGB CONVERSION FRAME
av_frame_get_buffer(rgb_frame, 0);
// WHILE RETURN COMES BACK OKAY (FUNCTION RETURNS >= 0)...
while(ret >= 0) {
// GET FRAME BACK FROM DECODER
ret = avcodec_receive_frame(cxt, frame);
// IF "RESOURCE TEMP NOT AVAILABLE" OR "END OF FILE" ERROR...
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
return;
} else if(ret < 0) {
printf("ERROR: error during decoding\n");
exit(1);
}
// IF FRAME NUMBER IS A MULTIPLE OF MOD...
if(cxt->frame_number % mod == 0){
// OUTPUT WHICH FRAME IS BEING SAVED
printf("saving frame %03d\n", cxt->frame_number);
// REMOVES TEMPORARY BUFFERED DATA
fflush(stdout);
// SCALE (CONVERT) THE OLD FRAME TO THE NEW RGB FRAME
sws_scale(sws_ctx, frame->data, frame->linesize, 0, frame->height,
rgb_frame->data, rgb_frame->linesize);
// SET "BUF" TO THE OUTPUT FILE PATH (SAVES TO "out_file_name_###.file_ext")
snprintf(buf, sizeof(buf), "%s_%03d.%s", out_file_name, cxt->frame_number, file_ext);
// SAVE THE FRAME
save(rgb_frame->data[0], rgb_frame->linesize[0], rgb_frame->width, rgb_frame->height, buf);
}
}
}
int main() {
// SIMULATE COMMAND LINE ARGUMENTS
char argv0[] = "test";
char argv1[] = "/User/Desktop/frames/test_video.mov";
char *argv[] = {argv0, argv1, nullptr};
// INIT VARS AND FFMPEG OBJECTS
int ret;
AVPacket *packet;
AVFrame *frame;
// ALLOCATE FRAME AND PACKET
frame = av_frame_alloc();
packet = av_packet_alloc();
if (!frame || !packet) {
fprintf(stderr, "Could not allocate frame or packet\n");
exit(1);
}
// IF FILE DOESN'T OPEN, GO TO THE END
if((ret = open_input_file(argv[1])) < 0) {
goto end;
}
// READ ALL THE PACKETS - simple
while(av_read_frame(fmt_ctx, packet) >= 0) {
// IF PACKET INDEX MATCHES VIDEO INDEX...
if (packet->stream_index == video_stream_index) {
// SEND PACKET TO THE DECODER and SAVE
std::string name = "/User/Desktop/frames/img";
std::string ext = "bmp";
decode(dec_ctx, frame, packet, name.c_str(), ext.c_str(), 5);
}
// UNREFERENCE THE PACKET
av_packet_unref(packet);
}
// END MARKER
end:
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_packet_free(&packet);
// FINAL ERROR CATCH
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
exit(1);
}
exit(0);
}
I am not sure how to go about producing images that are much smaller in size like the ones produced on the command line. I have a feeling that this is possible somehow during the conversion to RGB or the saving of the file but I can't seem to figure out how.
Also, is there any way that I could go about this much more efficiently? On the command line, this finishes very quickly (no more than a second or two for a 9 sec. movie at ~60 fps).
The command-line version compresses each frame into a JPEG file, hence the size is very small. On the other hand, your code writes the RGB values directly into a file (regardless of the file extension). The size of each image is then Height x Width x 3 bytes, which is very big.
Solution: Adjust your save function to also compress the image.
Code example from Github - save_frame_as_jpeg.c:
int save_frame_as_jpeg(AVCodecContext *pCodecCtx, AVFrame *pFrame, int FrameNo)
{
AVCodec *jpegCodec = avcodec_find_encoder(AV_CODEC_ID_JPEG2000);
if (!jpegCodec) { return -1; }
AVCodecContext *jpegContext = avcodec_alloc_context3(jpegCodec);
if (!jpegContext) { return -1; }
jpegContext->pix_fmt = pCodecCtx->pix_fmt;
jpegContext->height = pFrame->height;
jpegContext->width = pFrame->width;
if (avcodec_open2(jpegContext, jpegCodec, NULL) < 0)
{ return -1; }
FILE *JPEGFile;
char JPEGFName[256];
AVPacket packet = {.data = NULL, .size = 0};
av_init_packet(&packet);
int gotFrame;
if (avcodec_encode_video2(jpegContext, &packet, pFrame, &gotFrame) < 0)
{ return -1; }
sprintf(JPEGFName, "dvr-%06d.jpg", FrameNo);
JPEGFile = fopen(JPEGFName, "wb");
fwrite(packet.data, 1, packet.size, JPEGFile);
fclose(JPEGFile);
av_free_packet(&packet);
avcodec_close(jpegContext);
return 0;
}
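Note that av_init_packet(), avcodec_encode_video2() and av_free_packet() were removed in FFmpeg 5.x, and the JPEG2000 encoder does not really match the .jpg extension. A rough equivalent using the current send/receive API and the MJPEG encoder (a sketch only; the time_base value is an arbitrary assumption, and the frame may first need converting to AV_PIX_FMT_YUVJ420P):
int save_frame_as_jpeg(AVFrame *pFrame, int FrameNo)
{
    const AVCodec *jpegCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    if (!jpegCodec) { return -1; }
    AVCodecContext *jpegContext = avcodec_alloc_context3(jpegCodec);
    if (!jpegContext) { return -1; }
    jpegContext->pix_fmt = AV_PIX_FMT_YUVJ420P; // MJPEG wants a JPEG-range YUV format
    jpegContext->height = pFrame->height;
    jpegContext->width = pFrame->width;
    jpegContext->time_base.num = 1;  // required by the encoder; the value is arbitrary here
    jpegContext->time_base.den = 25;
    if (avcodec_open2(jpegContext, jpegCodec, NULL) < 0) { return -1; }
    AVPacket *packet = av_packet_alloc();
    int ret = avcodec_send_frame(jpegContext, pFrame);
    if (ret >= 0)
        ret = avcodec_receive_packet(jpegContext, packet);
    if (ret >= 0) {
        char JPEGFName[256];
        snprintf(JPEGFName, sizeof(JPEGFName), "dvr-%06d.jpg", FrameNo);
        FILE *JPEGFile = fopen(JPEGFName, "wb");
        if (JPEGFile) {
            fwrite(packet->data, 1, packet->size, JPEGFile);
            fclose(JPEGFile);
        }
    }
    av_packet_free(&packet);
    avcodec_free_context(&jpegContext);
    return ret >= 0 ? 0 : -1;
}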
I'm trying to decode an HEVC file in C++ using FFmpeg. I used the HEVC decoder and tried to save the frames in PPM format (almost the whole source code comes from the FFmpeg example decode_video.c, https://ffmpeg.org/doxygen/trunk/decode_video_8c-example.html; what's new is the conversion from YUV to RGB). My HEVC file has 677 frames, which I checked with ffprobe in a command window, but I only get 676 frames with my project. I have also checked with other HEVC files, and the result is the same: I always get one frame less.
I also tried another FFmpeg example, demuxing_decoding.c (https://ffmpeg.org/doxygen/trunk/demuxing_decoding_8c-example.html); the result is the same, one frame less...
That seems to happen only with H.265 and H.264 files; is it a bug in FFmpeg?
Can anybody help me? I post my code here. Sorry, I don't know how to attach my project and test files. Thanks a lot!
Best regards,
Ivan
#include <iostream>
extern "C"
{
#include "../Headers/libavcodec/avcodec.h"
#include "../Headers/libavformat/avformat.h"
#include "../Headers/libswscale/swscale.h"
}
#define INBUF_SIZE 4096
//Save RGB image as PPM file format
static void ppm_save(char* filename, AVFrame* frame)
{
FILE* file;
int i;
fopen_s(&file, filename, "wb");
fprintf(file, "P6\n%d %d\n%d\n", frame->width, frame->height, 255);
for (i = 0; i < frame->height; i++)
fwrite(frame->data[0] + i * frame->linesize[0], 1, frame->width * 3, file);
fclose(file);
}
void decode(AVCodecContext* dec_ctx, AVFrame* frame, AVPacket* pkt, const char* outfilePrefix)
{
char buf[1024];
int ret;
ret = avcodec_send_packet(dec_ctx, pkt);
if (ret < 0) {
fprintf(stderr, "Error sending a packet for decoding\n");
exit(1);
}
int sts;
////////////////////////////////////////////////////////////////////////////
//Create SWS Context for converting from decode pixel format (like YUV420) to RGB
struct SwsContext* sws_ctx = NULL;
sws_ctx = sws_getContext(dec_ctx->width,
dec_ctx->height,
dec_ctx->pix_fmt,
dec_ctx->width,
dec_ctx->height,
AV_PIX_FMT_RGB24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
if (sws_ctx == nullptr)
{
return; //Error!
}
//Allocate frame for storing image converted to RGB.
AVFrame* pRGBFrame = av_frame_alloc();
pRGBFrame->format = AV_PIX_FMT_RGB24;
pRGBFrame->width = dec_ctx->width;
pRGBFrame->height = dec_ctx->height;
sts = av_frame_get_buffer(pRGBFrame, 0);
if (sts < 0)
{
goto free;
//return; //Error!
}
while (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
goto free;
//return;
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
exit(1);
}
printf("saving frame %3d\n", dec_ctx->frame_number);//
fflush(stdout);
//////////////////////////////////////////////////////////////////////////
//Convert from input format (e.g YUV420) to RGB and save to PPM:
sts = sws_scale(sws_ctx, //struct SwsContext* c,
frame->data, //const uint8_t* const srcSlice[],
frame->linesize, //const int srcStride[],
0, //int srcSliceY,
frame->height, //int srcSliceH,
pRGBFrame->data, //uint8_t* const dst[],
pRGBFrame->linesize); //const int dstStride[]);
snprintf(buf, sizeof(buf), "%s-%d.ppm", outfilePrefix, dec_ctx->frame_number);
ppm_save(buf, pRGBFrame);
}
free:
//Free
////////////////////////////////////////////////////////////////////////////
sws_freeContext(sws_ctx);
av_frame_free(&pRGBFrame);
}
int main(int argc, char* argv[])
{
const char* filename, * outfilePrefix, * seqfilename;
const AVCodec* codec;
AVCodecParserContext* parser;
AVCodecContext* codecContext = NULL;
FILE* file;
AVFrame* frame;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
uint8_t* data;
size_t data_size;
int ret;
AVPacket* pkt;
#ifdef _DEBUG
filename = "D:\\TestFiles\\sample_1280x720.hevc";
outfilePrefix = "D:\\TestFiles\\sample_1280x720_output\\output";
#else
if (argc <= 2) {
fprintf(stderr, "Usage: %s <input file> <output file>\n"
"And check your input file is encoded by mpeg1video please.\n", argv[0]);
exit(0);
}
filename = argv[1];
outfilePrefix = argv[2];
#endif
pkt = av_packet_alloc();
if (!pkt)
exit(1);
/* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
/* find the HEVC video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
parser = av_parser_init(codec->id);
if (!parser) {
fprintf(stderr, "parser not found\n");
exit(1);
}
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* For some codecs, such as msmpeg4 and mpeg4, width and height
MUST be initialized there because this information is not
available in the bitstream. */
/* open it */
if (avcodec_open2(codecContext, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
fopen_s(&file, filename, "rb");
if (!file) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
while (!feof(file)) {
/* read raw data from the input file */
data_size = fread(inbuf, 1, INBUF_SIZE, file);
if (!data_size)
break;
/* use the parser to split the data into frames */
data = inbuf;
while (data_size > 0)
{
ret = av_parser_parse2(parser, codecContext, &pkt->data, &pkt->size,
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
fprintf(stderr, "Error while parsing\n");
exit(1);
}
data += ret;
data_size -= ret;
if (pkt->size)
decode(codecContext, frame, pkt, outfilePrefix);
}
}
/* flush the decoder */
decode(codecContext, frame, NULL, outfilePrefix);
fclose(file);
av_parser_close(parser);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_packet_free(&pkt);
}
The problem is that you're not calling av_parser_parse2() with data_size=0 to signal EOF. See the API docs:
buf_size: input length, to signal EOF, this should be 0 (so that the last frame can be output).
Without that call, one frame will be cached in the parser, and that's the one missing in your output (a concrete sketch follows the demux.c excerpt below).
[edit]
To be clear, I acknowledge that you copied the example code in the API docs correctly:
[..]
while(in_len){
len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
in_data, in_len,
pts, dts, pos);
[..]
However, that code is unfortunately incomplete. If you look at the relevant usage of that code in demux.c, you'll see that an explicit flush is required:
[..]
1134 while (size > 0 || (flush && got_output)) {
1135 int64_t next_pts = pkt->pts;
1136 int64_t next_dts = pkt->dts;
1137 int len;
1138
1139 len = av_parser_parse2(sti->parser, sti->avctx,
1140 &out_pkt->data, &out_pkt->size, data, size,
1141 pkt->pts, pkt->dts, pkt->pos);
[..]
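Concretely, for the code in the question that means one extra parse call after the feof() loop ends, before the existing decoder flush (a sketch using the names from your main()):
/* signal EOF to the parser so it emits the last buffered frame */
ret = av_parser_parse2(parser, codecContext, &pkt->data, &pkt->size,
                       inbuf, 0, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (pkt->size)
    decode(codecContext, frame, pkt, outfilePrefix);
/* then flush the decoder as before */
decode(codecContext, frame, NULL, outfilePrefix);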
I am facing an issue opening a raw H.264 stream of 8 MP resolution over a TCP server from Android in a Qt application. To open the stream in ffplay, I give the following command in a terminal and it is able to play it:
ffplay -f h264 -codec:v h264 -probesize 32M <tcp://ipaddress:port>
But when I try to open the stream in the Qt application, avformat_open_input() gives the error "Invalid data found while processing input".
Below is the code I am using in Qt Application:
av_register_all();
avcodec_register_all();
avformat_network_init();
AVFormatContext *refrenceFormatCtx = NULL;
SwsContext *img_convert_ctx;
AVIOContext *avio_ctx = NULL;
int video_stream_index = 0;
AVCodecContext* codec_ctx = NULL;
AVSampleFormat *fmt = NULL;
char errorsdef[100];
AVDictionary *options = NULL;
av_dict_set(&options, "video_size","3264x2448",0);
av_dict_set(&options,"pixel_format","yuv420p",0);
av_dict_set(&options, "f", "h264", 0);
av_dict_set(&options, "codec:v", "h264", 0);
av_dict_set(&options, "codec:a", "aac", 0);
av_dict_set(&options, "probesize", "32M", 0);
int err = avformat_open_input(&refrenceFormatCtx,"tcp://192.168.42.129:2226", NULL, &options);
av_strerror(err,errorsdef,100);
qDebug() << "OPening Stream error: "<< err << " "<< errorsdef;
if(err<0)
abort();
av_dict_free(&options);
Is this the correct way to set the options for avformat_open_input()? Are the parameters I set correct?
I found the answer to my question above. The code that fixes the issue and gets RGB frames from the raw H.264 stream at 8 MP resolution is as follows:
avcodec_register_all();
av_register_all();
avformat_network_init();
AVDictionary *options = NULL;
AVFormatContext *refrenceFormatCtx = NULL;
AVInputFormat *fmts = av_find_input_format("h264");
char errorsdef[100];
AVCodecContext* codec_ctx = NULL;
int video_stream_index = 0;
SwsContext *img_convert_ctx = NULL;
AVFrame* picture_yuv = NULL;
AVFrame* picture_rgb = NULL;
uint8_t* picture_buffer_rgb;
uint8_t *rgb_image_data;
int sizeofrgbpicture = 0;
int initialize_rgb_requirements=1;
picture_yuv = av_frame_alloc();
av_dict_set(&options, "flags", "bicubic", 0);
av_opt_set(refrenceFormatCtx,"f","h264", AV_OPT_SEARCH_CHILDREN);
av_opt_set(refrenceFormatCtx,"codec:v","h264",AV_OPT_SEARCH_CHILDREN);
av_opt_set(refrenceFormatCtx,"probesize","32M", AV_OPT_SEARCH_CHILDREN);
// Open video file
int err = avformat_open_input(&refrenceFormatCtx,"tcp://192.168.42.129:2226", fmts, &options);
if (!options) {
int dict_count = av_dict_count(options);
qDebug() << "dict_count " << dict_count;
}
av_strerror(err,errorsdef,100);
qDebug() << "OPening Stream error: "<< err << " "<< errorsdef;
if (refrenceFormatCtx!=NULL){
err = avformat_find_stream_info(refrenceFormatCtx, &options);
if( err< 0){
av_strerror(err,errorsdef,100);
qDebug() << "Not able to find stream: "<< err << " "<< errorsdef;
}
}else{
qDebug() << "referencecontext null";
exit(1);
}
//search video stream
for (int i = 0; i < (int)refrenceFormatCtx->nb_streams; i++) {
AVStream* s = refrenceFormatCtx->streams[i];
if (s->codec == NULL){
continue;
}
codec_ctx = (s->codec);
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO){
video_stream_index = i;
}
}
AVPacket packet;
av_init_packet(&packet);
//open output file
AVFormatContext* output_ctx = avformat_alloc_context();
AVStream* stream = NULL;
//start reading packets from stream and emit data pointer to slot
av_read_play(refrenceFormatCtx); //play RTSP
avcodec_copy_context(codec_ctx, refrenceFormatCtx->streams[video_stream_index]->codec);
if (avcodec_open2(codec_ctx, avcodec_find_decoder(AV_CODEC_ID_H264), NULL) < 0){
qDebug() << "avcodec_open2 null";
}
while (av_read_frame(refrenceFormatCtx, &packet) >= 0) {
if (packet.stream_index == video_stream_index) { //packet is video
if (stream == NULL) { //create stream in file
stream = avformat_new_stream(output_ctx, refrenceFormatCtx->streams[video_stream_index]->codec->codec);
avcodec_copy_context(stream->codec, refrenceFormatCtx->streams[video_stream_index]->codec);
stream->sample_aspect_ratio = refrenceFormatCtx->streams[video_stream_index]->codec->sample_aspect_ratio;
}
int check = 0;
packet.stream_index = stream->id;
int result = avcodec_decode_video2(codec_ctx, picture_yuv, &check, &packet);
av_free_packet(&packet);
av_packet_unref(&packet);
if(result <= 0 || check == 0){
continue;
}
if(initialize_rgb_requirements)
{
sizeofrgbpicture = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);
picture_rgb = av_frame_alloc();
picture_buffer_rgb = (uint8_t*) (av_malloc(sizeofrgbpicture));
avpicture_fill((AVPicture *) picture_rgb, picture_buffer_rgb, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height);
img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
initialize_rgb_requirements=0;
}
int height = 0;
if(picture_yuv->data != NULL)
{
height = sws_scale(img_convert_ctx, ((AVPicture*)picture_yuv)->data, ((AVPicture*)picture_yuv)->linesize, 0, codec_ctx->height, ((AVPicture*)picture_rgb)->data,((AVPicture*)picture_rgb)->linesize);
}
rgb_image_data = (uint8_t *)malloc(sizeofrgbpicture * sizeof(uint8_t));
int ret = avpicture_layout((AVPicture *)picture_rgb, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height, rgb_image_data, sizeofrgbpicture);
emit imageQueued(rgb_image_data, codec_ctx->width,codec_ctx->height);
}
msleep(1);
}
av_freep(picture_buffer_rgb);
av_frame_free(&picture_rgb);
avio_close(output_ctx->pb);
avformat_free_context(output_ctx);
avformat_close_input(&refrenceFormatCtx);
I came to know that for a raw H.264 stream we have to tell FFmpeg that the format is h264. For that I have used AVInputFormat; to set other options like the video codec and probesize, I have used av_opt_set(). To set the default flags in FFmpeg, I have used av_dict_set(). I emit the data pointer to my required slot. If anyone wants to create a file from it, they can generate a .ppm file by writing this pointer into a file.
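For what it's worth, av_register_all(), stream->codec, avcodec_decode_video2() and the avpicture_* helpers used above were removed in FFmpeg 4.x/5.x; the forced-format part of this answer would look roughly like this against the current API (a sketch, reusing the address and options above):
const AVInputFormat *fmts = av_find_input_format("h264"); // force the raw H.264 demuxer
AVDictionary *options = NULL;
av_dict_set(&options, "probesize", "32M", 0);
AVFormatContext *refrenceFormatCtx = NULL;
int err = avformat_open_input(&refrenceFormatCtx, "tcp://192.168.42.129:2226", fmts, &options);
av_dict_free(&options);
if (err >= 0)
    err = avformat_find_stream_info(refrenceFormatCtx, NULL);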
I want to apply processing to a video clip with a sound track: extract and process it frame by frame and write the result to an output file. The number of frames, the frame size and the speed remain unchanged in the output clip. I also want to keep the same audio track as in the source.
I can read the clip, decode frames and process them using OpenCV. Audio packets also write fine. I'm stuck on forming the output video stream.
The minimal runnable code I have for now (sorry it's not so short, but I can't make it shorter):
extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
}
#include "opencv2/opencv.hpp"
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
using namespace std;
using namespace cv;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf1, pkt->pts);
char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf2, pkt->dts);
char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf3, pkt->duration);
char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf4, pkt->pts, time_base);
char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf5, pkt->dts, time_base);
char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf6, pkt->duration, time_base);
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
buf1, buf4,
buf2, buf5,
buf3, buf6,
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
int frameFinished = 0;
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
const char *in_filename, *out_filename;
int ret, i;
in_filename = "../../TestClips/Audio Video Sync Test.mp4";
out_filename = "out.mp4";
// Initialize FFMPEG
av_register_all();
// Get input file format context
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
{
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
// Extract streams description
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
// Print detailed information about the input or output format,
// such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
av_dump_format(ifmt_ctx, 0, in_filename, 0);
// Allocate an AVFormatContext for an output format.
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx)
{
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
// The output container format.
ofmt = ofmt_ctx->oformat;
// Allocating output streams
for (i = 0; i < ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
}
// Show output format info
av_dump_format(ofmt_ctx, 0, out_filename, 1);
// Open output file
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
// Write output file header
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0)
{
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
// Search for input video codec info
AVCodec *in_codec = nullptr;
AVCodecContext* avctx = nullptr;
int video_stream_index = -1;
for (int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
video_stream_index = i;
avctx = ifmt_ctx->streams[i]->codec;
in_codec = avcodec_find_decoder(avctx->codec_id);
if (!in_codec)
{
fprintf(stderr, "in codec not found\n");
exit(1);
}
break;
}
}
// Search for output video codec info
AVCodec *out_codec = nullptr;
AVCodecContext* o_avctx = nullptr;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
o_avctx = ofmt_ctx->streams[i]->codec;
out_codec = avcodec_find_encoder(o_avctx->codec_id);
if (!out_codec)
{
fprintf(stderr, "out codec not found\n");
exit(1);
}
break;
}
}
// openCV pixel format
AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
// Data size
int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
// allocate buffer
uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
// fill frame structure
avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
// frame area
int y_size = avctx->width * avctx->height;
// Open input codec
avcodec_open2(avctx, in_codec, NULL);
// Main loop
while (1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
break;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");
// copy packet
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(ofmt_ctx, &pkt, "out");
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
avctx->pix_fmt,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0,
avctx->height,
((AVPicture *)pFrameRGB)->data,
((AVPicture *)pFrameRGB)->linesize);
sws_freeContext(img_convert_ctx);
// Do some image processing
cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0],false);
cv::GaussianBlur(img,img,Size(5,5),3);
cv::imshow("Display", img);
cv::waitKey(5);
// --------------------------------
// Transform back to initial format
// --------------------------------
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
avctx->width,
avctx->height,
avctx->pix_fmt,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrameRGB)->data,
((AVPicture*)pFrameRGB)->linesize,
0,
avctx->height,
((AVPicture *)pFrame)->data,
((AVPicture *)pFrame)->linesize);
// --------------------------------------------
// Something must be here
// --------------------------------------------
//
// Write video frame (how to write the frame to the output stream?)
//
// --------------------------------------------
sws_freeContext(img_convert_ctx);
}
}
else // write sound frame
{
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
}
if (ret < 0)
{
fprintf(stderr, "Error muxing packet\n");
break;
}
// Decrease packet ref counter
av_packet_unref(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avformat_close_input(&ifmt_ctx);
// close output
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF)
{
char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Error occurred: %s\n", buf_err);
return 1;
}
avcodec_close(avctx);
av_free(pFrame);
av_free(pFrameRGB);
return 0;
}
Your original code segfaults in my case. Initializing the output codec context seems to fix it. The code below works for me, but I didn't test the OpenCV stuff as I don't have the lib installed.
Get the codec context:
// Search for output video codec info
AVCodec *out_codec = NULL;
AVCodecContext* o_avctx = NULL;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);
o_avctx = avcodec_alloc_context3(out_codec);
o_avctx->height = avctx->height;
o_avctx->width = avctx->width;
o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (out_codec->pix_fmts)
o_avctx->pix_fmt = out_codec->pix_fmts[0];
else
o_avctx->pix_fmt = avctx->pix_fmt;
o_avctx->time_base = avctx->time_base;
avcodec_open2(o_avctx, out_codec, NULL);
}
}
Encode and write:
// Main loop
while (1)
{
...
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
...
// --------------------------------------------
// Something must be here
// --------------------------------------------
int got_packet = 0;
AVPacket enc_pkt = { 0 };
av_init_packet(&enc_pkt);
avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
....
}
}
You should assign the processed frame's packet information (the stream index and rescaled timestamps) to your original packet, then pass it to av_interleaved_write_frame().
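In other words (a sketch; the names come from the snippets above), something like:
// give the encoded packet its output stream index, and rescale its
// timestamps from the encoder time base to the output stream time base
enc_pkt.stream_index = o_video_stream_index;
av_packet_rescale_ts(&enc_pkt, o_avctx->time_base,
                     ofmt_ctx->streams[o_video_stream_index]->time_base);
av_interleaved_write_frame(ofmt_ctx, &enc_pkt);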
I am trying to demux a video file into the video part (h264, mpeg4, h265, vp8, etc.), the audio part (mp3, aac, ac3, etc.) and the subtitle part (srt) using FFmpeg in C++.
The audio part came out alright and played on all the media players I have, and so did the subtitle part. The video part, however, came out WITHOUT error and was saved into a .h264 file, but when I use ffprobe to check it or ffplay to play it, it always gives the error "Invalid data found when processing input".
The code is below:
/* Separate a media file into audio, video and subtitle files (demuxing, complex) */
//TODO: mute error when subtitle is not present
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "libavformat/avformat.h"
}
int main()
{
//Input AVFormatContext and Output AVFormatContext
AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL, *ofmt_s = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL, *ofmt_ctx_s = NULL;
AVPacket pkt;
int ret, i;
int videoindex=-1, audioindex=-1, srtindex=-1;
int frame_index=0;
//Input file URL
const char *in_filename = "sample.mp4";
//Output file URL
const char *out_filename_v = "sample.h264";
const char *out_filename_a = "sample.mp3";
const char *out_filename_s = "sample.srt";
av_register_all();
//Input
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
printf( "Could not open input file.");
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
printf( "Failed to retrieve input stream information");
goto end;
}
//Output
avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
if (!ofmt_ctx_v) {
printf( "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt_v = ofmt_ctx_v->oformat;
avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
if (!ofmt_ctx_a) {
printf( "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt_a = ofmt_ctx_a->oformat;
avformat_alloc_output_context2(&ofmt_ctx_s, NULL, NULL, out_filename_s);
if (!ofmt_ctx_s) {
printf( "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt_s = ofmt_ctx_s->oformat;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
//Create output AVStream according to input AVStream
AVFormatContext *ofmt_ctx;
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = NULL;
if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
videoindex=i;
out_stream=avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec);
ofmt_ctx=ofmt_ctx_v;
}
else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
audioindex=i;
out_stream=avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec);
ofmt_ctx=ofmt_ctx_a;
}
else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){
srtindex=i;
out_stream=avformat_new_stream(ofmt_ctx_s, in_stream->codec->codec);
ofmt_ctx=ofmt_ctx_s;
}
else{
break;
}
if (!out_stream) {
printf( "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
//Copy the settings of AVCodecContext
if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
printf( "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
//Dump Format------------------
printf("\n==============Input Video=============\n");
av_dump_format(ifmt_ctx, 0, in_filename, 0);
printf("\n==============Output Video============\n");
av_dump_format(ofmt_ctx_v, 0, out_filename_v, 1);
printf("\n==============Output Audio============\n");
av_dump_format(ofmt_ctx_a, 0, out_filename_a, 1);
/*printf("\n==============Output Subtitle============\n");
av_dump_format(ofmt_ctx_s, 0, out_filename_s, 1);*/
printf("\n======================================\n");
//Open output file
if (!(ofmt_v->flags & AVFMT_NOFILE)) {
if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
printf( "Could not open output file '%s'", out_filename_v);
goto end;
}
}
if (!(ofmt_a->flags & AVFMT_NOFILE)) {
if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
printf( "Could not open output file '%s'", out_filename_a);
goto end;
}
}
if (!(ofmt_s->flags & AVFMT_NOFILE)) {
if (avio_open(&ofmt_ctx_s->pb, out_filename_s, AVIO_FLAG_WRITE) < 0) {
printf( "Could not open output file '%s'", out_filename_s);
goto end;
}
}
//Write file header
if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
printf( "Error occurred when opening video output file\n");
goto end;
}
system("pause");
if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
printf( "Error occurred when opening audio output file\n");
goto end;
}
if (avformat_write_header(ofmt_ctx_s, NULL) < 0) {
printf( "Error occurred when opening audio output file\n");
goto end;
}
AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
while (1) {
AVFormatContext *ofmt_ctx;
AVStream *in_stream, *out_stream;
//Get an AVPacket
if (av_read_frame(ifmt_ctx, &pkt) < 0)
break;
in_stream = ifmt_ctx->streams[pkt.stream_index];
if(pkt.stream_index==videoindex){
out_stream = ofmt_ctx_v->streams[0];
ofmt_ctx=ofmt_ctx_v;
printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
}else if(pkt.stream_index==audioindex){
out_stream = ofmt_ctx_a->streams[0];
ofmt_ctx=ofmt_ctx_a;
printf("Write Audio Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
}
else if(pkt.stream_index==srtindex){
out_stream = ofmt_ctx_s->streams[0];
ofmt_ctx=ofmt_ctx_s;
printf("Write Subtitle Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
}
else{
continue;
}
//Convert PTS/DTS
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index=0;
//Write
if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
printf( "Error muxing packet\n");
break;
}
//printf("Write %8d frames to output file\n",frame_index);
av_free_packet(&pkt);
frame_index++;
}
av_bitstream_filter_close(h264bsfc);
//Write file trailer
av_write_trailer(ofmt_ctx_a);
av_write_trailer(ofmt_ctx_v);
av_write_trailer(ofmt_ctx_s);
end:
avformat_close_input(&ifmt_ctx);
/* close output */
if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx_a->pb);
if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx_v->pb);
if (ofmt_ctx_s && !(ofmt_s->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx_s->pb);
avformat_free_context(ofmt_ctx_a);
avformat_free_context(ofmt_ctx_v);
avformat_free_context(ofmt_ctx_s);
system("pause");
if (ret < 0 && ret != AVERROR_EOF) {
printf( "Error occurred.\n");
return -1;
}
return 0;
}
EDIT 1
Screen shot of resultant h264 file
EDIT 2
I think the "error" has to do with FFMPEG's "Using AVStream.codec.time_base as a timebase hint to the muxer is deprecated. Set AVStream.time_base instead" error.
I revert to an older version of FFMPEG and with the same code, the resultant h264 file was ok!
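On newer FFmpeg, the stream-setup loop would avoid that deprecated path entirely by copying codec parameters instead of codec contexts; a sketch of what the per-stream setup could look like:
AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
out_stream->codecpar->codec_tag = 0;
out_stream->time_base = in_stream->time_base;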
You need to convert the H.264 bitstream from length-prefixed mode to start-code-prefixed mode. This is required by some streaming formats, typically the MPEG-2 transport stream format ("mpegts").
Take a look at https://www.ffmpeg.org/ffmpeg-bitstream-filters.html#h264_005fmp4toannexb
Look at lines from 402 to 424 and from 842 to 843.
https://www.ffmpeg.org/doxygen/0.7/crystalhd_8c-source.html
I used it to extract H.264 from MP4.
//Use this filter on your first h264 input AVPacket
AVFormatContext *ifmt_ctx = NULL;
//...
//... //init input
//...
AVPacket *firstPacket;
//...
//... //get packet from stream
//...
uint8_t *dummy_p;
int dummy_int;
AVBitStreamFilterContext *filter = av_bitstream_filter_init("h264_mp4toannexb");
if (!filter)
{
printf("Can't open filter\n");
exit(1);
}
ret = av_bitstream_filter_filter(filter, ifmt_ctx->streams[videoindex]->codec, NULL,
&dummy_p, &dummy_int,
firstPacket->data, firstPacket->size, 0);
if( ret < 0 )
{
printf("Can't filter\n");
exit(1);
}
// use dummy_p to write to file, as first packet
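Note that AVBitStreamFilterContext and av_bitstream_filter_filter() were removed in FFmpeg 4.x; with the current AVBSF API the same filtering would look roughly like this (a sketch, error handling elided, reusing names from the question):
const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
AVBSFContext *bsf_ctx = NULL;
av_bsf_alloc(bsf, &bsf_ctx);
avcodec_parameters_copy(bsf_ctx->par_in, ifmt_ctx->streams[videoindex]->codecpar);
av_bsf_init(bsf_ctx);
// per video packet:
av_bsf_send_packet(bsf_ctx, &pkt);
while (av_bsf_receive_packet(bsf_ctx, &pkt) == 0) {
    av_interleaved_write_frame(ofmt_ctx_v, &pkt);
    av_packet_unref(&pkt);
}
av_bsf_free(&bsf_ctx);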