My goal is to process the audio and the video of an MPEG-2 file independently while keeping the two streams synchronized. The video lasts one or two minutes at most.
First, the approach from this post, "opencv for reading videos (and do the process), ffmpeg for audio, and SDL used to play both", sounds perfect. I have made some modifications to the code to account for recent FFmpeg naming changes. Compilation with CMake on a 64-bit machine is fine, but I get an "Unsupported codec [3]" error when opening the codec.
The code follows.
Second, I am looking for code that keeps the two streams synchronized.
#include "opencv/highgui.h"
#include "opencv/cv.h"
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
extern "C"{
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <iostream>
#include <stdio.h>
#include <malloc.h>
using namespace cv;
#define SDL_AUDIO_BUFFER_SIZE 1024
typedef struct PacketQueue
{
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int audioStream = -1;
int videoStream = -1;
int quit = 0;
SDL_Surface* screen = NULL;
SDL_Surface* surface = NULL;
AVFormatContext* pFormatCtx = NULL;
AVCodecContext* aCodecCtx = NULL;
AVCodecContext* pCodecCtx = NULL;
void show_frame(IplImage* img){
if (!screen){
screen = SDL_SetVideoMode(img->width, img->height, 0, 0);
if (!screen){
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
}
// Assuming IplImage packed as BGR 24bits
SDL_Surface* surface = SDL_CreateRGBSurfaceFrom((void*)img->imageData,
img->width,
img->height,
img->depth * img->nChannels,
img->widthStep,
0xff0000, 0x00ff00, 0x0000ff, 0
);
SDL_BlitSurface(surface, 0, screen, 0);
SDL_FreeSurface(surface); // free the per-frame wrapper surface, or it leaks every frame
SDL_Flip(screen);
}
void packet_queue_init(PacketQueue *q){
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
if (av_dup_packet(pkt) < 0){
return -1;
}
pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
//pkt1 = (AVPacketList*) malloc(sizeof(AVPacketList));
if (!pkt1) return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block){
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for (;;){
if( quit){
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1){
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
//free(pkt1);
ret = 1;
break;
}
else if (!block){
ret = 0;
break;
}
else{
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size){
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
int len1, data_size;
for (;;){
while (audio_pkt_size > 0){
data_size = buf_size;
len1 = avcodec_decode_audio3(aCodecCtx, (int16_t*)audio_buf, &data_size, &pkt);
if (len1 < 0){
// if error, skip frame
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if (data_size <= 0){
// No data yet, get more frames
continue;
}
// We have data, return it and come back for more later
return data_size;
}
if (pkt.data)
av_free_packet(&pkt);
if (quit) return -1;
if (packet_queue_get(&audioq, &pkt, 1) < 0) return -1;
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
void audio_callback(void *userdata, Uint8 *stream, int len){
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
while (len > 0){
if (audio_buf_index >= audio_buf_size){
// We have already sent all our data; get more
audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
if(audio_size < 0){
// If error, output silence
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
}
else{
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if (len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
void setup_ffmpeg(char* filename)
{
if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0){
fprintf(stderr, "FFmpeg failed to open file %s!\n", filename);
exit(-1);
}
if (av_find_stream_info(pFormatCtx) < 0){
fprintf(stderr, "FFmpeg failed to retrieve stream info!\n");
exit(-1);
}
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, filename, 0);
// Find the first video stream
int i = 0;
for (i = 0; i < pFormatCtx->nb_streams; i++){
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0){
videoStream = i;
}
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && audioStream < 0){
audioStream = i;
}
}
if (videoStream == -1){
fprintf(stderr, "No video stream found in %s!\n", filename);
exit(-1);
}
if (audioStream == -1){
fprintf(stderr, "No audio stream found in %s!\n", filename);
exit(-1);
}
// Get a pointer to the codec context for the audio stream
aCodecCtx = pFormatCtx->streams[audioStream]->codec;
// Set audio settings from codec info
SDL_AudioSpec wanted_spec;
wanted_spec.freq = aCodecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = aCodecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
SDL_AudioSpec spec;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0){
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
exit(-1);
}
AVCodec* aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if (!aCodec){
fprintf(stderr, "Unsupported codec [1]!\n");
exit(-1);
}
avcodec_open(aCodecCtx, aCodec);
// audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
SDL_PauseAudio(0);
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL){
fprintf(stderr, "Unsupported codec [2]!\n");
exit(-1); // Codec not found
}
// Open codec
if (avcodec_open(pCodecCtx, pCodec) < 0){
fprintf(stderr, "Unsupported codec [3]!\n");
exit(-1); // Could not open codec
}
}
int main(int argc, char* argv[])
{
if (argc < 2){
std::cout << "Usage: " << argv[0] << " <video>" << std::endl;
return -1;
}
av_register_all();
// Init SDL
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
{
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
// Init ffmpeg and setup some SDL stuff related to Audio
setup_ffmpeg(argv[1]);
VideoCapture cap(argv[1]);
if (!cap.isOpened()){
std::cout << "Failed to load file!" << std::endl;
return -1;
}
AVPacket packet;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
// Actually this is were SYNC between audio/video would happen.
// Right now I assume that every VIDEO packet contains an entire video frame, and that's not true. A video frame can be made by multiple packets!
// But for the time being, assume 1 video frame == 1 video packet,
// so instead of reading the frame through ffmpeg, I read it through OpenCV.
Mat frame;
cap >> frame; // get a new frame from camera
// do some processing on the frame, either as a Mat or as IplImage.
// For educational purposes, applying a lame grayscale conversion
IplImage ipl_frame = frame;
for (int i = 0; i < ipl_frame.width * ipl_frame.height * ipl_frame.nChannels; i += ipl_frame.nChannels)
{
ipl_frame.imageData[i] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //B
ipl_frame.imageData[i+1] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //G
ipl_frame.imageData[i+2] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //R
}
// Display it on SDL window
show_frame(&ipl_frame);
av_free_packet(&packet);
}
else if (packet.stream_index == audioStream)
{
packet_queue_put(&audioq, &packet);
}
else
{
av_free_packet(&packet);
}
SDL_Event event;
if (SDL_PollEvent(&event) && event.type == SDL_QUIT)
{
// The original switch's 'break' only left the switch, not the loop,
// and 'event' was read even when no event was pending.
quit = 1;
SDL_Quit();
break;
}
}
// the camera will be deinitialized automatically in VideoCapture destructor
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
I solved the "Unsupported codec" error. Remplace AVMEDIA_TYPE_VIDEO with AVMEDIA_TYPE_AUDIO in the following line :
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && audioStream < 0){
audioStream = i;
}
The grayscale conversion is also wrong because OpenCV stores pixels as uchar: the sums overflow, and each channel update reuses already-modified values.
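For reference, a version of the loop that stays correct with uchar data (summing in an int, and reading all three channels before overwriting any of them) could look like this; it is a sketch against the same IplImage layout as the code above:
// Average B, G and R of the *original* pixel in an int so the
// 8-bit values cannot overflow, then write the mean back.
for (int i = 0; i < ipl_frame.width * ipl_frame.height * ipl_frame.nChannels; i += ipl_frame.nChannels)
{
    int b = (unsigned char)ipl_frame.imageData[i];
    int g = (unsigned char)ipl_frame.imageData[i + 1];
    int r = (unsigned char)ipl_frame.imageData[i + 2];
    char gray = (char)((b + g + r) / 3);
    ipl_frame.imageData[i]     = gray; // B
    ipl_frame.imageData[i + 1] = gray; // G
    ipl_frame.imageData[i + 2] = gray; // R
}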
I still can't get the synchronization working... any help?
Thanks
See here for syncing:
http://dranger.com/ffmpeg/tutorial06.html
and here:
http://dranger.com/ffmpeg/tutorial07.html
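The core idea in those tutorials is to treat the audio clock as the master and schedule each video frame against it. A minimal sketch of that logic, assuming a hypothetical get_audio_clock() helper that returns the audio playback position in seconds (it is not part of the code above):
#include <math.h>

// Hypothetical helper: seconds of audio actually played so far.
// In the tutorials it is derived from the last audio pts minus the
// data still sitting in the audio buffer.
extern double get_audio_clock(void);

static double last_frame_pts = 0.0;
static double last_delay = 0.040; /* 40 ms fallback */

// Returns how long (in seconds) to wait before showing this frame.
double compute_frame_delay(double frame_pts)
{
    double delay = frame_pts - last_frame_pts; // nominal frame duration
    if (delay <= 0.0 || delay >= 1.0)
        delay = last_delay; // bad or missing pts: reuse the previous delay
    last_delay = delay;
    last_frame_pts = frame_pts;

    double diff = frame_pts - get_audio_clock(); // video ahead (+) or behind (-)
    double sync_threshold = fmax(0.010, delay);
    if (fabs(diff) < 10.0) { // ignore hopeless desync
        if (diff <= -sync_threshold)
            delay = 0.0;         // late: show the frame immediately
        else if (diff >= sync_threshold)
            delay = 2.0 * delay; // early: hold the frame longer
    }
    return delay;
}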
I use FFmpeg to record video from an RTSP stream. My code gets the current time, creates a folder in the format year/month/day/hour/minute, and saves the video to that folder.
When a new minute arrives, I create a new folder based on the new minute and record to that folder instead.
Basically it works, but each new video's start time continues from where the previous video ended. For example:
video1: 00:00 -> 00:55
video2: 00:56 -> ...
I would like every video to start from 00:00. Can I do that?
Here is my code:
ffmpeg.h
class CtFfmpeg {
public:
CtFfmpeg();
~CtFfmpeg();
void init();
int getInput();
int getOutputName(const char *filename);
int release();
int ret;
AVFormatContext *ifmt_ctx, *ofmt_ctx;
AVStream *in_stream, *out_stream;
AVPacket pkt;
const char *in_filename;
char *out_filename;
private:
int setOutput(const char *outfilename);
AVOutputFormat *ofmt;
};
ffmpeg.cpp
#include "ctffmpeg.h"
CtFfmpeg::CtFfmpeg() {
in_filename = new char [1024];
out_filename = new char [1024];
}
CtFfmpeg::~CtFfmpeg() {
delete [] in_filename;
delete [] out_filename;
}
void CtFfmpeg::init() {
avcodec_register_all();
av_register_all();
avformat_network_init();
pkt = { 0 };
av_init_packet(&pkt);
ofmt = NULL;
ifmt_ctx = NULL;
ofmt_ctx = NULL;
return;
}
int CtFfmpeg::release() {
av_write_trailer(ofmt_ctx);
avcodec_close(out_stream->codec);
// avcodec_close(in_stream->codec);
// avformat_close_input(&ifmt_ctx);
/* close output */
if (!(ofmt->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
av_free_packet(&pkt);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred\n");
return 1;
}
return 0;
}
int CtFfmpeg::getInput() {
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
release();
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
release();
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
return ret;
}
int CtFfmpeg::setOutput(const char *outfilename) {
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outfilename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
release();
}
ofmt = ofmt_ctx->oformat;
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
in_stream = ifmt_ctx->streams[i];
out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
release();
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
release();
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
} // for
av_dump_format(ofmt_ctx, 0, outfilename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, outfilename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", outfilename);
release();
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
release();
}
return ret;
}
int CtFfmpeg::getOutputName(const char *filename){
sprintf(out_filename, "%s", filename); // use "%s": a '%' in the name must not act as a format spec
return setOutput(out_filename);
}
main.cpp
#include "ctfolder.h"
#include "ctffmpeg.h"
CtFfmpeg * ff;
int main(int argc, char** argv) {
if (argc < 2) {
printf("usage: %s <RTSP link> \n", argv[0]);
return 1;
}
ff = new CtFfmpeg();
ff->in_filename = argv[1]; //RTSP input link
ff->init();
ff->getInput();
string filename;
videoRecorder obj;
int start, now;
start = obj.get_current_min();
if(obj.create_folder(0755))
cout << "Cannot create folder, maybe it already exists" << endl;
else
cout << "Create folder succesfully" << endl;
int skip = 0;
while(1){
filename = obj.update_filename();
ff->getOutputName(filename.c_str());
while((now = obj.get_current_min()) == start) {
ff->ret = av_read_frame(ff->ifmt_ctx, &(ff->pkt));
skip++;
if(skip==1)
continue;
if(skip>2)
skip=2;
if (ff->ret < 0)
continue;
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration, ff->in_stream->time_base, ff->out_stream->time_base);
ff->pkt.pos = -1;
ff->ret = av_interleaved_write_frame(ff->ofmt_ctx, &(ff->pkt));
if (ff->ret < 0) {
fprintf(stderr, "Error muxing packet\n");
continue;
}
av_free_packet(&(ff->pkt));
}
ff->release();
cout << "New minute!" << endl;
if(obj.create_folder(0755))
cout << "Cannot create folder, something's wrong" << endl;
else
cout << "Create folder succesfully" << endl;
start = now;
}
return 0;
}
You need to shift your recorded packets' pts back to 0:
while(<some condition>)
{
//...
int64_t pts_offset = AV_NOPTS_VALUE ;
while((now = obj.get_current_min()) == start)
{
//...
ff.pkt.pts = ...
//...
if( pts_offset == AV_NOPTS_VALUE )
{
pts_offset = ff.pkt.pts ;
}
ff.pkt.pts -= pts_offset ;
// ...
}
}
I tried to build your code with Alexander Chernin's suggestion added, but I ran into a muxer error.
When you decrease the recorded packet's pts, its value can drop below the packet's dts. In avcodec.h, above the declaration of pts, I found this comment:
pts MUST be larger or equal to dts as presentation cannot happen before decompression.
I solved the error by decreasing the recorded packet's dts as well:
ff->pkt.pts = av_rescale_q_rnd(ff->pkt.pts, ff->in_stream->time_base, ff->out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
if (pts_offset == AV_NOPTS_VALUE) {
pts_offset = ff->pkt.pts;
}
ff->pkt.pts -= pts_offset;
ff->pkt.dts -= pts_offset;
ff->pkt.dts = av_rescale_q_rnd(ff->pkt.dts, ff->in_stream->time_base,ff->out_stream->time_base,(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
ff->pkt.duration = av_rescale_q(ff->pkt.duration,ff->in_stream->time_base,ff->out_stream->time_base);
ff->pkt.pos = -1;
I want to apply processing to a video clip with a sound track: extract and process it frame by frame, and write the result to an output file. The number of frames, the frame size, and the playback speed remain unchanged in the output clip. I also want to keep the same audio track as in the source.
I can read the clip, decode frames, and process them using OpenCV. Audio packets are also written fine. I'm stuck on forming the output video stream.
The minimal runnable code I have for now (sorry it's not shorter, but I can't reduce it further):
extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
}
#include "opencv2/opencv.hpp"
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
using namespace std;
using namespace cv;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf1, pkt->pts);
char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf2, pkt->dts);
char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf3, pkt->duration);
char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf4, pkt->pts, time_base);
char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf5, pkt->dts, time_base);
char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf6, pkt->duration, time_base);
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
buf1, buf4,
buf2, buf5,
buf3, buf6,
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
int frameFinished = 0;
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
const char *in_filename, *out_filename;
int ret, i;
in_filename = "../../TestClips/Audio Video Sync Test.mp4";
out_filename = "out.mp4";
// Initialize FFMPEG
av_register_all();
// Get input file format context
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
{
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
// Extract streams description
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
// Print detailed information about the input or output format,
// such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
av_dump_format(ifmt_ctx, 0, in_filename, 0);
// Allocate an AVFormatContext for an output format.
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx)
{
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
// The output container format.
ofmt = ofmt_ctx->oformat;
// Allocating output streams
for (i = 0; i < ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
}
// Show output format info
av_dump_format(ofmt_ctx, 0, out_filename, 1);
// Open output file
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
// Write output file header
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0)
{
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
// Search for input video codec info
AVCodec *in_codec = nullptr;
AVCodecContext* avctx = nullptr;
int video_stream_index = -1;
for (int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
video_stream_index = i;
avctx = ifmt_ctx->streams[i]->codec;
in_codec = avcodec_find_decoder(avctx->codec_id);
if (!in_codec)
{
fprintf(stderr, "in codec not found\n");
exit(1);
}
break;
}
}
// Search for output video codec info
AVCodec *out_codec = nullptr;
AVCodecContext* o_avctx = nullptr;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
o_avctx = ofmt_ctx->streams[i]->codec;
out_codec = avcodec_find_encoder(o_avctx->codec_id);
if (!out_codec)
{
fprintf(stderr, "out codec not found\n");
exit(1);
}
break;
}
}
// openCV pixel format
AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
// Data size
int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
// allocate buffer
uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
// fill frame structure
avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
// frame area
int y_size = avctx->width * avctx->height;
// Open input codec
avcodec_open2(avctx, in_codec, NULL);
// Main loop
while (1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
break;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");
// copy packet
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(ofmt_ctx, &pkt, "out");
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
avctx->pix_fmt,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0,
avctx->height,
((AVPicture *)pFrameRGB)->data,
((AVPicture *)pFrameRGB)->linesize);
sws_freeContext(img_convert_ctx);
// Do some image processing
cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0],false);
cv::GaussianBlur(img,img,Size(5,5),3);
cv::imshow("Display", img);
cv::waitKey(5);
// --------------------------------
// Transform back to initial format
// --------------------------------
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
avctx->width,
avctx->height,
avctx->pix_fmt,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrameRGB)->data,
((AVPicture*)pFrameRGB)->linesize,
0,
avctx->height,
((AVPicture *)pFrame)->data,
((AVPicture *)pFrame)->linesize);
// --------------------------------------------
// Something must be here
// --------------------------------------------
//
// Write video frame (how do I write the frame to the output stream?)
//
// --------------------------------------------
sws_freeContext(img_convert_ctx);
}
}
else // write sound frame
{
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
}
if (ret < 0)
{
fprintf(stderr, "Error muxing packet\n");
break;
}
// Decrease packet ref counter
av_packet_unref(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avformat_close_input(&ifmt_ctx);
// close output
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF)
{
char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Error occurred: %s\n", buf_err);
return 1;
}
avcodec_close(avctx);
av_free(pFrame);
av_free(pFrameRGB);
return 0;
}
Your original code segfaults in my case. Initializing the output codec context seems to fix it. The code below works for me but I didn't test the OpenCV stuff as I don't have the lib installed.
Get the codec context:
// Search for output video codec info
AVCodec *out_codec = NULL;
AVCodecContext* o_avctx = NULL;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);
o_avctx = avcodec_alloc_context3(out_codec);
o_avctx->height = avctx->height;
o_avctx->width = avctx->width;
o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (out_codec->pix_fmts)
o_avctx->pix_fmt = out_codec->pix_fmts[0];
else
o_avctx->pix_fmt = avctx->pix_fmt;
o_avctx->time_base = avctx->time_base;
avcodec_open2(o_avctx, out_codec, NULL);
}
}
Encode and write:
// Main loop
while (1)
{
...
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
...
// --------------------------------------------
// Something must be here
// --------------------------------------------
int got_packet = 0;
AVPacket enc_pkt = { 0 };
av_init_packet(&enc_pkt);
avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
....
}
}
You should assign the processed frame's packet information to your original packet, then pass it to av_interleaved_write_frame.
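Concretely, a literal rendering of that advice could look roughly like this (an untested sketch; it assumes encode order matches decode order, e.g. no B-frames, and reuses the names from the loop above, where 'pkt' has already been rescaled to the output time base):
int got_packet = 0;
AVPacket enc_pkt = { 0 };
av_init_packet(&enc_pkt);
avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
if (got_packet)
{
    // Carry over the bookkeeping of the packet this frame was decoded
    // from: the muxer needs a valid stream index and timestamps.
    enc_pkt.stream_index = pkt.stream_index;
    enc_pkt.pts = pkt.pts;
    enc_pkt.dts = pkt.dts;
    enc_pkt.duration = pkt.duration;
    av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    av_packet_unref(&enc_pkt);
}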
After many weeks spent trying to connect to my home CCTV in my C++/Qt project using libav on my Raspberry Pi (ARMv7), I am able to display my camera correctly for a good while (at least 20 minutes) with hardly any NAL warnings in the console (just some warnings about invalid NAL units). Eventually, however, I get just a grey screen and lots of NAL warnings, and I have to close and reopen the connection to get the camera displaying correctly again. I have searched the internet many times to make sure I understand av_parser_parse2 correctly, and I think I do.
I have pasted my three main functions below in the hope that somebody can tell me: is there more to decoding an H.264 stream with libav than I currently understand?
Thank you.
void MainWindow::playStreamToScreen(MainWindow* app)
{
avcodec_register_all();
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
qDebug() << "codec not found\n";
return;
}
c = avcodec_alloc_context3(codec);
/* if(codec->capabilities & CODEC_CAP_TRUNCATED)
c->flags |= CODEC_FLAG_TRUNCATED;
if(codec->capabilities & CODEC_FLAG2_CHUNKS)
c->flags |= CODEC_FLAG2_CHUNKS;*//* We may send incomplete frames */
picture = av_frame_alloc();
pFrameRGB=av_frame_alloc();
int numBytes=avpicture_get_size(pix_fmt_dst, myWidth, myHeight);
buffer= (uint8_t*)malloc(numBytes);
if(avpicture_fill((AVPicture *)pFrameRGB, buffer, pix_fmt_dst,
myWidth, myHeight) < 0)
{
qDebug() << "failed fill picture";
}
if (avcodec_open2(c, codec, NULL) < 0) {
qDebug() << "could not open codec\n";
return;
}
frame = 0;
if(parser == NULL)parser = av_parser_init(AV_CODEC_ID_H264);
if(parser == NULL)
{
qDebug() << "after init parser is null";
return;
}
parser->flags |= PARSER_FLAG_ONCE;
struct timeval t;
float inv_fps2;// = 4;
int ii = 0;
inv_fps2 = 1e6/9.98;//av_q2d(c->time_base);
qDebug() << "before timebase";
c->time_base = (AVRational){1,10};
c->bit_rate = 251000;
c->width = 352;
c->height = 288;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if(img_convert_ctx == NULL)
{
img_convert_ctx = sws_getCachedContext ( img_convert_ctx, c->width, c->height,
AV_PIX_FMT_YUV420P, myWidth, myHeight, AV_PIX_FMT_RGB555,
SWS_LANCZOS | SWS_ACCURATE_RND , NULL, NULL, NULL );
}
running = true;
int retval;
gettimeofday(&t, 0);
}
void MainWindow::display_frame(AVCodecContext* cc, AVFrame *frame, SwsContext* img_convert_ctx, MainWindow* app)
{
sws_scale ( img_convert_ctx, frame->data, frame->linesize, 0,
288, pFrameRGB->data, pFrameRGB->linesize );
app->currentImage = QImage(pFrameRGB->data[0], app->myWidth, app->myHeight, pFrameRGB->linesize[0], QImage::Format_RGB555).copy();
QImage img2 = app->currentImage.scaled(app->width(), app->height(), Qt::KeepAspectRatio);
QMetaObject::invokeMethod(app->myLabel, "setImage", Qt::QueuedConnection, Q_ARG(QImage, img2));
app->myLabel->setFixedSize(app->width(), app->height());
}
void MainWindow::socketReadyRead()
{
if(connectingToLive && i == 0)
{
qDebug() << "starting live stream";
i++;
this->playStreamToScreen(this);
}
else if(connectingToLive && i > 0)
{
QByteArray temp;
int inbuf_start = 0;
int inbuf_len = 0;
temp.append(socket->readAll());//reads from network here
inbuf_len = temp.size();
inbuf_start = 0;
int out_size;
while (inbuf_len)
{
av_init_packet(&packet2);
packet2.data = 0;
packet2.size = 0;
len = av_parser_parse2(parser, c, &packet2.data, &packet2.size,
(uint8_t*)temp.constData()+ inbuf_start, inbuf_len,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
inbuf_start += len;
inbuf_len -= len;
if(packet2.size == 0 && len >= 0)
{
break;
}
else if(len)
{
retval = avcodec_decode_video2(c, picture, &got_picture, &packet2);
if (got_picture && retval > 0)
{
display_frame(c, picture, img_convert_ctx, this);
}
}
}
}
}
I have a program which captures video from a webcam, encodes it with FFmpeg, and writes the encoded packets to a buffer. On the receiver side, I read from the buffer, decode with FFmpeg, and play.
For testing I have now merged the sender and the receiver into one program. It works fine with AV_CODEC_ID_MPEG1VIDEO, but when I change the codec to AV_CODEC_ID_H264, the decoding step reports errors.
The whole program is below, FYI; I made a loop so that the whole process runs twice.
What is the cause of the error? Is there anything special about H.264? Thanks in advance!
#include <math.h>
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libswscale/swscale.h>
#include "v4l2.h"
}
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
#define INBUF_SIZE 4096
static uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
static AVCodec *codec;
static AVCodecContext *c= NULL;
static int ret, got_output;
static int frame_count;
static FILE *f;
static AVPacket pkt;
static AVFrame *frame;
static AVFrame *frameDecode;
static AVFrame *framergb;
static uint8_t endcode[] = { 0, 0, 1, 0xb7 };
static AVPacket avpkt;
int totalSize=0;
#define SUBSITY 3
static int decode_write_frame(AVCodecContext *avctx,
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
int len, got_frame;
char buf[1024];
struct SwsContext *convert_ctx;
Mat m;
AVFrame dst;
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
return len;
}
if (got_frame) {
printf("Saving %s frame %3d\n", last ? "last " : "", *frame_count);
fflush(stdout);
int w = avctx->width;
int h = avctx->height;
/*convert AVFrame to opencv Mat frame*/
m = cv::Mat(h, w, CV_8UC3);
dst.data[0] = (uint8_t *)m.data;
avpicture_fill( (AVPicture *)&dst, dst.data[0], PIX_FMT_BGR24, w, h);
enum PixelFormat src_pixfmt = (enum PixelFormat)frame->format;
enum PixelFormat dst_pixfmt = PIX_FMT_BGR24;
convert_ctx = sws_getContext(w, h, src_pixfmt, w, h, dst_pixfmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if(convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
sws_scale(convert_ctx, frame->data, frame->linesize, 0, h,
dst.data, dst.linesize);
imshow("MyVideo", m);
//video.write(m);
waitKey(10); //wait next frame time
(*frame_count)++;
}
if (pkt->data) {
pkt->size -= len;
pkt->data += len;
}
return 0;
}
static void video_decode_example(char *inbufout)
{
int bytes;
uint8_t *buffer;
av_init_packet(&avpkt);
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if(codec->capabilities&CODEC_CAP_TRUNCATED)
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
frameDecode = avcodec_alloc_frame();
if (!frameDecode) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* bytes=avpicture_get_size(PIX_FMT_RGB24, CAMER_WIDTH, CAMER_HEIGHT);
buffer=(uint8_t *)av_malloc(bytes*sizeof(uint8_t));
avpicture_fill((AVPicture *)framergb, buffer, PIX_FMT_RGB24,
CAMER_WIDTH, CAMER_HEIGHT); */
frame_count = 0;
namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
int size1=0;
for(;;) {
memcpy(inbuf,inbufout+size1,INBUF_SIZE);
size1+=INBUF_SIZE;
if (size1>(totalSize-INBUF_SIZE))
break;
avpkt.size=INBUF_SIZE;
avpkt.data = inbuf;
/*frame by frame process*/
while (avpkt.size > 0)
if (decode_write_frame(c, frameDecode, &frame_count, &avpkt, 0) < 0)
exit(1);
}
avpkt.data = NULL;
avpkt.size = 0;
decode_write_frame(c, frameDecode, &frame_count, &avpkt, 1);
}
static void init_video_encode(const char *filename, AVCodecID codec_id, int max_f)
{
printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 640;
c->height = 480;
/* frames per second */
c->time_base= (AVRational){1,25};
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames=max_f;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if(codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
/* get the delayed frames */
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
printf("\n");
}
int video_encode(int frameNo,char *inbufout)
{
static int count = 0;
static int i = 0;
/* encode 1 frame of video */
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
//cout<<"\nBefore YUV\n";
if(count == 0)
read_yuv420(frame->data[0]);
count ++;
if(count == SUBSITY) {
count = 0;
}
frame->pts = i++;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
return -1;
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
memcpy(inbufout+totalSize,pkt.data,pkt.size);
totalSize+=pkt.size;
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
return 0;
}
void cancle_encode(void)
{
fclose(f);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
avcodec_free_frame(&frame);
}
int main(int argc, char **argv)
{
int i;
char inbufout[25*50*(INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE)];
if(init_v4l2() < 0) {
printf("can't open camera\n");
return 0;
}
/* register all the codecs */
avcodec_register_all();
for(int j=0;j<2;j++){
//init_video_encode("test.mpg", AV_CODEC_ID_MPEG1VIDEO, 15);
init_video_encode("test.mpg", AV_CODEC_ID_H264, 15);
//for(i = 0;i< 10*15;i++ ) {
for(i = 0;i< 25*10;i++ ) {
if(video_encode(i,inbufout) < 0)
return 0;
}
cout<<"\n"<<totalSize<<"\n"<<endl;
video_decode_example(inbufout);
cancle_encode();
totalSize=0;
}
exit_v4l2();
return 0;
}
You need to include a parser. The FFmpeg MPEG-1/2 decoders happen to work fine without a parser, but H.264/MPEG-4/VP9 need one, or you'll get errors like the above.
Note that if you use libavformat for demuxing and call av_read_frame(), it parses for you automatically; but since you're doing the buffer management yourself, you need to run the parser yourself as well.
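Applied to the code in the question, running the raw bytes through a parser before the decoder might look roughly like this (a sketch reusing the question's names, not a tested drop-in):
AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
uint8_t *data = inbuf;      // raw bytes copied from inbufout
int data_size = INBUF_SIZE; // bytes currently available

while (data_size > 0) {
    uint8_t *out_data = NULL;
    int out_size = 0;
    // The parser buffers input internally and emits one complete
    // packet (access unit) at a time through out_data/out_size.
    int len = av_parser_parse2(parser, c, &out_data, &out_size,
                               data, data_size,
                               AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
    data += len;
    data_size -= len;
    if (out_size > 0) {
        AVPacket parsed_pkt;
        av_init_packet(&parsed_pkt);
        parsed_pkt.data = out_data;
        parsed_pkt.size = out_size;
        decode_write_frame(c, frameDecode, &frame_count, &parsed_pkt, 0);
    }
}
av_parser_close(parser);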
I have made a small application to extract audio from an MP4 file, or simply convert an existing audio file to AAC/MP4 format (either raw AAC or inside an MP4 container). I have run this application with existing MP4 files as input, and it properly extracts audio and encodes to MP4 (audio only: AAC), or even directly to AAC (i.e. test.aac also works). But when I run it on MP3 files, the output clip plays back faster than it should (a clip of 1:12 plays back in only 1:05).
Edit: I have improved the code; it no longer plays back faster, but the conversion still stops at 1:05 and the rest of the clip is missing (about 89% is converted, 11% remaining).
Here is the code I have written to achieve this:
////////////////////////////////////////////////
#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <map>
#include <deque>
#include <queue>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <conio.h>
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/dict.h"
#include "libavutil/error.h"
#include "libavutil/opt.h"
#include <libavutil/fifo.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}
AVFormatContext* fmt_ctx= NULL;
int audio_stream_index = -1;
AVCodecContext * codec_ctx_audio = NULL;
AVCodec* codec_audio = NULL;
AVFrame* decoded_frame = NULL;
uint8_t** audio_dst_data = NULL;
int got_frame = 0;
int audiobufsize = 0;
AVPacket input_packet;
int audio_dst_linesize = 0;
int audio_dst_bufsize = 0;
SwrContext * swr = NULL;
AVOutputFormat * output_format = NULL ;
AVFormatContext * output_fmt_ctx= NULL;
AVStream * audio_st = NULL;
AVCodec * audio_codec = NULL;
double audio_pts = 0.0;
AVFrame * out_frame = avcodec_alloc_frame();
int audio_input_frame_size = 0;
uint8_t * audio_data_buf = NULL;
uint8_t * audio_out = NULL;
int audio_bit_rate;
int audio_sample_rate;
int audio_channels;
int decode_packet();
int open_audio_input(char* src_filename);
int decode_frame();
int open_encoder(char* output_filename);
AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id);
int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st);
void close_audio(AVFormatContext *oc, AVStream *st);
void write_audio_frame(uint8_t ** audio_src_data, int audio_src_bufsize);
int open_audio_input(char* src_filename)
{
int i =0;
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0)
{
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
// Retrieve stream information
if(avformat_find_stream_info(fmt_ctx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(fmt_ctx, 0, src_filename, 0);
// Find the first video stream
for(i=0; i<fmt_ctx->nb_streams; i++)
{
if(fmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
{
audio_stream_index=i;
break;
}
}
if ( audio_stream_index != -1 )
{
// Get a pointer to the codec context for the audio stream
codec_ctx_audio=fmt_ctx->streams[audio_stream_index]->codec;
// Find the decoder for the video stream
codec_audio=avcodec_find_decoder(codec_ctx_audio->codec_id);
if(codec_audio==NULL) {
fprintf(stderr, "Unsupported audio codec!\n");
return -1; // Codec not found
}
// Open codec
AVDictionary *codecDictOptions = NULL;
if(avcodec_open2(codec_ctx_audio, codec_audio, &codecDictOptions)<0)
return -1; // Could not open codec
// Set up SWR context once you've got codec information
swr = swr_alloc();
av_opt_set_int(swr, "in_channel_layout", codec_ctx_audio->channel_layout, 0);
av_opt_set_int(swr, "out_channel_layout", codec_ctx_audio->channel_layout, 0);
av_opt_set_int(swr, "in_sample_rate", codec_ctx_audio->sample_rate, 0);
av_opt_set_int(swr, "out_sample_rate", codec_ctx_audio->sample_rate, 0);
av_opt_set_sample_fmt(swr, "in_sample_fmt", codec_ctx_audio->sample_fmt, 0);
av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
swr_init(swr);
// Allocate audio frame
if ( decoded_frame == NULL ) decoded_frame = avcodec_alloc_frame();
int nb_planes = 0;
AVStream* audio_stream = fmt_ctx->streams[audio_stream_index];
nb_planes = av_sample_fmt_is_planar(codec_ctx_audio->sample_fmt) ? codec_ctx_audio->channels : 1;
int tempSize = sizeof(uint8_t *) * nb_planes;
audio_dst_data = (uint8_t**)av_mallocz(tempSize);
if (!audio_dst_data)
{
fprintf(stderr, "Could not allocate audio data buffers\n");
}
else
{
for ( int i = 0 ; i < nb_planes ; i ++ )
{
audio_dst_data[i] = NULL;
}
}
}
return 0;
}
int decode_frame()
{
int rv = 0;
got_frame = 0;
if ( fmt_ctx == NULL )
{
return rv;
}
int ret = 0;
audiobufsize = 0;
rv = av_read_frame(fmt_ctx, &input_packet);
if ( rv < 0 )
{
return rv;
}
rv = decode_packet();
// Free the input_packet that was allocated by av_read_frame
//av_free_packet(&input_packet);
return rv;
}
int decode_packet()
{
int rv = 0;
int ret = 0;
//audio stream?
if(input_packet.stream_index == audio_stream_index)
{
/* decode audio frame */
rv = avcodec_decode_audio4(codec_ctx_audio, decoded_frame, &got_frame, &input_packet);
if (rv < 0)
{
fprintf(stderr, "Error decoding audio frame\n");
//return ret;
}
else
{
if (got_frame)
{
if ( audio_dst_data[0] == NULL )
{
ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, decoded_frame->channels,
decoded_frame->nb_samples, (AVSampleFormat)decoded_frame->format, 1);
if (ret < 0)
{
fprintf(stderr, "Could not allocate audio buffer\n");
return AVERROR(ENOMEM);
}
/* TODO: extend return code of the av_samples_* functions so that this call is not needed */
audio_dst_bufsize = av_samples_get_buffer_size(NULL, audio_st->codec->channels,
decoded_frame->nb_samples, (AVSampleFormat)decoded_frame->format, 1);
//int16_t* outputBuffer = ...;
swr_convert( swr, audio_dst_data, out_frame->nb_samples, (const uint8_t**) decoded_frame->extended_data, decoded_frame->nb_samples );
}
/* copy audio data to destination buffer:
* this is required since rawaudio expects non aligned data */
//av_samples_copy(audio_dst_data, decoded_frame->data, 0, 0,
// decoded_frame->nb_samples, decoded_frame->channels, (AVSampleFormat)decoded_frame->format);
}
}
}
return rv;
}
int open_encoder(char* output_filename )
{
int rv = 0;
/* allocate the output media context */
AVOutputFormat *opfmt = NULL;
avformat_alloc_output_context2(&output_fmt_ctx, opfmt, NULL, output_filename);
if (!output_fmt_ctx) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&output_fmt_ctx, NULL, "mpeg", output_filename);
}
if (!output_fmt_ctx) {
rv = -1;
}
else
{
output_format = output_fmt_ctx->oformat;
}
/* Add the audio stream using the default format codecs
* and initialize the codecs. */
audio_st = NULL;
if ( output_fmt_ctx )
{
if (output_format->audio_codec != AV_CODEC_ID_NONE)
{
audio_st = add_audio_stream(output_fmt_ctx, &audio_codec, output_format->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (audio_st)
{
rv = open_audio(output_fmt_ctx, audio_codec, audio_st);
if ( rv < 0 ) return rv;
}
av_dump_format(output_fmt_ctx, 0, output_filename, 1);
/* open the output file, if needed */
if (!(output_format->flags & AVFMT_NOFILE))
{
if (avio_open(&output_fmt_ctx->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
fprintf(stderr, "Could not open '%s'\n", output_filename);
rv = -1;
}
else
{
/* Write the stream header, if any. */
if (avformat_write_header(output_fmt_ctx, NULL) < 0)
{
fprintf(stderr, "Error occurred when opening output file\n");
rv = -1;
}
}
}
}
return rv;
}
AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the audio encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find codec\n");
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = 1;
c = st->codec;
/* put sample parameters */
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = audio_bit_rate;
c->sample_rate = audio_sample_rate;
c->channels = audio_channels;
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret=0;
AVCodecContext *c;
st->duration = fmt_ctx->duration;
c = st->codec;
/* open it */
ret = avcodec_open2(c, codec, NULL) ;
if ( ret < 0)
{
fprintf(stderr, "could not open codec\n");
return -1;
//exit(1);
}
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
audio_input_frame_size = 10000;
else
audio_input_frame_size = c->frame_size;
int tempSize = audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels;
return ret;
}
void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
}
void write_audio_frame(uint8_t ** audio_src_data, int audio_src_bufsize)
{
AVFormatContext *oc = output_fmt_ctx;
AVStream *st = audio_st;
if ( oc == NULL || st == NULL ) return;
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
int got_packet;
av_init_packet(&pkt);
c = st->codec;
out_frame->nb_samples = audio_input_frame_size;
int buf_size = audio_src_bufsize *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels;
avcodec_fill_audio_frame(out_frame, c->channels, c->sample_fmt,
(uint8_t *) *audio_src_data,
buf_size, 1);
avcodec_encode_audio2(c, &pkt, out_frame, &got_packet);
if (!got_packet)
{
}
else
{
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, st->codec->time_base, st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, st->codec->time_base, st->time_base);
if ( c && c->coded_frame && c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.flags |= AV_PKT_FLAG_KEY;
/* Write the compressed frame to the media file. */
if (av_interleaved_write_frame(oc, &pkt) != 0)
{
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
}
}
av_free_packet(&pkt);
}
void write_delayed_frames(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c = st->codec;
int got_output = 0;
int ret = 0;
AVPacket pkt;
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int i = 0;
for (got_output = 1; got_output; i++)
{
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
if (ret < 0)
{
fprintf(stderr, "error encoding frame\n");
exit(1);
}
static int64_t tempPts = 0;
static int64_t tempDts = 0;
/* If size is zero, it means the image was buffered. */
if (got_output)
{
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, st->codec->time_base, st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, st->codec->time_base, st->time_base);
if ( c && c->coded_frame && c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
}
else
{
ret = 0;
}
av_free_packet(&pkt);
}
}
int main(int argc, char **argv)
{
/* register all formats and codecs */
av_register_all();
avcodec_register_all();
avformat_network_init();
avdevice_register_all();
int i =0;
char src_filename[90] = "mp3.mp3";
char dst_filename[90] = "test.mp4";
open_audio_input(src_filename);
audio_bit_rate = codec_ctx_audio->bit_rate;
audio_sample_rate = codec_ctx_audio->sample_rate;
audio_channels = codec_ctx_audio->channels;
open_encoder( dst_filename );
while(1)
{
int rv = decode_frame();
if ( rv < 0 )
{
break;
}
if (audio_st)
{
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num /
audio_st->time_base.den;
}
else
{
audio_pts = 0.0;
}
if ( codec_ctx_audio )
{
if ( got_frame)
{
write_audio_frame( audio_dst_data, audio_dst_bufsize );
}
}
if ( audio_dst_data[0] )
{
av_freep(&audio_dst_data[0]);
audio_dst_data[0] = NULL;
}
av_free_packet(&input_packet);
printf("\naudio_pts: %.3f", audio_pts);
}
write_delayed_frames( output_fmt_ctx, audio_st );
av_write_trailer(output_fmt_ctx);
close_audio( output_fmt_ctx, audio_st);
swr_free(&swr);
avcodec_free_frame(&out_frame);
return 0;
}
///////////////////////////////////////////////
I have been looking at this problem from many angles for about two days now, but I can't seem to figure out what I'm doing wrong.
Note also: the printf() statement I inserted shows audio_pts going up to 64.551 (about 1:05, which again shows the encoder does not reach the full duration of the input file, 1:12).
Can anyone please guide me on what I may be doing wrong?
Thanks in advance for any guidance!
P.S. When run through the command line, e.g. ffmpeg -i test.mp3 test.mp4, FFmpeg converts the file just fine.
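One observation that may be relevant: an MP3 decoder produces 1152 samples per frame, while the AAC encoder consumes c->frame_size samples per frame (typically 1024), and 1024/1152 is roughly 0.89, which matches the 89% figure above. The usual way to handle such a mismatch is to buffer decoded samples and feed the encoder exactly frame_size samples at a time, for example with libavutil's audio FIFO. A rough sketch under that assumption (it also assumes write_audio_frame is adjusted to take a sample count, which the code above does not do yet):
#include <libavutil/audio_fifo.h>

// Allocate once, after the encoder has been opened.
AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, audio_channels, 1);

// After each decode: 'converted' is the sample count swr_convert() returned.
int converted = swr_convert(swr, audio_dst_data, decoded_frame->nb_samples,
                            (const uint8_t **)decoded_frame->extended_data,
                            decoded_frame->nb_samples);
av_audio_fifo_write(fifo, (void **)audio_dst_data, converted);

// Encode only full encoder-sized frames; leftover samples stay in the
// FIFO until the next decoded packet (or a final flush) completes them.
while (av_audio_fifo_size(fifo) >= audio_input_frame_size) {
    av_audio_fifo_read(fifo, (void **)audio_dst_data, audio_input_frame_size);
    write_audio_frame(audio_dst_data, audio_input_frame_size);
}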