Imagine that in my project I receive RTP packets with payload type 8, in order to later save this payload as the Nth part of an audio track. I extract the payload from each RTP packet and store it in a temporary buffer:
...
while ((rtp = receiveRtpPackets()).withoutErrors()) {
payloadData.push(rtp.getPayloadData());
}
audioGenerator.setPayloadData(payloadData);
audioGenerator.recordToFile();
...
Once the temporary buffer has been filled to a certain size with this payload, I process it: I take the accumulated payload and encode it with ffmpeg so it can be saved to an audio file in Matroska format. And here is my problem. Since the RTP payload type is 8, the raw audio data is in pcm_alaw format and has to be stored in an MKA audio file. But when I save the raw pcm_alaw data to the audio file, I get these messages from the library:
...
[libopus # 0x18eff60] Queue input is backward in time
[libopus # 0x18eff60] Queue input is backward in time
[libopus # 0x18eff60] Queue input is backward in time
[libopus # 0x18eff60] Queue input is backward in time
...
When I open the audio file in VLC, nothing is played (the audio track's timestamps are missing).
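As far as I understand, this message appears when the frames queued for the encoder do not have strictly increasing pts. My assumption (not something I have confirmed) is that each frame's pts must advance by nb_samples in the codec time base {1, sample_rate}, roughly like in this sketch:
extern "C" {
#include <libavcodec/avcodec.h>
}
// Sketch only (my assumption): pts is counted in the encoder's time_base,
// i.e. in samples when time_base == {1, sample_rate}, so consecutive frames
// must advance by nb_samples rather than by 1.
static void sendFramesWithMonotonicPts(AVCodecContext* enc, AVFrame* frame, int frameCount)
{
    int64_t pts = 0;                        // running position in samples
    for (int i = 0; i < frameCount; ++i) {
        // ... fill frame->data[0] with the next chunk of samples here ...
        frame->pts = pts;
        pts += frame->nb_samples;           // not "pts++"
        avcodec_send_frame(enc, frame);     // drain with avcodec_receive_packet()
    }
}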
The task of my project is simply to take the pcm_alaw data and pack it into a container in MKA format. The best way to determine the codec is the av_guess_codec() function, which automatically selects the desired codec ID. But I do not know how to pack the raw data into the container correctly.
It is important to note that the raw data I receive can be in any audio format defined by the RTP payload type (any of the RTP payload types). All I know is that, in every case, I have to pack the audio data into an MKA container.
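For clarity, here is a minimal sketch of what I am aiming for: muxing the raw A-law bytes into Matroska without transcoding at all, by describing the stream with codecpar only. This is just my assumption of how it could look (codec ID AV_CODEC_ID_PCM_ALAW, 8 kHz, mono, pts counted in samples), not verified code:
extern "C" {
#include <libavformat/avformat.h>
}
#include <algorithm>
#include <cstdint>
// Sketch only (my assumption, not verified): mux already-encoded A-law bytes
// into an MKA file without re-encoding. Error handling is kept to a minimum.
bool muxAlawToMka(const uint8_t* alaw, int totalSize, const char* fileName)
{
    AVFormatContext* oc = nullptr;
    if (avformat_alloc_output_context2(&oc, nullptr, "matroska", fileName) < 0)
        return false;

    AVStream* st = avformat_new_stream(oc, nullptr);
    if (!st) { avformat_free_context(oc); return false; }

    // Describe the payload; no encoder is opened because nothing is transcoded.
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = AV_CODEC_ID_PCM_ALAW;
    st->codecpar->sample_rate = 8000;
    st->codecpar->channels = 1;
    st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
    st->codecpar->bits_per_coded_sample = 8;
    st->time_base = { 1, 8000 };                       // 1 tick = 1 sample

    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        avio_open(&oc->pb, fileName, AVIO_FLAG_WRITE) < 0) {
        avformat_free_context(oc);
        return false;
    }
    if (avformat_write_header(oc, nullptr) < 0) {
        avformat_free_context(oc);
        return false;
    }

    const int chunk = 1600;                            // 200 ms at 8 kHz (arbitrary)
    for (int pos = 0; pos < totalSize; pos += chunk) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = const_cast<uint8_t*>(alaw) + pos;
        pkt.size = std::min(chunk, totalSize - pos);
        pkt.pts = pkt.dts = pos;                       // in samples: 1 byte == 1 sample for mono A-law
        pkt.duration = pkt.size;
        pkt.stream_index = 0;
        // The muxer may rewrite st->time_base in write_header, so rescale.
        av_packet_rescale_ts(&pkt, AVRational{ 1, 8000 }, st->time_base);
        if (av_interleaved_write_frame(oc, &pkt) < 0)
            break;
    }

    av_write_trailer(oc);
    avio_closep(&oc->pb);
    avformat_free_context(oc);
    return true;
}
If this direction is correct, other payload types should work the same way by changing codec_id and sample_rate accordingly.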
I also attach the code (borrowed from this resource) that I use:
audiogenerater.h
extern "C"
{
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswresample/swresample.h"
}
class AudioGenerater
{
public:
AudioGenerater();
~AudioGenerater() = default;
void generateAudioFileWithOptions(
QString fileName,
QByteArray pcmData,
int channel,
int bitRate,
int sampleRate,
AVSampleFormat format);
private:
// init Format
bool initFormat(QString audioFileName);
private:
AVCodec *m_AudioCodec = nullptr;
AVCodecContext *m_AudioCodecContext = nullptr;
AVFormatContext *m_FormatContext = nullptr;
AVOutputFormat *m_OutputFormat = nullptr;
};
audiogenerater.cpp
AudioGenerater::AudioGenerater()
{
av_register_all();
avcodec_register_all();
}
AudioGenerater::~AudioGenerater()
{
// ...
}
bool AudioGenerater::initFormat(QString audioFileName)
{
// Create an output Format context
int result = avformat_alloc_output_context2(&m_FormatContext, nullptr, nullptr, audioFileName.toLocal8Bit().data());
if (result < 0) {
return false;
}
m_OutputFormat = m_FormatContext->oformat;
// Create an audio stream
AVStream* audioStream = avformat_new_stream(m_FormatContext, m_AudioCodec);
if (audioStream == nullptr) {
avformat_free_context(m_FormatContext);
return false;
}
// Set the parameters in the stream
audioStream->id = m_FormatContext->nb_streams - 1;
audioStream->time_base = { 1, 8000 };
result = avcodec_parameters_from_context(audioStream->codecpar, m_AudioCodecContext);
if (result < 0) {
avformat_free_context(m_FormatContext);
return false;
}
// Print FormatContext information
av_dump_format(m_FormatContext, 0, audioFileName.toLocal8Bit().data(), 1);
// Open file IO
if (!(m_OutputFormat->flags & AVFMT_NOFILE)) {
result = avio_open(&m_FormatContext->pb, audioFileName.toLocal8Bit().data(), AVIO_FLAG_WRITE);
if (result < 0) {
avformat_free_context(m_FormatContext);
return false;
}
}
return true;
}
void AudioGenerater::generateAudioFileWithOptions(
QString _fileName,
QByteArray _pcmData,
int _channel,
int _bitRate,
int _sampleRate,
AVSampleFormat _format)
{
AVFormatContext* oc;
if (avformat_alloc_output_context2(
&oc, nullptr, nullptr, _fileName.toStdString().c_str())
< 0) {
qDebug() << "Error in line: " << __LINE__;
return;
}
if (!oc) {
printf("Could not deduce output format from file extension: using mka.\n");
avformat_alloc_output_context2(
&oc, nullptr, "mka", _fileName.toStdString().c_str());
}
if (!oc) {
qDebug() << "Error in line: " << __LINE__;
return;
}
AVOutputFormat* fmt = oc->oformat;
if (fmt->audio_codec == AV_CODEC_ID_NONE) {
qDebug() << "Error in line: " << __LINE__;
return;
}
AVCodecID codecID = av_guess_codec(
fmt, nullptr, _fileName.toStdString().c_str(), nullptr, AVMEDIA_TYPE_AUDIO);
// Find Codec
m_AudioCodec = avcodec_find_encoder(codecID);
if (m_AudioCodec == nullptr) {
qDebug() << "Error in line: " << __LINE__;
return;
}
// Create an encoder context
m_AudioCodecContext = avcodec_alloc_context3(m_AudioCodec);
if (m_AudioCodecContext == nullptr) {
qDebug() << "Error in line: " << __LINE__;
return;
}
// Setting parameters
m_AudioCodecContext->bit_rate = _bitRate;
m_AudioCodecContext->sample_rate = _sampleRate;
m_AudioCodecContext->sample_fmt = _format;
m_AudioCodecContext->channels = _channel;
m_AudioCodecContext->channel_layout = av_get_default_channel_layout(_channel);
m_AudioCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// Turn on the encoder
int result = avcodec_open2(m_AudioCodecContext, m_AudioCodec, nullptr);
if (result < 0) {
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
// Create a package
if (!initFormat(_fileName)) {
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
// write to the file header
result = avformat_write_header(m_FormatContext, nullptr);
if (result < 0) {
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
// Create Frame
AVFrame* frame = av_frame_alloc();
if (frame == nullptr) {
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
int nb_samples = 0;
if (m_AudioCodecContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) {
nb_samples = 10000;
}
else {
nb_samples = m_AudioCodecContext->frame_size;
}
// Set the parameters of the Frame
frame->nb_samples = nb_samples;
frame->format = m_AudioCodecContext->sample_fmt;
frame->channel_layout = m_AudioCodecContext->channel_layout;
// Apply for data memory
result = av_frame_get_buffer(frame, 0);
if (result < 0) {
av_frame_free(&frame);
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
// Set the Frame to be writable
result = av_frame_make_writable(frame);
if (result < 0) {
av_frame_free(&frame);
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
int perFrameDataSize = frame->linesize[0];
int count = _pcmData.size() / perFrameDataSize;
bool needAddOne = false;
if (_pcmData.size() % perFrameDataSize != 0) {
count++;
needAddOne = true;
}
int frameCount = 0;
for (int i = 0; i < count; ++i) {
// Create a Packet
AVPacket* pkt = av_packet_alloc();
if (pkt == nullptr) {
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
return;
}
av_init_packet(pkt);
if (i == count - 1)
perFrameDataSize = _pcmData.size() % perFrameDataSize;
// Synthesize WAV files
memset(frame->data[0], 0, perFrameDataSize);
memcpy(frame->data[0], &(_pcmData.data()[perFrameDataSize * i]), perFrameDataSize);
frame->pts = frameCount++;
// send Frame
result = avcodec_send_frame(m_AudioCodecContext, frame);
if (result < 0)
continue;
// Receive the encoded Packet
result = avcodec_receive_packet(m_AudioCodecContext, pkt);
if (result < 0) {
av_packet_free(&pkt);
continue;
}
// write to file
av_packet_rescale_ts(pkt, m_AudioCodecContext->time_base, m_FormatContext->streams[0]->time_base);
pkt->stream_index = 0;
result = av_interleaved_write_frame(m_FormatContext, pkt);
if (result < 0)
continue;
av_packet_free(&pkt);
}
// write to the end of the file
av_write_trailer(m_FormatContext);
// Close file IO
avio_closep(&m_FormatContext->pb);
// Release Frame memory
av_frame_free(&frame);
avcodec_free_context(&m_AudioCodecContext);
if (m_FormatContext != nullptr)
avformat_free_context(m_FormatContext);
}
main.cpp
int main(int argc, char **argv)
{
av_log_set_level(AV_LOG_TRACE);
QFile file("rawDataOfPcmAlawType.bin");
if (!file.open(QIODevice::ReadOnly)) {
return EXIT_FAILURE;
}
QByteArray rawData(file.readAll());
AudioGenerater generator;
generator.generateAudioFileWithOptions(
"test.mka",
rawData,
1,
64000,
8000,
AV_SAMPLE_FMT_S16);
return 0;
}
It is IMPORTANT that you help me find the most appropriate way to write pcm_alaw (or any other audio data format) into an MKA audio file.
I ask everyone who knows anything about this to help (there is very little time left to finish this project).
These useful links will help you:
A good overview of the data processing sequence in libav: ffmpeg-libav-tutorial
Examples from the ffmpeg developers themselves: avio_reading, resampling_audio, transcode_aac
I am using libx264 compiled from source. It was configured to produce both a .dll and a .lib with this command:
./configure --disable-cli --enable-shared --extra-ldflags=-Wl,--output-def=libx264.def
I am using the libx264 API in my screen-sharing program with preset "veryfast", tune "zerolatency", profile "high", and the following settings.
param.i_csp = X264_CSP_BGRA;
param.i_threads = 1;
param.i_width = width;
param.i_height = height;
param.i_fps_num = fps;
param.i_fps_den = 1;
param.rc.i_bitrate = bitrate;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
With these settings the program works fine. I make it single-threaded by setting param.i_threads = 1.
If this line is removed, x264 defaults to multi-threading and automatically sets the thread count to roughly 1.5x the number of CPU cores, which gives better performance than running single-threaded.
But when I remove param.i_threads = 1 to make it multi-threaded, the generated output is completely grey. I cannot see any output when I view the live stream with VLC, or sometimes I see a corrupted picture.
I am using this bitmap image as an example (https://imgur.com/a/l8LCd1l). Only this same image is being encoded repeatedly. When it is saved into an .h264 file, it is clearly viewable. But when the encoded payload is sent over RTMP, the live stream produces very bad, corrupted output (or sometimes no output at all). This is the corrupted output I see most of the time for this image: https://imgur.com/a/VdyX1Zm
Below is the full example code in which I both stream and write a video file of the same picture. It uses the srs-librtmp library. There is no error, but the stream output is corrupted.
In this code, the output stream is only viewable if you add param.i_threads = 1;. The problem is that it should be viewable with both single-threaded and multi-threaded encoding.
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <x264.h>
#include "srs_librtmp.h"
#pragma comment(lib, "C:/Softwares/x264/libx264.lib")
using namespace std;
int check_ret(int ret);
int main()
{
int dts = 0;
x264_param_t param;
x264_t* h;
x264_nal_t* nals;
int i_nal;
int pts = 0;
int i_frame_size;
x264_picture_t picIn;
x264_picture_t picOut;
x264_param_default_preset(&param, "veryfast", "zerolatency");
//x264 settings
param.i_csp = X264_CSP_BGRA;
param.i_width = 1920;
param.i_height = 1080;
param.i_fps_num = 30;
param.i_fps_den = 1;
param.rc.i_bitrate = 2500;
param.rc.i_rc_method = X264_RC_ABR;
param.rc.b_filler = true;
param.rc.f_rf_constant = (float)0;
param.rc.i_vbv_max_bitrate = param.rc.i_bitrate;
param.rc.i_vbv_buffer_size = param.rc.i_bitrate;
param.b_repeat_headers = 0;
param.b_annexb = 1;
x264_param_apply_profile(&param, "high");
h = x264_encoder_open(¶m);
//allocate picture
x264_picture_alloc(&picIn, param.i_csp, param.i_width, param.i_height);
//picture settings
picIn.img.i_plane = 1;
picIn.img.i_stride[0] = 4 * param.i_width;
picIn.i_type = X264_TYPE_AUTO;
int header_size = x264_encoder_headers(h, &nals, &i_nal);
FILE* fptr;
fopen_s(&fptr, "example1.h264", "wb");
// write sps and pps in the video file
fwrite(nals->p_payload, header_size, 1, fptr);
int size = 1920 * 1080 * 4;
char* bmp = new char[size];
FILE* bitptr;
errno_t err = fopen_s(&bitptr, "flower.bmp", "rb");
fseek(bitptr, 54, SEEK_SET);
fread(bmp, size, 1, bitptr);
fclose(bitptr);
srs_rtmp_t rtmp = srs_rtmp_create("127.0.0.1:1935/live/test");
if (srs_rtmp_handshake(rtmp) != 0)
{
std::cout << "Simple handshake failed.";
return -1;
}
std::cout << "Handshake completed successfully.\n";
if (srs_rtmp_connect_app(rtmp) != 0) {
std::cout << "Connecting to host failed.";
return -1;
}
std::cout << "Connected to host successfully.\n";
if (srs_rtmp_publish_stream(rtmp) != 0) {
std::cout << "Publish signal failed.";
}
std::cout << "Publish signal success\n";
// write sps and pps in the live stream
int ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nals->p_payload), header_size, 0, 0);
ret = check_ret(ret);
if (!ret)
return -1;
std::cout << "SPS and PPS sent.\n";
// main loop
std::cout << "Now streaming and encoding\n";
int i = 1800;
while (i--)
{
picIn.img.plane[0] = reinterpret_cast<uint8_t*>(bmp);
picIn.i_pts = pts++;
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, &picIn, &picOut);
if (i_frame_size)
{
for (int j = 0; j < i_nal; j++)
{
x264_nal_t* nal = nals + j;
// write data in the video file
fwrite(nal->p_payload, nal->i_payload, 1, fptr);
// write data in the live stream
ret = srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nal->p_payload), nal->i_payload, dts, dts);
ret = check_ret(ret);
if (!ret)
{
return -1;
}
}
}
else
{
std::cout << "i_frame_size = 0 (encoder failed)\n";
}
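// advance the RTMP timestamp by ~33 ms, i.e. one frame interval at 30 fps (RTMP timestamps are in milliseconds)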
dts += 33;
}
while (x264_encoder_delayed_frames(h))
{
i_frame_size = x264_encoder_encode(h, &nals, &i_nal, NULL, &picOut);
if (i_frame_size)
{
fwrite(nals->p_payload, i_frame_size, 1, fptr);
}
}
std::cout << "\nAll done\n";
std::cout << "Output video is example1.h264 and it is viewable in VLC";
return 0;
}
int check_ret(int ret)
{
if (ret != 0) {
if (srs_h264_is_dvbsp_error(ret)) {
srs_human_trace("ignoring drop video error, code=%d", ret);
}
else if (srs_h264_is_duplicated_sps_error(ret)) {
srs_human_trace("ignoring duplicated sps, code=%d", ret);
}
else if (srs_h264_is_duplicated_pps_error(ret)) {
srs_human_trace("ignoring duplicated pps, code=%d", ret);
}
else {
srs_human_trace("sending h264 raw data failed. ret=%d", ret);
return 0;
}
}
return 1;
}
If you would like to download the original flower.bmp file, here is the link: https://gofile.io/d/w2kX56
This error can also be reproduced with any other bmp file.
Please tell me what is causing this problem when multi-threading is enabled. Am I setting the wrong values? Is the code with which I am streaming the encoded data wrong?
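One thing I am not sure about is the timestamp handling: I send dts as both dts and pts and increment it by a fixed 33 ms, independently of what the encoder reports. I do not know whether this is related to the grey output, but here is a sketch of what I might try instead (only an assumption on my side: reusing the pts/dts that x264 returns in picOut and converting them to milliseconds for srs_h264_write_raw_frames):
#include <stdint.h>
#include <x264.h>
#include "srs_librtmp.h"
// Sketch, not a confirmed fix: pts is fed to the encoder in frame numbers and the
// encoder's *output* timestamps from picOut are used for the RTMP write, in ms.
// Error handling and SPS/PPS sending are omitted.
static void encode_and_send(x264_t* h, srs_rtmp_t rtmp, x264_picture_t* picIn,
                            int64_t frame_index, int fps)
{
    x264_nal_t* nals = NULL;
    int i_nal = 0;
    x264_picture_t picOut;
    picIn->i_pts = frame_index;                     // input pts, in frames
    int i_frame_size = x264_encoder_encode(h, &nals, &i_nal, picIn, &picOut);
    if (i_frame_size <= 0)
        return;                                     // the encoder may hold frames back when threaded
    uint32_t dts_ms = (uint32_t)(picOut.i_dts * 1000 / fps);
    uint32_t pts_ms = (uint32_t)(picOut.i_pts * 1000 / fps);
    for (int j = 0; j < i_nal; j++)
        srs_h264_write_raw_frames(rtmp, reinterpret_cast<char*>(nals[j].p_payload),
                                  nals[j].i_payload, dts_ms, pts_ms);
}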
I am trying to decode a video stream from the browser using the ffmpeg API. The stream is produced by the webcam and recorded with MediaRecorder in webm format. What I ultimately need is a vector of OpenCV cv::Mat objects for further processing.
I have written a C++ webserver using the uWebsocket library. The video stream is sent via websocket from the browser to the server once per second. On the server, I append the received data to my custom buffer and decode it with the ffmpeg API.
If I just save the data on the disk and later I play it with a media player, it works fine. So, whatever the browser sends is a valid video.
I do not think I correctly understand how the custom IO should behave with network streaming, as nothing seems to be working.
The custom buffer:
struct Buffer
{
std::vector<uint8_t> data;
int currentPos = 0;
};
The readAVBuffer method for custom IO
int MediaDecoder::readAVBuffer(void* opaque, uint8_t* buf, int buf_size)
{
MediaDecoder::Buffer* mbuf = (MediaDecoder::Buffer*)opaque;
int count = 0;
for(int i=0;i<buf_size;i++)
{
int index = i + mbuf->currentPos;
if(index >= (int)mbuf->data.size())
{
break;
}
count++;
buf[i] = mbuf->data.at(index);
}
if(count > 0) mbuf->currentPos+=count;
std::cout << "read : "<<count<<" "<<mbuf->currentPos<<", buff size:"<<mbuf->data.size() << std::endl;
if(count <= 0) return AVERROR(EAGAIN); // is this the right error to return? It most likely cannot be EOF, since we're not done yet
return count;
}
The big decode method, which is supposed to return whatever frames it could read:
std::vector<cv::Mat> MediaDecoder::decode(const char* data, size_t length)
{
std::vector<cv::Mat> frames;
//add data to the buffer
for(size_t i=0;i<length;i++) {
buf.data.push_back(data[i]);
}
//do not invoke the decoders until we have 1MB of data
if(((buf.data.size() - buf.currentPos) < 1*1024*1024) && !initializedCodecs) return frames;
std::cout << "decoding data length "<<length<<std::endl;
if(!initializedCodecs) //initialize ffmpeg objects. Custom I/O, format, decoder, etc.
{
//these are just members of the class
avioCtxPtr = std::unique_ptr<AVIOContext,avio_context_deleter>(
avio_alloc_context((uint8_t*)av_malloc(4096),4096,0,&buf,&readAVBuffer,nullptr,nullptr),
avio_context_deleter());
if(!avioCtxPtr)
{
std::cerr << "Could not create IO buffer" << std::endl;
return frames;
}
fmt_ctx = std::unique_ptr<AVFormatContext,avformat_context_deleter>(avformat_alloc_context(),
avformat_context_deleter());
fmt_ctx->pb = avioCtxPtr.get();
fmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO ;
//fmt_ctx->max_analyze_duration = 2 * AV_TIME_BASE; // read 2 seconds of data
{
AVFormatContext *fmtCtxRaw = fmt_ctx.get();
if (avformat_open_input(&fmtCtxRaw, "", nullptr, nullptr) < 0) {
std::cerr << "Could not open movie" << std::endl;
return frames;
}
}
if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
std::cerr << "Could not find stream information" << std::endl;
return frames;
}
if((video_stream_idx = av_find_best_stream(fmt_ctx.get(), AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0)) < 0)
{
std::cerr << "Could not find video stream" << std::endl;
return frames;
}
AVStream *video_stream = fmt_ctx->streams[video_stream_idx];
AVCodec *dec = avcodec_find_decoder(video_stream->codecpar->codec_id);
video_dec_ctx = std::unique_ptr<AVCodecContext,avcodec_context_deleter> (avcodec_alloc_context3(dec),
avcodec_context_deleter());
if (!video_dec_ctx)
{
std::cerr << "Failed to allocate the video codec context" << std::endl;
return frames;
}
avcodec_parameters_to_context(video_dec_ctx.get(),video_stream->codecpar);
video_dec_ctx->thread_count = 1;
/* video_dec_ctx->max_b_frames = 0;
video_dec_ctx->frame_skip_threshold = 10;*/
AVDictionary *opts = nullptr;
av_dict_set(&opts, "refcounted_frames", "1", 0);
av_dict_set(&opts, "deadline", "1", 0);
av_dict_set(&opts, "auto-alt-ref", "0", 0);
av_dict_set(&opts, "lag-in-frames", "1", 0);
av_dict_set(&opts, "rc_lookahead", "1", 0);
av_dict_set(&opts, "drop_frame", "1", 0);
av_dict_set(&opts, "error-resilient", "1", 0);
int width = video_dec_ctx->width;
videoHeight = video_dec_ctx->height;
if(avcodec_open2(video_dec_ctx.get(), dec, &opts) < 0)
{
std::cerr << "Failed to open the video codec context" << std::endl;
return frames;
}
AVPixelFormat pFormat = AV_PIX_FMT_BGR24;
img_convert_ctx = std::unique_ptr<SwsContext,swscontext_deleter>(sws_getContext(width, videoHeight,
video_dec_ctx->pix_fmt, width, videoHeight, pFormat,
SWS_BICUBIC, nullptr, nullptr,nullptr),swscontext_deleter());
frame = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());
frameRGB = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());
int numBytes = av_image_get_buffer_size(pFormat, width, videoHeight,32 /*https://stackoverflow.com/questions/35678041/what-is-linesize-alignment-meaning*/);
std::unique_ptr<uint8_t,avbuffer_deleter> imageBuffer((uint8_t *) av_malloc(numBytes*sizeof(uint8_t)),avbuffer_deleter());
av_image_fill_arrays(frameRGB->data,frameRGB->linesize,imageBuffer.get(),pFormat,width,videoHeight,32);
frameRGB->width = width;
frameRGB->height = videoHeight;
initializedCodecs = true;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
int read_frame_return = 0;
while ( (read_frame_return=av_read_frame(fmt_ctx.get(), &pkt)) >= 0)
{
readFrame(&frames,&pkt,video_dec_ctx.get(),frame.get(),img_convert_ctx.get(),
videoHeight,frameRGB.get());
//if(cancelled) break;
}
avioCtxPtr->eof_reached = 0;
avioCtxPtr->error = 0;
//flush
// readFrame(frames.get(),nullptr,video_dec_ctx.get(),frame.get(),
// img_convert_ctx.get(),videoHeight,frameRGB.get());
avioCtxPtr->eof_reached = 0;
avioCtxPtr->error = 0;
if(frames.size() <= 0)
{
std::cout << "buffer pos: "<<buf.currentPos<<", buff size:"<<buf.data.size()
<<",read_frame_return:"<<read_frame_return<< std::endl;
}
return frames;
}
What I would expect is a continuous extraction of cv::Mat frames as I feed in more and more data. What actually happens is that, after the buffer has been fully read, I see:
[matroska,webm # 0x507b450] Read error at pos. 1278266 (0x13813a)
[matroska,webm # 0x507b450] Seek to desired resync point failed. Seeking to earliest point available instead.
After that, no more bytes are read from the buffer, even if I add more data to it later.
There is something terribly wrong I am doing here, and I don't understand what.
What I ended up doing was to move the reading of the incoming data and the actual decoding to a separate thread. The read callback now simply blocks when no more bytes are available, waiting until new data arrives.
When new bytes arrive, they are appended to the buffer and a condition_variable signals the waiting thread to wake up and continue reading from the buffer.
It works well enough.
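In case it helps someone else, this is roughly the shape of the blocking buffer and read callback I ended up with (a simplified sketch; the names are illustrative and error handling is trimmed):
#include <algorithm>
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <vector>
extern "C" {
#include <libavformat/avformat.h>
}
// Simplified sketch of the blocking custom-IO buffer described above.
struct BlockingBuffer
{
    std::vector<uint8_t> data;
    size_t currentPos = 0;
    bool finished = false;                  // set once no more data will ever arrive
    std::mutex mtx;
    std::condition_variable cv;

    // Called from the websocket thread whenever a new chunk arrives.
    void append(const uint8_t* p, size_t len)
    {
        {
            std::lock_guard<std::mutex> lock(mtx);
            data.insert(data.end(), p, p + len);
        }
        cv.notify_one();
    }
};
// avio read callback used by the decoding thread: blocks until data is available.
static int readBlocking(void* opaque, uint8_t* buf, int buf_size)
{
    BlockingBuffer* b = static_cast<BlockingBuffer*>(opaque);
    std::unique_lock<std::mutex> lock(b->mtx);
    b->cv.wait(lock, [b] { return b->currentPos < b->data.size() || b->finished; });
    if (b->currentPos >= b->data.size())
        return AVERROR_EOF;                 // the stream really ended
    size_t n = std::min<size_t>(buf_size, b->data.size() - b->currentPos);
    memcpy(buf, b->data.data() + b->currentPos, n);
    b->currentPos += n;
    return static_cast<int>(n);
}
The AVIOContext is created the same way as in the code above, just with this struct as the opaque pointer and readBlocking as the read callback.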
I just started using FFmpeg with C++ and am trying to write an audio decoder, then save the decoded audio into a file.
However, I'm not sure which data to write to the output file. As far as I know from looking at the sample code, it seems to be AVFrame->data[0].
But when I try to print it to the console, I get some random numbers that are different each time I run the program. And when I try to write this AVFrame->data[0] into a file, I keep getting an error.
So my question is: how can I write the decoded audio after I call avcodec_decode_audio4()?
Below I attach my code; I pass the argument "C:\02.mp3", which is the path of a valid mp3 file on my PC.
Thank you for your help.
// TestFFMPEG.cpp : Audio Decoder
//
#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <sstream>
extern "C" {
#include <avcodec.h>
#include <avformat.h>
#include <swscale.h>
}
using namespace std;
int main(int argc, char* argv[])
{
int audioStream = -1;
AVCodec *aCodec;
AVPacket avPkt;
AVFrame *decode_frame = avcodec_alloc_frame();
AVCodecContext *aCodecCtxt;
AVFormatContext *pFormatCtxt = NULL;
if(argc != 2) { // Checking whether there is enough argument
return -1;
}
av_register_all(); //Initialize CODEC
avformat_network_init();
av_init_packet (&avPkt);
if (avformat_open_input (&pFormatCtxt, argv[1],NULL,NULL)!= 0 ){ //Opening File
return -2;
}
if(avformat_find_stream_info (pFormatCtxt,NULL) < 0){ //Get Streams Info
return -3;
}
AVStream *stream = NULL;
//av_read_play (pFormatCtxt); //open streams
for (int i = 0; i < pFormatCtxt->nb_streams ; i++) { //Find Audio Stream
if (pFormatCtxt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
audioStream =i;
}
}
aCodecCtxt = pFormatCtxt ->streams [audioStream]->codec; // opening decoder
aCodec = avcodec_find_decoder( pFormatCtxt->streams [audioStream] ->codec->codec_id);
if (!aCodec) {
return -8;
}
if (avcodec_open2(aCodecCtxt,aCodec,NULL)!=0) {
return -9;
}
int cnt = 0;
while(av_read_frame(pFormatCtxt,&avPkt) >= 0 ){
if (avPkt.stream_index == audioStream){
int check = 0;
int result = avcodec_decode_audio4 (aCodecCtxt,decode_frame,&check, &avPkt);
cout << "Decoded : "<< (int) decode_frame->data[0] <<", "<< "Check : " << check << ", Format :" << decode_frame->format <<" " << decode_frame->linesize[0]<< " "<<cnt <<endl;
}
av_free_packet(&avPkt);
cnt++;
}
return aCodec ->id;
}
You're doing it right.
The decoded data is located at the pointer decode_frame->data[0]. Its size in bytes is decode_frame->linesize[0], and the number of audio samples is decode_frame->nb_samples.
Thus, you can copy the audio data into your own buffer as follows:
memcpy(OutputBuffer, decode_frame->data[0], decode_frame->linesize[0]);
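Note that for planar sample formats (for example AV_SAMPLE_FMT_FLTP, which many decoders output), each channel is stored in its own decode_frame->data[ch] plane, so copying only data[0] gives you just one channel. A small sketch of dumping all decoded samples of a frame to an already-open file (illustration only, the function name is made up):
#include <cstdio>
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}
// Write the raw decoded samples of one frame to `out`.
// Interleaved formats have a single plane; planar formats have one plane per channel.
static void writeDecodedFrame(const AVFrame* frame, FILE* out)
{
    const AVSampleFormat fmt = (AVSampleFormat)frame->format;
    const int bytesPerSample = av_get_bytes_per_sample(fmt);
    if (av_sample_fmt_is_planar(fmt)) {
        for (int ch = 0; ch < frame->channels; ++ch)
            fwrite(frame->data[ch], 1, (size_t)frame->nb_samples * bytesPerSample, out);
    } else {
        fwrite(frame->data[0], 1,
               (size_t)frame->nb_samples * bytesPerSample * frame->channels, out);
    }
}
Keep in mind that writing the planes back to back produces a planar dump; for a playable interleaved PCM file you would still need to interleave the channels (or convert with libswresample).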
You can try using ffms2, which will directly give you decoded audio samples. It internally uses ffmpeg/libav, so you don't have to worry about the decoding details.
https://code.google.com/p/ffmpegsource/
I am processing video with OpenCV, but at the same time I need to play the audio and have simple control over it, such as the volume or the current frame number.
I think I should create a parallel process with ffmpeg, but I don't know how to do so. Can you explain what to do?
Or do you know another solution?
I think ffmpeg should be used to play audio and SDL for video in this case.
After opening the file with OpenCV and processing the frame, you can use OpenCV -> SDL to display it while retrieving the audio frames through ffmpeg and playing them with SDL.
Here is a nice collection of ffmpeg/SDL tutorials!
I also found a nice post that shows how to capture frames from a video file using ffmpeg, store them in OpenCV cv::Mat and display the result in an OpenCV window. But this way you can't play audio, since OpenCV doesn't deal with that.
You might be interested in reading this post as well: How to avoid a growing delay with ffmpeg between sound and raw video data ?
EDIT:
I spent the last 4hrs coding a prototype to demonstrate how it's done. This demo reads video frames through OpenCV (so you can process them) and audio through ffmpeg, and SDL is used to play both! There are 2 limitations in this demo you must be aware of: 1 - it assumes you are working with an OpenCV image packed as BGR (24 bits), and 2 - audio and video are not being synced! Yes, I left some work for you to do (yeeeey). But don't panic, page 6 has some ideas!
It's important to sync audio and video because you will be doing some processing on the frames, and that will certainly make the video and audio go out of sync very quickly, since they are being played independently of each other.
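A minimal sketch of the usual idea (sync the video to the audio clock) looks roughly like this; the names are made up and it is not wired into the demo code below:
#include <SDL.h>
#include <stdint.h>
// Derive an "audio clock" from how much audio has been handed to the sound card,
// then delay each video frame until its pts catches up with that clock.
// (Thread-safety between the audio callback and the video loop is ignored here.)
static double g_audio_clock = 0.0;      // seconds of audio played so far
// Call from the audio callback after copying `bytes` of PCM to SDL:
static void advance_audio_clock(int bytes, int sample_rate, int channels, int bytes_per_sample)
{
    g_audio_clock += (double)bytes / (sample_rate * channels * bytes_per_sample);
}
// Call in the video loop before showing a frame whose pts is `video_pts` seconds:
static void wait_for_audio(double video_pts)
{
    double delay = video_pts - g_audio_clock;
    if (delay > 0.0)
        SDL_Delay((Uint32)(delay * 1000.0));    // video is ahead of the audio: wait
    // if delay < 0 the frame is late; show it immediately (or drop it)
}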
The ffmpeg tutorials I suggested above are very important for understanding the code; a lot of the code in this demo came from there. They show how to deal with SDL and how to read packets from audio/video streams.
#include <highgui.h>
#include <cv.h>
extern "C"
{
#include <SDL.h>
#include <SDL_thread.h>
#include <avcodec.h>
#include <avformat.h>
}
#include <iostream>
#include <stdio.h>
//#include <malloc.h>
using namespace cv;
#define SDL_AUDIO_BUFFER_SIZE 1024
typedef struct PacketQueue
{
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int audioStream = -1;
int videoStream = -1;
int quit = 0;
SDL_Surface* screen = NULL;
SDL_Surface* surface = NULL;
AVFormatContext* pFormatCtx = NULL;
AVCodecContext* aCodecCtx = NULL;
AVCodecContext* pCodecCtx = NULL;
void show_frame(IplImage* img)
{
if (!screen)
{
screen = SDL_SetVideoMode(img->width, img->height, 0, 0);
if (!screen)
{
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
}
// Assuming IplImage packed as BGR 24bits
SDL_Surface* surface = SDL_CreateRGBSurfaceFrom((void*)img->imageData,
img->width,
img->height,
img->depth * img->nChannels,
img->widthStep,
0xff0000, 0x00ff00, 0x0000ff, 0
);
SDL_BlitSurface(surface, 0, screen, 0);
SDL_Flip(screen);
}
void packet_queue_init(PacketQueue *q)
{
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
AVPacketList *pkt1;
if (av_dup_packet(pkt) < 0)
{
return -1;
}
//pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
pkt1 = (AVPacketList*) malloc(sizeof(AVPacketList));
if (!pkt1) return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for (;;)
{
if( quit)
{
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1)
{
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
//av_free(pkt1);
free(pkt1);
ret = 1;
break;
}
else if (!block)
{
ret = 0;
break;
}
else
{
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
int len1, data_size;
for (;;)
{
while (audio_pkt_size > 0)
{
data_size = buf_size;
len1 = avcodec_decode_audio2(aCodecCtx, (int16_t*)audio_buf, &data_size,
audio_pkt_data, audio_pkt_size);
if (len1 < 0)
{
/* if error, skip frame */
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if (data_size <= 0)
{
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if (pkt.data)
av_free_packet(&pkt);
if (quit) return -1;
if (packet_queue_get(&audioq, &pkt, 1) < 0) return -1;
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
void audio_callback(void *userdata, Uint8 *stream, int len)
{
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
while (len > 0)
{
if (audio_buf_index >= audio_buf_size)
{
/* We have already sent all our data; get more */
audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
if(audio_size < 0)
{
/* If error, output silence */
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
}
else
{
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if (len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
void setup_ffmpeg(char* filename)
{
if (av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL) != 0)
{
fprintf(stderr, "FFmpeg failed to open file %s!\n", filename);
exit(-1);
}
if (av_find_stream_info(pFormatCtx) < 0)
{
fprintf(stderr, "FFmpeg failed to retrieve stream info!\n");
exit(-1);
}
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, filename, 0);
// Find the first video stream
int i = 0;
for (i; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO && videoStream < 0)
{
videoStream = i;
}
if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO && audioStream < 0)
{
audioStream = i;
}
}
if (videoStream == -1)
{
fprintf(stderr, "No video stream found in %s!\n", filename);
exit(-1);
}
if (audioStream == -1)
{
fprintf(stderr, "No audio stream found in %s!\n", filename);
exit(-1);
}
// Get a pointer to the codec context for the audio stream
aCodecCtx = pFormatCtx->streams[audioStream]->codec;
// Set audio settings from codec info
SDL_AudioSpec wanted_spec;
wanted_spec.freq = aCodecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = aCodecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
SDL_AudioSpec spec;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0)
{
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
exit(-1);
}
AVCodec* aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if (!aCodec)
{
fprintf(stderr, "Unsupported codec!\n");
exit(-1);
}
avcodec_open(aCodecCtx, aCodec);
// audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
SDL_PauseAudio(0);
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
fprintf(stderr, "Unsupported codec!\n");
exit(-1); // Codec not found
}
// Open codec
if (avcodec_open(pCodecCtx, pCodec) < 0)
{
fprintf(stderr, "Unsupported codec!\n");
exit(-1); // Could not open codec
}
}
int main(int argc, char* argv[])
{
if (argc < 2)
{
std::cout << "Usage: " << argv[0] << " <video>" << std::endl;
return -1;
}
av_register_all();
// Init SDL
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
{
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
// Init ffmpeg and setup some SDL stuff related to Audio
setup_ffmpeg(argv[1]);
VideoCapture cap(argv[1]); // open the default camera
if (!cap.isOpened()) // check if we succeeded
{
std::cout << "Failed to load file!" << std::endl;
return -1;
}
AVPacket packet;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
// Actually this is where SYNC between audio/video would happen.
// Right now I assume that every VIDEO packet contains an entire video frame, and that's not true. A video frame can be made by multiple packets!
// But for the time being, assume 1 video frame == 1 video packet,
// so instead of reading the frame through ffmpeg, I read it through OpenCV.
Mat frame;
cap >> frame; // get a new frame from camera
// do some processing on the frame, either as a Mat or as IplImage.
// For educational purposes, applying a lame grayscale conversion
IplImage ipl_frame = frame;
for (int i = 0; i < ipl_frame.width * ipl_frame.height * ipl_frame.nChannels; i += ipl_frame.nChannels)
{
ipl_frame.imageData[i] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //B
ipl_frame.imageData[i+1] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //G
ipl_frame.imageData[i+2] = (ipl_frame.imageData[i] + ipl_frame.imageData[i+1] + ipl_frame.imageData[i+2])/3; //R
}
// Display it on SDL window
show_frame(&ipl_frame);
av_free_packet(&packet);
}
else if (packet.stream_index == audioStream)
{
packet_queue_put(&audioq, &packet);
}
else
{
av_free_packet(&packet);
}
SDL_Event event;
SDL_PollEvent(&event);
switch (event.type)
{
case SDL_QUIT:
SDL_FreeSurface(surface);
SDL_Quit();
break;
default:
break;
}
}
// the camera will be deinitialized automatically in VideoCapture destructor
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
On my Mac I compiled it with:
g++ ffmpeg_snd.cpp -o ffmpeg_snd -D_GNU_SOURCE=1 -D_THREAD_SAFE -I/usr/local/include/opencv -I/usr/local/include -I/usr/local/include/SDL -Wl,-framework,Cocoa -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_contrib -lopencv_legacy -lopencv_flann -lSDLmain -lSDL -L/usr/local/lib -lavfilter -lavcodec -lavformat -I/usr/local/Cellar/ffmpeg/HEAD/include/libavcodec -I/usr/local/Cellar/ffmpeg/HEAD/include/libavformat