How to play an FFmpeg sound sample with OpenAL? - c++

I am using FFmpeg to load audio and video from a file. It works for video, but I don't know how to play the audio samples.
Here is my code to get audio samples:
m_AdotimeBase = (int64_t(m_Adocdec_ctx->time_base.num) * AV_TIME_BASE) / int64_t(m_Adocdec_ctx->time_base.den);
if (!m_Adofmt_ctx)
{
    //AfxMessageBox(L"m_timeBase");
    return FALSE;
}
int64_t seekAdoTarget = int64_t(m_currFrame) * m_AdotimeBase;
if (av_seek_frame(m_Adofmt_ctx, -1, seekAdoTarget, AVSEEK_FLAG_ANY) < 0)
{
    /*CString st;
    st.Format(L"%d", m_currFrame);
    AfxMessageBox(L"av_seek_frame " + st);*/
    m_currFrame = m_totalFrames - 1;
    return FALSE;
}
if ((ret = av_read_frame(m_Adofmt_ctx, &packet)) < 0)
    return FALSE;
if (packet.stream_index == 0)
{
    ret = avcodec_decode_audio4(m_Adocdec_ctx, &in_AdeoFrame, &got_frame, &packet);
    if (ret < 0)
    {
        av_free_packet(&packet);
        return FALSE;
    }
}
My problem is that I want to play that decoded sample using OpenAL.
I would appreciate any tutorials or references on the subject.
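Not from the original question, but as a starting point: OpenAL consumes raw interleaved PCM, so a decoded AVFrame has to be converted (e.g. with libswresample) to something like 16-bit interleaved samples before it can be queued. A minimal sketch, assuming the frame has already been converted to interleaved AV_SAMPLE_FMT_S16; queueFrame and its parameters are illustrative names, not an existing API:
#include <AL/al.h>
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}

//Queue one converted frame on an OpenAL source as 16-bit PCM.
bool queueFrame(ALuint source, const AVFrame* frame, int channels, int sampleRate)
{
    ALenum format = (channels == 1) ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16;
    //byte size of the interleaved S16 payload for this frame
    int size = av_samples_get_buffer_size(NULL, channels, frame->nb_samples,
                                          AV_SAMPLE_FMT_S16, 1);
    if (size < 0)
        return false;

    ALuint buffer;
    alGenBuffers(1, &buffer);
    alBufferData(buffer, format, frame->data[0], size, sampleRate);
    alSourceQueueBuffers(source, 1, &buffer);

    //start (or restart) playback if the source is not already playing
    ALint state;
    alGetSourcei(source, AL_SOURCE_STATE, &state);
    if (state != AL_PLAYING)
        alSourcePlay(source);
    return alGetError() == AL_NO_ERROR;
}
In a real player you would also unqueue processed buffers (AL_BUFFERS_PROCESSED plus alSourceUnqueueBuffers) and delete them, otherwise buffers leak; the OpenAL streaming question further down this page shows that pattern.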

Related

ffmpeg api alternate transcoding and remuxing for same file

Context
Hello!
I'm currently working on a small library that can cut an H.264 video at any frame, but without re-encoding (transcoding) the whole video. The idea is to re-encode only the GOP at which we want to cut, and to rewrite (remux) the other GOPs directly.
The avcut project (https://github.com/anyc/avcut) does exactly that, but it requires systematically decoding every packet, and from the tests I could run and from recent feedback in its GitHub issues it does not seem to work with recent versions of ffmpeg.
As a beginner, I started from the code examples provided in the ffmpeg documentation, in particular transcoding.c and remuxing.c.
Problem encountered
The problem I'm having is that I can't get both transcoding and remuxing to work properly at the same time. Depending on the method I use to initialize the AVCodecParameters of the output video stream, either transcoding works or remuxing works:
avcodec_parameters_copy works well for remuxing
avcodec_parameters_from_context works well for transcoding
If I choose avcodec_parameters_from_context, the transcoded GOPs are read correctly by my video player (Parole), but the remuxed packets are not, and ffprobe does not show/detect them.
If I choose avcodec_parameters_copy, the remuxed GOPs are read correctly by my video player, but the transcoded key frames are corrupted (the B-frames and P-frames seem fine), and ffprobe -i returns an error about the NAL units of the key frames:
[h264 @ 0x55ec8a079300] sps_id 32 out of range
[h264 @ 0x55ec8a079300] Invalid NAL unit size (1677727148 > 735).
[h264 @ 0x55ec8a079300] missing picture in access unit with size 744
I suspect that the problem is related to the extradata of the packets. From some experiments with the different attributes of the output AVCodecParameters, it seems that the extradata and extradata_size fields are what make one method or the other work.
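One hedged idea (my addition, untested against this code): start from the demuxer's parameters, which remuxing needs, and then graft in the encoder's extradata, which the transcoded key frames seem to need. Note that an encoder usually only fills extradata when AV_CODEC_FLAG_GLOBAL_HEADER is set, which the avcut-derived comments below advise against, so this may need experimentation; enc_ctx stands for the encoder context built in prepare_encoder_video:
//Sketch only: parameters from the input stream (good for remuxing), extradata
//from the encoder (the part transcoding appears to depend on).
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0)
    return ret;
if (enc_ctx->extradata_size > 0) {
    av_freep(&out_stream->codecpar->extradata);
    out_stream->codecpar->extradata = (uint8_t*)av_mallocz(
        enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!out_stream->codecpar->extradata)
        return AVERROR(ENOMEM);
    memcpy(out_stream->codecpar->extradata, enc_ctx->extradata,
           enc_ctx->extradata_size);
    out_stream->codecpar->extradata_size = enc_ctx->extradata_size;
}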
Version
ffmpeg development branch retrieved on 2022-05-17 from https://github.com/FFmpeg/FFmpeg.
Compiled with --enable-libx264 --enable-gpl --enable-decoder=png --enable-encoder=png
Code
My code is written in C++ and is based on two classes: a class defining the parameters and methods for the input file (InputContexts) and a class defining them for the output file (OutputContexts). The code of these two classes is in the following files:
contexts.h
contexts.cpp
The code normally involved in the problem is the following:
stream initialization
int OutputContexts::init(const char* out_filename, InputContexts* input_contexts){
    int ret;
    int stream_index = 0;

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        return ret;
    }

    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    encoders.resize(input_contexts->ifmt_ctx->nb_streams, nullptr);
    codecs.resize(input_contexts->ifmt_ctx->nb_streams, nullptr);

    // stream mapping
    for (int i = 0; i < input_contexts->ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = input_contexts->ifmt_ctx->streams[i];
        AVCodecContext* decoder_ctx = input_contexts->decoders[i];

        // add new stream to output context
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            return ret;
        }

        // from avcut blog
        av_dict_copy(&out_stream->metadata, in_stream->metadata, 0);
        out_stream->time_base = in_stream->time_base;

        // encoder
        if (decoder_ctx->codec_type == AVMEDIA_TYPE_VIDEO){
            ret = prepare_encoder_video(i, input_contexts);
            if (ret < 0){
                fprintf(stderr, "Error while preparing encoder for stream #%u\n", i);
                return ret;
            }

            // from avcut
            out_stream->sample_aspect_ratio = in_stream->sample_aspect_ratio;

            // works well for remuxing
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                fprintf(stderr, "Failed to copy codec parameters\n");
                return ret;
            }

            // works well for transcoding
            // ret = avcodec_parameters_from_context(out_stream->codecpar, encoders[i]);
            // if (ret < 0) {
            //     av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
            //     return ret;
            // }
        } else if (decoder_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            ...
        } else {
            ...
        }

        // TODO useful ???
        // set current stream position to 0
        // out_stream->codecpar->codec_tag = 0;
    }

    // opening output file in write mode with the output context
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            return ret;
        }
    }

    // write headers from output context in output file
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return ret;
    }

    return ret;
}
AVCodecContext initialization for encoder
int OutputContexts::prepare_encoder_video(int stream_index, InputContexts* input_contexts){
    int ret;
    const AVCodec* encoder;
    AVCodecContext* decoder_ctx = input_contexts->decoders[stream_index];
    AVCodecContext* encoder_ctx;

    if (video_index >= 0){
        fprintf(stderr, "Impossible to mark stream #%u as video, stream #%u is already registered as video stream.\n",
                stream_index, video_index);
        return -1; //TODO change this value for correct error code
    }
    video_index = stream_index;

    if(decoder_ctx->codec_id == AV_CODEC_ID_H264){
        encoder = avcodec_find_encoder_by_name("libx264");
        if (!encoder) {
            av_log(NULL, AV_LOG_FATAL, "Encoder libx264 not found\n");
            return AVERROR_INVALIDDATA;
        }
        fmt::print("Encoder libx264 will be used for stream {}.\n", stream_index);
    } else {
        std::string s = fmt::format("No video encoder found for the given codec_id: {}\n", avcodec_get_name(decoder_ctx->codec_id));
        av_log(NULL, AV_LOG_FATAL, s.c_str());
        return AVERROR_INVALIDDATA;
    }

    encoder_ctx = avcodec_alloc_context3(encoder);
    if (!encoder_ctx) {
        av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
        return AVERROR(ENOMEM);
    }

    // from avcut
    encoder_ctx->time_base = decoder_ctx->time_base;
    encoder_ctx->ticks_per_frame = decoder_ctx->ticks_per_frame;
    encoder_ctx->delay = decoder_ctx->delay;
    encoder_ctx->width = decoder_ctx->width;
    encoder_ctx->height = decoder_ctx->height;
    encoder_ctx->pix_fmt = decoder_ctx->pix_fmt;
    encoder_ctx->sample_aspect_ratio = decoder_ctx->sample_aspect_ratio;
    encoder_ctx->color_primaries = decoder_ctx->color_primaries;
    encoder_ctx->color_trc = decoder_ctx->color_trc;
    encoder_ctx->colorspace = decoder_ctx->colorspace;
    encoder_ctx->color_range = decoder_ctx->color_range;
    encoder_ctx->chroma_sample_location = decoder_ctx->chroma_sample_location;
    encoder_ctx->profile = decoder_ctx->profile;
    encoder_ctx->level = decoder_ctx->level;

    encoder_ctx->thread_count = 1; // spawning more threads causes avcodec_close to free threads multiple times
    encoder_ctx->codec_tag = 0;

    // correct values ???
    encoder_ctx->qmin = 16;
    encoder_ctx->qmax = 26;
    encoder_ctx->max_qdiff = 4;
    // end from avcut

    // according to avcut, should not be set
    // if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER){
    //     encoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    // }

    ret = avcodec_open2(encoder_ctx, encoder, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", stream_index);
        return ret;
    }

    codecs[stream_index] = encoder;
    encoders[stream_index] = encoder_ctx;
    return ret;
}
Example
To illustrate my problem, here is a test program that uses the two classes and alternates between transcoding and remuxing at each key frame encountered in the file:
trans_remux.cpp
To compile the code:
g++ -o trans_remux trans_remux.cpp contexts.cpp -D__STDC_CONSTANT_MACROS `pkg-config --libs libavfilter` -lfmt -g
Currently the code uses avcodec_parameters_copy (contexts.cpp:333), so it works well for remuxing. If you want to test the version with avcodec_parameters_from_context, please comment out lines 333 to 337 in contexts.cpp, uncomment lines 340 to 344, and recompile.

OpenAL: Sound track distorted and cutting off

I'm working on an OpenAL project that also uses the libsndfile library. I have a problem playing long sound tracks with buffered streaming in OpenAL: the sound is distorted at the beginning and then just suddenly cuts off. Here is the base-class snippet that determines and sets the format of the clip and calculates the audio frame size:
musicBuffer::musicBuffer(const char* filename){
    alGenSources(1, &se_source);
    alGenBuffers(num_buffers, se_buffers);
    std::size_t frame_size;

    //using sndfile to read the soundtrack
    se_sndfile = sf_open(filename, SFM_READ, &se_sfinfo);
    if (!se_sndfile){
        throw("could not open provided music file -- check path");
    }

    //pick the OpenAL format from the channel count
    //(start from a known value so the !se_format check below is reliable)
    se_format = AL_NONE;
    if (se_sfinfo.channels == 1)
        se_format = AL_FORMAT_MONO16;
    else if (se_sfinfo.channels == 2)
        se_format = AL_FORMAT_STEREO16;
    else if (se_sfinfo.channels == 3){
        if (sf_command(se_sndfile, SFC_WAVEX_GET_AMBISONIC, NULL, 0) == SF_AMBISONIC_B_FORMAT)
            se_format = AL_FORMAT_BFORMAT2D_16;
    }
    else if (se_sfinfo.channels == 4){
        if (sf_command(se_sndfile, SFC_WAVEX_GET_AMBISONIC, NULL, 0) == SF_AMBISONIC_B_FORMAT)
            se_format = AL_FORMAT_BFORMAT3D_16;
    }
    if (!se_format){
        sf_close(se_sndfile);
        se_sndfile = NULL;
        throw("Unsupported channel count from file");
    }

    //one buffer's worth of interleaved 16-bit samples
    frame_size = ((size_t)b_samples * (size_t)se_sfinfo.channels) * sizeof(short);
    se_membuf = static_cast<short*>(malloc(frame_size));
}
Also, the audio format does not seem to be the issue, as the problem is present with every audio format I have tried. Below are the Play() and updateBufferStream() functions, which hand all the relevant data to the audio buffers for playback:
void musicBuffer::Play(){
    ALsizei i;
    alGetError();

    //rewind source position + clear buffer queue
    alSourceRewind(se_source);
    alSourcei(se_source, AL_BUFFER, 0);

    //fill the buffer queue with data from the sound file
    for (i = 0; i < num_buffers; i++){
        sf_count_t slen = sf_readf_short(se_sndfile, se_membuf, b_samples);
        if (slen < 1) break;
        slen *= se_sfinfo.channels * (sf_count_t)sizeof(short);
        alBufferData(se_buffers[i], se_format, se_membuf, (ALsizei)slen, se_sfinfo.samplerate);
    }
    if (alGetError() != AL_NO_ERROR){
        throw("Error buffering for playback");
    }

    //queue and start playback
    alSourceQueueBuffers(se_source, i, se_buffers);
    alSourcePlay(se_source);
    if (alGetError() != AL_NO_ERROR){
        throw("Error starting playback");
    }
}

void musicBuffer::updateBufferStream(){
    ALint processed, state;
    alGetError();

    //check status of the source
    alGetSourcei(se_source, AL_SOURCE_STATE, &state);
    //check amount of buffers processed
    alGetSourcei(se_source, AL_BUFFERS_PROCESSED, &processed);
    if (alGetError() != AL_NO_ERROR)
    {
        throw("error checking music source state");
    }

    while (processed > 0){
        ALuint bufid;
        sf_count_t slen;
        alSourceUnqueueBuffers(se_source, 1, &bufid);
        processed--;

        //read more data, refill the buffer and re-queue it
        slen = sf_readf_short(se_sndfile, se_membuf, b_samples);
        if (slen > 0){
            slen *= se_sfinfo.channels * (sf_count_t)sizeof(short);
            alBufferData(bufid, se_format, se_membuf, (ALsizei)slen,
                         se_sfinfo.samplerate);
            alSourceQueueBuffers(se_source, 1, &bufid);
        }
        if (alGetError() != AL_NO_ERROR){
            throw("error buffering music data");
        }
    }

    //check whether the source has underrun
    if (state != AL_PLAYING && state != AL_PAUSED){
        ALint queued;
        //if there aren't any buffers queued, playback is done
        alGetSourcei(se_source, AL_BUFFERS_QUEUED, &queued);
        if (queued == 0)
            return;
        alSourcePlay(se_source);
        if (alGetError() != AL_NO_ERROR)
        {
            throw("error restarting music playback");
        }
    }
}
Does anyone have any suggestions on what looks wrong, or can anyone identify the issue itself?
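For context on how such a streaming buffer is typically driven (this loop is my illustration, not part of the question): updateBufferStream() has to be called more often than one buffer's duration (b_samples / samplerate seconds), or the source runs dry and playback cuts off. A minimal polling loop, assuming se_source is accessible and the members shown above:
#include <chrono>
#include <thread>

void playBlocking(musicBuffer& music)
{
    music.Play();
    ALint state = AL_PLAYING;
    while (state == AL_PLAYING || state == AL_PAUSED)
    {
        music.updateBufferStream();   //refill any processed buffers
        alGetSourcei(music.se_source, AL_SOURCE_STATE, &state);
        //sleep well below one buffer's duration to avoid underruns
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}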

How to skip frames while decoding an H264 stream?

I'm using FFmpeg to decode an H264 (or H265) RTSP stream.
My system has two programs: Server and Client.
Server: reads frames from the RTSP stream --> forwards frames to the Client
Client: receives frames from the Server --> decodes --> renders
I have implemented this and it works OK, but there is a case where my system does not behave well: when the connection between Server and Client is slow, frames cannot be transferred to the Client in real time.
At present I deal with this issue by skipping some frames (not sending them to the Client) when the queue reaches its size limit. The following is a summary of my code:
//At Server Software (include 2 threads A and B)
//Thread A: Read AVPacket and forward to Client
while(true)
{
    AVPacket packet;
    av_init_packet(&packet);
    packet.size = 0;
    packet.data = NULL;

    int ret = AVERROR(EAGAIN);
    while (AVERROR(EAGAIN) == ret)
        ret = av_read_frame(pFormatCtx, &packet);

    if(packet.size > 0)
    {
        if(mySendQueue.count < 120) //limit 120 packets in queue
            mySendQueue.Enqueue(packet); //Thread B reads from this queue and sends packets to the Client via TCP socket
        else
            ;//SkipThisFrame ***: do not send
    }
}
//Thread B: Send To Client via TCP Socket
while(true)
{
    AVPacket packet;
    if(mySendQueue.Dequeue(packet))
    {
        SendPacketToClient(packet);
    }
}
//At Client Software: Receive AVPacket from Server --> Decode --> Render
while(true)
{
    AVPacket packet;
    AVFrame frame;
    ReadPacketFromServer(packet);
    if (av_decode_asyn(pCodecCtx, &frame, &frameFinished, &packet) == RS_OK)
    {
        if (frameFinished)
        {
            RenderFrame(frame);
        }
    }
}
UINT32 __clrcall av_decode_asyn(AVCodecContext *pCodecCtx, AVFrame *frame, int *frameFinished, AVPacket *packet)
{
    int ret = -1;
    *frameFinished = 0;
    if (packet)
    {
        ret = avcodec_send_packet(pCodecCtx, packet);
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
        // decoded frames with avcodec_receive_frame() until done.
        if (ret < 0 && ret != AVERROR_EOF)
            return RS_NOT_OK;
    }
    ret = avcodec_receive_frame(pCodecCtx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN))
    {
        return RS_NOT_OK;
    }
    if (ret >= 0)
        *frameFinished = 1;
    return RS_OK;
}
My question focuses on the line of code marked SkipThisFrame ***: this algorithm skips frames continuously, so could it cause unexpected errors or a crash in the decoder on the Client?
And when frames are skipped like that, will the Client render frames abnormally?
Can someone show me a proper algorithm for skipping frames in my case?
Thank you very much!
I had a brief read of the AVPacket documentation; it says:
For video, it should typically contain one compressed frame.
Theoretically you cannot skip arbitrary frames in a compressed video stream, as most frames do not contain complete information about that frame's image; they only contain changes relative to some previous frames. So if you skip a frame, it is probable that many of the following decoded frames won't be correct (until the next key frame flushes the whole image).
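Building on that, a hedged sketch of GOP-aligned skipping on the server (my illustration, reusing the question's mySendQueue and 120-packet limit; the dropping flag is hypothetical state): once the queue is full, drop every packet until the next key frame, then resume from that key frame so the client always decodes from a clean state.
bool dropping = false;
while (true)
{
    AVPacket packet;
    if (av_read_frame(pFormatCtx, &packet) < 0)
        break;

    bool isKey = (packet.flags & AV_PKT_FLAG_KEY) != 0;
    if (dropping && isKey)
        dropping = false;            //a key frame restores a decodable state
    if (!dropping && mySendQueue.count >= 120)
        dropping = true;             //queue full: drop until the next key frame

    if (!dropping)
        mySendQueue.Enqueue(packet); //safe to decode from here on
    else
        av_packet_unref(&packet);    //discard the rest of this GOP
}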
"My question is focus in line of code SkipThisFrame ***, this algorithm
skip frame continuously, so it maybe make the decoder on Client occur
unexpectedly error or Crash?"
One thing I notice is wrong...
Your While(true) statements also need a break; to stop, otherwise they will run forever, blocking other functions and causing the system to crash. Think about it, you say "While the loop is true do X-Y-Z instructions" but you never say when to stop (eg: break out of this While loop to do next instructions). Computer is stuck doing first While loop only and also repeating that to infinity...
Try setting up like this:
//At Server Software (include 2 threads A and B)
//Thread A: Read AVPacket and forward to Client
while(true)
{
    AVPacket packet;
    av_init_packet(&packet);
    packet.size = 0;
    packet.data = NULL;

    int ret = AVERROR(EAGAIN);
    while (AVERROR(EAGAIN) == ret) { ret = av_read_frame(pFormatCtx, &packet); }

    if(packet.size > 0)
    {
        if(mySendQueue.count < 120) //limit 120 packets in queue
        {
            mySendQueue.Enqueue(packet); //Thread B will read from this queue, to send packets to Client via TCP socket
        }
        //else { } //no need for ELSE if doing nothing... //SkipThisFrame ***: No send
    }
    break; //stop this part and move to "Thread B"
}

//Thread B: Send To Client via TCP Socket
while(true)
{
    AVPacket packet;
    if( mySendQueue.Dequeue(packet) )
    { SendPacketToClient(packet); break; }
}

//At Client Software: Receive AVPacket from Server --> Decode --> Render
while(true)
{
    AVPacket packet; AVFrame frame;
    ReadPacketFromServer(packet);
    if (av_decode_asyn(pCodecCtx, &frame, &frameFinished, &packet) == RS_OK)
    {
        if (frameFinished) { RenderFrame(frame); break; }
    }
}
UINT32 __clrcall av_decode_asyn(AVCodecContext *pCodecCtx, AVFrame *frame, int *frameFinished, AVPacket *packet)
{
    int ret = -1;
    *frameFinished = 0;
    if (packet)
    {
        ret = avcodec_send_packet(pCodecCtx, packet);
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
        // decoded frames with avcodec_receive_frame() until done.
        if (ret < 0 && ret != AVERROR_EOF)
            return RS_NOT_OK;
    }
    ret = avcodec_receive_frame(pCodecCtx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN))
    {
        return RS_NOT_OK;
    }
    if (ret >= 0)
        *frameFinished = 1;
    return RS_OK;
}
Hope it helps. Let me know of results / errors.

How to read an h264 stream as a file from a USB webcam directly in C/C++ without using OpenCV?

I am able to read a video file in h264 format and do some machine-learning inference on top of it. The code works absolutely fine for input from a file. The code below is a sample from the Deepstream SDK:
FileDataProvider(const char *szFilePath, simplelogger::Logger *logger)
    : logger_(logger)
{
    fp_ = fopen(szFilePath, "rb");
    //fp_ = fopen("/dev/video0", "rb");
    if (nullptr == fp_) {
        LOG_ERROR(logger, "Failed to open file " << szFilePath);
        exit(1);
    }
    pLoadBuf_ = new uint8_t[nLoadBuf_];
    pPktBuf_ = new uint8_t[nPktBuf_];
    assert(nullptr != pLoadBuf_);
}

~FileDataProvider() {
    if (fp_) {
        fclose(fp_);
    }
    if (pLoadBuf_) {
        delete [] pLoadBuf_;
    }
    if (pPktBuf_) {
        delete [] pPktBuf_;
    }
}
What is the requirement?
Read from the Logitech c920 webcam instead of a video file.
I know how to read from a webcam using OpenCV, but I don't want to use OpenCV here.
My Research
Using v4l we can get the stream and display it in VLC.
The camera supports the formats below:
#ubox:~$ v4l2-ctl --device=/dev/video1 --list-formats
ioctl: VIDIOC_ENUM_FMT
    Index       : 0
    Type        : Video Capture
    Pixel Format: 'YUYV'
    Name        : YUYV 4:2:2

    Index       : 1
    Type        : Video Capture
    Pixel Format: 'H264' (compressed)
    Name        : H.264

    Index       : 2
    Type        : Video Capture
    Pixel Format: 'MJPG' (compressed)
    Name        : Motion-JPEG
Reading output of a USB webcam in Linux
vlc v4l2:///dev/video1 --v4l2-chroma=h264 - this displays the video from the webcam.
How to do this?
- Now, how do I feed this live stream into the sample code above so that it reads from the webcam rather than from a file?
[update-1]
- In other words, does v4l have an option to write the video stream to disk in h264 format? Then I could read that file as before (with the code above) while v4l is writing it to disk.
[update-2]
- We could use ffmpeg instead of v4l. Is there a way to use ffmpeg to save the video stream to disk continuously, so that another program can read that file?
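(As a hedged aside, not from the original thread: with the ffmpeg CLI, something along these lines should capture the camera's native H.264 to disk without re-encoding; the device path and the availability of -input_format h264 depend on the system.)
ffmpeg -f v4l2 -input_format h264 -i /dev/video1 -c:v copy -f h264 output.h264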
Before using ioctl to capture frames from the camera, you need to set the format as shown below first:
// open() returns an int file descriptor (fd_), used by ioctl/mmap/select below
fd_ = open("/dev/video0", O_RDWR);
struct v4l2_format fmt = {0};
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
ioctl(fd_, VIDIOC_S_FMT, &fmt);
Then, initialize and map the buffers:
struct Buffer
{
    void *start;
    unsigned int length;
    unsigned int flags;
};

int buffer_count_ = 4;
Buffer *buffers_;

bool AllocateBuffer()
{
    struct v4l2_requestbuffers req = {0};
    req.count = buffer_count_;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fd_, VIDIOC_REQBUFS, &req) < 0)
    {
        perror("ioctl Requesting Buffer");
        return false;
    }

    buffers_ = new Buffer[buffer_count_];
    for (int i = 0; i < buffer_count_; i++)
    {
        struct v4l2_buffer buf = {0};
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (ioctl(fd_, VIDIOC_QUERYBUF, &buf) < 0)
        {
            perror("ioctl Querying Buffer");
            return false;
        }

        buffers_[i].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, buf.m.offset);
        buffers_[i].length = buf.length;
        if (MAP_FAILED == buffers_[i].start)
        {
            printf("MAP FAILED: %d\n", i);
            for (int j = 0; j < i; j++)
                munmap(buffers_[j].start, buffers_[j].length);
            return false;
        }

        if (ioctl(fd_, VIDIOC_QBUF, &buf) < 0)
        {
            perror("ioctl Queue Buffer");
            return false;
        }
    }
    return true;
}
Use STREAMON to start capturing:
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ioctl(fd_, VIDIOC_STREAMON, &type);
Finally, read a frame from the mapped buffer. Generally, CaptureImage() will be called in a while loop.
Buffer CaptureImage()
{
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd_, &fds);
    struct timeval tv = {0};
    tv.tv_sec = 1;
    tv.tv_usec = 0;
    int r = select(fd_ + 1, &fds, NULL, NULL, &tv);
    if (r == 0)
    {
        // timeout
    }

    struct v4l2_buffer buf = {0};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    while (ioctl(fd_, VIDIOC_DQBUF, &buf) < 0)
    {
        perror("Retrieving Frame");
    }

    struct Buffer buffer = {.start = buffers_[buf.index].start,
                            .length = buf.bytesused,
                            .flags = buf.flags};
    if (ioctl(fd_, VIDIOC_QBUF, &buf) < 0)
    {
        perror("Queue buffer");
    }
    return buffer;
}
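To sketch how these helpers might be driven (my addition, not part of the answer; keep_running and ConsumeH264 are hypothetical stand-ins for the application's loop flag and for whatever feeds the compressed bytes to the decoder):
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (AllocateBuffer() && ioctl(fd_, VIDIOC_STREAMON, &type) >= 0)
{
    while (keep_running)
    {
        Buffer b = CaptureImage();                //one compressed H.264 chunk
        ConsumeH264((uint8_t*)b.start, b.length); //hand it to the decoder
    }
    ioctl(fd_, VIDIOC_STREAMOFF, &type);          //stop streaming when done
}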

FFmpeg: How to put encoded media data from one container to another without re-encoding?

So for example: I have file.mp3, and I know that my desired format can play sound without video (for example FLV), so how do I put the encoded mp3 data from the mp3 container into an flv container using ffmpeg (where can I find articles/code samples on this)?
I mean not from cmd but from C++ using ffmpeg as a library (see tags).
Here is the command to convert an .mp3 file to .flv (which does not have any video data):
ffmpeg -i test.mp3 -ab 32k -acodec libmp3lame -ac 1 -ar 44100 audio.flv
You can execute this command from your program.
If you need help on how to install and use ffmpeg you can go to their site:
http://ffmpeg.org
Thanks,
Mahmud
Have you considered just running ffmpeg from a popen() / system() call from C++?
It's a lot easier than setting up the ffmpeg libraries, it makes it trivial to multithread (not really an issue in this example), and it frees you from any LGPL linking and DLL-hell issues.
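For example, a minimal POSIX popen() wrapper might look like this (a sketch of the suggestion above, not a full implementation; the command string mirrors the stream-copy one-liner at the end of this thread):
#include <cstdio>

//Hedged sketch: run ffmpeg as a child process and wait for it to finish.
int remux_via_cli(const char* in_mp3, const char* out_flv)
{
    char cmd[512];
    std::snprintf(cmd, sizeof(cmd),
                  "ffmpeg -y -i \"%s\" -acodec copy \"%s\"", in_mp3, out_flv);
    FILE* p = popen(cmd, "r");  //reads ffmpeg's stdout; stderr still goes to the console
    if (!p)
        return -1;
    return pclose(p);           //child's exit status
}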
Here's what you want to do:
AVFormatContext *ptrFormatContext;
int i, videoStream, audioStream;
AVCodecContext *ptrCodecCtxt;
AVCodec *ptrCodec;
AVFrame *ptrFrame;
AVPacket ptrPacket;
int frameFinished;
float aspect_ratio;
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
AVCodecContext *aTargetCodecCtxt;
AVCodecContext *vTargetCodecCtxt;
AVCodec *aTargetCodec;
AVCodec *vTargetCodec;
AVSampleFormat ptrSampleFormats[2] = {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32};

audioStream = videoStream = -1;
av_register_all();
avcodec_register_all();
ptrFormatContext = avformat_alloc_context();

if(avformat_open_input(&ptrFormatContext, filename, NULL, NULL) != 0 )
{
    qDebug("Error opening the input");
    exit(-1);
}
if(av_find_stream_info( ptrFormatContext) < 0)
{
    qDebug("Could not find any stream info");
    exit(-2);
}
dump_format(ptrFormatContext, 0, filename, (int) NULL);

for(i=0; i<ptrFormatContext->nb_streams; i++)
{
    switch(ptrFormatContext->streams[i]->codec->codec_type)
    {
        case AVMEDIA_TYPE_VIDEO:
        {
            if(videoStream < 0) videoStream = i;
            break;
        }
        case AVMEDIA_TYPE_AUDIO:
        {
            if(audioStream < 0) audioStream = i;
            break;
        }
    }
}
if(audioStream == -1)
{
    qDebug("Could not find any audio stream");
    exit(-3);
}
if(videoStream == -1)
{
    qDebug("Could not find any video stream");
    exit(-4);
}

aCodecCtx = ptrFormatContext->streams[audioStream]->codec;
if( (aCodec = avcodec_find_decoder(aCodecCtx->codec_id)) == NULL)
{
    qDebug("Could not find the audio decoder");
    exit(-5);
}
if( (avcodec_open(aCodecCtx, aCodec)) != 0 )
{
    qDebug("Could not open the audio decoder");
    exit(-6);
}

ptrCodecCtxt = ptrFormatContext->streams[videoStream]->codec;
if( (ptrCodec = avcodec_find_decoder(ptrCodecCtxt->codec_id)) == NULL )
{
    qDebug("Could not find the video decoder");
    exit(-7);
}
if((avcodec_open(ptrCodecCtxt, ptrCodec)) != 0)
{
    qDebug("Could not open the video decoder");
    exit(-8);
}
Then some other stuff, mostly irrelevant if you don't want to re-encode...
ptrFrame = avcodec_alloc_frame();
while(av_read_frame(ptrFormatContext, &ptrPacket) >= 0)
{
    if(ptrPacket.stream_index == videoStream)
    {
        //do stuff with the packet, e.g. transcribe it into another output stream...
    }
    else if (ptrPacket.stream_index == audioStream)
    {
        //do stuff with the packet, e.g. transcribe it into another output stream...
    }
}
Hope that's helpful. The code, however, is only an excerpt and will not work on its own, but it should help you get the idea.
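For completeness, a hedged sketch of the "transcribe it into another output stream" part with the current FFmpeg API, closely following the official remuxing.c example (my illustration, not the answerer's code; most error handling trimmed):
extern "C" {
#include <libavformat/avformat.h>
}

//Copy every stream of in_name into out_name without re-encoding.
int remux(const char* in_name, const char* out_name)
{
    AVFormatContext *in_ctx = NULL, *out_ctx = NULL;
    if (avformat_open_input(&in_ctx, in_name, NULL, NULL) < 0) return -1;
    if (avformat_find_stream_info(in_ctx, NULL) < 0) return -1;

    avformat_alloc_output_context2(&out_ctx, NULL, NULL, out_name);
    if (!out_ctx) return -1;

    //one output stream per input stream, parameters copied verbatim
    for (unsigned i = 0; i < in_ctx->nb_streams; i++) {
        AVStream* out_st = avformat_new_stream(out_ctx, NULL);
        if (!out_st) return -1;
        avcodec_parameters_copy(out_st->codecpar, in_ctx->streams[i]->codecpar);
        out_st->codecpar->codec_tag = 0; //let the muxer pick its own tag
    }

    if (!(out_ctx->oformat->flags & AVFMT_NOFILE))
        if (avio_open(&out_ctx->pb, out_name, AVIO_FLAG_WRITE) < 0) return -1;
    if (avformat_write_header(out_ctx, NULL) < 0) return -1;

    AVPacket* pkt = av_packet_alloc();
    while (av_read_frame(in_ctx, pkt) >= 0) {
        //rescale timestamps from the demuxer's time base to the muxer's
        AVStream* in_st  = in_ctx->streams[pkt->stream_index];
        AVStream* out_st = out_ctx->streams[pkt->stream_index];
        av_packet_rescale_ts(pkt, in_st->time_base, out_st->time_base);
        pkt->pos = -1;
        av_interleaved_write_frame(out_ctx, pkt); //takes ownership of the data
    }
    av_packet_free(&pkt);

    av_write_trailer(out_ctx);
    avformat_close_input(&in_ctx);
    if (!(out_ctx->oformat->flags & AVFMT_NOFILE)) avio_closep(&out_ctx->pb);
    avformat_free_context(out_ctx);
    return 0;
}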
ffmpeg -i file.mp3 -acodec copy output.flv