I'm trying to create a C++ program with the Waveform Audio library that plays AudioFrames (raw audio data, each frame about 1920 bytes) provided by another program (right now I'm just simulating that by reading a file as AudioFrames). Modifying code from this thread, I was able to make a SoundPlayer class that does the job, but the output I get is extremely choppy. It gets better with bigger frame sizes, but even with frames as big as 96000 bytes the audio still glitches every second or so (and I need the frames to be much smaller than that).
How can I fix this issue?
Here is the test file I'm using. And here is the code itself:
#include <windows.h>
#include <iostream>
#pragma comment(lib, "Winmm.lib")
constexpr int FRAME_SIZE_IN_BYTES = 1920;
struct AudioFrame
{
char *Data;
int DataSize;
};
class SoundPlayer
{
public:
SoundPlayer()
{
// Initialize the sound format we will request from sound card
m_waveFormat.wFormatTag = WAVE_FORMAT_PCM; // Uncompressed sound format
m_waveFormat.nChannels = 1; // 1 = Mono, 2 = Stereo
m_waveFormat.wBitsPerSample = 16; // Bits per sample per channel
m_waveFormat.nSamplesPerSec = 48000; // Sample Per Second
m_waveFormat.nBlockAlign = m_waveFormat.nChannels * m_waveFormat.wBitsPerSample / 8;
m_waveFormat.nAvgBytesPerSec = m_waveFormat.nSamplesPerSec * m_waveFormat.nBlockAlign;
m_waveFormat.cbSize = 0;
}
void Play(AudioFrame* af)
{
// Create our "Sound is Done" event
m_done = CreateEvent(0, FALSE, FALSE, 0);
// Open the audio device
if (waveOutOpen(&m_waveOut, 0, &m_waveFormat, (DWORD)m_done, 0, CALLBACK_EVENT) != MMSYSERR_NOERROR)
{
std::cout << "Sound card cannot be opened." << std::endl;
return;
}
// Create the wave header for our sound buffer
m_waveHeader.lpData = af->Data;
m_waveHeader.dwBufferLength = af->DataSize;
m_waveHeader.dwFlags = 0;
m_waveHeader.dwLoops = 0;
// Prepare the header for playback on sound card
if (waveOutPrepareHeader(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error preparing Header!" << std::endl;
return;
}
ResetEvent(m_done); // Reset our Event so it is non-signaled, it will be signaled again with buffer finished
// Play the sound!
if (waveOutWrite(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error writing to sound card!" << std::endl;
return;
}
// Wait until sound finishes playing
if (WaitForSingleObject(m_done, INFINITE) != WAIT_OBJECT_0)
{
std::cout << "Error waiting for sound to finish" << std::endl;
return;
}
// Unprepare our wav header
if (waveOutUnprepareHeader(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error unpreparing header!" << std::endl;
return;
}
// Close the wav device
if (waveOutClose(m_waveOut) != MMSYSERR_NOERROR)
{
std::cout << "Sound card cannot be closed!" << std::endl;
return;
}
// Release our event handle
CloseHandle(m_done);
}
private:
HWAVEOUT m_waveOut; // Handle to sound card output
WAVEFORMATEX m_waveFormat; // The sound format
WAVEHDR m_waveHeader; // WAVE header for our sound data
HANDLE m_done; // Event Handle that tells us the sound has finished being played.
// This is a very efficient way to put the program to sleep
// while the sound card is processing the sound buffer
};
int main()
{
FILE * fileDes;
fopen_s(&fileDes, "Ducksauce.raw", "rb");
if (fileDes == nullptr)
std::cout << "File opening failed.\n";
int bufferSize = FRAME_SIZE_IN_BYTES;
char *buffer = new char[bufferSize];
SoundPlayer sp;
while (fread(buffer, sizeof(char), bufferSize, fileDes) > 0)
{
AudioFrame af;
af.Data = buffer;
af.DataSize = bufferSize;
sp.Play(&af);
}
fclose(fileDes);
delete[] buffer;
return 0;
}
Edit: version 2. It still doesn't work as intended.
#include <windows.h>
#include <iostream>
#pragma comment(lib, "Winmm.lib")
constexpr int FRAME_SIZE_IN_BYTES = 1920;
struct AudioFrame
{
char *Data;
int DataSize;
};
class SoundPlayer
{
public:
SoundPlayer()
{
// Initialize the sound format we will request from sound card
m_waveFormat.wFormatTag = WAVE_FORMAT_PCM; // Uncompressed sound format
m_waveFormat.nChannels = 1; // 1 = Mono, 2 = Stereo
m_waveFormat.wBitsPerSample = 16; // Bits per sample per channel
m_waveFormat.nSamplesPerSec = 48000; // Sample Per Second
m_waveFormat.nBlockAlign = m_waveFormat.nChannels * m_waveFormat.wBitsPerSample / 8;
m_waveFormat.nAvgBytesPerSec = m_waveFormat.nSamplesPerSec * m_waveFormat.nBlockAlign;
m_waveFormat.cbSize = 0;
// Create our "Sound is Done" event
m_done = CreateEvent(0, FALSE, FALSE, 0);
// Open the audio device
if (waveOutOpen(&m_waveOut, 0, &m_waveFormat, (DWORD)m_done, 0, CALLBACK_EVENT) != MMSYSERR_NOERROR)
{
std::cout << "Sound card cannot be opened." << std::endl;
return;
}
}
~SoundPlayer()
{
// Close the wav device
if (waveOutClose(m_waveOut) != MMSYSERR_NOERROR)
{
std::cout << "Sound card cannot be closed!" << std::endl;
return;
}
// Release our event handle
CloseHandle(m_done);
}
void StartPlaying(AudioFrame* af)
{
// Create the wave header for our sound buffer
m_waveHeader.lpData = af->Data;
m_waveHeader.dwBufferLength = af->DataSize;
m_waveHeader.dwFlags = 0;
m_waveHeader.dwLoops = 0;
// Prepare the header for playback on sound card
if (waveOutPrepareHeader(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error preparing Header!" << std::endl;
return;
}
ResetEvent(m_done); // Reset our Event so it is non-signaled, it will be signaled again with buffer finished
// Play the sound!
if (waveOutWrite(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error writing to sound card!" << std::endl;
return;
}
}
void WaitUntilFrameFinishes()
{
// Wait until sound finishes playing
if (WaitForSingleObject(m_done, INFINITE) != WAIT_OBJECT_0)
{
std::cout << "Error waiting for sound to finish" << std::endl;
return;
}
// Unprepare our wav header
if (waveOutUnprepareHeader(m_waveOut, &m_waveHeader, sizeof(m_waveHeader)) != MMSYSERR_NOERROR)
{
std::cout << "Error unpreparing header!" << std::endl;
return;
}
}
private:
HWAVEOUT m_waveOut; // Handle to sound card output
WAVEFORMATEX m_waveFormat; // The sound format
WAVEHDR m_waveHeader; // WAVE header for our sound data
HANDLE m_done; // Event Handle that tells us the sound has finished being played.
// This is a very efficient way to put the program to sleep
// while the sound card is processing the sound buffer
};
int main()
{
FILE * fileDes;
fopen_s(&fileDes, "Ducksauce.raw", "rb");
if (fileDes == nullptr)
std::cout << "File opening failed.\n";
int bufferSize = FRAME_SIZE_IN_BYTES;
char *buffer = new char[bufferSize];
SoundPlayer sp;
// Read first time
fread(buffer, sizeof(char), bufferSize, fileDes);
while (true)
{
AudioFrame af;
af.Data = buffer;
af.DataSize = bufferSize;
// Start playing, but don't block
sp.StartPlaying(&af);
// Prepare the next chunk
if (fread(buffer, sizeof(char), bufferSize, fileDes) <= 0)
break;
// Now block the code, waiting with next chunk already loaded
// and ready to be played in the next iteration.
sp.WaitUntilFrameFinishes();
}
fclose(fileDes);
delete[] buffer;
return 0;
}
Edit 2: It works if I add this before the while loop:
for (int i = 0; i < 3; i++ )
{
fread(buffer, sizeof(char), bufferSize, fileDes);
af.Data = buffer;
af.DataSize = bufferSize;
sp.StartPlaying(&af);
}
I also modified the while loop a bit:
while (true)
{
// Prepare the next chunk
if (fread(buffer, sizeof(char), bufferSize, fileDes) <= 0)
break;
// Now block the code, waiting with next chunk already loaded
// and ready to be played in the next iteration.
sp.WaitUntilFrameFinishes();
af.Data = buffer;
af.DataSize = bufferSize;
sp.StartPlaying(&af);
}
You should read the data from disk while the sound plays, not in between buffers!
If you can't read the whole file at once, you should change your Play function so that it doesn't just call WaitForSingleObject. Using it makes your code block and wait until the sound stops playing.
What you need instead is to start playing, then go back to your reading loop, prepare the next buffer, and then wait for the music to end, like so (in SoundPlayer):
void WaitUntilFrameFinishes()
{
    // Wait until the sound finishes playing
    if (WaitForSingleObject(m_done, INFINITE) != WAIT_OBJECT_0)
    {
        std::cout << "Error waiting for sound to finish" << std::endl;
        return;
    }
    // ... then move the rest of Play (waveOutUnprepareHeader, etc.) here
}
Then back in the main loop:
// Read first frame
fread(buffer, sizeof(char), bufferSize, fileDes);
while (true)
{
AudioFrame af;
af.Data = buffer;
af.DataSize = bufferSize;
// Start playing, but don't block
sp.Play(&af);
// Prepare the next chunk
    if (fread(buffer, sizeof(char), bufferSize, fileDes) <= 0)
        break;
    // Now block the code, waiting with the next chunk already loaded
    // and ready to be played in the next iteration.
    sp.WaitUntilFrameFinishes();
}
Ideally you'd also wrap the fread calls into something that can provide chunks in a nicer way.
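For instance, a minimal sketch of such a wrapper; the FrameReader name and interface are purely illustrative, and it assumes the AudioFrame struct from the question:

#include <cstdio>
#include <vector>

// Hypothetical helper that hands out fixed-size chunks read from a file.
class FrameReader
{
public:
    FrameReader(const char* path, int frameSize)
        : m_frameSize(frameSize), m_buffer(frameSize)
    {
        fopen_s(&m_file, path, "rb");
    }
    ~FrameReader() { if (m_file) fclose(m_file); }

    // Fills `af` with the next chunk; returns false when no data is left.
    bool NextFrame(AudioFrame& af)
    {
        if (!m_file)
            return false;
        size_t read = fread(m_buffer.data(), 1, m_frameSize, m_file);
        if (read == 0)
            return false;
        af.Data = m_buffer.data();
        af.DataSize = static_cast<int>(read);
        return true;
    }

private:
    FILE* m_file = nullptr;
    int m_frameSize;
    std::vector<char> m_buffer;
};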
After a day of trying to figure out audio playback from the documentation alone, I found this excellent tutorial. If anyone found this thread while trying to implement audio playback with the Waveform Audio API, it is a very good point of reference (surely much better than my buggy code above).
As for my code, I suspect it doesn't work correctly because you are supposed to keep a queue of several AudioFrames submitted via waveOutWrite() at all times, to prevent a situation where the sound card has to wait for the next AudioFrame.
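For reference, a minimal sketch of that idea, assuming the SoundPlayer/AudioFrame pieces from the question; the three-buffer count and the QueueFrame/WaitForBuffer names are just illustrative:

#include <windows.h>
#include <vector>

constexpr int NUM_BUFFERS = 3;            // keep several buffers in flight
WAVEHDR headers[NUM_BUFFERS] = {};
std::vector<char> buffers[NUM_BUFFERS];

// Submit one frame into slot `i` (copied, because the device reads it asynchronously).
void QueueFrame(HWAVEOUT waveOut, int i, const AudioFrame& af)
{
    buffers[i].assign(af.Data, af.Data + af.DataSize);
    headers[i] = {};
    headers[i].lpData = buffers[i].data();
    headers[i].dwBufferLength = af.DataSize;
    waveOutPrepareHeader(waveOut, &headers[i], sizeof(WAVEHDR));
    waveOutWrite(waveOut, &headers[i], sizeof(WAVEHDR));
}

// Before reusing slot `i`, wait until the device has finished playing it.
void WaitForBuffer(HWAVEOUT waveOut, int i, HANDLE doneEvent)
{
    while (!(headers[i].dwFlags & WHDR_DONE))
        WaitForSingleObject(doneEvent, INFINITE);   // CALLBACK_EVENT signals on each finished buffer
    waveOutUnprepareHeader(waveOut, &headers[i], sizeof(WAVEHDR));
}

The main loop would then queue NUM_BUFFERS frames up front, and afterwards always wait for the oldest slot, refill it, and resubmit it, so the device always has data queued.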
Related
In a previous question, I asked how to implement asynchronous I/O. This code now works, except that it never stops at the end. It seems that aio_read reads starting at the given offset, for the given length, and if that is past the end of the file, the operation still succeeds? This code builds and runs on Ubuntu 20.04 LTS and successfully reads blocks 1-5, each 512 bytes, but when it runs out of file it keeps oscillating between blocks 4 and 5. It never terminates.
Here is the code:
#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <condition_variable>
#include <cstring>
#include <iostream>
#include <thread>
using namespace std;
using namespace std::chrono_literals;
constexpr uint32_t blockSize = 512;
mutex readMutex;
bool readReady = false;
condition_variable cv;
bool operation_completed = false;
int fh;
int bytesRead;
void process(char* buf, uint32_t bytesRead) {
cout << "processing..." << endl;
usleep(100000);
}
void aio_completion_handler(sigval_t sigval) {
struct aiocb* req = (struct aiocb*)sigval.sival_ptr;
// check whether asynch operation is complete
int status;
if ((status = aio_error(req)) != 0) {
cout << "Error: " << status << '\n';
return;
}
int ret = aio_return(req);
bytesRead = req->aio_nbytes;
cout << "ret == " << ret << endl;
cout << (char*)req->aio_buf << endl;
unique_lock<mutex> readLock(readMutex);
operation_completed = true;
cv.notify_one();
}
void thready() {
char* buf1 = new char[blockSize];
char* buf2 = new char[blockSize];
aiocb cb;
char* processbuf = buf1;
char* readbuf = buf2;
fh = open("smallfile.dat", O_RDONLY);
if (fh < 0) {
throw std::runtime_error("cannot open file!");
}
memset(&cb, 0, sizeof(aiocb));
cb.aio_fildes = fh;
cb.aio_nbytes = blockSize;
cb.aio_offset = 0;
// Fill in callback information
/*
Using SIGEV_THREAD to request a thread callback function as a notification
method
*/
cb.aio_sigevent.sigev_notify_attributes = nullptr;
cb.aio_sigevent.sigev_notify = SIGEV_THREAD;
cb.aio_sigevent.sigev_notify_function = aio_completion_handler;
/*
The context to be transmitted is loaded into the handler (in this case, a
reference to the aiocb request itself). In this handler, we simply refer to
the arrived sigval pointer and use the AIO function to verify that the request
has been completed.
*/
cb.aio_sigevent.sigev_value.sival_ptr = &cb;
int cursor = 0;
int currentBytesRead = read(fh, buf1, blockSize); // read the 1st block
while (true) {
cb.aio_buf = readbuf;
operation_completed = false; // reset the predicate; the completion handler sets it to true
cb.aio_offset = cursor;
aio_read(&cb); // each next block is read asynchronously
process(processbuf, currentBytesRead); // process while waiting
{
unique_lock<mutex> readLock(readMutex);
cv.wait( readLock, []{ return operation_completed; } );
}
if (!operation_completed)
break;
currentBytesRead = bytesRead; // make local copy of global modified by the asynch code
cursor += bytesRead;
if (currentBytesRead < blockSize) {
break; // last time, get out
}
cout << "back from wait" << endl;
swap(processbuf, readbuf); // switch to other buffer for next time
currentBytesRead = bytesRead; // create local copy
}
delete[] buf1;
delete[] buf2;
}
int main() {
try {
thready();
} catch (std::exception& e) {
cerr << e.what() << '\n';
}
return 0;
}
First, is the above code an appropriate way to do this, or should I get the length of the file and figure out exactly how many reads to do?
Second, if that is how it is supposed to work, fine, but how can aio_read just report success when I try to read past the end of the file? The error status is always zero. I am confused about what it is supposed to do.
(The test file, smallfile.dat, contains five 512-byte blocks, filled with 1, 2, 3, 4, and 5 respectively.)
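For what it's worth, a minimal sketch of the distinction that seems to matter here, assuming aio_return() reports the actual number of bytes transferred (0 when the read starts at or past end of file, just like read()); the handler body and the file_size helper are only illustrative:

#include <aio.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>

// Body of a SIGEV_THREAD completion handler: take the byte count from
// aio_return(), not from aio_nbytes (which is only the requested length).
void on_read_complete(sigval_t sigval) {
    struct aiocb* req = static_cast<struct aiocb*>(sigval.sival_ptr);
    if (aio_error(req) != 0)
        return;                        // the request failed
    ssize_t got = aio_return(req);     // actual bytes read; 0 means end of file
    if (got == 0) {
        // ... tell the reading loop there is nothing left to read ...
    }
}

// Alternatively, query the file size up front to know how many blocks to expect.
off_t file_size(int fd) {
    struct stat st{};
    return (fstat(fd, &st) == 0) ? st.st_size : -1;
}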
I am trying to decode a video stream from the browser using the ffmpeg API. The stream is produced by the webcam and recorded with MediaRecorder as webm format. What I ultimately need is a vector of opencv cv::Mat objects for further processing.
I have written a C++ webserver using the uWebsocket library. The video stream is sent via websocket from the browser to the server once per second. On the server, I append the received data to my custom buffer and decode it with the ffmpeg API.
If I just save the data on the disk and later I play it with a media player, it works fine. So, whatever the browser sends is a valid video.
I don't think I correctly understand how the custom I/O should behave with network streaming, as nothing seems to be working.
The custom buffer:
struct Buffer
{
std::vector<uint8_t> data;
int currentPos = 0;
};
The readAVBuffer method for custom IO
int MediaDecoder::readAVBuffer(void* opaque, uint8_t* buf, int buf_size)
{
MediaDecoder::Buffer* mbuf = (MediaDecoder::Buffer*)opaque;
int count = 0;
for(int i=0;i<buf_size;i++)
{
int index = i + mbuf->currentPos;
if(index >= (int)mbuf->data.size())
{
break;
}
count++;
buf[i] = mbuf->data.at(index);
}
if(count > 0) mbuf->currentPos+=count;
std::cout << "read : "<<count<<" "<<mbuf->currentPos<<", buff size:"<<mbuf->data.size() << std::endl;
if(count <= 0) return AVERROR(EAGAIN); //is this error that should be returned? It cannot be EOF since we're not done yet, most likely
return count;
}
The big decode method, which is supposed to return whatever frames it could read:
std::vector<cv::Mat> MediaDecoder::decode(const char* data, size_t length)
{
std::vector<cv::Mat> frames;
//add data to the buffer
for(size_t i=0;i<length;i++) {
buf.data.push_back(data[i]);
}
//do not invoke the decoders until we have 1MB of data
if(((buf.data.size() - buf.currentPos) < 1*1024*1024) && !initializedCodecs) return frames;
std::cout << "decoding data length "<<length<<std::endl;
if(!initializedCodecs) //initialize ffmpeg objects. Custom I/O, format, decoder, etc.
{
//these are just members of the class
avioCtxPtr = std::unique_ptr<AVIOContext,avio_context_deleter>(
avio_alloc_context((uint8_t*)av_malloc(4096),4096,0,&buf,&readAVBuffer,nullptr,nullptr),
avio_context_deleter());
if(!avioCtxPtr)
{
std::cerr << "Could not create IO buffer" << std::endl;
return frames;
}
fmt_ctx = std::unique_ptr<AVFormatContext,avformat_context_deleter>(avformat_alloc_context(),
avformat_context_deleter());
fmt_ctx->pb = avioCtxPtr.get();
fmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO ;
//fmt_ctx->max_analyze_duration = 2 * AV_TIME_BASE; // read 2 seconds of data
{
AVFormatContext *fmtCtxRaw = fmt_ctx.get();
if (avformat_open_input(&fmtCtxRaw, "", nullptr, nullptr) < 0) {
std::cerr << "Could not open movie" << std::endl;
return frames;
}
}
if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
std::cerr << "Could not find stream information" << std::endl;
return frames;
}
if((video_stream_idx = av_find_best_stream(fmt_ctx.get(), AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0)) < 0)
{
std::cerr << "Could not find video stream" << std::endl;
return frames;
}
AVStream *video_stream = fmt_ctx->streams[video_stream_idx];
AVCodec *dec = avcodec_find_decoder(video_stream->codecpar->codec_id);
video_dec_ctx = std::unique_ptr<AVCodecContext,avcodec_context_deleter> (avcodec_alloc_context3(dec),
avcodec_context_deleter());
if (!video_dec_ctx)
{
std::cerr << "Failed to allocate the video codec context" << std::endl;
return frames;
}
avcodec_parameters_to_context(video_dec_ctx.get(),video_stream->codecpar);
video_dec_ctx->thread_count = 1;
/* video_dec_ctx->max_b_frames = 0;
video_dec_ctx->frame_skip_threshold = 10;*/
AVDictionary *opts = nullptr;
av_dict_set(&opts, "refcounted_frames", "1", 0);
av_dict_set(&opts, "deadline", "1", 0);
av_dict_set(&opts, "auto-alt-ref", "0", 0);
av_dict_set(&opts, "lag-in-frames", "1", 0);
av_dict_set(&opts, "rc_lookahead", "1", 0);
av_dict_set(&opts, "drop_frame", "1", 0);
av_dict_set(&opts, "error-resilient", "1", 0);
int width = video_dec_ctx->width;
videoHeight = video_dec_ctx->height;
if(avcodec_open2(video_dec_ctx.get(), dec, &opts) < 0)
{
std::cerr << "Failed to open the video codec context" << std::endl;
return frames;
}
AVPixelFormat pFormat = AV_PIX_FMT_BGR24;
img_convert_ctx = std::unique_ptr<SwsContext,swscontext_deleter>(sws_getContext(width, videoHeight,
video_dec_ctx->pix_fmt, width, videoHeight, pFormat,
SWS_BICUBIC, nullptr, nullptr,nullptr),swscontext_deleter());
frame = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());
frameRGB = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());
int numBytes = av_image_get_buffer_size(pFormat, width, videoHeight,32 /*https://stackoverflow.com/questions/35678041/what-is-linesize-alignment-meaning*/);
std::unique_ptr<uint8_t,avbuffer_deleter> imageBuffer((uint8_t *) av_malloc(numBytes*sizeof(uint8_t)),avbuffer_deleter());
av_image_fill_arrays(frameRGB->data,frameRGB->linesize,imageBuffer.get(),pFormat,width,videoHeight,32);
frameRGB->width = width;
frameRGB->height = videoHeight;
initializedCodecs = true;
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
pkt.size = 0;
int read_frame_return = 0;
while ( (read_frame_return=av_read_frame(fmt_ctx.get(), &pkt)) >= 0)
{
readFrame(&frames,&pkt,video_dec_ctx.get(),frame.get(),img_convert_ctx.get(),
videoHeight,frameRGB.get());
//if(cancelled) break;
}
avioCtxPtr->eof_reached = 0;
avioCtxPtr->error = 0;
//flush
// readFrame(frames.get(),nullptr,video_dec_ctx.get(),frame.get(),
// img_convert_ctx.get(),videoHeight,frameRGB.get());
avioCtxPtr->eof_reached = 0;
avioCtxPtr->error = 0;
if(frames->size() <= 0)
{
std::cout << "buffer pos: "<<buf.currentPos<<", buff size:"<<buf.data.size()
<<",read_frame_return:"<<read_frame_return<< std::endl;
}
return frames;
}
What I would expect is a continuous stream of cv::Mat frames as I feed it more and more data. What actually happens is that after the buffer is fully read I see:
[matroska,webm # 0x507b450] Read error at pos. 1278266 (0x13813a)
[matroska,webm # 0x507b450] Seek to desired resync point failed. Seeking to earliest point available instead.
And then no more bytes are read from the buffer, even if I grow it later.
There is something terribly wrong in what I'm doing here, and I don't understand what.
What I ended up doing was moving the reading of the incoming data and the actual decoding to a separate thread. The read callback, however, simply blocks when no more bytes are available, waiting until something arrives.
When new bytes arrive, they are appended to the buffer and a condition_variable signals the waiting thread to wake up and resume reading data from the buffer.
It works well enough.
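For anyone attempting something similar, here is a minimal sketch of that blocking read callback, assuming the Buffer struct is extended with a mutex/condition_variable pair (the names and the onData producer are illustrative, not my exact code):

#include <algorithm>
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <vector>
extern "C" {
#include <libavutil/error.h>   // AVERROR_EOF
}

struct StreamBuffer
{
    std::vector<uint8_t> data;
    size_t currentPos = 0;
    std::mutex mtx;
    std::condition_variable cv;
    bool finished = false;               // set when the websocket closes
};

// Custom AVIO read callback: block until at least one byte is available.
static int readAVBuffer(void* opaque, uint8_t* buf, int buf_size)
{
    StreamBuffer* b = static_cast<StreamBuffer*>(opaque);
    std::unique_lock<std::mutex> lock(b->mtx);
    b->cv.wait(lock, [b] { return b->currentPos < b->data.size() || b->finished; });
    if (b->currentPos >= b->data.size())
        return AVERROR_EOF;              // stream closed and fully consumed
    int count = static_cast<int>(std::min<size_t>(buf_size, b->data.size() - b->currentPos));
    std::memcpy(buf, b->data.data() + b->currentPos, count);
    b->currentPos += count;
    return count;
}

// Producer side (websocket handler): append the bytes, then wake the decoder thread.
static void onData(StreamBuffer& b, const char* data, size_t length)
{
    {
        std::lock_guard<std::mutex> lock(b.mtx);
        b.data.insert(b.data.end(), data, data + length);
    }
    b.cv.notify_one();
}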
I want to open and read a video in the encoded domain, without decoding. I have written the code below and it runs without errors, but av_read_frame() just returns a series of zeros and then the same negative value over and over.
I'm not sure whether I passed the parameters correctly to the method. Please help.
void CFfmpegmethods::VideoRead(){
av_register_all();
const char *url = "H:\\Sanduni_projects\\ad_1.mp4";
AVDictionary *options = NULL;
AVFormatContext *s = avformat_alloc_context(); //NULL;
//AVFormatContext *avfmt = NULL;
//avformat_alloc_context();
AVPacket pkt;
//AVFormatContext *avformat_alloc_context();
//AVIOContext *avio_alloc_context();
//open an input stream and read the header
int ret = avformat_open_input(&s, url, NULL, NULL);
//avformat_find_stream_info(s, &options); //finding the missing information
if (ret < 0)
abort();
av_dict_set(&options, "video_size", "640x480", 0);
av_dict_set(&options, "pixel_format", "rgb24", 0);
if (avformat_open_input(&s, url, NULL, &options) < 0){
abort();
}
av_dict_free(&options);
AVDictionaryEntry *e;
if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
abort();
}
//int i = 0;
while (1){
//Split what is stored in the file into frames and return one for each call
//returns the next frame of the stream
int frame = av_read_frame(s, &pkt);
//cout <<i << " " << frame << endl;
waitKey(30);
//i++;
}
//make the packet free
av_packet_unref(&pkt);
//Close the file after reading
avformat_close_input(&s);
}
av_read_frame() returns zeros while it is reading packets and then returns negative values. Because my loop runs forever, it prints an infinite number of negative values.
This is the modified code:
while (1){
//Split what is stored in the file into frames and return one for each call
//returns the next frame of the stream
int frame = av_read_frame(s, pkt);
duration = pkt->duration;
size = pkt->size;
total_size = total_size + size;
total_duration = total_duration + duration;
i++;
if (frame < 0) break;
cout << "frame" << i << " " << size << " "<< duration << endl;
}
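For comparison, a minimal sketch of a demux-only loop that terminates at end of file and releases each packet; it assumes an already opened AVFormatContext *s and is a generic pattern, not the asker's exact setup:

extern "C" {
#include <libavformat/avformat.h>
}

// av_read_frame() returns 0 per packet and a negative value (e.g. AVERROR_EOF)
// once the file is exhausted, so the loop must break on that.
void ReadPackets(AVFormatContext* s)
{
    AVPacket* pkt = av_packet_alloc();
    int64_t totalSize = 0;
    int64_t totalDuration = 0;
    while (av_read_frame(s, pkt) >= 0)
    {
        totalSize += pkt->size;
        totalDuration += pkt->duration;
        av_packet_unref(pkt);        // release this packet's payload each iteration
    }
    av_packet_free(&pkt);
}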
I'm reading a UDP MJPEG stream with the FFmpeg API. When I read and display the stream on an ARM processor, I have two problems:
1. The application is too slow and there is a big delay between the network camera and the displayed video.
2. The memory usage increases every time I call av_read_frame().
The source code:
const char *cam1_url = "udp://192.168.1.1:1234";
AVCodec *pCodec;
AVFrame *pFrame, *pFrameRGB;
AVCodecContext *pCodecCon;
AVDictionary *pUdpStreamOptions = NULL;
AVInputFormat *pMjpegFormat = av_find_input_format("mjpeg");
av_dict_set(&pUdpStreamOptions, "fifo_size", "5000000", 0);
av_register_all();
avdevice_register_all();
avcodec_register_all();
avformat_network_init();
AVFormatContext *pFormatCont = avformat_alloc_context();
if(avformat_open_input(&pFormatCont,cam1_url,pMjpegFormat,&pUdpStreamOptions) < 0)
{
cout << "!! Error !! - avformat_open_input(): failed to open input URL" << endl;
}
if(avformat_find_stream_info(pFormatCont,NULL) < 0)
{
cout << "!! Error !! - avformat_find_stream_info(), Failed to retrieve stream info" << endl;
}
av_dump_format(pFormatCont, 0, cam1_url, 0);
int videoStream;
for(int i=0; i< pFormatCont->nb_streams; i++)
{
if(pFormatCont->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
videoStream=i;
cout << " videoStream = " << videoStream << endl;
}
}
pCodecCon = pFormatCont->streams[videoStream]->codec;
pCodec = avcodec_find_decoder(pCodecCon->codec_id);
if(NULL == pCodec)
{
cout << "couldnt find codec" << endl;
return EXIT_FAILURE;
}
if(avcodec_open2(pCodecCon,pCodec,NULL) < 0)
{
cout << "!! Error !! - in avcodec_open2()" << endl;
return EXIT_FAILURE;
}
uint8_t *frameBuffer;
int numRxBytes = 0;
AVPixelFormat pFormat =AV_PIX_FMT_BGR24;
int width_rgb = (int)((float)pCodecCon->width);
int height_rgb = (int)((float)pCodecCon->height);
numRxBytes = avpicture_get_size(pFormat,width_rgb,height_rgb);
frameBuffer = (uint8_t *) av_malloc(numRxBytes*sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrameRGB, frameBuffer, pFormat,width_rgb,height_rgb);
AVPacket rx_pkt; // received packet
int frameFinished = 0;
struct SwsContext *imgConvertCtx;
av_init_packet(&rx_pkt);
while(av_read_frame(pFormatCont, &rx_pkt) >= 0)
{
if(rx_pkt.stream_index == videoStream)
{
av_frame_free(&pFrame);
pFrame = av_frame_alloc();
av_frame_free(&pFrameRGB);
pFrameRGB = av_frame_alloc();
avcodec_decode_video2(pCodecCon, pFrame, &frameFinished,&rx_pkt);
if(frameFinished)
{
imgConvertCtx = sws_getCachedContext(NULL, pFrame->width,pFrame->height, AV_PIX_FMT_YUVJ420P,width_rgb,height_rgb,AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL,NULL);
sws_scale(imgConvertCtx, ((AVPicture*)pFrame)->data, ((AVPicture*)pFrame)->linesize, 0, pCodecCon->height, ((AVPicture *)pFrameRGB)->data, ((AVPicture *)pFrameRGB)->linesize);
av_frame_unref(pFrame);
av_frame_unref(pFrameRGB);
}
}
av_free_packet(&rx_pkt);
av_packet_unref(&rx_pkt);
}
//cvDestroyWindow("Cam1Video");
av_free_packet(&rx_pkt);
avcodec_close(pCodecCon);
av_free(pFrame);
av_free(pFrameRGB);
avformat_close_input(&pFormatCont);
I have read that the reason could be that the FFmpeg libraries buffer the incoming frames while the ARM processor isn't fast enough to process them. After about 4 minutes the system crashes.
How can I solve this problem?
One option could be to tell ffmpeg to act as a frame grabber, i.e. to read frames in real time, as the "-re" flag does. How can I set this flag in the C++ source code? Or can anybody help me solve this problem?
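(For reference on the growing memory usage: the loop above frees and re-allocates pFrame/pFrameRGB for every packet. A more usual pattern is to allocate the frames once and only unref the per-iteration payloads. A minimal, generic sketch follows, using the send/receive decode API from FFmpeg 3.1+, so it is not a drop-in fix for the exact code above.)

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

void DecodeLoop(AVFormatContext* fmt, AVCodecContext* dec, int videoStream)
{
    AVFrame* frame = av_frame_alloc();
    AVPacket* pkt = av_packet_alloc();
    while (av_read_frame(fmt, pkt) >= 0)
    {
        if (pkt->stream_index == videoStream && avcodec_send_packet(dec, pkt) == 0)
        {
            while (avcodec_receive_frame(dec, frame) == 0)
            {
                // ... convert/display `frame` here ...
                av_frame_unref(frame);   // drop the frame's buffers, keep the struct
            }
        }
        av_packet_unref(pkt);            // drop the packet's payload, keep the struct
    }
    av_packet_free(&pkt);
    av_frame_free(&frame);
}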
Thank you very much
I am writing a new framework for a game engine, but I'm stuck on one issue: OpenAL.
I usually use freealut for this, but I can't find it anywhere; the only site that hosted it is offline, and I don't have any copies of it. I even had to dissect someone else's project to find openal32.lib. Either my google-fu has grown weak, or the vast Internet really doesn't have any copies of it.
I found some example code showing how to use OpenAL without the freealut framework, but I can't get it to load multiple files, so I either have to find out why it's not working or somehow locate freealut. I found some sources for it on GitHub, but at the moment building freealut from source is out of the question.
I am using Visual C++ 2010 Express as the IDE.
I modified the code I found into the code below.
Basically it's three functions to load, play, and delete sound files.
It works fine for one sound file, but when I try to load more, it stops working.
#include "AudioLib.h"
#include <iostream>
#include <cstdlib>
#include <Windows.h>
#include <map>
#include <vector>
#include <AL\al.h>
#include <AL\alc.h>
using namespace std;
typedef map <const char *, ALuint > MapType;
MapType soundsbuffer;
MapType soundssource;
int endWithError(char* msg, int error=0)
{
//Display error message in console
cout << msg << "\n";
//system("PAUSE");
return error;
}
vector<const char *> soundslist;
ALCdevice *device;
ALCcontext *context;
int loadSound(const char * input) {
FILE *fp;
unsigned char* buf;
ALuint source;
ALuint buffer;
fp = NULL;
fp = fopen(input,"rb");
char type[4];
DWORD size,chunkSize;
short formatType,channels;
DWORD sampleRate, avgBytesPerSec;
short bytesPerSample, bitsPerSample;
DWORD dataSize;
//Check that the WAVE file is OK
fread(type,sizeof(char),4,fp); //Reads the first bytes in the file
if(type[0]!='R' || type[1]!='I' || type[2]!='F' || type[3]!='F') //Should be "RIFF"
return endWithError ("No RIFF"); //Not RIFF
fread(&size, sizeof(DWORD),1,fp); //Continue to read the file
fread(type, sizeof(char),4,fp); //Continue to read the file
if (type[0]!='W' || type[1]!='A' || type[2]!='V' || type[3]!='E') //This part should be "WAVE"
return endWithError("not WAVE"); //Not WAVE
fread(type,sizeof(char),4,fp); //Continue to read the file
if (type[0]!='f' || type[1]!='m' || type[2]!='t' || type[3]!=' ') //This part should be "fmt "
return endWithError("not fmt "); //Not fmt
//Now we know that the file is a acceptable WAVE file
//Info about the WAVE data is now read and stored
fread(&chunkSize,sizeof(DWORD),1,fp);
fread(&formatType,sizeof(short),1,fp);
fread(&channels,sizeof(short),1,fp);
fread(&sampleRate,sizeof(DWORD),1,fp);
fread(&avgBytesPerSec,sizeof(DWORD),1,fp);
fread(&bytesPerSample,sizeof(short),1,fp);
fread(&bitsPerSample,sizeof(short),1,fp);
fread(type,sizeof(char),4,fp);
if (type[0]!='d' || type[1]!='a' || type[2]!='t' || type[3]!='a') //This part should be "data"
return endWithError("Missing DATA"); //not data
fread(&dataSize,sizeof(DWORD),1,fp); //The size of the sound data is read
//Display the info about the WAVE file
cout << "Chunk Size: " << chunkSize << "\n";
cout << "Format Type: " << formatType << "\n";
cout << "Channels: " << channels << "\n";
cout << "Sample Rate: " << sampleRate << "\n";
cout << "Average Bytes Per Second: " << avgBytesPerSec << "\n";
cout << "Bytes Per Sample: " << bytesPerSample << "\n";
cout << "Bits Per Sample: " << bitsPerSample << "\n";
cout << "Data Size: " << dataSize << "\n";
buf= new unsigned char[dataSize]; //Allocate memory for the sound data
cout << fread(buf,sizeof(BYTE),dataSize,fp) << " bytes loaded\n"; //Read the sound data and display the
//number of bytes loaded.
//Should be the same as the Data Size if OK
//Now OpenAL needs to be initialized
//And an OpenAL Context
device = alcOpenDevice(NULL); //Open the device
if(!device) return endWithError("no sound device"); //Error during device oening
context = alcCreateContext(device, NULL); //Give the device a context
alcMakeContextCurrent(context); //Make the context the current
if(!context) return endWithError("no sound context"); //Error during context handeling
//Stores the sound data
ALuint frequency=sampleRate;; //The Sample Rate of the WAVE file
ALenum format=0; //The audio format (bits per sample, number of channels)
alGenBuffers(1, &buffer); //Generate one OpenAL Buffer and link to "buffer"
alGenSources(1, &source); //Generate one OpenAL Source and link to "source"
if(alGetError() != AL_NO_ERROR) return endWithError("Error GenSource"); //Error during buffer/source generation
//Figure out the format of the WAVE file
if(bitsPerSample == 8)
{
if(channels == 1)
format = AL_FORMAT_MONO8;
else if(channels == 2)
format = AL_FORMAT_STEREO8;
}
else if(bitsPerSample == 16)
{
if(channels == 1)
format = AL_FORMAT_MONO16;
else if(channels == 2)
format = AL_FORMAT_STEREO16;
}
if(!format) return endWithError("Wrong BitPerSample"); //Not valid format
alBufferData(buffer, format, buf, dataSize, frequency); //Store the sound data in the OpenAL Buffer
soundsbuffer[input] = buffer;
soundssource[input] = source;
soundslist.push_back(input);
if(alGetError() != AL_NO_ERROR) {
return endWithError("Error loading ALBuffer"); //Error during buffer loading
}
fclose(fp);
delete[] buf;
}
int playSound(const char * input) {
//Sound setting variables
ALfloat SourcePos[] = { 0.0, 0.0, 0.0 }; //Position of the source sound
ALfloat SourceVel[] = { 0.0, 0.0, 0.0 }; //Velocity of the source sound
ALfloat ListenerPos[] = { 0.0, 0.0, 0.0 }; //Position of the listener
ALfloat ListenerVel[] = { 0.0, 0.0, 0.0 }; //Velocity of the listener
ALfloat ListenerOri[] = { 0.0, 0.0, -1.0, 0.0, 1.0, 0.0 }; //Orientation of the listener
//First direction vector, then vector pointing up)
//Listener
alListenerfv(AL_POSITION, ListenerPos); //Set position of the listener
alListenerfv(AL_VELOCITY, ListenerVel); //Set velocity of the listener
alListenerfv(AL_ORIENTATION, ListenerOri); //Set orientation of the listener
ALuint source = soundssource[input];
ALuint buffer = soundsbuffer[input];
//Source
alSourcei (source, AL_BUFFER, buffer); //Link the buffer to the source
alSourcef (source, AL_PITCH, 1.0f ); //Set the pitch of the source
alSourcef (source, AL_GAIN, 1.0f ); //Set the gain of the source
alSourcefv(source, AL_POSITION, SourcePos); //Set the position of the source
alSourcefv(source, AL_VELOCITY, SourceVel); //Set the velocity of the source
alSourcei (source, AL_LOOPING, AL_FALSE ); //Set if source is looping sound
//PLAY
alSourcePlay(source); //Play the sound buffer linked to the source
if(alGetError() != AL_NO_ERROR) return endWithError("Error playing sound"); //Error when playing sound
//system("PAUSE"); //Pause to let the sound play
}
void deleteSound() {
//Clean-up
//Close the WAVE file
//Delete the sound data buffer
for(int i = 0; i < soundslist.size(); i++) {
const char * out = soundslist[i];
alDeleteSources(1, &soundssource[out]); //Delete the OpenAL Source
alDeleteBuffers(1, &soundsbuffer[out]);
}
//Delete the OpenAL Buffer
soundslist.clear();
alcMakeContextCurrent(NULL); //Make no context current
alcDestroyContext(context); //Destroy the OpenAL Context
alcCloseDevice(device);
}
So what I'm asking for:
I need either the freealut files, or some help with the code.
Any solutions?
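(One hedged observation on the code itself: loadSound() calls alcOpenDevice()/alcCreateContext() on every invocation, so loading a second file creates a second device and context. A minimal sketch of doing that initialization once, reusing the globals declared above; this is a guess at the cause, not a verified fix.)

#include <AL/al.h>
#include <AL/alc.h>

// The same globals declared in the code above.
extern ALCdevice *device;
extern ALCcontext *context;

// Call once at startup, before any loadSound() call.
bool initAudio()
{
    device = alcOpenDevice(NULL);               // open the default output device once
    if (!device) return false;
    context = alcCreateContext(device, NULL);   // one context for the whole program
    if (!context) return false;
    alcMakeContextCurrent(context);
    return true;
}

// Call once at shutdown, after deleteSound().
void shutdownAudio()
{
    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device);
}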
OK, the OpenAL site seems to be partially back online.
For anyone who needs the link: http://connect.creativelabs.com/openal/Downloads/Forms/AllItems.aspx?RootFolder=http%3a%2f%2fconnect%2ecreativelabs%2ecom%2fopenal%2fDownloads%2fALUT&FolderCTID=0x01200073059C4C04B4D14B80686126F6C1A2E8