How to get a sound to stop playing in OpenAL (C++)

I am trying to have a play-sound method execute on a click event, followed by a stop method being called on the release, using OpenAL in C++. My problem is that I cannot get it to stop playing on the release. My source code to play the sound is as follows:
bool SoundManager::play(QString fileName, float pitch, float gain)
{
    static uint sourceIndex = 0;
    ALint state;
    // Get the corresponding buffer id set up in the init function.
    ALuint bufferID = mSoundBuffers[fileName];
    if (bufferID != 0) {
        // Increment which source we are using, so that we play in a "free" source.
        sourceIndex = (sourceIndex + 1) % SOUNDMANAGER_MAX_NBR_OF_SOURCES;
        // Get the source in which the sound will be played.
        ALuint source = mSoundSources[sourceIndex];
        if (alIsSource(source) == AL_TRUE) {
            // Attach the buffer to an available source.
            alSourcei(source, AL_BUFFER, bufferID);
            if (alGetError() != AL_NO_ERROR) {
                reportOpenALError();
                return false;
            }
            // Set the source pitch value.
            alSourcef(source, AL_PITCH, pitch);
            if (alGetError() != AL_NO_ERROR) {
                reportOpenALError();
                return false;
            }
            // Set the source gain value.
            alSourcef(source, AL_GAIN, gain);
            if (alGetError() != AL_NO_ERROR) {
                reportOpenALError();
                return false;
            }
            // Toggle playback depending on the source's current state.
            alGetSourcei(source, AL_SOURCE_STATE, &state);
            if (state != AL_PLAYING)
                alSourcePlay(source);
            else
                alSourceStop(source);
            if (alGetError() != AL_NO_ERROR) {
                reportOpenALError();
                return false;
            }
        }
    } else {
        // The buffer was not found.
        return false;
    }
    return true;
}
I think the issue is that the second call, the one that should stop the sound, lands on a different source, which is why its state is not AL_PLAYING. If that is the problem, how can I access the same source again?

Of course it's not the same source as before: you increment the sourceIndex variable on every call.
So on the first call, the one that starts playback, sourceIndex becomes 1 (sourceIndex + 1). The next time you call the function (which, by the way, is badly named for something that toggles playing), sourceIndex is incremented again, giving you a new index into the source array.
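One way around this is to have the manager hand back the source it actually used, so the release handler can stop exactly that source. A minimal sketch under that assumption (playAndTrack, mClickSource and the zero-means-failure convention are illustrative, not part of the original class):
// Hypothetical variant of play() that returns the source it used
// (0 on failure), so the caller can stop that same source later.
ALuint SoundManager::playAndTrack(QString fileName, float pitch, float gain)
{
    static uint sourceIndex = 0;
    ALuint bufferID = mSoundBuffers[fileName];
    if (bufferID == 0)
        return 0;
    sourceIndex = (sourceIndex + 1) % SOUNDMANAGER_MAX_NBR_OF_SOURCES;
    ALuint source = mSoundSources[sourceIndex];
    alSourcei(source, AL_BUFFER, bufferID);
    alSourcef(source, AL_PITCH, pitch);
    alSourcef(source, AL_GAIN, gain);
    alSourcePlay(source);
    return (alGetError() == AL_NO_ERROR) ? source : 0;
}
// In the click handler:   mClickSource = soundManager->playAndTrack("click.wav", 1.0f, 1.0f);
// In the release handler: if (mClickSource != 0) alSourceStop(mClickSource);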

Related

OpenAL: Sound track distorted and cutting off

I'm working on an OpenAL project that also uses the libsndfile library. I have a problem streaming long soundtracks through OpenAL buffers: the sound is distorted at the beginning and then suddenly cuts off. Here is the base class snippet that determines and sets the format of the clip and calculates the audio frame size:
musicBuffer::musicBuffer(const char* filename){
    alGenSources(1, &se_source);
    alGenBuffers(num_buffers, se_buffers);
    std::size_t frame_size;
    //using sndfile to read the soundtrack
    se_sndfile = sf_open(filename, SFM_READ, &se_sfinfo);
    if (!se_sndfile){
        throw("could not open provided music file -- check path");
    }
    if (se_sfinfo.channels == 1)
        se_format = AL_FORMAT_MONO16;
    else if (se_sfinfo.channels == 2)
        se_format = AL_FORMAT_STEREO16;
    else if (se_sfinfo.channels == 3){
        if (sf_command(se_sndfile, SFC_WAVEX_GET_AMBISONIC, NULL, 0) == SF_AMBISONIC_B_FORMAT)
            se_format = AL_FORMAT_BFORMAT2D_16;
    }
    else if (se_sfinfo.channels == 4){
        if (sf_command(se_sndfile, SFC_WAVEX_GET_AMBISONIC, NULL, 0) == SF_AMBISONIC_B_FORMAT)
            se_format = AL_FORMAT_BFORMAT3D_16;
    }
    if (!se_format){
        sf_close(se_sndfile);
        se_sndfile = NULL;
        throw("Unsupported channel count from file");
    }
    frame_size = ((size_t)b_samples * (size_t)se_sfinfo.channels) * sizeof(short);
    se_membuf = static_cast<short*>(malloc(frame_size));
}
Also, it does not seem like the audio format is the issue, as the problem is present with every audio format I use. Below are the Play() and updateBufferStream() functions, which hand all the relevant data to the audio buffers to be played:
void musicBuffer::Play(){
    ALsizei i;
    alGetError();
    //rewind source position + clear buffer queue
    alSourceRewind(se_source);
    alSourcei(se_source, AL_BUFFER, 0);
    //fill the buffer queue with data from the sound file
    for (i = 0; i < num_buffers; i++){
        sf_count_t slen = sf_readf_short(se_sndfile, se_membuf, b_samples);
        if (slen < 1) break;
        slen *= se_sfinfo.channels * (sf_count_t)sizeof(short);
        alBufferData(se_buffers[i], se_format, se_membuf, (ALsizei)slen, se_sfinfo.samplerate);
    }
    if (alGetError() != AL_NO_ERROR){
        throw("Error buffering for playback");
    }
    //queue the buffers and start playback
    alSourceQueueBuffers(se_source, i, se_buffers);
    alSourcePlay(se_source);
    if (alGetError() != AL_NO_ERROR){
        throw("Error starting playback");
    }
}
void musicBuffer::updateBufferStream(){
    ALint processed, state;
    alGetError();
    //check the source state
    alGetSourcei(se_source, AL_SOURCE_STATE, &state);
    //check the amount of buffers processed
    alGetSourcei(se_source, AL_BUFFERS_PROCESSED, &processed);
    if (alGetError() != AL_NO_ERROR){
        throw("error checking music source state");
    }
    while (processed > 0){
        ALuint bufid;
        sf_count_t slen;
        alSourceUnqueueBuffers(se_source, 1, &bufid);
        processed--;
        //read the next chunk of data, refill the buffer and re-queue it
        slen = sf_readf_short(se_sndfile, se_membuf, b_samples);
        if (slen > 0){
            slen *= se_sfinfo.channels * (sf_count_t)sizeof(short);
            alBufferData(bufid, se_format, se_membuf, (ALsizei)slen,
                         se_sfinfo.samplerate);
            alSourceQueueBuffers(se_source, 1, &bufid);
        }
        if (alGetError() != AL_NO_ERROR){
            throw("error buffering music data");
        }
    }
    //check whether the source has underrun
    if (state != AL_PLAYING && state != AL_PAUSED){
        ALint queued;
        //if there aren't any buffers queued, playback is done
        alGetSourcei(se_source, AL_BUFFERS_QUEUED, &queued);
        if (queued == 0)
            return;
        alSourcePlay(se_source);
        if (alGetError() != AL_NO_ERROR){
            throw("error restarting music playback");
        }
    }
}
Does anyone have any suggestions on what looks wrong, or can anyone identify the issue itself?
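For reference, a streaming class like this is normally driven by calling Play() once and then polling updateBufferStream() frequently from the application's main loop; if the poll interval exceeds the playtime of the queued buffers, the source underruns, which is one common cause of playback cutting off. A minimal sketch of such a driver (the file name, the appIsRunning flag and the 50 ms interval are illustrative assumptions, not code from the question):
#include <chrono>
#include <thread>

// Hypothetical driving loop for the streaming class above.
musicBuffer music("soundtrack.ogg");
music.Play();
while (appIsRunning) {
    // Unqueue processed buffers, refill them from the file and re-queue them.
    music.updateBufferStream();
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
}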

Repeating ffmpeg stream (libavcodec/libavformat)

I am using the various APIs from ffmpeg to draw videos in my application. So far this works very well. Since I also have GIFs, I want to loop them without having to load the file over and over again.
In my code the decoder loop looks like this:
AVPacket packet = {};
av_init_packet(&packet);
while (mIsRunning) {
    int error = av_read_frame(mContext, &packet);
    if (error == AVERROR_EOF) {
        if (mRepeat) {
            logger.info("EOF-repeat");
            auto stream = mContext->streams[mVideoStream];
            av_seek_frame(mContext, mVideoStream, 0, 0);
            continue;
        }
        if (mReadVideo) {
            avcodec_send_packet(mVideoCodec, nullptr);
        }
        if (mReadAudio) {
            avcodec_send_packet(mAudioCodec, nullptr);
        }
        break;
    }
    if (error < 0) {
        char err[AV_ERROR_MAX_STRING_SIZE];
        av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, error);
        logger.error("Failed to read next frame from stream: ", err);
        throw std::runtime_error("Stream reading failed");
    }
    if (packet.stream_index == mVideoStream && mReadVideo) {
        int32 err;
        {
            std::lock_guard<std::mutex> l(mVideoLock);
            err = avcodec_send_packet(mVideoCodec, &packet);
        }
        mImageEvent.notify_all();
        while (err == AVERROR(EAGAIN) && mIsRunning) {
            {
                std::unique_lock<std::mutex> l(mReaderLock);
                mReaderEvent.wait(l);
            }
            {
                std::lock_guard<std::mutex> l(mVideoLock);
                err = avcodec_send_packet(mVideoCodec, &packet);
            }
        }
    }
    av_packet_unref(&packet);
}
Reading a video to the end works perfectly well, and if I don't set mRepeat to true it properly reaches EOF and stops parsing. However, when I use looping the following happens:
The video ends
AVERROR_EOF happens at av_read_frame
EOF-repeat is printed
A random frame is read from the stream (and rendered)
AVERROR_EOF happens at av_read_frame
EOF-repeat is printed
A random frame is read from the stream (and rendered)
...
You can picture it like this: I have a GIF of a spinning globe, and after one full turn it just starts jumping around randomly, sometimes playing correctly for a fraction of a second, sometimes running backwards, and sometimes jumping somewhere else entirely.
I have also tried several variants with avformat_seek_file. What other way is there to reset everything to the beginning and start from scratch again?
I figured out that I also need to reset my IO context to the beginning:
if (mRepeat) {
    auto stream = mContext->streams[mVideoStream];
    avio_seek(mContext->pb, 0, SEEK_SET);
    avformat_seek_file(mContext, mVideoStream, 0, 0, stream->duration, 0);
    continue;
}
Now the video properly loops forever :)
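Depending on the codec, it may also be worth flushing the decoders after seeking back, so that no stale reference frames survive into the next loop iteration. A sketch of the same block with that added, assuming the mVideoCodec/mAudioCodec contexts from the snippets above:
if (mRepeat) {
    auto stream = mContext->streams[mVideoStream];
    avio_seek(mContext->pb, 0, SEEK_SET);
    avformat_seek_file(mContext, mVideoStream, 0, 0, stream->duration, 0);
    // Drop any buffered reference frames from the previous iteration.
    if (mReadVideo) avcodec_flush_buffers(mVideoCodec);
    if (mReadAudio) avcodec_flush_buffers(mAudioCodec);
    continue;
}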

Figuring out a race condition

I am building a screen recorder and am using ffmpeg to assemble the video from frames I get from Google Chrome, but I get a green screen in the output video. I think there is a race condition between the threads, since I am not allowed to do the processing on the main thread. Here is what the code looks like.
This function runs each time I get a new frame. I suspect that the buffers filled by avpicture_fill and vpx_codec_get_cx_data are being overwritten before write_ivf_frame_header and WriteFile are done with them.
I am thinking of creating a queue onto which this function pushes the pp::VideoFrame objects, and another thread that, under a mutex, dequeues them and does the processing below (see the sketch after the code).
What is the best solution for this problem, and what is the optimal way of debugging it?
void EncoderInstance::OnGetFrame(int32_t result, pp::VideoFrame frame) {
    if (result != PP_OK)
        return;
    const uint8_t* data = static_cast<const uint8_t*>(frame.GetDataBuffer());
    pp::Size size;
    frame.GetSize(&size);
    uint32_t buffersize = frame.GetDataBufferSize();
    if (is_recording_) {
        vpx_codec_iter_t iter = NULL;
        const vpx_codec_cx_pkt_t *pkt;
        // copy the pixels into our "raw input" container.
        int bytes_filled = avpicture_fill(&pic_raw, data, AV_PIX_FMT_YUV420P, out_width, out_height);
        if (!bytes_filled) {
            Logger::Log("Cannot fill the raw input buffer");
            return;
        }
        if (vpx_codec_encode(&codec, &raw, frame_cnt, 1, flags, VPX_DL_REALTIME))
            die_codec(&codec, "Failed to encode frame");
        while ((pkt = vpx_codec_get_cx_data(&codec, &iter))) {
            switch (pkt->kind) {
                case VPX_CODEC_CX_FRAME_PKT:
                    glb_app_thread.message_loop().PostWork(callback_factory_.NewCallback(&EncoderInstance::write_ivf_frame_header, pkt));
                    glb_app_thread.message_loop().PostWork(callback_factory_.NewCallback(&EncoderInstance::WriteFile, pkt));
                    break;
                default:
                    break;
            }
        }
        frame_cnt++;
    }
    video_track_.RecycleFrame(frame);
    if (need_config_) {
        ConfigureTrack();
        need_config_ = false;
    } else {
        video_track_.GetFrame(
            callback_factory_.NewCallbackWithOutput(
                &EncoderInstance::OnGetFrame));
    }
}
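Following the queue idea from the question, here is a minimal sketch of a mutex-protected packet queue (FrameQueue, the deep copy and the writer-thread split are illustrative assumptions; the key point is that the encoded data is copied before OnGetFrame returns, whereas the pkt pointer posted to PostWork above may already be stale by the time write_ivf_frame_header and WriteFile run):
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <vector>

// Hypothetical queue: the capture callback deep-copies each encoded packet
// and pushes it; a dedicated writer thread pops packets and writes them to disk.
class FrameQueue {
public:
    void push(const uint8_t* data, size_t size) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            // Copy the bytes, so the encoder is free to reuse its buffer.
            queue_.emplace_back(data, data + size);
        }
        cv_.notify_one();
    }
    std::vector<uint8_t> pop() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        std::vector<uint8_t> pkt = std::move(queue_.front());
        queue_.pop_front();
        return pkt;
    }
private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::deque<std::vector<uint8_t>> queue_;
};

// In OnGetFrame, instead of posting the raw pkt pointer:
//   frameQueue.push(static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);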

Losing quality when encoding with ffmpeg

I am using the C libraries of ffmpeg to read frames from a video and create an output file that is supposed to be identical to the input.
However, somewhere during this process some quality is lost and the result is less sharp than the input. My guess is that the problem is the encoding and that the frames are compressed too much (the file size also decreases quite significantly). Is there some parameter in the encoder that lets me control the quality of the result? I found that AVCodecContext has a compression_level member, but changing it does not seem to have any effect.
I post part of my code here in case it helps. I would say that something must be changed in the init function of OutputVideoBuilder where I set the codec. The AVCodecContext that is passed to the method is the same one from InputVideoHandler.
Here are the two main classes that I created to wrap the ffmpeg functionalities:
// This class opens the video file and sets up the decoder
class InputVideoHandler {
public:
    InputVideoHandler(char* name);
    ~InputVideoHandler();
    AVCodecContext* getCodecContext();
    bool readFrame(AVFrame* frame, int* success);
private:
    InputVideoHandler();
    void init(char* name);
    AVFormatContext* formatCtx;
    AVCodec* codec;
    AVCodecContext* codecCtx;
    AVPacket packet;
    int streamIndex;
};
void InputVideoHandler::init(char* name) {
    streamIndex = -1;
    int numStreams;
    if (avformat_open_input(&formatCtx, name, NULL, NULL) != 0)
        throw std::runtime_error("Invalid input file name.");
    if (avformat_find_stream_info(formatCtx, NULL) < 0)
        throw std::runtime_error("Could not find stream information.");
    numStreams = formatCtx->nb_streams;
    if (numStreams < 0)
        throw std::runtime_error("No streams in input video file.");
    for (int i = 0; i < numStreams; i++) {
        if (formatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            streamIndex = i;
            break;
        }
    }
    if (streamIndex < 0)
        throw std::runtime_error("No video stream in input video file.");
    // find decoder using id
    codec = avcodec_find_decoder(formatCtx->streams[streamIndex]->codec->codec_id);
    if (codec == nullptr)
        throw std::runtime_error("Could not find suitable decoder for input file.");
    // copy context from input stream
    codecCtx = avcodec_alloc_context3(codec);
    if (avcodec_copy_context(codecCtx, formatCtx->streams[streamIndex]->codec) != 0)
        throw std::runtime_error("Could not copy codec context from input stream.");
    if (avcodec_open2(codecCtx, codec, NULL) < 0)
        throw std::runtime_error("Could not open decoder.");
}
// frame must be initialized with av_frame_alloc() beforehand!
// Returns true if there are more frames, false if not.
// success == 1 if frame is valid, 0 if not.
bool InputVideoHandler::readFrame(AVFrame* frame, int* success) {
    *success = 0;
    if (av_read_frame(formatCtx, &packet) < 0)
        return false;
    if (packet.stream_index == streamIndex) {
        avcodec_decode_video2(codecCtx, frame, success, &packet);
    }
    av_free_packet(&packet);
    return true;
}
// This class opens the output file and writes frames to it
class OutputVideoBuilder {
public:
    OutputVideoBuilder(char* name, AVCodecContext* inputCtx);
    ~OutputVideoBuilder();
    void writeFrame(AVFrame* frame);
    void writeVideo();
private:
    OutputVideoBuilder();
    void init(char* name, AVCodecContext* inputCtx);
    void logMsg(AVPacket* packet, AVRational* tb);
    AVFormatContext* formatCtx;
    AVCodec* codec;
    AVCodecContext* codecCtx;
    AVStream* stream;
};
void OutputVideoBuilder::init(char* name, AVCodecContext* inputCtx) {
    if (avformat_alloc_output_context2(&formatCtx, NULL, NULL, name) < 0)
        throw std::runtime_error("Could not determine file extension from provided name.");
    codec = avcodec_find_encoder(inputCtx->codec_id);
    if (codec == nullptr) {
        throw std::runtime_error("Could not find suitable encoder.");
    }
    codecCtx = avcodec_alloc_context3(codec);
    if (avcodec_copy_context(codecCtx, inputCtx) < 0)
        throw std::runtime_error("Could not copy output codec context from input");
    codecCtx->time_base = inputCtx->time_base;
    codecCtx->compression_level = 0;
    if (avcodec_open2(codecCtx, codec, NULL) < 0)
        throw std::runtime_error("Could not open encoder.");
    stream = avformat_new_stream(formatCtx, codec);
    if (stream == nullptr) {
        throw std::runtime_error("Could not allocate stream.");
    }
    stream->id = formatCtx->nb_streams - 1;
    stream->codec = codecCtx;
    stream->time_base = codecCtx->time_base;
    av_dump_format(formatCtx, 0, name, 1);
    if (!(formatCtx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&formatCtx->pb, name, AVIO_FLAG_WRITE) < 0) {
            throw std::runtime_error("Could not open output file.");
        }
    }
    if (avformat_write_header(formatCtx, NULL) < 0) {
        throw std::runtime_error("Error occurred when opening output file.");
    }
}
void OutputVideoBuilder::writeFrame(AVFrame* frame) {
    AVPacket packet = { 0 };
    int success;
    av_init_packet(&packet);
    if (avcodec_encode_video2(codecCtx, &packet, frame, &success))
        throw std::runtime_error("Error encoding frames");
    if (success) {
        av_packet_rescale_ts(&packet, codecCtx->time_base, stream->time_base);
        packet.stream_index = stream->index;
        logMsg(&packet, &stream->time_base);
        av_interleaved_write_frame(formatCtx, &packet);
    }
    av_free_packet(&packet);
}
This is the part of the main function that reads and write frames:
while (inputHandler->readFrame(frame, &gotFrame)) {
    if (gotFrame) {
        try {
            outputBuilder->writeFrame(frame);
        }
        catch (const std::exception& e) {
            std::cout << e.what() << std::endl;
            return -1;
        }
    }
}
Your qmin/qmax answer is partially correct, but it misses the point: the quality does go up, but the compression ratio (in terms of quality per bit) suffers significantly as you restrict the qmin/qmax range, i.e. you spend many more bits to achieve the same quality than would be necessary if you used the encoder optimally.
To increase quality without hurting the compression ratio, you need to actually raise the quality target. How you do this differs slightly between codecs, but you typically increase the target CRF value or the target bitrate. For command-line options, see e.g. the H264 docs; identical docs exist for HEVC/VP9. To use these options from the C API, call av_opt_set() with the same option names/values.
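As a concrete illustration of the av_opt_set() route, a minimal sketch assuming the encoder is libx264 and codecCtx has not been opened yet (the CRF value 18 is just an example; lower means higher quality):
// CRF is a private option of the encoder, so it is set on priv_data,
// and it must be set before avcodec_open2().
if (av_opt_set(codecCtx->priv_data, "crf", "18", 0) < 0) {
    // Option not found: the encoder is probably not libx264/libx265/libvpx.
}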
In case this could be useful to someone else, I am adding the answer that damjeux suggested, which worked for me. AVCodecContext has two members, qmin and qmax, which control the QP (quantization parameter) range of the encoder. By default, in my case, qmin is 2 and qmax is 31. Setting qmax to a lower value improves the quality of the output.
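For illustration, that fix would land in the init function of OutputVideoBuilder above, before avcodec_open2() (the qmax value 10 is an arbitrary example; lower forces the encoder to spend more bits per frame):
// Clamp the quantizer range: the encoder may not compress any frame
// beyond qmax, so a lower qmax means a higher minimum quality.
codecCtx->qmin = 2;
codecCtx->qmax = 10; // default was 31 in the author's case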

Trying to play something in OpenAL, but it never gets out of the AL_INITIAL state

I'm attempting to play an audio file using OpenAL, but it's not actually playing anything. When I check whether the audio file is playing after calling alSourcePlay, the state is still AL_INITIAL.
Here's where I play the audio:
// Make sure the audio source ident is valid and usable
if (audioID >= MAX_AUDIO_SOURCES || !mAudioSourceInUse[audioID])
    return false;
int sourceAudioState = 0;
if (alIsSource(mAudioSources[audioID]) != AL_TRUE)
    return false;
alGetError();
// Are we currently playing the audio source?
alGetSourcei(mAudioSources[audioID], AL_SOURCE_STATE, &sourceAudioState);
if (sourceAudioState == AL_PLAYING)
{
    printf("Currently playing!\n");
    if (forceRestart)
        stopAudio(audioID);
    else
        return false; // Not forced, so we don't do anything
}
alSourcePlay(mAudioSources[audioID]);
if (checkALError("playAudio::alSourcePlay: "))
    return false;
alGetSourcei(mAudioSources[audioID], AL_SOURCE_STATE, &sourceAudioState);
printState(sourceAudioState);
if (sourceAudioState == AL_PLAYING)
{
    printf("Now playing!\n\n");
}
return true;
mAudioSources is an array of sources created by alGenSources. The file is loaded with
mAudioBuffers[bufferID] = alutCreateBufferFromFile(filename.c_str());
which does not raise an error in alGetError. Any ideas?
alutCreateBufferFromFile will not cause an error in alGetError. You have to use alutGetError instead (note the alut prefix instead of al). If alutCreateBufferFromFile fails, it returns AL_NONE, which is a valid buffer: the NULL buffer, which will not be played.
Check the ALUT documentation for information on alutGetError as well as the OpenAL Specs.
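A minimal sketch of checking the load that way (the error handling shown is illustrative; alutGetErrorString converts the code into a readable message):
mAudioBuffers[bufferID] = alutCreateBufferFromFile(filename.c_str());
if (mAudioBuffers[bufferID] == AL_NONE) {
    // alutGetError, not alGetError, reports why the load failed.
    ALenum error = alutGetError();
    fprintf(stderr, "alutCreateBufferFromFile failed: %s\n",
            alutGetErrorString(error));
    return false;
}
// Attach the loaded buffer to the source before alSourcePlay;
// an AL_NONE buffer gives OpenAL nothing to play.
alSourcei(mAudioSources[audioID], AL_BUFFER, mAudioBuffers[bufferID]);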