VisualGestureBuilder API not working with more than one player - C++

I'm using the VisualGestureBuilder C++ API to track three simple discrete gestures.
When there is only one player in front of the Kinect, it works great.
But as soon as a second player arrives, the gesture analysis goes haywire: for both players I get erratic (though smooth) results, as if the data for the two players and for the three gestures were being mixed together, with random data added on top...
And while the VGB results go wrong, I still get valid body data (I'm drawing the skeletons at the same time).
I discovered that this happens as soon as more than one IVisualGestureBuilderFrameReader is in the unpaused state.
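To illustrate (this is just a sketch using the same types as the code below, not something from my original build): keeping at most one reader unpaused at a time avoids the problem described above.
// Sketch only: pause every gesture reader except the one in 'active_slot'.
// Uses the same GESTURES_STREAM / BODY_COUNT declarations shown below.
void keep_single_reader_unpaused(int active_slot)
{
    for (int i = 0; i < BODY_COUNT; i++)
    {
        IVisualGestureBuilderFrameReader* reader = gestures_stream.slots[i].reader;
        if (reader != nullptr)
            reader->put_IsPaused((i == active_slot) ? FALSE : TRUE);
    }
}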
Here are some portions of my code:
class GESTURES_STREAM
{
friend class MY_KINECT;
private:
struct SLOT
{
IVisualGestureBuilderDatabase* database = nullptr;
IGesture** gestures = nullptr;
UINT gestures_count = 0;
IVisualGestureBuilderFrameSource* source = nullptr;
IVisualGestureBuilderFrameReader* reader = nullptr;
IVisualGestureBuilderFrame* frame = nullptr;
};
SLOT slots[BODY_COUNT];
public:
GESTURES_RESULTS gestures_results[BODY_COUNT];
};
GESTURES_STREAM gestures_stream;
//----------------------------------------------------------------------------------------------------
void MY_KINECT::start_gestures_stream(wstring vgb_file)
{
for (int i = 0; i < BODY_COUNT; i++)
{
hr = CreateVisualGestureBuilderDatabaseInstanceFromFile(vgb_file.c_str(), &gestures_stream.slots[i].database);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
gestures_stream.slots[i].gestures_count = 0;
hr = gestures_stream.slots[i].database->get_AvailableGesturesCount(&gestures_stream.slots[i].gestures_count);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
if (gestures_stream.slots[i].gestures_count == 0)
{
MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
}
gestures_stream.slots[i].gestures = new IGesture*[gestures_stream.slots[i].gestures_count];
hr = gestures_stream.slots[i].database->get_AvailableGestures(gestures_stream.slots[i].gestures_count, gestures_stream.slots[i].gestures);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
/////
hr = CreateVisualGestureBuilderFrameSource(kinect_sensor, 0, &gestures_stream.slots[i].source);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
hr = gestures_stream.slots[i].source->AddGestures(gestures_stream.slots[i].gestures_count, gestures_stream.slots[i].gestures);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
hr = gestures_stream.slots[i].source->OpenReader(&gestures_stream.slots[i].reader);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
hr = gestures_stream.slots[i].reader->put_IsPaused(TRUE);
if (FAILED(hr)) MY_UTILITIES::fatal_error("Kinect initialization error: gestures");
/////
safe_release(&gestures_stream.slots[i].database);
}
}
//----------------------------------------------------------------------------------------------------
void MY_KINECT::update()
{
//...
//...
// handling all the different stream, including body
//...
//...
if (body_stream.data_is_new == true)
{
for (int i = 0; i < BODY_COUNT; i++)
{
if (gestures_stream.slots[i].source != nullptr)
{
BOOLEAN is_tracked;
if (body_stream.bodies[i]->get_IsTracked(&is_tracked) == S_OK)
{
if (is_tracked == TRUE)
{
UINT64 a;
if (body_stream.bodies[i]->get_TrackingId(&a) == S_OK)
{
UINT64 b;
if (gestures_stream.slots[i].source->get_TrackingId(&b) == S_OK)
{
if (a != b)
{
gestures_stream.slots[i].source->put_TrackingId(a);
}
}
}
BOOLEAN paused;
if (gestures_stream.slots[i].reader->get_IsPaused(&paused) == S_OK)
{
if (paused == TRUE)
{
gestures_stream.slots[i].reader->put_IsPaused(FALSE);
}
}
}
else
{
BOOLEAN paused;
if (gestures_stream.slots[i].reader->get_IsPaused(&paused) == S_OK)
{
if (paused == FALSE)
{
gestures_stream.slots[i].reader->put_IsPaused(TRUE);
}
}
}
}
}
if (gestures_stream.slots[i].reader != nullptr)
{
if (gestures_stream.slots[i].reader->CalculateAndAcquireLatestFrame(&gestures_stream.slots[i].frame) == S_OK)
{
BOOLEAN is_valid;
if (gestures_stream.slots[i].frame->get_IsTrackingIdValid(&is_valid) == S_OK)
{
if (is_valid == TRUE)
{
for (int j = 0; j < gestures_stream.slots[i].gestures_count; j++)
{
wchar_t gesture_name[256];
if (gestures_stream.slots[i].gestures[j]->get_Name(_countof(gesture_name), gesture_name) == S_OK)
{
IDiscreteGestureResult* discrete_result = nullptr;
if (gestures_stream.slots[i].frame->get_DiscreteGestureResult(gestures_stream.slots[i].gestures[j], &discrete_result) == S_OK)
{
BOOLEAN detected;
if (discrete_result->get_Detected(&detected) == S_OK)
{
if (detected == TRUE)
{
float confidence;
if (discrete_result->get_Confidence(&confidence) == S_OK)
{
gestures_stream.gestures_results[i].results[gesture_name] = confidence;
}
}
else
{
gestures_stream.gestures_results[i].results[gesture_name] = 0;
}
}
safe_release(&discrete_result);
}
}
}
}
}
safe_release(&gestures_stream.slots[i].frame);
}
}
}
}
}
//----------------------------------------------------------------------------------------------------

Related

rapid TS fragment ffmpeg decoding - memory leak

Environment:
Ubuntu 16.04 (x64)
C++
ffmpeg
Use-case
Multiple MPEG-TS fragments are decoded rapidly (several every second)
The format of the TS fragments is dynamic and can't be known ahead of time
The first A/V frames of each fragment need to be extracted
Problem statement
The code below successfully decodes A/V, BUT it has a huge memory leak (MBytes/sec)
According to the docs it seems all memory is freed as it should be (does it...?)
Why do I get this huge memory leak? What am I missing in the following code snippet?
struct MEDIA_TYPE {
ffmpeg::AVMediaType eType;
union {
struct {
ffmpeg::AVPixelFormat colorspace;
int width, height;
float fFPS;
} video;
struct : WAVEFORMATEX {
short sSampleFormat;
} audio;
} format;
};
struct FRAME {
enum { MAX_PALNES = 3 + 1 };
int iStrmId;
int64_t pts; // Duration in 90Khz clock resolution
uint8_t** ppData; // Null terminated
int32_t* pStride;// Zero terminated
};
HRESULT ProcessTS(IN Operation op, IN uint8_t* pTS, IN uint32_t uiBytes, bool(*cb)(IN const MEDIA_TYPE& mt, IN FRAME& frame, IN PVOID pCtx), IN PVOID pCbCtx)
{
uiBytes -= uiBytes % 188;// align to 188 packet size
struct CONTEXT {
uint8_t* pTS;
uint32_t uiBytes;
int32_t iPos;
} ctx = { pTS, uiBytes, 0 };
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this);
ffmpeg::AVFormatContext* pFmtCtx = 0;
if (0 == (pFmtCtx = ffmpeg::avformat_alloc_context()))
return E_OUTOFMEMORY;
ffmpeg::AVIOContext* pIoCtx = ffmpeg::avio_alloc_context(pTS, uiBytes, 0, &ctx
, [](void *opaque, uint8_t *buf, int buf_size)->int {
auto pCtx = (CONTEXT*)opaque;
int size = pCtx->uiBytes;
if (pCtx->uiBytes - pCtx->iPos < buf_size)
size = pCtx->uiBytes - pCtx->iPos;
if (size > 0) {
memcpy(buf, pCtx->pTS + pCtx->iPos, size);
pCtx->iPos += size;
}
return size;
}
, 0
, [](void* opaque, int64_t offset, int whence)->int64_t {
auto pCtx = (CONTEXT*)opaque;
switch (whence)
{
case SEEK_SET:
pCtx->iPos = offset;
break;
case SEEK_CUR:
pCtx->iPos += offset;
break;
case SEEK_END:
pCtx->iPos = pCtx->uiBytes - offset;
break;
case AVSEEK_SIZE:
return pCtx->uiBytes;
}
return pCtx->iPos;
});
pFmtCtx->pb = pIoCtx;
int iRet = ffmpeg::avformat_open_input(&pFmtCtx, "fakevideo.ts", m_pInputFmt, 0);
if (ERROR_SUCCESS != iRet) {
assert(false);
pFmtCtx = 0;// a user-supplied AVFormatContext will be freed on failure.
return E_FAIL;
}
struct DecodeContext {
ffmpeg::AVStream* pStream;
ffmpeg::AVCodec* pDecoder;
int iFramesProcessed;
};
HRESULT hr = S_OK;
int iStreamsProcessed = 0;
bool bVideoFound = false;
int64_t ptsLast = 0;
int64_t dtsLast = 0;
auto pContext = (DecodeContext*)alloca(sizeof(DecodeContext) * pFmtCtx->nb_streams);
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++) {
assert(pFmtCtx->streams[i]->index == i);
pContext[i].pStream = pFmtCtx->streams[i];
pContext[i].pDecoder = ffmpeg::avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
pContext[i].iFramesProcessed= 0;
if (0 == pContext[i].pDecoder)
continue;
if ((iRet = ffmpeg::avcodec_open2(pFmtCtx->streams[i]->codec, pContext[i].pDecoder, NULL)) < 0) {
_ASSERT(FALSE);
hr = E_FAIL;
goto ErrExit;
}
}
while (S_OK == hr) {
ffmpeg::AVFrame* pFrame = 0;
ffmpeg::AVPacket pkt;
ffmpeg::av_init_packet(&pkt);
if (ERROR_SUCCESS != (iRet = ffmpeg::av_read_frame(pFmtCtx, &pkt))) {
hr = E_FAIL;
break;
}
if ((0 == dtsLast) && (0 != pkt.dts))
dtsLast = pkt.dts;
if ((0 == ptsLast) && (0 != pkt.pts))
ptsLast = pkt.pts;
DecodeContext& ctx = pContext[pkt.stream_index];
if (Operation::DECODE_FIRST_FRAME_OF_EACH_STREAM == op) {
if (iStreamsProcessed == pFmtCtx->nb_streams) {
hr = S_FALSE;
goto Next;
}
if (ctx.iFramesProcessed > 0)
goto Next;
iStreamsProcessed++;
}
if (0 == ctx.pDecoder)
goto Next;
if (0 == (pFrame = ffmpeg::av_frame_alloc())) {
hr = E_OUTOFMEMORY;
goto Next;
}
LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x, decode, S:%d, T:%d\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this, pkt.stream_index, ctx.pStream->codec->codec_type);
int bGotFrame = false;
int iBytesUsed = 0;
MEDIA_TYPE mt;
memset(&mt, 0, sizeof(mt));
mt.eType = ctx.pStream->codec->codec_type;
switch (mt.eType) {
case ffmpeg::AVMediaType::AVMEDIA_TYPE_AUDIO:
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
// FFmpeg AAC decoder oddity: the first call to 'avcodec_decode_audio4' yields muted audio, while the second yields the expected audio
bGotFrame = false;
if ((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
goto Next;
}
_ASSERT(pkt.size == iRet);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if (false == bGotFrame)
goto Next;
iBytesUsed = ctx.pStream->codec->frame_size;
mt.format.audio.nChannels = ctx.pStream->codec->channels;
mt.format.audio.nSamplesPerSec = ctx.pStream->codec->sample_rate;
mt.format.audio.wBitsPerSample = ffmpeg::av_get_bytes_per_sample(ctx.pStream->codec->sample_fmt) * 8;
mt.format.audio.nBlockAlign = mt.format.audio.nChannels * mt.format.audio.wBitsPerSample / 8;
mt.format.audio.sSampleFormat = (short)pFrame->format;
break;
case ffmpeg::AVMediaType::AVMEDIA_TYPE_VIDEO:
if ((iRet = ffmpeg::avcodec_decode_video2(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
hr = E_FAIL;
break;
}
if (false == bGotFrame)
goto Next;
assert(ffmpeg::AVPixelFormat::AV_PIX_FMT_YUV420P == ctx.pStream->codec->pix_fmt);// That is the only color space currently supported
iBytesUsed = (ctx.pStream->codec->width * ctx.pStream->codec->height * 3) / 2;
mt.format.video.width = ctx.pStream->codec->width;
mt.format.video.height = ctx.pStream->codec->height;
mt.format.video.colorspace = ctx.pStream->codec->pix_fmt;
mt.format.video.fFPS = (float)ctx.pStream->codec->framerate.num / ctx.pStream->codec->framerate.den;
bVideoFound = true;
break;
default:
goto Next;
}
ctx.iFramesProcessed++;
{
FRAME f = { ctx.pStream->index, ((0 == ptsLast) ? dtsLast : ptsLast), (uint8_t**)pFrame->data, (int32_t*)pFrame->linesize };
if ((iRet > 0) && (false == cb(mt, f, pCbCtx)))
hr = S_FALSE;// Breaks the loop
}
Next:
ffmpeg::av_free_packet(&pkt);
if (0 != pFrame) {
//ffmpeg::av_frame_unref(pFrame);
ffmpeg::av_frame_free(&pFrame);
pFrame = 0;
}
}
ErrExit:
for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++)
ffmpeg::avcodec_close(pFmtCtx->streams[i]->codec);
pIoCtx->buffer = 0;// We allocated the buffer ourselves; no need for ffmpeg to free it for us
pFmtCtx->pb = 0;
ffmpeg::av_free(pIoCtx);
ffmpeg::avformat_close_input(&pFmtCtx);
ffmpeg::avformat_free_context(pFmtCtx);
return hr;
}
You need to unref the packets before reusing them, and there's no need to allocate and deallocate them all the time.
Here's how I do it, which might help you:
// Initialise a packet queue (a member of my 'ff' context struct)
std::list<AVPacket *> packets;
...
for (int c = 0; c < MAX_PACKETS; c++) {
ff->packets.push_back(av_packet_alloc());
}
while (!quit) {
... get packet from queue
int err = av_read_frame(ff->context, packet);
... process packet (audio, video, etc)
av_packet_unref(packet); // release its data, then put the packet back in the queue for reuse
}
// Release packets
while (ff->packets.size()) { // free packets
AVPacket *packet = ff->packets.front();
av_packet_free(&packet);
ff->packets.pop_front();
}
In your code you're freeing a packet that was never allocated in the first place.
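For completeness, here is a minimal self-contained version of that allocate-once / unref-per-packet pattern (a sketch against FFmpeg's packet API; pFmtCtx here stands for an already opened AVFormatContext):
extern "C" {
#include <libavformat/avformat.h>
}
// Read every packet from an already-opened format context, reusing one AVPacket.
static void drain_packets(AVFormatContext* pFmtCtx)
{
    AVPacket* pkt = av_packet_alloc();       // allocate the packet struct once
    if (!pkt)
        return;
    while (av_read_frame(pFmtCtx, pkt) >= 0) // fills pkt with a reference-counted payload
    {
        // ... hand pkt to the decoder here ...
        av_packet_unref(pkt);                // drop the payload, keep the struct for reuse
    }
    av_packet_free(&pkt);                    // free the struct itself exactly once
}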

Mix_LoadWAV returns NULL when it shouldn't

I'm making a game in C++, but I've been running into an issue I just can't figure out when it comes to the sound. I'm using SDL_mixer, with Mix_Music for the music and Mix_Chunk for the sound effects, but for some reason, when I try to load a sound-effect file using Mix_LoadWAV, it returns NULL.
The thing is, when I load the same file using Mix_LoadMUS (loading it as music instead of a sound effect), it loads fine.
I can't find any difference that would explain the issue; the only change is Mix_LoadWAV instead of Mix_LoadMUS, but why? Mix_LoadMUS doesn't work for a Mix_Chunk, or else I'd just use that, but...
Here, I'll show you the code for the sound manager I use; MusicClip wraps a Mix_Music and SoundClip wraps a Mix_Chunk.
#include "stdafx.h"
#include "SoundManager.h"
#include "MusicClip.h"
#include "SoundClip.h"
SoundManager::SoundManager()
{
}
SoundManager::~SoundManager()
{
Shutdown();
}
bool SoundManager::Initialize()
{
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO) < 0)
{
printf("SDL could not initialize! SDL Error: %s\n", SDL_GetError());
}
if (Mix_OpenAudio(44100, MIX_DEFAULT_FORMAT, 2, 2048) < 0)
{
printf(" SDL_mixer :: Msx_OpenAudio %s\n", Mix_GetError());
return false;
}
return true;
}
void SoundManager::Shutdown()
{
for (unsigned int i = 0; i < m_axSoundClips.size(); i++)
{
delete m_axSoundClips[i];
m_axSoundClips[i] = nullptr;
}
m_axSoundClips.clear();
for (unsigned int i = 0; i < m_axMusicClips.size(); i++)
{
delete m_axMusicClips[i];
m_axMusicClips[i] = nullptr;
}
m_axMusicClips.clear();
{
std::map<std::string, Mix_Chunk*>::iterator it = m_axSounds.begin();
while (it != m_axSounds.end())
{
Mix_FreeChunk(it->second);
it->second = nullptr;
it++;
}
m_axSounds.clear();
}
{
std::map<std::string, Mix_Music*>::iterator it = m_axMusic.begin();
while (it != m_axMusic.end())
{
Mix_FreeMusic(it->second);
it->second = nullptr;
it++;
}
m_axMusic.clear();
}
Mix_CloseAudio();
}
MusicClip *SoundManager::CreateMusicClip(std::string p_sFilename)
{
MusicClip *Ret = nullptr;
std::map<std::string, Mix_Music*>::iterator it = m_axMusic.find(p_sFilename);
if (it == m_axMusic.end())
{
Mix_Music* Music = Mix_LoadMUS(p_sFilename.c_str());
if (Music == NULL) {
printf("Error; music will not play\n");
printf(p_sFilename.c_str());
printf("\n");
printf(Mix_GetError());
}
std::pair<std::string, Mix_Music*> Pair;
Pair = std::make_pair(p_sFilename, Music);
Ret = new MusicClip(Music);
}
else
Ret = new MusicClip(it->second);
m_axMusicClips.push_back(Ret);
return Ret;
}
SoundClip *SoundManager::CreateSoundClip(std::string p_sFilename)
{
SoundClip *Ret = nullptr;
std::map<std::string, Mix_Chunk*>::iterator it = m_axSounds.find(p_sFilename);
if (it == m_axSounds.end())
{
Mix_Chunk* Sound = Mix_LoadWAV(p_sFilename.c_str());
if (Sound == NULL) {
printf("\nError; sound will not play\n");
printf(p_sFilename.c_str());
printf("\n");
printf(Mix_GetError());
}
std::pair<std::string, Mix_Chunk*> Pair;
Pair = std::make_pair(p_sFilename, Sound);
Ret = new SoundClip(Sound);
}
else
Ret = new SoundClip(it->second);
m_axSoundClips.push_back(Ret);
return Ret;
}
Here's the code for MusicClip and SoundClip; they're nearly identical, apart from the PlayOnce and fade-out I added to MusicClip.
#include "stdafx.h"
#include "SoundClip.h"
SoundClip::SoundClip()
{
m_pxClip = nullptr;
m_iChannel = -1;
}
SoundClip::~SoundClip()
{
m_pxClip = nullptr;
}
SoundClip::SoundClip(Mix_Chunk* p_pxClip)
{
m_pxClip = p_pxClip;
m_iChannel = -1;
}
void SoundClip::Play()
{
m_iChannel = Mix_PlayChannel(-1, m_pxClip, 0);
}
void SoundClip::Stop()
{
if (m_iChannel == -1)
return;
Mix_HaltChannel(m_iChannel);
m_iChannel = -1;
}
void SoundClip::Volume(int p_iVolume)
{
if (m_iChannel == -1)
return;
Mix_Volume(m_iChannel, p_iVolume);
}
void SoundClip::Pause()
{
if (m_iChannel == -1)
return;
if (Mix_Paused(m_iChannel))
{
Mix_Resume(m_iChannel);
}
else
{
Mix_Pause(m_iChannel);
}
}
-
// MusicClip.cpp
#include "stdafx.h"
#include "MusicClip.h"
MusicClip::MusicClip()
{
m_xClip = nullptr;
m_iChannel = -1;
}
MusicClip::~MusicClip()
{
m_xClip = nullptr;
m_iChannel = -1;
}
MusicClip::MusicClip(Mix_Music* p_xClip)
{
m_xClip = p_xClip;
m_iChannel = -1;
}
void MusicClip::Play()
{
m_iChannel = Mix_PlayMusic(m_xClip, -1);
}
void MusicClip::PlayOnce()
{
m_iChannel = Mix_PlayMusic(m_xClip, 1);
}
void MusicClip::Pause()
{
if (m_iChannel == -1)
return;
if ( Mix_PausedMusic() )
Mix_ResumeMusic();
else
Mix_Pause(m_iChannel);
}
void MusicClip::Volume(int p_iVolume)
{
Mix_VolumeMusic(p_iVolume);
oldVolume = p_iVolume;
}
void MusicClip::FadeOut()
{
if (oldVolume>0)
oldVolume--;
Mix_VolumeMusic(oldVolume);
}
void MusicClip::Stop()
{
if (m_iChannel == -1)
return;
Mix_HaltChannel(m_iChannel);
m_iChannel = -1;
}
And this is the code I use to call these functions. (The SoundManager was initialized earlier in the program to start up the BGM.) So this, using the Mix_Chunk-based SFX2, won't work:
void GameState::playSFX(std::string FX)
{
SFX2 = soundManager.CreateSoundClip("../assets/sfx/sfx-bleep.wav");
SFX2->Volume(60);
SFX2->Play();
}
But for some reason, if I change SFX2 to a Mix_Music*, this DOES work.
void GameState::playSFX(std::string FX)
{
SFX2 = soundManager.CreateMusicClip("../assets/sfx/sfx-bleep.wav");
SFX2->Volume(60);
SFX2->Play();
}
What do I do? Mix_GetError() doesn't return anything either... If I write the path wrong it returns "couldn't open file", so clearly it can find and open the file, it just... won't load it? I can't just use Mix_Music for my sound effects either, because if I do they cancel out the music.
By the way, I don't know if it's related, but Mix_LoadMUS won't load non-WAV files either, even though it's supposed to support .ogg and .mp3 too?
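In case it helps, here is about the smallest stand-alone test I could think of (just a sketch, not my actual project code): it initializes the optional decoders explicitly with Mix_Init and prints the error immediately after the Mix_LoadWAV call.
#include <SDL.h>
#include <SDL_mixer.h>
#include <cstdio>
int main(int argc, char* argv[])
{
    SDL_Init(SDL_INIT_AUDIO);
    Mix_Init(MIX_INIT_OGG | MIX_INIT_MP3); // explicit .ogg / .mp3 decoder init
    if (Mix_OpenAudio(44100, MIX_DEFAULT_FORMAT, 2, 2048) < 0)
    {
        printf("Mix_OpenAudio failed: %s\n", Mix_GetError());
        return 1;
    }
    Mix_Chunk* chunk = Mix_LoadWAV("../assets/sfx/sfx-bleep.wav");
    if (chunk == NULL)
        printf("Mix_LoadWAV failed: %s\n", Mix_GetError());
    else
        Mix_FreeChunk(chunk);
    Mix_CloseAudio();
    Mix_Quit();
    SDL_Quit();
    return 0;
}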

Kinect SDK 2.0 handle and acquire depth frame

I am working with the Kinect v2 (the one for the Xbox One) and I am trying to get a depth stream; I want to see what the Kinect's depth sensor sees. But I can't manage to open a stream. With another piece of code I can acquire a single frame, but not a video. After some research I tried to use handles, but the code I wrote never prints the 'stream' line at the end to the screen. I am working in VS2012 and the code is in C++.
I think this is because I don't know how to use a handle correctly... If anybody could help me and explain what a handle really is, beyond being some kind of pointer to something, that would be great. Thank you.
Here is my code:
HRESULT hr=S_OK;
WAITABLE_HANDLE *stream=nullptr;
IKinectSensor* kinectSensor=nullptr;
if( SUCCEEDED(hr) )
{
std::cout << "Success IKinectSensor::GetDefaultSensor" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::GetDefaultSensor" << std::endl;
}
std::cout << "Opening sensors" << std::endl;
if(kinectSensor != NULL)
{
hr = kinectSensor->Open();
Sleep(sleeptime*5);
if( SUCCEEDED( hr ) )
{
std::cout << "Success IKinectSensor::Open" << std::endl;
}
else
{
std::cout << "Failed IKinectSensor::Open" << std::endl;
}
}
}
hr = kinectSensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth | FrameSourceTypes_Color , &multiSourceReader);
if( SUCCEEDED(hr) )
{
std::cout << "reader open" << std::endl;
hr = multiSourceReader->SubscribeMultiSourceFrameArrived(stream);
if( SUCCEEDED(hr) )
{
std::cout << "stream" << std::endl;
}
}
I didn't use a handle. See the code snippet below for getting the depth frame from a multisource frame reader and displaying it with OpenCV.
#include <Kinect.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
// helper function
template <class Interface> inline void safe_release(Interface **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
bool setup_kinect_sensor_and_acquire_frame()
{
IKinectSensor* kinect_sensor = nullptr;
// initialize kinect sensor
HRESULT hr = GetDefaultKinectSensor(&kinect_sensor);
if (FAILED(hr) || !kinect_sensor)
{
return false;
}
hr = kinect_sensor->Open();
if (FAILED(hr))
{
return false;
}
// initialize kinect multisource frame reader
IMultiSourceFrameReader* kinect_multisource_reader;
hr = kinect_sensor->OpenMultiSourceFrameReader(FrameSourceTypes_Depth, &kinect_multisource_reader);
if (FAILED(hr))
{
safe_release(&kinect_multisource_reader);
return false;
}
// acquire multisource frame
IMultiSourceFrame* p_multisource_frame = NULL;
hr = kinect_multisource_reader->AcquireLatestFrame(&p_multisource_frame);
if (FAILED(hr))
{
safe_release(&p_multisource_frame);
return false;
}
// get depth frame
IDepthFrameReference* p_depth_frame_ref = NULL;
hr = p_multisource_frame->get_DepthFrameReference(&p_depth_frame_ref);
if (FAILED(hr))
{
safe_release(&p_depth_frame_ref);
return false;
}
IDepthFrame* p_depth_frame = NULL;
IFrameDescription* p_frame_description = NULL;
int width = 0;
int height = 0;
unsigned short depth_min_distance = 0;
unsigned short depth_max_distance = 0;
unsigned short* p_depth_buffer = NULL;
hr = p_depth_frame_ref->AcquireFrame(&p_depth_frame);
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_FrameDescription(&p_frame_description);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Width(&width);
}
if (SUCCEEDED(hr))
{
hr = p_frame_description->get_Height(&height);
}
if (width != 512 || height != 424)
{
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
return false;
}
// process depth frame
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMinReliableDistance(&depth_min_distance);
}
if (SUCCEEDED(hr))
{
hr = p_depth_frame->get_DepthMaxReliableDistance(&depth_max_distance);
}
if (SUCCEEDED(hr))
{
int size = 512 * 424;
p_depth_buffer = new unsigned short[size];
hr = p_depth_frame->CopyFrameDataToArray(size, p_depth_buffer);
if (SUCCEEDED(hr))
{
cv::Mat depth_map(cv::Size(width, height), CV_16UC1, p_depth_buffer);
double scale = 255.0 / (depth_max_distance - depth_min_distance);
cv::Mat depth_display;
depth_map.convertTo(depth_display, CV_8UC1, scale);
cv::imshow("depth", depth_display);
cv::waitKey(1); // give HighGUI a chance to actually draw the window
}
}
// Clean up
safe_release(&p_depth_frame_ref);
safe_release(&p_depth_frame);
safe_release(&p_frame_description);
safe_release(&p_multisource_frame);
if (p_depth_buffer != NULL) {
delete[] p_depth_buffer;
p_depth_buffer = NULL;
}
if (FAILED(hr))
{
return false;
}
else
{
return true;
}
}
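To get a continuous stream rather than a single image, the acquisition part has to run in a loop; roughly like the sketch below (my addition, assuming the sensor and reader above are created once up front and the depth extraction/display code is factored into a helper). Iterations where no new frame is ready simply fail the HRESULT check and are skipped.
// Sketch of a polling loop reusing the reader created in the snippet above.
void poll_depth_frames(IMultiSourceFrameReader* kinect_multisource_reader)
{
    while (true)
    {
        IMultiSourceFrame* p_frame = NULL;
        if (SUCCEEDED(kinect_multisource_reader->AcquireLatestFrame(&p_frame)))
        {
            // ... extract and display the depth frame exactly as shown above ...
            safe_release(&p_frame);
        }
        if (cv::waitKey(30) == 27) // poll roughly every 30 ms; press Esc to stop
            break;
    }
}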

FMOD playSound throws an error about invalid parameter

I've tried building some sort of audio manager after OpenAL failed to deliver on certain machines, and that's how I found out about FMOD. However, after a few hours of changes to the code nothing really works; my playSound call seems to be the problem.
An invalid parameter was passed to this function.
That is exactly what the error check prints.
The code... let's start from the beginning:
typedef std::map<std::string, FMOD::Sound*> SoundPool;
SoundPool m_sounds;
FMOD::Channel* m_soundChannels[MAX_SOUNDS];
FMOD::System* m_soundSystem;
and then:
FMOD::System_Create(&m_soundSystem);
m_soundSystem->init(32, FMOD_INIT_NORMAL, NULL);
for(int i = 0; i < MAX_SOUNDS; i++)
m_soundChannels[i] = 0;
and later:
void CResourceManager::PlaySound(std::string filename, float volume)
{
for(int i = 0; i < MAX_SOUNDS; i++)
{
if(m_soundChannels[i] == 0)
{
if(volume > 1.0f) volume = 1.0f;
FMOD_RESULT result = m_soundSystem->playSound(FMOD_CHANNEL_FREE,
m_sounds[filename], false, &m_soundChannels[i]);
if(result != FMOD_OK)
{
std::cout << FMOD_ErrorString(result); //// here's the error
}
m_soundChannels[i]->setVolume(volume);
break;
}
}
}
void CResourceManager::Update()
{
m_soundSystem->update();
for(int i = 0; i < MAX_SOUNDS; i++)
{
if(m_soundChannels[i] != 0)
{
bool playing;
m_soundChannels[i]->isPlaying(&playing);
if(!playing)
m_soundChannels[i] = 0;
}
}
}
bool CResourceManager::AddSound( std::string filename )
{
FMOD_RESULT result = m_soundSystem->createSound(filename.c_str(),
FMOD_LOOP_NORMAL, NULL, &m_sounds[filename]);
if(result == FMOD_OK)
return true;
return false;
}
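For what it's worth, one thing I still plan to rule out (just a guess on my part, and the HasSound helper below is hypothetical, not in my class yet): m_sounds[filename] silently inserts and returns a NULL Sound* if the file was never loaded with AddSound, and passing that NULL to playSound would explain the invalid-parameter error.
// Sketch only: check that the sound actually exists before handing it to playSound.
// Uses the same m_sounds / SoundPool declarations as above.
bool CResourceManager::HasSound(const std::string& filename)
{
    SoundPool::const_iterator it = m_sounds.find(filename); // find() does not insert
    if (it == m_sounds.end() || it->second == 0)
    {
        std::cout << "sound not loaded: " << filename << std::endl;
        return false;
    }
    return true;
}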

Memory leaks in Webbrowser (COM)

This snippet of code generates huge memory leaks. Could you help me find out where this happens?
This code does the following:
1) Gets the IHTMLDocument2 interface
2) Requests the collection of all tags
3) Iterates over the whole collection
4) Adds some of the tags' data to a list
IDispatch* pDisp;
pDisp = this->GetHtmlDocument();
if (pDisp != NULL )
{
IHTMLDocument2* pHTMLDocument2;
HRESULT hr;
hr = pDisp->QueryInterface( IID_IHTMLDocument2,(void**)&pHTMLDocument2 );
if (hr == S_OK)
{
// I know that I could use the IHTMLDocument3 interface to get the collection by ID,
// but it didn't work and returned NULL on each call.
IHTMLElementCollection* pColl = NULL;
// get all tags
hr = pHTMLDocument2->get_all( &pColl );
if (hr == S_OK && pColl != NULL)
{
LONG celem;
hr = pColl->get_length( &celem );
if ( hr == S_OK )
{
//iterate through all tags
// if I run this block of code in a loop, it
// uses up the available memory (up to 2 GB) and then the
// app crashes
for ( int i=0; i< celem; i++ )
{
VARIANT varIndex;
varIndex.vt = VT_UINT;
varIndex.lVal = i;
VARIANT var2;
VariantInit( &var2 );
IDispatch* pElemDisp = NULL;
hr = pColl->item( varIndex, var2, &pElemDisp );
if ( hr == S_OK && pElemDisp != NULL)
{
IHTMLElement* pElem;
hr = pElemDisp->QueryInterface(IID_IHTMLElement,(void **)&pElem);
if ( hr == S_OK)
{
// check INPUT tags only
BSTR tagNameStr = L"";
pElem->get_tagName(&tagNameStr);
CString tagname(tagNameStr);
SysFreeString(tagNameStr);
tagname.MakeLower();
if (tagname != "input")
{
continue;
}
//get ID attribute
BSTR bstr = L"";
pElem->get_id(&bstr);
CString idStr(bstr);
SysFreeString(bstr);
if (RequiredTag(pElem))
{
AddTagToList(pElem);
}
//release all objects
pElem->Release();
}
pElemDisp->Release();
}
}
}
// I looked over this code snippet many times and couldn't find what I'm missing here...
pColl->Release();
}
pHTMLDocument2->Release();
}
pDisp->Release();
}
Inside your loop, for each retrieved element that does not have a tag name of "input" (which will be most elements), you are not calling pElem->Release() before calling continue, so you are leaking them:
if (tagname != "input")
{
pElem->Release(); // <-- add this
continue;
}
With that said, you should rewrite your code to use ATL's smart pointer classes (CComPtr, CComQIPtr, CComBSTR, etc.) to manage the memory for you, so you no longer have to release everything manually yourself, e.g.:
CComPtr<IDispatch> pDisp;
pDisp.Attach(this->GetHtmlDocument());
if (pDisp.p != NULL)
{
CComQIPtr<IHTMLDocument2> pHTMLDocument2(pDisp);
if (pHTMLDocument2.p != NULL)
{
CComPtr<IHTMLElementCollection> pColl;
pHTMLDocument2->get_all(&pColl);
if (pColl.p != NULL)
{
LONG celem;
if (SUCCEEDED(pColl->get_length(&celem)))
{
for (LONG i = 0; i < celem; ++i)
{
VARIANT varIndex;
varIndex.vt = VT_UINT;
varIndex.lVal = i;
VARIANT var2;
VariantInit( &var2 );
CComPtr<IDispatch> pElemDisp;
pColl->item( varIndex, var2, &pElemDisp );
if (pElemDisp.p != NULL)
{
CComQIPtr<IHTMLElement> pElem(pElemDisp);
if (pElem.p != NULL)
{
CComBSTR tagNameStr;
pElem->get_tagName(&tagNameStr);
if (lstrcmpiW(tagNameStr.m_str, L"input") != 0)
continue;
CComBSTR idStr;
pElem->get_id(&idStr);
if (RequiredTag(pElem))
AddTagToList(pElem);
}
}
}
}
}
}
}
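As a brief follow-up on why the early continue is now safe: each ATL wrapper releases its interface pointer or frees its BSTR in its destructor at the end of the loop iteration, so nothing leaks on early exits. A tiny illustration (separate from the code above):
#include <atlbase.h>
#include <atlcomcli.h>
void scope_demo(IUnknown* pSomething)
{
    for (int i = 0; i < 3; ++i)
    {
        CComPtr<IUnknown> p = pSomething; // AddRef() happens here
        CComBSTR text(L"input");          // SysAllocString() happens here
        if (i == 0)
            continue;                     // destructors still run: Release() and SysFreeString()
    }
}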