I managed to compile, bundle and load resources under Xcode 4.3 with SDL 1.2.15.
I know the resources are loading correctly because the file handles are not null and no error is thrown.
I successfully load PNGs and TTFs, obtain and crop surfaces, and blit them.
But when I flip, the only things I see are the lines I drew using SDL_Draw.
I'll paste some bits of code; I'm trying to keep an engine-ish structure, so the code is spread across several classes.
Initialization:
void CEngine::Init() {
    // Register SDL_Quit to be called at exit; makes sure things are cleaned up when we quit.
    atexit( SDL_Quit );

    // Initialize SDL's subsystems.
    if ( SDL_Init( SDL_INIT_EVERYTHING ) < 0 ) {
        fprintf( stderr, "Unable to init SDL: %s\n", SDL_GetError() );
        exit( 1 );
    }

    // Attempt to create a window with the specified height and width.
    SetSize( m_iWidth, m_iHeight );

    // If we fail, return error.
    if ( m_pScreen == NULL ) {
        fprintf( stderr, "Unable to set up video: %s\n", SDL_GetError() );
        exit( 1 );
    }

    AdditionalInit();
}
and
void CTileEngine::AdditionalInit() {
    SetTitle( "TileEngine - Loading..." );

    PrintDebug( "Initializing SDL_Image" );
    int flags = IMG_INIT_PNG;
    int initted = IMG_Init( flags );
    if ( ( initted & flags ) != flags ) {
        PrintDebug( "IMG_Init: Failed to init required image support!" );
        PrintDebug( IMG_GetError() );
        // handle error
    }

    PrintDebug( "Initializing SDL_TTF" );
    if ( TTF_Init() == -1 ) {
        PrintDebug( "TTF_Init: Failed to init required ttf support!" );
        PrintDebug( TTF_GetError() );
    }

    PrintDebug( "Loading fonts" );
    font = TTF_OpenFont( OSXFileManager::GetResourcePath( "Roboto-Regular.ttf" ), 28 );
    if ( !font ) {
        PrintDebug( "Error loading fonts" );
        PrintDebug( TTF_GetError() );
    }

    g_pGame = new CGame;
    LoadGame( OSXFileManager::GetResourcePath( "test", "tmx" ) );

    SetTitle( "TileEngine" );
    PrintDebug( "Finished AdditionalInit()" );
}
Main draw method
void CEngine::DoRender() {
    ++m_iFPSCounter;
    if ( m_iFPSTickCounter >= 1000 ) {
        m_iCurrentFPS = m_iFPSCounter;
        m_iFPSCounter = 0;
        m_iFPSTickCounter = 0;
    }

    SDL_FillRect( m_pScreen, 0, SDL_MapRGB( m_pScreen->format, 0, 0, 0 ) );

    // Lock surface if needed
    if ( SDL_MUSTLOCK( m_pScreen ) ) {
        if ( SDL_LockSurface( m_pScreen ) < 0 ) {
            return;
        }
    }

    Render( GetSurface() );

    // Render FPS
    SDL_Surface* pDestSurface = GetSurface();
    SDL_Color fpsColor = { 255, 255, 255 };
    string fpsMessage = "FPS: ";
    fpsMessage.append( SSTR( m_iCurrentFPS ) );
    SDL_Surface* fps = TTF_RenderText_Solid( font, fpsMessage.c_str(), fpsColor );
    if ( fps ) {
        SDL_Rect destRect;
        destRect.x = pDestSurface->w - fps->w;
        destRect.y = pDestSurface->h - fps->h;
        destRect.w = fps->w;
        destRect.h = fps->h;
        SDL_BlitSurface( fps, &fps->clip_rect, pDestSurface, &destRect );
        SDL_FreeSurface( fps );
    }

    // Unlock if needed
    if ( SDL_MUSTLOCK( m_pScreen ) )
        SDL_UnlockSurface( m_pScreen );

    // Tell SDL to update the whole screen
    SDL_Flip( m_pScreen );
}
Image file loading
bool CEntity::VLoadImageFromFile( const string& sFile ) {
    if ( m_pSurface != 0 ) {
        SDL_FreeSurface( m_pSurface );
    }

    string nFile = string( OSXFileManager::APPNAME ) + OSXFileManager::RESOURCEDIR + sFile;

    SDL_Surface *pTempSurface;
    pTempSurface = IMG_Load( nFile.c_str() );
    m_sImage = sFile;

    if ( pTempSurface == 0 ) {
        char czError[256];
        sprintf( czError, "Image '%s' could not be opened. Reason: %s", nFile.c_str(), IMG_GetError() );
        fprintf( stderr, "\nERROR: %s", czError );
        return false;
    } else {
        pTempSurface = SDL_DisplayFormatAlpha( pTempSurface );
    }

    m_pSurface = pTempSurface;
    return true;
}
Entity draw method
void CEntity::VRender( SDL_Surface *pDestSurface ) {
    if ( ( m_pSurface == 0 ) || ( m_bVisible == false ) || ( m_iAlpha == 0 ) ) {
        // If the surface is invalid or it's 100% transparent.
        return;
    }

    SDL_Rect SDestRect;
    SDestRect.x = m_iPosX;
    SDestRect.y = m_iPosY;
    SDestRect.w = m_pSurface->w;
    SDestRect.h = m_pSurface->h;

    if ( m_iAlpha != 255 )
        SDL_SetAlpha( m_pSurface, SDL_SRCALPHA, m_iAlpha );

    SDL_BlitSurface( m_pSurface, &m_pSurface->clip_rect, pDestSurface, &SDestRect );
}
I have checked and debugged this a million times and I can't see what's wrong. As I said before, file loading seems to be OK.
But this part
void CTile::RenderGrid( SDL_Surface* pDestSurface ) {
    Uint32 m_GridColor = SDL_MapRGB( pDestSurface->format, 0xFF, 0xFF, 0xFF );
    Draw_Rect( pDestSurface, GetPosX(), GetPosY(), GetWidth(), GetHeight(), m_GridColor );
}
works like a charm.
I found out what was happening. It turns out that since SDL version 1.1.18, SDL_LockSurface calls are recursive, so each lock must be paired with its own unlock. That was not the case the last time I used SDL, so I was not aware of it. Simply matching every lock with an unlock did the job.
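In case it helps anyone else, here is a minimal sketch of the pattern, using the same m_pScreen member as the code above: since the locks are recursive, every successful SDL_LockSurface() must be matched by exactly one SDL_UnlockSurface().

// Sketch: with recursive surface locks, every lock needs its own unlock.
// Locking twice and unlocking only once can leave the surface locked,
// which is why the blits never showed up here.
if ( SDL_MUSTLOCK( m_pScreen ) ) {
    if ( SDL_LockSurface( m_pScreen ) < 0 ) {
        return; // could not lock; skip this frame
    }
}

Render( GetSurface() ); // all blitting happens here

if ( SDL_MUSTLOCK( m_pScreen ) ) {
    SDL_UnlockSurface( m_pScreen ); // pairs with the lock above
}

SDL_Flip( m_pScreen );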
Related
I'm developing an application which captures frames from another application using the DXGI desktop duplication mechanism.
The first creation of the IDXGIOutputDuplication works correctly. When the captured application changes its display mode (for example from fullscreen to windowed or vice versa), frame acquisition fails (expected behaviour), so I recreate the IDXGIOutputDuplication, and then the call to DuplicateOutput regularly crashes.
Below is the creation of the D3D11 device:
D3D_FEATURE_LEVEL FeatureLevels[] =
{
    D3D_FEATURE_LEVEL_11_1,
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_1
};
UINT NumFeatureLevels = ARRAYSIZE( FeatureLevels );
D3D_FEATURE_LEVEL FeatureLevel = D3D_FEATURE_LEVEL_11_1;

D3D11CreateDevice( _pDxgiAdapter, D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0, FeatureLevels, NumFeatureLevels, D3D11_SDK_VERSION, &_pD3D11Device, &FeatureLevel, &_pD3D11DeviceCtx );
_pDxgiAdapter is created before the device.
Below is the creation of the IDXGIOutputDuplication:
int DxgiVideoCapture::open() {
    findDxgiAdapter();
    if( nullptr != _pDxgiAdapter ) {
        // Duplicate output
        uint32_t ui32OutputIndex = 0;
        HRESULT hr = S_OK;
        while( SUCCEEDED( hr ) ) {
            IDXGIOutput* pDxgiOutput = nullptr;
            hr = _pDxgiAdapter->EnumOutputs( ui32OutputIndex, &pDxgiOutput );
            if( SUCCEEDED( hr ) ) {
                IDXGIOutput1* pDxgiOutput1 = nullptr;
                if( SUCCEEDED( pDxgiOutput->QueryInterface( __uuidof( IDXGIOutput1 ), ( void** )&pDxgiOutput1 ) ) ) {
                    uint32_t ui32EnumMode = 0;
                    uint32_t ui32Flags = 0xffffffff;
                }
                if( SUCCEEDED( pDxgiOutput->QueryInterface( __uuidof( IDXGIOutput1 ), ( void** )&_pDxgiOutput1 ) ) ) {
                    LOGI( "/!\\ Call which crashes regularly /!\\" );
                    HRESULT hrDup = _pDxgiOutput1->DuplicateOutput( _pD3D11Device, &_pOutputDuplication );
                    if( SUCCEEDED( hrDup ) ) {
                        LOGI( "Output duplication created." );
                    } else {
                        switch( hrDup ) {
                            case E_INVALIDARG :
                                LOGW( "Invalid device or output already duplicated" );
                                break;
                            case E_ACCESSDENIED :
                                LOGW( "Access denied" );
                                break;
                            case DXGI_ERROR_UNSUPPORTED :
                                LOGW( "DXGI_ERROR_UNSUPPORTED" );
                                break;
                            case DXGI_ERROR_NOT_CURRENTLY_AVAILABLE :
                                LOGW( "DXGI_ERROR_NOT_CURRENTLY_AVAILABLE" );
                                break;
                            case DXGI_ERROR_SESSION_DISCONNECTED :
                                LOGW( "DXGI_ERROR_SESSION_DISCONNECTED" );
                                break;
                            default:
                                LOGW( "default" );
                        }
                    }
                } else {
                    LOGE( "Unable to retrieve interface creating the output duplication" );
                }
                pDxgiOutput->Release();
            }
            ui32OutputIndex++;
        }
    }
    return RET_OK;
}
Below is the frame acquisition:
int DxgiVideoCapture::captureGameFrame() {
    int iRet = RET_ERROR;
    ID3D11Texture2D* pCapturedTexture = nullptr;
    DXGI_OUTDUPL_FRAME_INFO frameInfo;
    ZeroMemory( &frameInfo, sizeof( frameInfo ) );

    HRESULT hr = _pOutputDuplication->AcquireNextFrame( 1000, &frameInfo, &_pDxgiResource );
    if( FAILED( hr ) ) {
        if( hr == DXGI_ERROR_WAIT_TIMEOUT ) {
            LOGW( "Wait for %d ms timed out", 1000 );
        }
        if( hr == DXGI_ERROR_INVALID_CALL ) {
            LOGW( "Invalid Call, previous frame not released?" );
        }
        if( hr == DXGI_ERROR_ACCESS_LOST ) {
            LOGW( "Error Access lost - is it a game end ?" );
        }
        iRet = RET_RESET_CAPTURE;
        return iRet;
    }

    if( FAILED( hr = _pDxgiResource->QueryInterface( __uuidof( ID3D11Texture2D ), ( void** ) &pCapturedTexture ) ) ) {
        LOGW( "unable to retrieve D3D11 texture 0x%x", hr );
        return RET_WARNING;
    } else {
        // Store window of the game
        D3D11_TEXTURE2D_DESC d3D11TextureDesc;
        pCapturedTexture->GetDesc( &d3D11TextureDesc );

        // Compute the zone to extract.
        D3D11_BOX srcBox;
        memset( &srcBox, 0, sizeof( srcBox ) );
        if( _pGameWindow->getLeftPos() > 0 ) {
            srcBox.left = _pGameWindow->getLeftPos();
        }
        if( _pGameWindow->getTopPos() > 0 ) {
            srcBox.top = _pGameWindow->getTopPos();
        }
        srcBox.front = 0;
        srcBox.right = _pGameWindow->getLeftPos() + _pGameWindow->getWidth();
        if( srcBox.right > _pGameWindow->getMonitorWidth() ) {
            srcBox.right = _pGameWindow->getMonitorWidth();
        }
        if( ( srcBox.right - srcBox.left ) % 2 != 0 ) {
            srcBox.right--;
        }
        srcBox.bottom = _pGameWindow->getTopPos() + _pGameWindow->getHeight();
        if( srcBox.bottom > _pGameWindow->getMonitorHeight() ) {
            srcBox.bottom = _pGameWindow->getMonitorHeight();
        }
        if( ( srcBox.bottom - srcBox.top ) % 2 != 0 ) {
            srcBox.bottom--;
        }
        srcBox.back = 1;

        // AVFrame info is updated only when the captured game window and the texture differ.
        // At the same time the texture is reallocated.
        if( ( ( srcBox.right - srcBox.left ) != _CapturedTextureDesc.Width )
         || ( ( srcBox.bottom - srcBox.top ) != _CapturedTextureDesc.Height ) ) {
            LOGD( "Game window: %dx%d ; %d->%d", _pGameWindow->getLeftPos(), _pGameWindow->getTopPos(), _pGameWindow->getWidth(), _pGameWindow->getHeight() );
            LOGD( "Texture creation %dx%d -> %dx%d", srcBox.left, srcBox.top, srcBox.right, srcBox.bottom );
            // Create the new texture
            iRet = createCapturedTexture( srcBox.right - srcBox.left, srcBox.bottom - srcBox.top );
        }

        DirectX11Util::GetInstance()->getD3D11DeviceContext()->CopySubresourceRegion( _pGameTexture, 0, 0, 0, 0, pCapturedTexture, 0, &srcBox );
    }

    if( nullptr != _pDxgiResource ) {
        _pOutputDuplication->ReleaseFrame();
        pCapturedTexture->Release();
        _pDxgiResource->Release();
        _pDxgiResource = nullptr;
    }

    iRet = RET_OK;
    return iRet;
}
Below is the release of the D3D11 capture before recreation of the IDXGIOutputDuplication:
int DxgiVideoCapture::close() {
    if( nullptr != _pGameTexture ) {
        ZeroMemory( &_CapturedTextureDesc, sizeof( D3D11_TEXTURE2D_DESC ) );
        _pGameTexture->Release();
        _pGameTexture = nullptr;
    }
    if( nullptr != _pDxgiResource ) {
        _pOutputDuplication->ReleaseFrame();
        _pDxgiResource->Release();
        _pDxgiResource = nullptr;
    }
    if( nullptr != _pOutputDuplication ) {
        _pOutputDuplication->Release();
        _pOutputDuplication = nullptr;
    }
    return RET_OK;
}
What I would like to know is how I can investigate this crash (the application exits without any message). To be more precise, I cross-compile my application, but the behaviour seems to be the same. Do you have any idea how to investigate this issue?
Tell me if you want more details.
Thanks in advance!
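One way to get more information out of a crash like this (not shown in the code above) is to create the device with the Direct3D debug layer enabled, so the runtime reports errors instead of failing silently. A minimal sketch, assuming the D3D11 SDK layers are installed on the target machine, based on the D3D11CreateDevice call shown earlier:

// Sketch: enable the D3D11 debug layer in debug builds so runtime/driver
// errors are reported (requires the D3D11 SDK layers to be installed).
UINT createFlags = 0;
#ifdef _DEBUG
createFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

HRESULT hr = D3D11CreateDevice( _pDxgiAdapter, D3D_DRIVER_TYPE_UNKNOWN, nullptr,
                                createFlags, FeatureLevels, NumFeatureLevels,
                                D3D11_SDK_VERSION, &_pD3D11Device, &FeatureLevel,
                                &_pD3D11DeviceCtx );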
I need my SDL2 program to know whether a window is fullscreen, and I thought I could get that info using SDL_GetWindowFlags(). By default I initialize my window with two flags, SDL_WINDOW_SHOWN and SDL_WINDOW_BORDERLESS, which are equal to 16 and 4 respectively. So I expected the function to return 20, but instead I get 532, and sometimes 1556, which even changes to 532 at runtime after reinitializing the window a few times. 532 never changes to 1556 at runtime, however.
How do these flags work?
bool init( int windowflags )
{
    bool success = true;

    if( SDL_Init( SDL_INIT_VIDEO ) < 0 )
    {
        printf( "Video initialization failed: %s\n", SDL_GetError() );
        success = false;
    }
    else
    {
        gWindow = SDL_CreateWindow( "VIRGULE", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WIN_W, WIN_H, SDL_WINDOW_SHOWN + windowflags );
        if( gWindow == NULL )
        {
            printf( "Window could not be created: %s\n", SDL_GetError() );
            success = false;
        }
        else
        {
            gRenderer = SDL_CreateRenderer( gWindow, -1, SDL_RENDERER_ACCELERATED + SDL_RENDERER_TARGETTEXTURE );
            if( gRenderer == NULL )
            {
                printf( "Renderer could not be created: %s\n", SDL_GetError() );
                success = false;
            }
            else
            {
                gTexture = SDL_CreateTexture( gRenderer, SDL_PIXELFORMAT_UNKNOWN, SDL_TEXTUREACCESS_TARGET, SCR_W, SCR_H );
                if( gTexture == NULL )
                {
                    printf( "Texture creation failed: %s\n", SDL_GetError() );
                    success = false;
                }
            }
        }
    }

    printf( "%i\n", SDL_GetWindowFlags( gWindow ) );
    // this prints either 1556 or 532
    return success;
}
It looks like your flag value is changing based on the state of SDL_WINDOW_INPUT_FOCUS and SDL_WINDOW_MOUSE_FOCUS. But that doesn't matter: flag values change all the time, so you shouldn't worry about the total value of the flags. You only need to know the value of the flag bit you are watching. The SDL_WINDOW_SHOWN and SDL_WINDOW_BORDERLESS bits are still set when the values are 532 and 1556 (if you look at them in binary).
Just grab the value of each flag bit:
int flags = SDL_GetWindowFlags( gWindow );
int window_shown = ( flags & SDL_WINDOW_SHOWN ) ? true : false;
int window_borderless = ( flags & SDL_WINDOW_BORDERLESS ) ? true : false;
int window_fullscreen = ( flags & SDL_WINDOW_FULLSCREEN ) ? true : false;
Here's a function you can use to see what flags are set based on the value:
#include <stdio.h>
#include <SDL2/SDL.h>   // or "SDL.h", depending on your include paths

void show_flags(int flags);

int main()
{
    show_flags(20);
    show_flags(532);
    show_flags(1556);
    return 0;
}

void show_flags(int flags)
{
    printf("\nFLAGS ENABLED: ( %d )\n", flags);
    printf("=======================\n");
    if(flags & SDL_WINDOW_FULLSCREEN)         printf("SDL_WINDOW_FULLSCREEN\n");
    if(flags & SDL_WINDOW_OPENGL)             printf("SDL_WINDOW_OPENGL\n");
    if(flags & SDL_WINDOW_SHOWN)              printf("SDL_WINDOW_SHOWN\n");
    if(flags & SDL_WINDOW_HIDDEN)             printf("SDL_WINDOW_HIDDEN\n");
    if(flags & SDL_WINDOW_BORDERLESS)         printf("SDL_WINDOW_BORDERLESS\n");
    if(flags & SDL_WINDOW_RESIZABLE)          printf("SDL_WINDOW_RESIZABLE\n");
    if(flags & SDL_WINDOW_MINIMIZED)          printf("SDL_WINDOW_MINIMIZED\n");
    if(flags & SDL_WINDOW_MAXIMIZED)          printf("SDL_WINDOW_MAXIMIZED\n");
    if(flags & SDL_WINDOW_INPUT_GRABBED)      printf("SDL_WINDOW_INPUT_GRABBED\n");
    if(flags & SDL_WINDOW_INPUT_FOCUS)        printf("SDL_WINDOW_INPUT_FOCUS\n");
    if(flags & SDL_WINDOW_MOUSE_FOCUS)        printf("SDL_WINDOW_MOUSE_FOCUS\n");
    if(flags & SDL_WINDOW_FULLSCREEN_DESKTOP) printf("SDL_WINDOW_FULLSCREEN_DESKTOP\n");
    if(flags & SDL_WINDOW_FOREIGN)            printf("SDL_WINDOW_FOREIGN\n");
}
More flags can be found here: https://wiki.libsdl.org/SDL_WindowFlags.
Output:
FLAGS ENABLED: ( 20 )
=======================
SDL_WINDOW_SHOWN
SDL_WINDOW_BORDERLESS
FLAGS ENABLED: ( 532 )
=======================
SDL_WINDOW_SHOWN
SDL_WINDOW_BORDERLESS
SDL_WINDOW_INPUT_FOCUS
FLAGS ENABLED: ( 1556 )
=======================
SDL_WINDOW_SHOWN
SDL_WINDOW_BORDERLESS
SDL_WINDOW_INPUT_FOCUS
SDL_WINDOW_MOUSE_FOCUS
So I have been following this set of tutorials to get some general knowledge of SDL before going on to build a game engine with it. However, once I reached this part of the tutorial, I started having issues.
Whenever I try to use IMG_Load or any related calls, the program instantly closes. I was doing just fine when I was using SDL_LoadBMP; the problem only started once I switched to SDL_image. Even when I copy the code from the tutorial exactly, it still misbehaves.
bool init()
{
    //Initialization flags
    bool success = true;

    //Initialize SDL
    if(SDL_Init(SDL_INIT_VIDEO) < 0)
    {
        printf("SDL failed to initialize! Details: %s\n", SDL_GetError());
        success = false;
    }
    else
    {
        //Set texture filtering to linear
        if( !SDL_SetHint( SDL_HINT_RENDER_SCALE_QUALITY, "1" ) )
        {
            printf( "Warning: Linear texture filtering not enabled!" );
        }

        //Create a window
        gWindow = SDL_CreateWindow("densipoint", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_SHOWN);
        if(gWindow == 0)
        {
            printf("Window failed to display! Details: %s\n", SDL_GetError());
            success = false;
        }
        else
        {
            //Create renderer for window
            gRenderer = SDL_CreateRenderer(gWindow, -1, SDL_RENDERER_ACCELERATED);
            if(gRenderer == 0)
            {
                printf("Renderer failed to be created! Details: %s\n", SDL_GetError());
                success = false;
            }
            else
            {
                //Initialize renderer color
                SDL_SetRenderDrawColor(gRenderer, 0xff, 0xff, 0xff, 0xff);

                //Initialize PNG loading
                int imgFlags = IMG_INIT_PNG;
                if( !( IMG_Init( imgFlags ) & imgFlags ))
                {
                    printf("SDL_image failed to initialize! Details: %s\n", IMG_GetError());
                    success = false;
                }
            }
        }
    }
    return success;
}
Any ideas/advice? I'm using command-line MinGW with the g++ compiler.
EDIT: I have narrowed the problem down to issues with initializing SDL_image.
int imgFlags = IMG_INIT_PNG;
if( !( IMG_Init( imgFlags ) & imgFlags ))
{
    printf("SDL_image failed to initialize! Details: %s\n", IMG_GetError());
    success = false;
}
I want to create one GL context for each GPU on Linux using GLX. As the NVIDIA slides show, it should be pretty simple: I just pass ":0.0" for the first GPU and ":0.1" for the second one to the XOpenDisplay function. I have tried it, but it only works with ":0.0", not with ":0.1". I have two GPUs: a GTX 980 and a GTX 970. Also, as xorg.conf shows, Xinerama is disabled. Furthermore, I only have one monitor, and it is connected to the GTX 980.
Do you have any idea how to fix that, or what is missing?
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <GL/gl.h>
#include <GL/glx.h>

#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092

typedef GLXContext (*glXCreateContextAttribsARBProc)(Display*, GLXFBConfig, GLXContext, Bool, const int*);

// Helper to check for extension string presence. Adapted from:
// http://www.opengl.org/resources/features/OGLextensions/
static bool isExtensionSupported(const char *extList, const char *extension)
{
    const char *start;
    const char *where, *terminator;

    /* Extension names should not have spaces. */
    where = strchr(extension, ' ');
    if (where || *extension == '\0')
        return false;

    /* It takes a bit of care to be fool-proof about parsing the
       OpenGL extensions string. Don't be fooled by sub-strings,
       etc. */
    for (start=extList;;) {
        where = strstr(start, extension);
        if (!where)
            break;
        terminator = where + strlen(extension);
        if ( where == start || *(where - 1) == ' ' )
            if ( *terminator == ' ' || *terminator == '\0' )
                return true;
        start = terminator;
    }
    return false;
}

static bool ctxErrorOccurred = false;
static int ctxErrorHandler( Display *dpy, XErrorEvent *ev )
{
    ctxErrorOccurred = true;
    return 0;
}

int main(int argc, char* argv[])
{
    Display *display = XOpenDisplay(":0.1");
    if (!display)
    {
        printf("Failed to open X display\n");
        exit(1);
    }

    // Get a matching FB config
    static int visual_attribs[] =
    {
        GLX_X_RENDERABLE   , True,
        GLX_DRAWABLE_TYPE  , GLX_WINDOW_BIT,
        GLX_RENDER_TYPE    , GLX_RGBA_BIT,
        GLX_X_VISUAL_TYPE  , GLX_TRUE_COLOR,
        GLX_RED_SIZE       , 8,
        GLX_GREEN_SIZE     , 8,
        GLX_BLUE_SIZE      , 8,
        GLX_ALPHA_SIZE     , 8,
        GLX_DEPTH_SIZE     , 24,
        GLX_STENCIL_SIZE   , 8,
        GLX_DOUBLEBUFFER   , True,
        //GLX_SAMPLE_BUFFERS , 1,
        //GLX_SAMPLES        , 4,
        None
    };

    int glx_major, glx_minor;

    // FBConfigs were added in GLX version 1.3.
    if ( !glXQueryVersion( display, &glx_major, &glx_minor ) ||
         ( ( glx_major == 1 ) && ( glx_minor < 3 ) ) || ( glx_major < 1 ) )
    {
        printf("Invalid GLX version");
        exit(1);
    }

    printf( "Getting matching framebuffer configs\n" );
    int fbcount;
    GLXFBConfig* fbc = glXChooseFBConfig(display, DefaultScreen(display), visual_attribs, &fbcount);
    if (!fbc)
    {
        printf( "Failed to retrieve a framebuffer config\n" );
        exit(1);
    }
    printf( "Found %d matching FB configs.\n", fbcount );

    // Pick the FB config/visual with the most samples per pixel
    printf( "Getting XVisualInfos\n" );
    int best_fbc = -1, worst_fbc = -1, best_num_samp = -1, worst_num_samp = 999;

    int i;
    for (i=0; i<fbcount; ++i)
    {
        XVisualInfo *vi = glXGetVisualFromFBConfig( display, fbc[i] );
        if ( vi )
        {
            int samp_buf, samples;
            glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLE_BUFFERS, &samp_buf );
            glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLES       , &samples );

            printf( " Matching fbconfig %d, visual ID 0x%2x: SAMPLE_BUFFERS = %d,"
                    " SAMPLES = %d\n",
                    i, vi -> visualid, samp_buf, samples );

            if ( best_fbc < 0 || samp_buf && samples > best_num_samp )
                best_fbc = i, best_num_samp = samples;
            if ( worst_fbc < 0 || !samp_buf || samples < worst_num_samp )
                worst_fbc = i, worst_num_samp = samples;
        }
        XFree( vi );
    }

    GLXFBConfig bestFbc = fbc[ best_fbc ];

    // Be sure to free the FBConfig list allocated by glXChooseFBConfig()
    XFree( fbc );

    // Get a visual
    XVisualInfo *vi = glXGetVisualFromFBConfig( display, bestFbc );
    printf( "Chosen visual ID = 0x%x\n", vi->visualid );

    printf( "Creating colormap\n" );
    XSetWindowAttributes swa;
    Colormap cmap;
    swa.colormap = cmap = XCreateColormap( display,
                                           RootWindow( display, vi->screen ),
                                           vi->visual, AllocNone );
    swa.background_pixmap = None ;
    swa.border_pixel      = 0;
    swa.event_mask        = StructureNotifyMask;

    printf( "Creating window\n" );
    Window win = XCreateWindow( display, RootWindow( display, vi->screen ),
                                0, 0, 100, 100, 0, vi->depth, InputOutput,
                                vi->visual,
                                CWBorderPixel|CWColormap|CWEventMask, &swa );
    if ( !win )
    {
        printf( "Failed to create window.\n" );
        exit(1);
    }

    // Done with the visual info data
    XFree( vi );

    XStoreName( display, win, "GL 3.0 Window" );

    printf( "Mapping window\n" );
    XMapWindow( display, win );

    // Get the default screen's GLX extension list
    const char *glxExts = glXQueryExtensionsString( display,
                                                    DefaultScreen( display ) );

    // NOTE: It is not necessary to create or make current to a context before
    // calling glXGetProcAddressARB
    glXCreateContextAttribsARBProc glXCreateContextAttribsARB = 0;
    glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)
        glXGetProcAddressARB( (const GLubyte *) "glXCreateContextAttribsARB" );

    GLXContext ctx = 0;

    // Install an X error handler so the application won't exit if GL 3.0
    // context allocation fails.
    //
    // Note this error handler is global. All display connections in all threads
    // of a process use the same error handler, so be sure to guard against other
    // threads issuing X commands while this code is running.
    ctxErrorOccurred = false;
    int (*oldHandler)(Display*, XErrorEvent*) =
        XSetErrorHandler(&ctxErrorHandler);

    // Check for the GLX_ARB_create_context extension string and the function.
    // If either is not present, use GLX 1.3 context creation method.
    if ( !isExtensionSupported( glxExts, "GLX_ARB_create_context" ) ||
         !glXCreateContextAttribsARB )
    {
        printf( "glXCreateContextAttribsARB() not found"
                " ... using old-style GLX context\n" );
        ctx = glXCreateNewContext( display, bestFbc, GLX_RGBA_TYPE, 0, True );
    }
    // If it does, try to get a GL 3.0 context!
    else
    {
        int context_attribs[] =
        {
            GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
            GLX_CONTEXT_MINOR_VERSION_ARB, 0,
            //GLX_CONTEXT_FLAGS_ARB       , GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
            None
        };

        printf( "Creating context\n" );
        ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
                                          True, context_attribs );

        // Sync to ensure any errors generated are processed.
        XSync( display, False );
        if ( !ctxErrorOccurred && ctx )
            printf( "Created GL 3.0 context\n" );
        else
        {
            // Couldn't create GL 3.0 context. Fall back to old-style 2.x context.
            // When a context version below 3.0 is requested, implementations will
            // return the newest context version compatible with OpenGL versions less
            // than version 3.0.
            // GLX_CONTEXT_MAJOR_VERSION_ARB = 1
            context_attribs[1] = 1;
            // GLX_CONTEXT_MINOR_VERSION_ARB = 0
            context_attribs[3] = 0;

            ctxErrorOccurred = false;

            printf( "Failed to create GL 3.0 context"
                    " ... using old-style GLX context\n" );
            ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
                                              True, context_attribs );
        }
    }

    // Sync to ensure any errors generated are processed.
    XSync( display, False );

    // Restore the original error handler
    XSetErrorHandler( oldHandler );

    if ( ctxErrorOccurred || !ctx )
    {
        printf( "Failed to create an OpenGL context\n" );
        exit(1);
    }

    // Verifying that context is a direct context
    if ( ! glXIsDirect ( display, ctx ) )
    {
        printf( "Indirect GLX rendering context obtained\n" );
    }
    else
    {
        printf( "Direct GLX rendering context obtained\n" );
    }

    printf( "Making context current\n" );
    glXMakeCurrent( display, win, ctx );

    glClearColor( 0, 0.5, 1, 1 );
    glClear( GL_COLOR_BUFFER_BIT );
    glXSwapBuffers ( display, win );

    sleep( 1 );

    glClearColor ( 1, 0.5, 0, 1 );
    glClear ( GL_COLOR_BUFFER_BIT );
    glXSwapBuffers ( display, win );

    sleep( 1 );

    glXMakeCurrent( display, 0, 0 );
    glXDestroyContext( display, ctx );

    XDestroyWindow( display, win );
    XFreeColormap( display, cmap );
    XCloseDisplay( display );

    return 0;
}
The reason it works with ":0.0" but not with ":0.1" is that these are the X display and screen numbers: ":0.0" means the first screen on the first display, and ":0.1" means the second screen on the first display.
These numbers select which monitor you want the window displayed on, not which GPU you want to use. As you have only one monitor attached, you only have one screen, so ":0.1" fails.
I believe the slides expect you to have two or more monitors attached, each driven by a different GPU.
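A quick way to confirm that from code is to open the display and ask how many screens it actually exposes; a minimal standalone sketch (independent of the GLX code above):

/* Sketch: ":0.1" can only work if display ":0" really exposes a second screen. */
#include <stdio.h>
#include <X11/Xlib.h>

int main(void)
{
    Display *dpy = XOpenDisplay(":0");
    if (!dpy) {
        fprintf(stderr, "Cannot open display :0\n");
        return 1;
    }
    printf("Display :0 exposes %d screen(s)\n", ScreenCount(dpy));
    XCloseDisplay(dpy);
    return 0;
}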
I have a fairly simple application that contains the following:
context->mFaceTracker = FTCreateFaceTracker();
hr = context->mFaceTracker->Initialize( &mVideoCameraConfig, &mDepthCameraConfig, NULL, NULL );
which works fine, returns S_OK, and mFaceTracker is (as far as I can tell) initialized properly. However, the next line is:
hr = context->mFaceTracker->CreateFTResult( &context->mFTResult );
which always returns FT_ERROR_UNINITIALIZED, doesn't allocate the pointer, and has me puzzled. I've tried many different strategies to get this to work, from changing how the threading for the device and detector works to changing my FTcontext object from a class to a struct to match the samples, all with no success. The Kinect SDK samples all work fine, but trying to use the same approach in my own application doesn't, despite my closely mirroring how they initialize the device and the Face Tracker. I'm curious whether anyone else has run into this or similar problems when initializing either IFTFaceTracker or IFTResult. Also, I'm curious how else I can test the IFTFaceTracker for correct initialization, other than checking the HRESULT that Initialize() returns. Thanks in advance.
EDIT:
I've had a few requests for more code. It's built on Cinder and uses this Cinder block: https://github.com/BanTheRewind/Cinder-KinectSdk
I can't post all of the code, but I've posted most of the relevant Kinect initialization code here:
void Kinect::start( const DeviceOptions &deviceOptions )
{
    if ( !mCapture ) {
        // Copy device options
        mDeviceOptions = deviceOptions;
        string deviceId = mDeviceOptions.getDeviceId();
        int32_t index = mDeviceOptions.getDeviceIndex();

        // Clamp device index
        if ( index >= 0 ) {
            index = math<int32_t>::clamp( index, 0, math<int32_t>::max( getDeviceCount() - 1, 0 ) );
        }

        // Initialize device instance
        long hr = S_OK;
        if ( index >= 0 ) {
            hr = NuiCreateSensorByIndex( index, &mSensor );
            if ( FAILED( hr ) ) {
                trace( "Unable to create device instance " + toString( index ) + ": " );
                error( hr );
                return;
            }
        } else if ( deviceId.length() > 0 ) {
            _bstr_t id = deviceId.c_str();
            hr = NuiCreateSensorById( id, &mSensor );
            if ( FAILED( hr ) ) {
                trace( "Unable to create device instance " + deviceId + ":" );
                error( hr );
                return;
            }
        } else {
            trace( "Invalid device name or index." );
            return;
        }

        // Check device
        hr = mSensor != 0 ? mSensor->NuiStatus() : E_NUI_NOTCONNECTED;
        if ( hr == E_NUI_NOTCONNECTED ) {
            error( hr );
            return;
        }

        // Get device name and index
        if ( mSensor != 0 ) {
            mDeviceOptions.setDeviceIndex( mSensor->NuiInstanceIndex() );
            BSTR id = ::SysAllocString( mSensor->NuiDeviceConnectionId() );
            _bstr_t idStr( id );
            if ( idStr.length() > 0 ) {
                std::string str( idStr );
                mDeviceOptions.setDeviceId( str );
            }
            ::SysFreeString( id );
        } else {
            index = -1;
            deviceId = "";
        }

        flags |= NUI_INITIALIZE_FLAG_USES_COLOR;

        hr = mSensor->NuiInitialize( flags );
        if ( FAILED( hr ) ) {
            trace( "Unable to initialize device " + mDeviceOptions.getDeviceId() + ":" );
            error( hr );
            return;
        }

        hr = mSensor->NuiSkeletonTrackingEnable( 0, flags );
        if ( FAILED( hr ) ) {
            trace( "Unable to initialize skeleton tracking for device " + mDeviceOptions.getDeviceId() + ": " );
            error( hr );
            return;
        }

        mIsSkeletonDevice = true;

        mThread = CreateThread( NULL, 0, &Kinect::StaticThread, (PVOID)this, 0, 0 );
    }
}
DWORD WINAPI Kinect::StaticThread( PVOID lpParam )
{
    Kinect* device = static_cast<Kinect*>( lpParam );
    if ( device )
    {
        device->run();
    }
    return 0;
}
void run() {
    if( mSensor ) {
        if( mEnabledFaceTracking )
        {
            if( mNeedFaceTracker ) {
                mFaceTracker = new FaceTracker(
                    mDeviceOptions.getVideoSize().x,
                    mDeviceOptions.getVideoSize().y,
                    mDeviceOptions.getDepthSize().x,
                    mDeviceOptions.getDepthSize().y,
                    1.0,
                    1 );
                mNeedFaceTracker = false;
            }

            // make sure we have both color && depth buffers to work with
            if( newDepth || newVideo )
            {
                FT_SENSOR_DATA sensorData( mFTColorImage, mFTDepthImage );
                FT_VECTOR3D hint[2]; // this is initialized elsewhere

                mFaceTracker->checkFaces( (NUI_SKELETON_FRAME*) &skeletonFrame, mFTColorImage, mFTDepthImage, 1.0, 0 );
                if( mFaceTracker->getNumFaces() > 0 ) {
                    cout << " we have a face " << mFaceTracker->getNumFaces() << endl;
                    mNewFaceTrackData = true;
                    mFaceData.clear();
                    for( int i = 0; i < mFaceTracker->getNumFaces(); i++ ) {
                        Face newFace;
                        mFaceTracker->getProjectedShape( 0, newFace.scale, newFace.rotation, newFace.transform, newFace.screenPositions );
                        mFaceData.push_back( newFace );
                    }
                }
            }
        }
        Sleep( 8 );
    }
}
It looks like you never call (or omitted from the code sample) NuiImageStreamOpen(), such as in this snippet from the SingleFace sample, KinectSensor.cpp, in the Init method:
hr = NuiImageStreamOpen(
    colorType,
    colorRes,
    0,
    2,
    m_hNextVideoFrameEvent,
    &m_pVideoStreamHandle );
if (FAILED(hr))
{
    return hr;
}

hr = NuiImageStreamOpen(
    depthType,
    depthRes,
    (bNearMode)? NUI_IMAGE_STREAM_FLAG_ENABLE_NEAR_MODE : 0,
    2,
    m_hNextDepthFrameEvent,
    &m_pDepthStreamHandle );
Calling those before you call CreateFTResult() may fix the uninitialized error.
Additionally, you call CreateThread() and then call run(), but there is no while loop, so that thread will exit almost immediately, certainly without giving the Kinect enough time to start providing data to the face tracking (see the sketch below).
It also doesn't look like you have included the thread or event loop that checks the sensor for new data, updates mFTColorImage and mFTDepthImage, and sets the newDepth and newVideo flags. This could be the same thread that you create above (provided you add a while loop, and ignoring performance or other classes needing the Kinect data), or it could be a separate thread as in the SingleFace Kinect SDK sample.
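As a rough illustration of that first point, the worker function needs to loop until capture is stopped. A minimal sketch, where mRunning is an assumed member flag that is not in the original code:

// Sketch only: keep the worker thread alive until capture stops.
// 'mRunning' is a hypothetical flag, cleared by whatever stop/shutdown path the class has.
void run() {
    while ( mRunning ) {
        if ( mSensor && mEnabledFaceTracking ) {
            // ... grab new color/depth frames, update mFTColorImage and
            //     mFTDepthImage, set newVideo/newDepth, then call
            //     mFaceTracker->checkFaces() as in the code above ...
        }
        Sleep( 8 ); // yield a little between iterations
    }
}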