Converting DTED to another format - C++

I'm trying to convert a DTED file to another format so I can display it in an application; for now I'm targeting the JPEG format. What I need is basically the gdal_translate command, but through the C++ API.
I'm using the GDAL C++ API and I can open and read the input DTED file without problems:
GDALAllRegister();
GDALDataset *poDataset;
poDataset = (GDALDataset *) GDALOpen( "n43.dt2", GA_ReadOnly );
if( poDataset == NULL )
{
return 0;
}
I can also access the corresponding band without an issue.
GDALRasterBand *poBand;
int nBlockXSize, nBlockYSize;
int bGotMin, bGotMax;
double adfMinMax[2];
poBand = poDataset->GetRasterBand( 1 );
poBand->GetBlockSize( &nBlockXSize, &nBlockYSize );
printf( "Block=%dx%d Type=%s, ColorInterp=%s\n",
nBlockXSize, nBlockYSize,
GDALGetDataTypeName(poBand->GetRasterDataType()),
GDALGetColorInterpretationName(
poBand->GetColorInterpretation()) );
adfMinMax[0] = poBand->GetMinimum( &bGotMin );
adfMinMax[1] = poBand->GetMaximum( &bGotMax );
if( ! (bGotMin && bGotMax) )
GDALComputeRasterMinMax((GDALRasterBandH)poBand, TRUE, adfMinMax);
printf( "Min=%.3fd, Max=%.3f\n", adfMinMax[0], adfMinMax[1] );
if( poBand->GetOverviewCount() > 0 )
printf( "Band has %d overviews.\n", poBand->GetOverviewCount() );
if( poBand->GetColorTable() != NULL )
printf( "Band has a color table with %d entries.\n",
poBand->GetColorTable()->GetColorEntryCount() );
But I can't figure out how to add this band to another dataset using the desired driver. My application crashes when I try to use the AddBand function.
float *pafScanline;
int nXSize = poBand->GetXSize();
pafScanline = (float *) CPLMalloc(sizeof(float)*nXSize);
poBand->RasterIO( GF_Read, 0, 0, nXSize, 1,
pafScanline, nXSize, 1, GDT_Float32,
0, 0 );
GDALClose(poDataset);
GDALDataset *resDataset; // note: never assigned, so this pointer is uninitialized
GDALRasterBand *resBand;
resDataset->AddBand (GDT_Float32, NULL);//<-application crashes here: AddBand is called through the uninitialized pointer
/*resBand = resDataset->GetRasterBand(1);
resBand->RasterIO( GF_Write, 0, 0, nXSize, 1,
pafScanline, nXSize, 1, GDT_Float32,
0, 0 );*/
So I guess what I'm trying to do is not the proper way to do what I need. Could you explain what I am doing wrong?

Okay, I think I figured it out: unless I go through a virtual raster, I shouldn't manipulate raster bands directly but should just use CreateCopy (file-format drivers such as PNG and JPEG only support CreateCopy, not band-by-band creation).
Here's working code for me:
#include <iostream>
#include "gdal_priv.h"
#include "cpl_conv.h" // for CPLMalloc()
using namespace std;
int main(int argc, char *argv[])
{
GDALAllRegister();
GDALDatasetH poDataset;
poDataset = GDALOpenShared( "n43.dt2", GA_ReadOnly );
if( poDataset == NULL )
{
return 1;
}
const char *pszFormat = "PNG";
GDALDatasetH resDataset;
GDALProgressFunc pfnProgress = GDALTermProgress;
GDALDriverH hDriver = GDALGetDriverByName( pszFormat );
const char *pszDest = "n43.png";
char **papszCreateOptions = NULL;
resDataset = GDALCreateCopy( hDriver, pszDest, poDataset,
FALSE, papszCreateOptions,
pfnProgress, NULL );
if( resDataset != NULL )
{
GDALClose( resDataset );
}
else
{
printf("Error creating output dataset.");
}
GDALClose(poDataset);
CSLDestroy( papszCreateOptions );
return 0;
}
This gives a slightly brighter PNG image than the one I get from gdal_translate; I still need to figure out what's causing this. The same code runs with the JPEG driver, but the resulting file cannot be read (I think it's an application-specific format, as gdalinfo --formats lists it as "JPEG JFIF").
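For reference, if your GDAL build is 2.1 or newer, the library exposes gdal_translate itself as GDALTranslate() in gdal_utils.h, so the exact command-line behaviour is available programmatically. A minimal sketch, assuming the -ot Byte -scale switches are what you want for turning 16-bit DTED elevations into a readable 8-bit JPEG (the output filename is mine):
#include "gdal_priv.h"
#include "gdal_utils.h" // GDALTranslate(), available since GDAL 2.1

int main()
{
    GDALAllRegister();
    GDALDatasetH hSrc = GDALOpen( "n43.dt2", GA_ReadOnly );
    if( hSrc == NULL )
        return 1;
    // Same switches as the gdal_translate binary: JPEG only stores Byte data,
    // so scale the Int16 elevations into 0-255.
    char *apszArgv[] = { (char *)"-of", (char *)"JPEG",
                         (char *)"-ot", (char *)"Byte",
                         (char *)"-scale", NULL };
    GDALTranslateOptions *psOptions = GDALTranslateOptionsNew( apszArgv, NULL );
    int bUsageError = FALSE;
    GDALDatasetH hDst = GDALTranslate( "n43.jpg", hSrc, psOptions, &bUsageError );
    GDALTranslateOptionsFree( psOptions );
    if( hDst != NULL )
        GDALClose( hDst );
    GDALClose( hSrc );
    return hDst != NULL ? 0 : 1;
}
This may also explain the unreadable JPEG above: CreateCopy hands the driver the raw Int16 data, while -ot Byte -scale converts it to a range the JPEG driver can handle first.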


Creating Separate Context for Each GPU while having one display monitor

I want to create one GL context for each GPU on Linux using GLX. As the NVIDIA slides show, it should be pretty simple: I just have to use ":0.0" for the first GPU and ":0.1" for the second one in the XOpenDisplay function. I have tried it, but it only works with ":0.0", not with ":0.1". I have two GPUs: a GTX 980 and a GTX 970. Also, as xorg.conf shows, Xinerama is disabled. Furthermore, I only have one display monitor, and it is connected to the GTX 980.
Do you have any idea how to fix that, or what is missing?
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <GL/gl.h>
#include <GL/glx.h>
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
typedef GLXContext (*glXCreateContextAttribsARBProc)(Display*, GLXFBConfig, GLXContext, Bool, const int*);
// Helper to check for extension string presence. Adapted from:
// http://www.opengl.org/resources/features/OGLextensions/
static bool isExtensionSupported(const char *extList, const char *extension)
{
const char *start;
const char *where, *terminator;
/* Extension names should not have spaces. */
where = strchr(extension, ' ');
if (where || *extension == '\0')
return false;
/* It takes a bit of care to be fool-proof about parsing the
OpenGL extensions string. Don't be fooled by sub-strings,
etc. */
for (start=extList;;) {
where = strstr(start, extension);
if (!where)
break;
terminator = where + strlen(extension);
if ( where == start || *(where - 1) == ' ' )
if ( *terminator == ' ' || *terminator == '\0' )
return true;
start = terminator;
}
return false;
}
static bool ctxErrorOccurred = false;
static int ctxErrorHandler( Display *dpy, XErrorEvent *ev )
{
ctxErrorOccurred = true;
return 0;
}
int main(int argc, char* argv[])
{
Display *display = XOpenDisplay(":0.1");
if (!display)
{
printf("Failed to open X display\n");
exit(1);
}
// Get a matching FB config
static int visual_attribs[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
//GLX_SAMPLE_BUFFERS , 1,
//GLX_SAMPLES , 4,
None
};
int glx_major, glx_minor;
// FBConfigs were added in GLX version 1.3.
if ( !glXQueryVersion( display, &glx_major, &glx_minor ) ||
( ( glx_major == 1 ) && ( glx_minor < 3 ) ) || ( glx_major < 1 ) )
{
printf("Invalid GLX version");
exit(1);
}
printf( "Getting matching framebuffer configs\n" );
int fbcount;
GLXFBConfig* fbc = glXChooseFBConfig(display, DefaultScreen(display), visual_attribs, &fbcount);
if (!fbc)
{
printf( "Failed to retrieve a framebuffer config\n" );
exit(1);
}
printf( "Found %d matching FB configs.\n", fbcount );
// Pick the FB config/visual with the most samples per pixel
printf( "Getting XVisualInfos\n" );
int best_fbc = -1, worst_fbc = -1, best_num_samp = -1, worst_num_samp = 999;
int i;
for (i=0; i<fbcount; ++i)
{
XVisualInfo *vi = glXGetVisualFromFBConfig( display, fbc[i] );
if ( vi )
{
int samp_buf, samples;
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLE_BUFFERS, &samp_buf );
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLES , &samples );
printf( " Matching fbconfig %d, visual ID 0x%2x: SAMPLE_BUFFERS = %d,"
" SAMPLES = %d\n",
i, vi -> visualid, samp_buf, samples );
if ( best_fbc < 0 || ( samp_buf && samples > best_num_samp ) )
best_fbc = i, best_num_samp = samples;
if ( worst_fbc < 0 || !samp_buf || samples < worst_num_samp )
worst_fbc = i, worst_num_samp = samples;
}
XFree( vi );
}
GLXFBConfig bestFbc = fbc[ best_fbc ];
// Be sure to free the FBConfig list allocated by glXChooseFBConfig()
XFree( fbc );
// Get a visual
XVisualInfo *vi = glXGetVisualFromFBConfig( display, bestFbc );
printf( "Chosen visual ID = 0x%x\n", vi->visualid );
printf( "Creating colormap\n" );
XSetWindowAttributes swa;
Colormap cmap;
swa.colormap = cmap = XCreateColormap( display,
RootWindow( display, vi->screen ),
vi->visual, AllocNone );
swa.background_pixmap = None ;
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
printf( "Creating window\n" );
Window win = XCreateWindow( display, RootWindow( display, vi->screen ),
0, 0, 100, 100, 0, vi->depth, InputOutput,
vi->visual,
CWBorderPixel|CWColormap|CWEventMask, &swa );
if ( !win )
{
printf( "Failed to create window.\n" );
exit(1);
}
// Done with the visual info data
XFree( vi );
XStoreName( display, win, "GL 3.0 Window" );
printf( "Mapping window\n" );
XMapWindow( display, win );
// Get the default screen's GLX extension list
const char *glxExts = glXQueryExtensionsString( display,
DefaultScreen( display ) );
// NOTE: It is not necessary to create or make current to a context before
// calling glXGetProcAddressARB
glXCreateContextAttribsARBProc glXCreateContextAttribsARB = 0;
glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)
glXGetProcAddressARB( (const GLubyte *) "glXCreateContextAttribsARB" );
GLXContext ctx = 0;
// Install an X error handler so the application won't exit if GL 3.0
// context allocation fails.
//
// Note this error handler is global. All display connections in all threads
// of a process use the same error handler, so be sure to guard against other
// threads issuing X commands while this code is running.
ctxErrorOccurred = false;
int (*oldHandler)(Display*, XErrorEvent*) =
XSetErrorHandler(&ctxErrorHandler);
// Check for the GLX_ARB_create_context extension string and the function.
// If either is not present, use GLX 1.3 context creation method.
if ( !isExtensionSupported( glxExts, "GLX_ARB_create_context" ) ||
!glXCreateContextAttribsARB )
{
printf( "glXCreateContextAttribsARB() not found"
" ... using old-style GLX context\n" );
ctx = glXCreateNewContext( display, bestFbc, GLX_RGBA_TYPE, 0, True );
}
// If it does, try to get a GL 3.0 context!
else
{
int context_attribs[] =
{
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 0,
//GLX_CONTEXT_FLAGS_ARB , GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
None
};
printf( "Creating context\n" );
ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
True, context_attribs );
// Sync to ensure any errors generated are processed.
XSync( display, False );
if ( !ctxErrorOccurred && ctx )
printf( "Created GL 3.0 context\n" );
else
{
// Couldn't create GL 3.0 context. Fall back to old-style 2.x context.
// When a context version below 3.0 is requested, implementations will
// return the newest context version compatible with OpenGL versions less
// than version 3.0.
// GLX_CONTEXT_MAJOR_VERSION_ARB = 1
context_attribs[1] = 1;
// GLX_CONTEXT_MINOR_VERSION_ARB = 0
context_attribs[3] = 0;
ctxErrorOccurred = false;
printf( "Failed to create GL 3.0 context"
" ... using old-style GLX context\n" );
ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
True, context_attribs );
}
}
// Sync to ensure any errors generated are processed.
XSync( display, False );
// Restore the original error handler
XSetErrorHandler( oldHandler );
if ( ctxErrorOccurred || !ctx )
{
printf( "Failed to create an OpenGL context\n" );
exit(1);
}
// Verifying that context is a direct context
if ( ! glXIsDirect ( display, ctx ) )
{
printf( "Indirect GLX rendering context obtained\n" );
}
else
{
printf( "Direct GLX rendering context obtained\n" );
}
printf( "Making context current\n" );
glXMakeCurrent( display, win, ctx );
glClearColor( 0, 0.5, 1, 1 );
glClear( GL_COLOR_BUFFER_BIT );
glXSwapBuffers ( display, win );
sleep( 1 );
glClearColor ( 1, 0.5, 0, 1 );
glClear ( GL_COLOR_BUFFER_BIT );
glXSwapBuffers ( display, win );
sleep( 1 );
glXMakeCurrent( display, 0, 0 );
glXDestroyContext( display, ctx );
XDestroyWindow( display, win );
XFreeColormap( display, cmap );
XCloseDisplay( display );
return 0;
}
The reason it works with ":0.0" but not with ":0.1" is that these are the X display and screen numbers: ":0.0" means the first screen on the first display, and ":0.1" means the second screen on the first display.
These numbers select which monitor you wish to display the window on, not which GPU you wish to use. As you have only one monitor attached, you only have one screen, so ":0.1" fails.
I believe the slides expect you to have two or more monitors attached, each driven by a different GPU.
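You can check this from code: Xlib's ScreenCount() macro reports how many screens a display connection has. A tiny sketch (the ":0" display string is an assumption; adjust for your setup):
#include <stdio.h>
#include <stdlib.h>
#include <X11/Xlib.h>

int main(void)
{
    Display *display = XOpenDisplay(":0");
    if (!display)
    {
        printf("Failed to open X display\n");
        exit(1);
    }
    /* ":0.1" can only work if this prints 2 or more. */
    printf("Display has %d screen(s)\n", ScreenCount(display));
    XCloseDisplay(display);
    return 0;
}
To drive the second GPU without a monitor attached to it, you would typically have to configure it as an additional X screen in xorg.conf so that the display actually exposes a ":0.1".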

C++ copy sqlite blob from one database to another

I am trying to copy some blob data from one SQLite table to another in C++. However, once the data has been copied over to the new table, it seems to be corrupted. The data in question contains some JPEG images. The code I am using to copy from TABLE1 to TABLE2 is shown below:
// Read the blob from the database
int64_t rowID = 0;
sscanf( id.c_str(), "%lld", &rowID ); // %lld matches the signed 64-bit rowID
sqlite3_blob* blobHandle = NULL;
if( sqlite3_blob_open( m_dbHandle_temp, "main", m_currentTileTable.c_str(), "tile_data", rowID, 0, &blobHandle ) != SQLITE_OK )
{
sqlite3_blob_close( blobHandle ); // Safe even on failure: on error the handle is set to NULL, and closing a NULL blob handle is a harmless no-op
return false;
}
tiles_insert_statement.append( ")" );
// Copy blob to database
sqlite3_stmt *stmt = 0;
const char* tail;
sqlite3_prepare_v2( m_dbHandle, tiles_insert_statement.c_str(), strlen( tiles_insert_statement.c_str() )+1, &stmt, &tail );
int bindSuccess = sqlite3_bind_blob( stmt, 1, blobHandle, sqlite3_blob_bytes( blobHandle ), SQLITE_TRANSIENT );
if( sqlite3_step( stmt ) != SQLITE_DONE )
printf( "Error message: %s\n", sqlite3_errmsg( m_dbHandle ) );
sqlite3_finalize( stmt );
// close handles
sqlite3_blob_close( blobHandle );
Is there anything I am doing wrong in the above code? The reason I say the data is getting corrupted is that I am reading the blobs on an Android device to display them in an image viewer. The blobs in TABLE1 can be read and displayed fine, however the ones in TABLE2 do not display anything. Any help is greatly appreciated.
SOLUTION:
// Read the blob from the database
int64_t rowID = 0;
sscanf( id.c_str(), "%lld", &rowID ); // %lld matches the signed 64-bit rowID
sqlite3_blob* blobHandle = NULL;
if( sqlite3_blob_open( m_dbHandle_temp, "main", m_currentTileTable.c_str(), "tile_data", rowID, 0, &blobHandle ) != SQLITE_OK )
{
sqlite3_blob_close( blobHandle ); // Safe even on failure: on error the handle is set to NULL, and closing a NULL blob handle is a harmless no-op
return false;
}
unsigned int length = sqlite3_blob_bytes( blobHandle );
// TODO - instances of this class OWN the buffer.
// Delete the buffer in the destructor ;)
unsigned char* buffer = new unsigned char[ length ];
if( sqlite3_blob_read( blobHandle, buffer, length, 0 ) != SQLITE_OK )
{
delete [] buffer; // avoid leaking the buffer and the blob handle on failure
sqlite3_blob_close( blobHandle );
return false;
}
tiles_insert_statement.append( ")" );
sqlite3_stmt *stmt = 0;
const char* tail;
sqlite3_prepare_v2( m_dbHandle, tiles_insert_statement.c_str(), strlen( tiles_insert_statement.c_str() )+1, &stmt, &tail );
int bindSuccess = sqlite3_bind_blob( stmt, 1, buffer, length, SQLITE_TRANSIENT );
if( sqlite3_step( stmt ) != SQLITE_DONE )
printf( "Error message: %s\n", sqlite3_errmsg( m_dbHandle ) );
sqlite3_finalize( stmt );
// close handles
sqlite3_blob_close( blobHandle );
sqlite3_bind_blob expects a pointer to the actual blob data; it is not possible to use a blob handle for that.
To get the blob data as a memory chunk, execute a query like SELECT tile_data FROM MyTable WHERE ... and read the value with sqlite3_column_blob.
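A sketch of that query-based approach, reusing the names from the question (insertStmt stands for the prepared INSERT statement from the original code):
sqlite3_stmt *readStmt = NULL;
sqlite3_prepare_v2( m_dbHandle_temp,
                    "SELECT tile_data FROM MyTable WHERE rowid = ?1",
                    -1, &readStmt, NULL );
sqlite3_bind_int64( readStmt, 1, rowID );
if( sqlite3_step( readStmt ) == SQLITE_ROW )
{
    const void *blob = sqlite3_column_blob( readStmt, 0 );
    int length = sqlite3_column_bytes( readStmt, 0 );
    // The pointer is only valid until the statement is stepped or finalized,
    // so bind with SQLITE_TRANSIENT to make SQLite take its own copy.
    sqlite3_bind_blob( insertStmt, 1, blob, length, SQLITE_TRANSIENT );
}
sqlite3_finalize( readStmt );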

Images and text not showing in SDL under Mac OS X

I managed to compile, bundle and load resources under Xcode 4.3 and SDL 1.2.15.
I know the resources are loading correctly because the file handles are not null and no error is thrown.
I successfully load PNGs and TTFs, obtain and crop surfaces, and blit them.
But when I flip, the only things I get to see are the lines I drew using SDL_Draw.
I'll post some bits of the code; as I'm trying to keep an engine-ish structure, the code is anything but together.
Initialization:
void CEngine::Init() {
// Register SDL_Quit to be called at exit; makes sure things are cleaned up when we quit.
atexit( SDL_Quit );
// Initialize SDL's subsystems - in this case, only video.
if ( SDL_Init( SDL_INIT_EVERYTHING ) < 0 ) {
fprintf( stderr, "Unable to init SDL: %s\n", SDL_GetError() );
exit( 1 );
}
// Attempt to create a window with the specified height and width.
SetSize( m_iWidth, m_iHeight );
// If we fail, return error.
if ( m_pScreen == NULL ) {
fprintf( stderr, "Unable to set up video: %s\n", SDL_GetError() );
exit( 1 );
}
AdditionalInit();
}
and
void CTileEngine::AdditionalInit() {
SetTitle( "TileEngine - Loading..." );
PrintDebug("Initializing SDL_Image");
int flags = IMG_INIT_PNG;
int initted = IMG_Init( flags );
if( ( initted & flags ) != flags ) {
PrintDebug("IMG_Init: Failed to init required image support!");
PrintDebug(IMG_GetError());
// handle error
}
PrintDebug("Initializing SDL_TTF");
if( TTF_Init() == -1 ) {
PrintDebug("TTF_Init: Failed to init required ttf support!");
PrintDebug(TTF_GetError());
}
PrintDebug("Loading fonts");
font = TTF_OpenFont( OSXFileManager::GetResourcePath("Roboto-Regular.ttf"), 28 );
if( !font ) {
PrintDebug("Error loading fonts");
PrintDebug(TTF_GetError());
}
g_pGame = new CGame;
LoadGame( OSXFileManager::GetResourcePath( "test", "tmx") );
SetTitle( "TileEngine" );
PrintDebug("Finished AditionalInit()");
}
Main draw method
void CEngine::DoRender(){
++m_iFPSCounter;
if ( m_iFPSTickCounter >= 1000 ) {
m_iCurrentFPS = m_iFPSCounter;
m_iFPSCounter = 0;
m_iFPSTickCounter = 0;
}
SDL_FillRect( m_pScreen, 0, SDL_MapRGB( m_pScreen->format, 0, 0, 0 ) );
// Lock surface if needed
if ( SDL_MUSTLOCK( m_pScreen ) ){
if ( SDL_LockSurface( m_pScreen ) < 0 ){
return;
}
}
Render( GetSurface() );
// Render FPS
SDL_Color fpsColor = { 255, 255, 255 };
string fpsMessage = "FPS: ";
fpsMessage.append( SSTR(m_iCurrentFPS) );
SDL_Surface* fps = TTF_RenderText_Solid(font, fpsMessage.c_str(), fpsColor);
if( fps ) {
SDL_Rect destRect;
destRect.x = m_pScreen->w - fps->w;
destRect.y = m_pScreen->h - fps->h;
destRect.w = fps->w;
destRect.h = fps->h;
SDL_BlitSurface(fps, &fps->clip_rect, m_pScreen, &destRect);
SDL_FreeSurface(fps);
}
// Unlock if needed
if ( SDL_MUSTLOCK( m_pScreen ) )
SDL_UnlockSurface( m_pScreen );
// Tell SDL to update the whole gScreen
SDL_Flip( m_pScreen );
}
Image file loading
bool CEntity::VLoadImageFromFile( const string& sFile) {
if ( m_pSurface != 0 ){
SDL_FreeSurface( m_pSurface );
}
string nFile = string(OSXFileManager::APPNAME) + OSXFileManager::RESOURCEDIR + sFile;
SDL_Surface *pTempSurface;
pTempSurface = IMG_Load( nFile.c_str() );
m_sImage = sFile;
if ( pTempSurface == 0 ){
char czError[256];
sprintf( czError, "Image '%s' could not be opened. Reason: %s", nFile.c_str(), IMG_GetError() );
fprintf( stderr, "\nERROR: %s", czError );
return false;
} else {
// SDL_DisplayFormatAlpha returns a converted copy; free the original to avoid a leak
SDL_Surface *pConverted = SDL_DisplayFormatAlpha( pTempSurface );
SDL_FreeSurface( pTempSurface );
pTempSurface = pConverted;
}
m_pSurface = pTempSurface;
return true;
}
Entity draw method
void CEntity::VRender( SDL_Surface *pDestSurface ) {
if ( ( m_pSurface == 0 ) || ( m_bVisible == false) || ( m_iAlpha == 0 ) ){
// If the surface is invalid or it's 100% transparent.
return;
}
SDL_Rect SDestRect;
SDestRect.x = m_iPosX;
SDestRect.y = m_iPosY;
SDestRect.w = m_pSurface->w;
SDestRect.h = m_pSurface->h;
if ( m_iAlpha != 255 )
SDL_SetAlpha( m_pSurface, SDL_SRCALPHA, m_iAlpha );
SDL_BlitSurface( m_pSurface, &m_pSurface->clip_rect, pDestSurface, &SDestRect );
}
I have checked and debugged this a million times and I don't get what's wrong here. As I said before, file loading seems to be OK.
But this part
void CTile::RenderGrid( SDL_Surface* pDestSurface ) {
Uint32 m_GridColor = SDL_MapRGB( pDestSurface->format, 0xFF, 0xFF, 0xFF );
Draw_Rect(pDestSurface, GetPosX(), GetPosY(), GetWidth(), GetHeight(), m_GridColor);
}
works like a charm.
I found out what was happening. It turns out that, as of SDL version 1.1.18, SDL_Lock calls are recursive, so each lock must be paired with its own unlock. That was not the case last time I used SDL, so I was not aware of it. Simply matching locks and unlocks did the job.
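In other words, a pattern like this, where every lock is paired with exactly one unlock on every code path (a sketch; screen stands for whichever surface you lock):
if ( SDL_MUSTLOCK( screen ) ) {
    if ( SDL_LockSurface( screen ) < 0 )
        return;
}
/* ... direct pixel access and blits ... */
if ( SDL_MUSTLOCK( screen ) )
    SDL_UnlockSurface( screen ); /* locks nest since SDL 1.1.18, so never skip this */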

MS Kinect FaceTracker creating IFTResult

I have a fairly simple application that contains the following:
context->mFaceTracker = FTCreateFaceTracker();
hr = context->mFaceTracker->Initialize( &mVideoCameraConfig, &mDepthCameraConfig, NULL, NULL );
which works fine and returns S_OK and mFaceTracker is (as far as I can tell) initialized properly. However, the next line is:
hr = context->mFaceTracker->CreateFTResult( &context->mFTResult );
which always returns FT_ERROR_UNINITIALIZED, doesn't allocate the pointer, and has me puzzled. I've tried many different strategies to get this to work, from changing how the threading for the device and detector works to changing my FTcontext object from a class to a struct to match the samples, all with no success. The Kinect SDK samples all work fine, but trying to use them in my own application doesn't seem to, despite my closely mirroring how they initialize the device and the Face Tracker. I'm curious whether anyone else has run into this or similar problems when initializing either IFTFaceTracker or IFTResult. Also, I'm curious how else I can test the IFTFaceTracker for correct initialization, other than testing the HRESULT that Initialize() returns. Thanks in advance,
=== edit
I've had a few request for more code. It's built on Cinder and is using this block for Cinder: https://github.com/BanTheRewind/Cinder-KinectSdk
I can't post all of the code, but I've posted at least most of the relevant Kinect initialization code here:
void Kinect::start( const DeviceOptions &deviceOptions )
{
if ( !mCapture ) {
// Copy device options
mDeviceOptions = deviceOptions;
string deviceId = mDeviceOptions.getDeviceId();
int32_t index = mDeviceOptions.getDeviceIndex();
// Clamp device index
if ( index >= 0 ) {
index = math<int32_t>::clamp( index, 0, math<int32_t>::max( getDeviceCount() - 1, 0 ) );
}
// Initialize device instance
long hr = S_OK;
if ( index >= 0 ) {
hr = NuiCreateSensorByIndex( index, &mSensor );
if ( FAILED( hr ) ) {
trace( "Unable to create device instance " + toString( index ) + ": " );
error( hr );
return;
}
} else if ( deviceId.length() > 0 ) {
_bstr_t id = deviceId.c_str();
hr = NuiCreateSensorById( id, &mSensor );
if ( FAILED( hr ) ) {
trace( "Unable to create device instance " + deviceId + ":" );
error( hr );
return;
}
} else {
trace( "Invalid device name or index." );
return;
}
// Check device
hr = mSensor != 0 ? mSensor->NuiStatus() : E_NUI_NOTCONNECTED;
if ( hr == E_NUI_NOTCONNECTED ) {
error( hr );
return;
}
// Get device name and index
if ( mSensor != 0 ) {
mDeviceOptions.setDeviceIndex( mSensor->NuiInstanceIndex() );
BSTR id = ::SysAllocString( mSensor->NuiDeviceConnectionId() );
_bstr_t idStr( id );
if ( idStr.length() > 0 ) {
std::string str( idStr );
mDeviceOptions.setDeviceId( str );
}
::SysFreeString( id );
} else {
index = -1;
deviceId = "";
}
flags |= NUI_INITIALIZE_FLAG_USES_COLOR;
}
hr = mSensor->NuiInitialize( flags );
if ( FAILED( hr ) ) {
trace( "Unable to initialize device " + mDeviceOptions.getDeviceId() + ":" );
error( hr );
return;
}
hr = mSensor->NuiSkeletonTrackingEnable( 0, flags );
if ( FAILED( hr ) ) {
trace( "Unable to initialize skeleton tracking for device " + mDeviceOptions.getDeviceId() + ": " );
error( hr );
return;
}
mIsSkeletonDevice = true;
mThread = CreateThread(NULL, 0, &Kinect::StaticThread, (PVOID)this, 0, 0);
}
}
DWORD WINAPI Kinect::StaticThread(PVOID lpParam)
{
Kinect* device = static_cast<Kinect*>(lpParam);
if (device)
{
return device->run();
}
return 0;
}
void run() {
if(mSensor) {
if(mEnabledFaceTracking)
{
if(mNeedFaceTracker) {
mFaceTracker = new FaceTracker(
mDeviceOptions.getVideoSize().x,
mDeviceOptions.getVideoSize().y,
mDeviceOptions.getDepthSize().x,
mDeviceOptions.getDepthSize().y,
1.0,
1 );
mNeedFaceTracker = false;
}
// make sure we have both color && depth buffers to work with
if(newDepth || newVideo)
{
FT_SENSOR_DATA sensorData(mFTColorImage, mFTDepthImage);
FT_VECTOR3D hint[2]; // this is initialized elsewhere
mFaceTracker->checkFaces( (NUI_SKELETON_FRAME*) &skeletonFrame, mFTColorImage, mFTDepthImage, 1.0, 0);
if(mFaceTracker->getNumFaces() > 0) {
cout << " we have a face " << mFaceTracker->getNumFaces() << endl;
mNewFaceTrackData = true;
mFaceData.clear();
for( int i = 0; i < mFaceTracker->getNumFaces(); i++) {
Face newFace;
mFaceTracker->getProjectedShape(0, newFace.scale, newFace.rotation, newFace.transform, newFace.screenPositions);
mFaceData.push_back(newFace);
}
}
}
}
Sleep( 8 );
}
}
It looks like you never call (or omitted from the code sample) NuiImageStreamOpen(), such as in this code snippet from the SingleFace sample, KinectSensor.cpp, in the Init method:
hr = NuiImageStreamOpen(
colorType,
colorRes,
0,
2,
m_hNextVideoFrameEvent,
&m_pVideoStreamHandle );
if (FAILED(hr))
{
return hr;
}
hr = NuiImageStreamOpen(
depthType,
depthRes,
(bNearMode)? NUI_IMAGE_STREAM_FLAG_ENABLE_NEAR_MODE : 0,
2,
m_hNextDepthFrameEvent,
&m_pDepthStreamHandle );
Calling those before you call CreateFTResult() may fix the uninitialized error.
Additionally, you call CreateThread() and then call run(), but there is no while loop, so that thread will exit almost immediately, certainly without enough time for the Kinect to start providing data to the face tracking.
It doesn't look like you have included the thread or event loop that checks the sensor for new data, updates mFTColorImage and mFTDepthImage, and sets the newDepth and newVideo flags. This could be in the same thread you create above (provided you add a while loop, and ignoring performance or other classes needing the Kinect data), or it could be a different thread, as in the SingleFace Kinect SDK sample.
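A sketch of what the restructured thread body might look like; the mCapture flag is hypothetical here (whatever flag your stop path clears), and the point is only that run() must loop instead of executing once:
DWORD Kinect::run()
{
    // Keep polling until the owner clears the capture flag.
    while ( mCapture ) {
        if ( mSensor && mEnabledFaceTracking ) {
            // ... fetch new frames, update mFTColorImage / mFTDepthImage,
            // set newDepth / newVideo, then run the face tracker
            // exactly as in the original run() body ...
        }
        Sleep( 8 );
    }
    return 0;
}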

Am I incorrectly setting up AVI info for saving a file in C++ VFW?

I am using a specialized network streaming camera and I am trying to save the video stream off to a file. At the moment the code saves the video, but in an awkward RGB format which screws up the color, and then writes it out using VFW. Am I doing this correctly, and it is supposed to create an AVI with mismatched colors, or did I set up something wrong in the BITMAPINFOHEADER areas?
void PvSbProUISampleDlg::OnBnClickedSave()
{
// TODO: Add your control notification handler code here
CString StringValue;
mMovieSave.GetWindowTextW(StringValue);
if (StringValue == L"Save")
{
CString codecValue;
mMovieCodecSelected.GetWindowTextW(codecValue);
if (codecValue.IsEmpty()){
MessageBox( L"Please select a codec before saving to file",
L"Select Codec!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
CString fileNameValue;
mFileName.GetWindowTextW(fileNameValue);
if (fileNameValue.IsEmpty()){
MessageBox( L"Please select a file location",
L"Select File!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
if (!StartMovie())
return;
mSavingMovie = true;
mMovieSave.SetWindowTextW(L"Saving");
}
else
{
mVideoMutex.Lock();
PvResult aResult = mVideoCompressor->Stop();
mSavingMovie = false;
mVideoMutex.Unlock();
if (!aResult.IsOK())
{
MessageBox( mLocation,
L"Can't Stop Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
mMovieSave.SetWindowTextW(L"Save");
}
}
I set up the video stream and select uncompressed AVI for my codec. I click the "Save" button, which then calls the function below:
bool PvSbProUISampleDlg::StartMovie()
{
if ( !mDevice.IsConnected() )
{
MessageBox( L"Need to connect to device",
L"Cannot start Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return false;
}
if (!mPipeline.IsStarted() )
{
return false;
}
if (mSavingMovie)
return false;
PvInt64 width;
PvInt64 height;
PvInt64 bitCount;
if (!GetImageWidth(width).IsOK())
return false;
if (!GetImageHeight(height).IsOK())
return false;
if (!GetPixelBitCount(bitCount).IsOK())
return false;
// Start the movie compressor
if ( !mVideoCompressor->Start( mLocation,
width,
height,
bitCount/8,
59).IsOK())
{
MessageBox( mLocation,
L"Cannot start Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return false;
}
return true;
}
the function gets the video size info and then starts the actual compression:
PvResult VideoCompressor::Start(const CString& aFileName, unsigned short aSizeX, unsigned short aSizeY, unsigned short aBPP, double aFPS)
{
IAVIFile *lAVIFile = NULL;
IAVIStream *lAVIStream = NULL;
IAVIStream *lAVICompressedStream = NULL;
AVISTREAMINFO lAVISTREAMINFO;
AVICOMPRESSOPTIONS lAVICOMPRESSOPTIONS;
// Try to match the image format with the Video Compressor capabilities
BITMAPINFO lTempBI;
lTempBI.bmiHeader.biSize = sizeof( BITMAPINFOHEADER ); // biSize is the header size; sizeof( BITMAPINFO ) would be too large
lTempBI.bmiHeader.biWidth = aSizeX;
lTempBI.bmiHeader.biHeight = aSizeY;
lTempBI.bmiHeader.biPlanes = 1;
lTempBI.bmiHeader.biBitCount = aBPP * 8;
lTempBI.bmiHeader.biCompression = BI_RGB;
lTempBI.bmiHeader.biSizeImage = aSizeX * aSizeY * aBPP;
lTempBI.bmiHeader.biXPelsPerMeter = 1280;
lTempBI.bmiHeader.biYPelsPerMeter = 720;
lTempBI.bmiHeader.biClrUsed = 0;
lTempBI.bmiHeader.biClrImportant = 0;
//lTempBI.bmiHeader.
if( ( mCOMPVARS.hic != NULL ) && // if not the "Full Frames (uncompressed)"
( ICCompressQuery( mCOMPVARS.hic, &lTempBI, NULL ) != ICERR_OK ) )
{
mLastVideoError = "Image format not accepted by compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
// Try to open the stream for writing
if( mTempBuffer )
delete [] mTempBuffer;
mTempBuffer = new unsigned char[ aSizeX * aSizeY * aBPP ];
if( mTempBuffer == NULL )
{
mLastVideoError = "Cannot allocate memory for a temporary buffer!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
if( AVIFileOpen( &lAVIFile, aFileName, OF_CREATE | OF_WRITE, NULL ) != 0 )
{
mLastVideoError = "Cannot open movie file for writing!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
// Fill out AVIStream information
memset( &lAVISTREAMINFO, 0, sizeof( AVISTREAMINFO ) );
lAVISTREAMINFO.fccType = streamtypeVIDEO;
lAVISTREAMINFO.fccHandler = mCOMPVARS.fccHandler;
lAVISTREAMINFO.dwFlags = 0;
lAVISTREAMINFO.dwCaps = 0;
lAVISTREAMINFO.wPriority = 0;
lAVISTREAMINFO.wLanguage = 0;
lAVISTREAMINFO.dwScale = 100;
lAVISTREAMINFO.dwRate = (unsigned long)( aFPS * 100.0 );
lAVISTREAMINFO.dwStart = 0;
lAVISTREAMINFO.dwLength = 0;
lAVISTREAMINFO.dwInitialFrames = 0;
lAVISTREAMINFO.dwQuality = mCOMPVARS.lQ;
lAVISTREAMINFO.dwSuggestedBufferSize = aSizeX * aSizeY * aBPP;
lAVISTREAMINFO.dwSampleSize = aSizeX * aSizeY * aBPP;
SetRect(&lAVISTREAMINFO.rcFrame, 0, aSizeY, aSizeX, 0);
// Convert to a wchar_t*
char *orig = "Video Stream";
size_t origsize = strlen(orig) + 1;
const size_t newsize = 64;
size_t convertedChars = 0;
mbstowcs_s(&convertedChars, lAVISTREAMINFO.szName, origsize, orig, _TRUNCATE);
if( AVIFileCreateStream( lAVIFile, &lAVIStream, &lAVISTREAMINFO ) != 0 )
{
mLastVideoError = "Cannot create video stream!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
BITMAPINFOHEADER lBIH;
lBIH.biSize = sizeof( BITMAPINFOHEADER );
lBIH.biWidth = aSizeX;
lBIH.biHeight = aSizeY;
lBIH.biPlanes = 1;
lBIH.biBitCount = aBPP * 8;
lBIH.biCompression = BI_RGB;
lBIH.biSizeImage = aSizeX * aSizeY * aBPP;
lBIH.biXPelsPerMeter = 1280;
lBIH.biYPelsPerMeter = 720;
lBIH.biClrUsed = 0;
lBIH.biClrImportant = 0;
memset( &lAVICOMPRESSOPTIONS, 0, sizeof( AVICOMPRESSOPTIONS ) );
lAVICOMPRESSOPTIONS.fccType = streamtypeVIDEO;
lAVICOMPRESSOPTIONS.fccHandler = mCOMPVARS.fccHandler;
lAVICOMPRESSOPTIONS.dwKeyFrameEvery = 15;
lAVICOMPRESSOPTIONS.dwQuality = mCOMPVARS.lQ;
lAVICOMPRESSOPTIONS.dwBytesPerSecond = 0;
lAVICOMPRESSOPTIONS.dwFlags = AVICOMPRESSF_KEYFRAMES; //| AVICOMPRESSF_VALID;//|AVICOMPRESSF_DATARATE;
lAVICOMPRESSOPTIONS.lpFormat = &lBIH;
lAVICOMPRESSOPTIONS.cbFormat = sizeof( lBIH );
lAVICOMPRESSOPTIONS.lpParms = 0;
lAVICOMPRESSOPTIONS.cbParms = 0;
lAVICOMPRESSOPTIONS.dwInterleaveEvery = 0;
HRESULT lR = AVIMakeCompressedStream( &lAVICompressedStream, lAVIStream, &lAVICOMPRESSOPTIONS, NULL);
if( lR == AVIERR_NOCOMPRESSOR )
{
mLastVideoError = "Cannot find a suitable compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
else if( lR == AVIERR_MEMORY )
{
mLastVideoError = "Not enough memory to start the compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
else if( lR == AVIERR_UNSUPPORTED )
{
mLastVideoError = "Compression is not supported for this image buffer!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
if( AVIStreamSetFormat( lAVICompressedStream, 0, &lBIH, sizeof( lBIH ) ) != 0 )
{
mLastVideoError = "Cannot set stream format. It probably isn't supported by the Codec!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
///////////////////
HRESULT hr;
//IBaseFilter mux = Null;
//IFileSinkFilter sink = null;
// Guid x = new Guid( 0xe436eb88, 0x524f, 0x11ce, 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70 );
//ICaptureGraphBuilder2::SetOutputFileName(
//////////////////
// finishing up
mAVIFile = lAVIFile;
mAVIStream = lAVIStream;
mAVICompressedStream = lAVICompressedStream;
mSizeX = aSizeX;
mSizeY = aSizeY;
mBPP = aBPP;
mImageSize = aSizeX * aSizeY * aBPP;
mLastSample = 0;
mCompressing = true;
return PvResult::Code::OK;
}
this compresses the stream
PvResult VideoCompressor::Compress(PvBuffer *aPvBuffer)
{
if (!mCompressing)
return PvResult::Code::GENERIC_ERROR;
ASSERT( mTempBuffer != NULL );
long lSamplesWritten, lBytesWritten;
int numberOfLines = 0;
PvUInt8 * aBuffer = aPvBuffer->GetDataPointer();
for( unsigned short lLine = 0; lLine < mSizeY; lLine++ )
{
numberOfLines = lLine;
unsigned char *lCurLine = (unsigned char *)aBuffer + (lLine ) * mSizeX * mBPP;
unsigned char *lCurLineInv = mTempBuffer + (mSizeY - lLine - 1) * mSizeX * mBPP;
::memcpy( lCurLineInv, lCurLine, mSizeX * mBPP );
}
if( AVIStreamWrite( mAVICompressedStream, mLastSample, 1, mTempBuffer, mImageSize, 0,
&lSamplesWritten, &lBytesWritten ) != 0 ||
lSamplesWritten < 1 ||
lBytesWritten < 1 )
{
mLastVideoError = "Cannot compress image!";
return PvResult::Code::GENERIC_ERROR;
}
mLastSample ++;
return PvResult::Code::OK;
}
this is what it should look like:
http://i13.photobucket.com/albums/a269/Masterg_/Untitled-16.png
this is what it saves as (minus the guy):
http://i13.photobucket.com/albums/a269/Masterg_/vlcsnap-2011-06-07-13h11m34s97.png
From MSDN we have:
Syntax
DWORD ICCompressQuery(
hic,
lpbiInput,
lpbiOutput );
Parameters
hic : Handle to a compressor.
lpbiInput : Pointer to a BITMAPINFO structure containing the input format.
lpbiOutput : Pointer to a BITMAPINFO structure containing the output format. You can
specify zero for this parameter to
indicate any output format is
acceptable.
I might be wrong, but it seems to me that you are trying to "force" this input format without taking into account the actual format you are passing as input. If your input format does not match the "forced" one, weird results are to be expected.
If your actual input format is not compatible with your compressor, you could try using a color-space converter filter before your compressor.
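One common cause of exactly this kind of swapped-color output is channel order rather than the compressor itself: BI_RGB bitmaps store pixels in BGR byte order, so a camera that delivers RGB needs its red and blue channels swapped. A sketch of the line-flipping loop from Compress() extended to do the swap, assuming 24-bit pixels (mBPP == 3); whether your camera really delivers RGB is an assumption you would need to verify:
// Flip vertically (bottom-up DIB) and swap R and B while copying,
// since BI_RGB expects BGR order within each pixel.
for( unsigned short lLine = 0; lLine < mSizeY; lLine++ )
{
    const unsigned char *lSrc = aBuffer + lLine * mSizeX * mBPP;
    unsigned char *lDst = mTempBuffer + ( mSizeY - lLine - 1 ) * mSizeX * mBPP;
    for( unsigned short lCol = 0; lCol < mSizeX; lCol++ )
    {
        lDst[ lCol * 3 + 0 ] = lSrc[ lCol * 3 + 2 ]; // B <- R
        lDst[ lCol * 3 + 1 ] = lSrc[ lCol * 3 + 1 ]; // G
        lDst[ lCol * 3 + 2 ] = lSrc[ lCol * 3 + 0 ]; // R <- B
    }
}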