Tile Map Uses Too Much CPU With OpenGL and SDL - C++

I've been working on a method to draw a tile-based map with OpenGL and SDL, and I finally got it coded. But when I run the basic program, which draws a 25x16 tile map, and check the CPU usage, it shows about 25%, whereas without drawing the map it uses at most 1% of the CPU.
So is there another method to draw the map, or why is the CPU usage so high?
This is the code for drawing the map:
void CMapManager::drawMap(Map *map)
{
    vector<ImagePtr> tempImages = CGameApplication::getInstance()->getGameApp()->getImages();
    GLuint texture = tempImages.at(1)->getTexture();

    glColor3f(1.0f, 1.0f, 1.0f);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glBindTexture( GL_TEXTURE_2D, texture );

    glBegin( GL_QUADS );
    for (int i = 0; i < map->getHeight(); i++)
    {
        for (int j = 0; j < map->getWidth(); j++)
        {
            ImagePtr imgDraw = tempImages.at(map->getMapTiles()[i][j]->getTypeTile());

            glTexCoord2i( 0, 0 );
            glVertex3f( imgDraw->getPosX() + (imgDraw->getWidth()*j), imgDraw->getPosY() + (imgDraw->getHeight()*i), 0.f );
            //Bottom-left vertex (corner)
            glTexCoord2i( 1, 0 );
            glVertex3f( imgDraw->getOffsetX() + (imgDraw->getWidth()*j), imgDraw->getPosY() + (imgDraw->getHeight()*i), 0.f );
            //Bottom-right vertex (corner)
            glTexCoord2i( 1, 1 );
            glVertex3f( imgDraw->getOffsetX() + (imgDraw->getWidth()*j), imgDraw->getOffsetY() + (imgDraw->getHeight()*i), 0.f );
            //Top-right vertex (corner)
            glTexCoord2i( 0, 1 );
            glVertex3f( imgDraw->getPosX() + (imgDraw->getWidth()*j), imgDraw->getOffsetY() + (imgDraw->getHeight()*i), 0.f );
        }
    }
    glEnd();

    glDisable(GL_BLEND);
}
And this is the method where I call that function:
void CGameApplication::renderApplication()
{
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    glEnable(GL_TEXTURE_2D);

    vector<ImagePtr> tempImages = GApp->getImages();
    vector<ImagePtr>::iterator iterImage;
    for (iterImage = tempImages.begin(); iterImage != tempImages.end(); ++iterImage)
    {
        CImageM->drawSprites( (*iterImage)->getTexture(), (*iterImage)->getPosX(), (*iterImage)->getPosY(),
                              (*iterImage)->getOffsetX(), (*iterImage)->getOffsetY() );
    }

    vector<TextPtr> tempTexts = GApp->getTexts();
    vector<TextPtr>::iterator iterText;
    for (iterText = tempTexts.begin(); iterText != tempTexts.end(); ++iterText)
    {
        CTextM->drawFonts( (*iterText) );
    }

    CMapM->drawMap(GApp->getCurrentMap());
    glDisable(GL_TEXTURE_2D);
}
I have already set up a Timer that runs after these calls:
GameApplication->getCKeyboardHandler()->inputLogic();
GameApplication->renderApplication();
SDL_GL_SwapBuffers();
GameApplication->getGameApp()->getTimer()->delay();
And the delay function is:
void Timer::delay()
{
    if( this->getTicks() < 1000 / FRAMES_PER_SECOND )
    {
        SDL_Delay( ( 1000 / FRAMES_PER_SECOND ) - this->getTicks() );
    }
}
The constant FRAMES_PER_SECOND is 5 at the moment.
And the function that converts an image to a GL texture is:
GLuint CImageManager::imageToGLTexture(std::string name)
{
    GLuint texture;
    SDL_Surface *surface;
    GLenum texture_format;
    GLint nOfColors;

    if ( (surface = IMG_Load(name.c_str())) ) {
        // Check that the image's width is a power of 2
        if ( (surface->w & (surface->w - 1)) != 0 ) {
            printf("warning: image.bmp's width is not a power of 2\n");
        }
        // Also check if the height is a power of 2
        if ( (surface->h & (surface->h - 1)) != 0 ) {
            printf("warning: image.bmp's height is not a power of 2\n");
        }

        // get the number of channels in the SDL surface
        nOfColors = surface->format->BytesPerPixel;
        if (nOfColors == 4) // contains an alpha channel
        {
            if (surface->format->Rmask == 0x000000ff)
                texture_format = GL_RGBA;
            else
                texture_format = GL_BGRA_EXT;
        }
        else if (nOfColors == 3) // no alpha channel
        {
            if (surface->format->Rmask == 0x000000ff)
                texture_format = GL_RGB;
            else
                texture_format = GL_BGR_EXT;
        }
        else {
            printf("warning: the image is not truecolor.. this will probably break\n");
            // this error should not go unhandled
        }

        SDL_SetAlpha(surface, 0, 0);

        // Have OpenGL generate a texture object handle for us
        glGenTextures( 1, &texture );
        // Bind the texture object
        glBindTexture( GL_TEXTURE_2D, texture );
        // Set the texture's stretching properties
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
        // Edit the texture object's image data using the information SDL_Surface gives us
        glTexImage2D( GL_TEXTURE_2D, 0, nOfColors, surface->w, surface->h, 0,
                      texture_format, GL_UNSIGNED_BYTE, surface->pixels );
    }
    else {
        printf("SDL could not load the image: %s\n", SDL_GetError());
        SDL_Quit();
        exit(1);
    }

    if ( surface ) {
        SDL_FreeSurface( surface );
    }

    return texture;
}
Thanks beforehand for the help.

Above all, avoid state changes. Combine all your tiles into one texture atlas and render everything in a single glBegin/glEnd block.
If you don't want to make many changes, try display lists. OpenGL will be able to optimize your calls, but there is no guarantee it will run much faster.
If your map doesn't change a lot, use VBOs. They're the fastest way.
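For illustration, here is a minimal sketch of the display-list route, assuming the map geometry is static; buildMapList is a hypothetical helper that just records the existing drawMap call:

GLuint buildMapList(CMapManager *mapMgr, Map *map)
{
    GLuint list = glGenLists(1);    // allocate one display-list name
    glNewList(list, GL_COMPILE);    // record the commands without executing them
    mapMgr->drawMap(map);           // the existing immediate-mode drawing
    glEndList();
    return list;
}

Build the list once after loading the map, then replay it each frame with glCallList(mapList); the driver can optimize the recorded vertex stream instead of re-validating every glVertex3f call per frame.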

OpenGL texture appears too big on the screen

Okay, so I am working on a toy 2D engine. I initially used regular SDL_Surfaces for rendering and the built-in SDL_Renderer. But I thought, why not use OpenGL and get some experience with that?
But I am stuck now. I have a context and things are rendered to the screen, but it looks like the textures I am trying to display are way too big to fit on the screen. It's like I only see a couple of pixels, but not really.
Here is the texture class:
#include "texture.h"
Texture::Texture(std::string path, bool loadNow) {
//Initialize texture ID
mTextureID = 0;
//Initialize texture dimensions
width = 0;
height = 0;
this->path = path;
if(loadNow) {
loadTexture(path);
}
}
Texture::~Texture() {
freeTexture();
}
bool Texture::loadTexture(std::string path) {
    //Texture loading success
    loaded = false;
    SDL_Surface *image = IMG_Load(path.c_str());
    //Image loaded successfully
    if(image != NULL) {
        if((image->w & (image->w - 1)) != 0) {
            printf("Warning: image width not power of 2 -> %s\n", path.c_str());
        }
        if((image->h & (image->h - 1)) != 0) {
            printf("Warning: image height not power of 2 -> %s\n", path.c_str());
        }
        loaded = loadTextureFromPixels32(image, (GLuint)image->w, (GLuint)image->h);
    }
    //Report error
    if(!loaded) {
        printf( "Unable to load %s\n", path.c_str() );
    }
    return loaded;
}
bool Texture::loadTextureFromPixels32(SDL_Surface *image, GLuint width, GLuint height ) {
    //Free texture if it exists
    freeTexture();
    //Get texture dimensions
    this->width = width;
    this->height = height;
    //Generate texture ID
    glGenTextures(1, &mTextureID);
    //Bind texture ID
    glBindTexture(GL_TEXTURE_2D, mTextureID);
    //Generate texture
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image->pixels);
    //Set texture parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    //Unbind texture
    glBindTexture(GL_TEXTURE_2D, 0);
    //Check for error
    GLenum error = glGetError();
    if(error != GL_NO_ERROR) {
        printf("Error loading texture from %p pixels!\n", image->pixels);
        return false;
    }
    return true;
}
void Texture::render(GLfloat x, GLfloat y) {
    if(loaded) {
        //If the texture exists
        if(mTextureID != 0) {
            GLfloat realX = x;// - (this->width / 2);
            GLfloat realY = y;// - (this->height / 2);
            //Remove any previous transformations
            glLoadIdentity();
            //Move to rendering point
            glTranslatef(realX, realY, 0.f);
            glClearDepth(1.0f);
            //Set texture ID
            glBindTexture(GL_TEXTURE_2D, mTextureID);
            //Render textured quad
            glBegin(GL_QUADS);
                glTexCoord2f( 0.f, 0.f ); glVertex2f(0.f, 0.f);
                glTexCoord2f( 1.f, 0.f ); glVertex2f(width, 0.f);
                glTexCoord2f( 1.f, 1.f ); glVertex2f(width, height);
                glTexCoord2f( 0.f, 1.f ); glVertex2f(0.f, height);
            glEnd();
        }
    } else {
        // do nothing
    }
}
GLuint Texture::getWidth() {
    return this->width;
}

GLuint Texture::getHeight() {
    return this->height;
}

void Texture::freeTexture() {
    //Delete texture
    if(mTextureID != 0) {
        glDeleteTextures(1, &mTextureID);
        mTextureID = 0;
    }
    width = 0;
    height = 0;
}
I am guessing the problem is here, but it could also be in how I initialize OpenGL, so here is that:
void Main::initGL() {
    /* Request opengl 3.2 context.
     * SDL doesn't have the ability to choose which profile at this time of writing,
     * but it should default to the core profile */
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);

    /* Turn on double buffering with a 24bit Z buffer.
     * You may need to change this to 16 or 32 for your system */
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32);

    glContext = SDL_GL_CreateContext(this->window);
    glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
    glOrtho( 0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0 );
    SDL_GL_SetSwapInterval(0);

    //Initialize clear color
    glClearColor( 0.f, 0.f, 0.f, 1.f );
    //Enable texturing
    glEnable( GL_TEXTURE_2D );

    //Check for error
    GLenum error = glGetError();
    if(error != GL_NO_ERROR) {
        printf("Error initializing OpenGL!\n");
    }
}
SDL is initialized correctly; otherwise there wouldn't be anything on the screen. I am completely new to OpenGL, so any help would be appreciated.
You mix ordinary GL_TEXTURE_2D stuff with GL_TEXTURE_RECTANGLE, and enabling both is a very bad idea. You are using texcoords in the range [0,1], so you actually seem to want GL_TEXTURE_2D. You should rewrite your texture code to use that, and drop those rectangle textures entirely.
The next thing is that your projection setup is wrong. Your glOrtho call has no effect, since you completely overwrite it by loading the identity matrix a few lines later. You should make yourself familiar with the state machine approach the GL uses. As your matrices are currently set up, you draw a huge quad with most of it completely out of the screen.
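As a rough sketch of what a fixed setup might look like (assuming the same SCREEN_WIDTH/SCREEN_HEIGHT constants as in initGL above), the ortho projection belongs on the projection stack, so the identity load in Texture::render() no longer wipes it out:

// In initGL(): select the projection stack for the ortho matrix...
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0);

// ...then switch back to modelview for per-object transforms.
// Texture::render()'s glLoadIdentity()/glTranslatef() now only
// resets the modelview matrix, leaving the projection intact.
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();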
Now that part is completely strange:
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
This code will never create a core profile, because core profiles didn't even exist in GL 2.1; they were introduced in GL 3.2. It is unclear what SDL version you are using, but modern SDL is capable of selecting the profile.
However, your code is using completely outdated and deprecated OpenGL; there is no way this will work with a core profile. If you learn OpenGL in this decade, I'd strongly suggest that you forget about all that and start with some documentation/tutorial on modern GL, and actually use a core profile.
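For reference, a sketch of how SDL2 lets you request the profile explicitly (this assumes SDL 2.0 or later; the attribute does not exist in SDL 1.2):

SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
// Explicitly ask for a core profile instead of relying on a default:
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);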

Follow up: Asynchronous off-screen query performance

I recently asked this question:
How to perform asynchronous off-screen queries?
What I've heard, but haven't been able to confirm yet, is that rendering to the window is more expensive than rendering to a framebuffer. First of all, can anyone comment on this? Can I draw multiple scenes to framebuffers faster than I can to the window? Are there other options, e.g., pbuffers or PBOs?
I have started playing around with framebuffers, but I have not been able to get the query to work. Here's some pseudocode for what I have set up so far:
glfwWindowHint(GLFW_VISIBLE, GL_FALSE);
window = glfwCreateWindow(1, 1, "OpenGL", NULL, NULL);
glfwMakeContextCurrent(window);

glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, size, size);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rbo);

glEnable(GL_DEPTH_TEST);
glGenQueries(numberOfQueries, queries);

for (scene in scenesToRender)
{
    glClear(GL_DEPTH_BUFFER_BIT);
    glDepthFunc(GL_LESS);
    drawShadingObjects(scene);

    glBeginQuery(GL_SAMPLES_PASSED, queries[index]);
    glDepthFunc(GL_LEQUAL);
    drawShadedObject(scene);
    glEndQuery(GL_SAMPLES_PASSED);
}

collectQueryResults();
deleteBuffers();
So far everything runs, but all of the queries return "0". Is there something about querying when drawing to a framebuffer that is different than when drawing to the window buffer?
Again, my two questions are:
Can I draw multiple scenes to framebuffers faster than I can to the window? Are there other options, e.g., pbuffers or PBOs?
Is there something about querying when drawing to a framebuffer that is different than when drawing to the window buffer?
Try something like this:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
#include <vector>
using namespace std;

const unsigned int sz = 1024;

void drawScene( unsigned int multiplier )
{
    glViewport( 0, 0, sz, sz );
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    glRotatef( (float)glfwGetTime() * 50.f * multiplier, 0.f, 0.f, 1.f);
    glBegin(GL_TRIANGLES);
    glColor3f(1.f, 0.f, 0.f);
    glVertex3f(-0.6f, -0.4f, 0.f);
    glColor3f(0.f, 1.f, 0.f);
    glVertex3f(0.6f, -0.4f, 0.f);
    glColor3f(0.f, 0.f, 1.f);
    glVertex3f(0.f, 0.6f, 0.f);
    glEnd();
}

bool available( const vector< GLuint >& queries )
{
    for( size_t i = 0; i < queries.size(); ++i )
    {
        GLuint available = 0;
        glGetQueryObjectuiv( queries[i], GL_QUERY_RESULT_AVAILABLE, &available );
        if( GL_FALSE == available )
            return false;
    }
    return true;
}

int main()
{
    glfwInit();
    GLFWwindow* window = glfwCreateWindow( 400, 400, "Simple example", NULL, NULL );
    glfwMakeContextCurrent( window );
    glewInit();

    if( !glewIsSupported( "GL_VERSION_2_1" ) )
        return -1;
    if( !glewIsSupported( "GL_EXT_framebuffer_object" ) )
        return -1;

    GLuint fbo = 0;
    glGenFramebuffersEXT( 1, &fbo );
    glBindFramebufferEXT( GL_DRAW_FRAMEBUFFER_EXT, fbo );

    GLuint rbo0 = 0;
    glGenRenderbuffersEXT( 1, &rbo0 );
    glBindRenderbufferEXT( GL_RENDERBUFFER_EXT, rbo0 );
    glRenderbufferStorageEXT( GL_RENDERBUFFER_EXT, GL_RGBA, sz, sz );
    glFramebufferRenderbufferEXT( GL_DRAW_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_RENDERBUFFER_EXT, rbo0 );

    GLuint rbo1 = 0;
    glGenRenderbuffersEXT( 1, &rbo1 );
    glBindRenderbufferEXT( GL_RENDERBUFFER_EXT, rbo1 );
    glRenderbufferStorageEXT( GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, sz, sz );
    glFramebufferRenderbufferEXT( GL_DRAW_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, rbo1 );

    GLenum status = glCheckFramebufferStatusEXT( GL_FRAMEBUFFER_EXT );
    if( status != GL_FRAMEBUFFER_COMPLETE_EXT )
        return -1;
    glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, 0 );

    vector< GLuint > queries( 10 );
    glGenQueries( queries.size(), &queries[0] );

    glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, fbo );
    for( size_t i = 0; i < queries.size(); ++i )
    {
        glBeginQuery( GL_SAMPLES_PASSED, queries[i] );
        drawScene( i + 1 );
        glEndQuery( GL_SAMPLES_PASSED );
    }
    glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, 0 );

    // wait for queries to become available
    unsigned int cnt = 0;
    while( !available( queries ) )
    {
        cnt++;
    }

    // all queries available, display query results
    cout << "cnt: " << cnt << endl;
    for( size_t i = 0; i < queries.size(); ++i )
    {
        GLuint samples = 0;
        glGetQueryObjectuiv( queries[i], GL_QUERY_RESULT, &samples );
        cout << i << ": " << samples << endl;
    }
    cout << endl;

    glfwDestroyWindow( window );
    glfwTerminate();
    return 0;
}
Representative output on my system:
cnt: 1884
0: 157288
1: 157288
2: 157289
3: 157288
4: 157287
5: 157286
6: 157292
7: 157286
8: 157289
9: 157288

Load image with GDAL Libraries (VC++)

I have a problem when I try to load an image with the GDAL libraries and apply it as a texture in the OpenGL control. The problem is with the color, as you can see in the picture.
And this is the function that generates the texture from the image:
GLuint COpenGLControl::ReadGDALData(CString filename)
{
    BYTE* tempReturn;
    GLuint texture;
    GDALDataset *poDataset;

    GDALAllRegister();
    poDataset = (GDALDataset *) GDALOpen((const char *)(CStringA)filename, GA_ReadOnly);
    if (poDataset == NULL)
    {
        AfxMessageBox("Couldn't open selected file!");
        return NULL;
    }

    int Width = poDataset->GetRasterXSize(), Height = poDataset->GetRasterYSize();
    LONG LineBytes = (Width*8+31)/32*4;
    BYTE * pData = (BYTE *)new char[ LineBytes * Height * 3];

    nBands = poDataset->GetRasterCount();
    GDALRasterBand **poBand;
    poBand = new GDALRasterBand *[nBands];
    if (poBand == NULL)
    {
        AfxMessageBox("Couldn't open the bands!", MB_ICONWARNING);
        return NULL;
    }
    for (int i=0; i<nBands; i++)
    {
        poBand[i] = poDataset->GetRasterBand(i+1);
        if (poBand[i] == NULL)
        {
            AfxMessageBox("Couldn't open selected bands", MB_ICONWARNING);
            return NULL;
        }
    }

    int BandChoice = 2;
    nXsize = poBand[BandChoice]->GetXSize();
    nYsize = poBand[BandChoice]->GetYSize();

    if (BandChoice == 1)
    {
        poBandBlock_Gray = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize*nYsize));
        poBand[BandChoice]->RasterIO(GF_Read, 0, 0, nXsize, nYsize, poBandBlock_Gray, nXsize, nYsize, poBand[BandChoice]->GetRasterDataType(), 0, 0);
    }
    else
    {
        int nXsize_R, nXsize_G, nXsize_B;
        int nYsize_R, nYsize_G, nYsize_B;
        int BandChoiceR = 0;
        int BandChoiceG = 1;
        int BandChoiceB = 2;

        nXsize_R = poBand[BandChoiceR]->GetXSize();
        nXsize_G = poBand[BandChoiceG]->GetXSize();
        nXsize_B = poBand[BandChoiceB]->GetXSize();
        nYsize_R = poBand[BandChoiceR]->GetYSize();
        nYsize_G = poBand[BandChoiceG]->GetYSize();
        nYsize_B = poBand[BandChoiceB]->GetYSize();
        nXsize = nXsize_R;
        nYsize = nYsize_R;

        poBandBlock_R = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_R*nYsize_R));
        poBandBlock_G = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_G*nYsize_G));
        poBandBlock_B = (BYTE*)CPLMalloc(sizeof(BYTE)*(nXsize_B*nYsize_B));
        poBand[BandChoiceR]->RasterIO(GF_Read, 0, 0, nXsize_R, nYsize_R, poBandBlock_R, nXsize_R, nYsize_R, poBand[BandChoiceR]->GetRasterDataType(), 0, 0);
        poBand[BandChoiceG]->RasterIO(GF_Read, 0, 0, nXsize_G, nYsize_G, poBandBlock_G, nXsize_G, nYsize_G, poBand[BandChoiceG]->GetRasterDataType(), 0, 0);
        poBand[BandChoiceB]->RasterIO(GF_Read, 0, 0, nXsize_B, nYsize_B, poBandBlock_B, nXsize_B, nYsize_B, poBand[BandChoiceB]->GetRasterDataType(), 0, 0);
        delete poDataset;
    }

    if (BandChoice == 1)
    {
        for ( int i=0; i < Height; i++)
        {
            for ( int j=0; j < Width; j++)
            {
                pData[(Height-i-1) * LineBytes + j] = poBandBlock_Gray[i*Width + j];
            }
        }
        CPLFree(poBandBlock_Gray);
    }
    else
    {
        for ( int i=0; i<Height; i++)
        {
            for ( int j=0, j2=0; j < Width && j2 < 3 * Width; j++, j2+=3)
            {
                pData[(Height-i-1)*LineBytes + j2+2] = poBandBlock_R[i*Width + j];
                pData[(Height-i-1)*LineBytes + j2+1] = poBandBlock_G[i*Width + j];
                pData[(Height-i-1)*LineBytes + j2]   = poBandBlock_B[i*Width + j];
            }
        }
        CPLFree(poBandBlock_B);
        CPLFree(poBandBlock_R);
        CPLFree(poBandBlock_G);
    }

    // allocate a texture name
    glGenTextures( 1, &texture );
    // select our current texture
    glBindTexture( GL_TEXTURE_2D, texture );
    // select modulate to mix texture with color for shading
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    // when texture area is small, bilinear filter the closest mipmap
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                     GL_LINEAR_MIPMAP_NEAREST );
    // when texture area is large, bilinear filter the first mipmap
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    // if wrap is true, the texture wraps over at the edges (repeat)
    // ... false, the texture ends at the edges (clamp)
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, FALSE );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, FALSE );
    // build our texture mipmaps
    gluBuild2DMipmaps( GL_TEXTURE_2D, 3, Width, Height, GL_RGB, GL_UNSIGNED_BYTE, pData );
    // free buffer (allocated with new[], so it must be released with delete[])
    delete[] pData;

    return texture;
}
This is the Draw function:
void COpenGLControl::OnDraw(CDC *pDC)
{
    // TODO: Camera controls
    wglMakeCurrent(hdc,hrc);

    // Set color to use when clearing the background.
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClearDepth(1.0f);

    // Turn on backface culling
    glFrontFace(GL_CCW);
    glCullFace(GL_FRONT_AND_BACK);

    // Turn on depth testing
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);

    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear all objects
    glEnable( GL_TEXTURE_2D ); // enable texture for 2 dimensions

    glPushMatrix();
    if (filename.IsEmpty() == false)
    {
        imgData = ReadGDALData( filename );
        glBindTexture( GL_TEXTURE_2D, imgData );
        glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear all objects

        glLoadIdentity ();
        gluLookAt (0,0,1,0,0,0,0,1,0);
        glTranslatef (m_fPosX, m_fPosY, 0.0f);
        glScalef (m_fZoom,m_fZoom,1.0);

        glBegin( GL_QUADS ); // apply loaded texture to viewport
            glTexCoord2d(0.0,0.0); glVertex2d(-1.0,-1.0);
            glTexCoord2d(1.0,0.0); glVertex2d(+1.0,-1.0);
            glTexCoord2d(1.0,1.0); glVertex2d(+1.0,+1.0);
            glTexCoord2d(0.0,1.0); glVertex2d(-1.0,+1.0);
        glEnd();
    }
    glPopMatrix();

    glDisable( GL_TEXTURE_2D );
    glFlush();

    // Swap buffers
    SwapBuffers(hdc);
    wglMakeCurrent(NULL, NULL);
}
The problem is not so much in the color itself, but (from what I can tell from the sample) in the way your data is packed. Look into what byte ordering / row padding / color packing your OpenGL buffer expects, and what your GDAL loader actually provides. Just a hunch here, but it seems like your OpenGL code expects a 4th (alpha) component in your RGB structs, while your GDAL code doesn't supply that. Also, your GDAL loader aligns rows on 32-bit boundaries; check whether your OpenGL texture calls require that, too. Did you copy/paste the GDAL loader from a sample where somebody uses it to draw with BitBlt()? It looks that way.
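Two concrete things to check along those lines (a sketch, not a confirmed fix): the upload can be told how the client-side rows are packed, and the copy loop above actually stores bytes in B,G,R order while the upload declares GL_RGB:

// Tell OpenGL how the rows in pData are packed before uploading.
// The loader pads each row to a 32-bit boundary, which matches the
// default GL_UNPACK_ALIGNMENT of 4; for tightly packed rows use 1.
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);

// The copy loop writes pData[j2] = blue and pData[j2+2] = red, i.e. a
// B,G,R memory layout; uploading as GL_BGR (GL 1.2+, also available as
// GL_BGR_EXT) would match that layout without touching the loop:
gluBuild2DMipmaps(GL_TEXTURE_2D, 3, Width, Height, GL_BGR_EXT, GL_UNSIGNED_BYTE, pData);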

OpenGL Applying Texture to Tessellation

I'm trying to take a concave polygon and apply an image to it as a texture. The polygon can have multiple contours, both internal holes and external "islands". It can be any shape, but will be smaller than the image and will fit inside it. It does not necessarily touch the edges of the image.
I've successfully displayed the tessellated polygon, and textured a simple square, but can't get the two to work together.
Here's how I'm loading the texture:
GLuint GLTexture::LoadTextureRAW(const char *filename, bool wrap)
{
    GLuint texture;
    int width, height;
    BYTE * data;
    FILE * file;

    // open texture data
    file = fopen( filename, "rb" );
    if ( file == NULL ) return 0;

    // allocate buffer
    width = 256;
    height = 256;
    data = (BYTE *)malloc( width * height * 3 );

    // read texture data
    fread( data, width * height * 3, 1, file );
    fclose( file );

    glGenTextures( 1, &texture );
    glBindTexture( GL_TEXTURE_2D, texture );
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap ? GL_REPEAT : GL_CLAMP );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap ? GL_REPEAT : GL_CLAMP );
    gluBuild2DMipmaps( GL_TEXTURE_2D, 3, width, height, GL_RGB, GL_UNSIGNED_BYTE, data );
    free( data );

    return texture;
}
Here's the tessellation function:
GLuint tessellate1()
{
    GLuint id = glGenLists(1);  // create a display list
    if(!id) return id;          // failed to create a list, return 0

    GLUtesselator *tess = gluNewTess();   // create a tessellator
    if(!tess) return 0;                   // failed to create tessellation object, return 0

    GLdouble quad1[4][3] = { {-1,3,0}, {0,0,0}, {1,3,0}, {0,2,0} };

    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, GLTexture::LoadTextureRAW("texture.raw", true));

    // register callback functions
    gluTessCallback(tess, GLU_TESS_BEGIN, (void (CALLBACK *)())tessBeginCB);
    gluTessCallback(tess, GLU_TESS_END, (void (CALLBACK *)())tessEndCB);
    gluTessCallback(tess, GLU_TESS_ERROR, (void (CALLBACK *)())tessErrorCB);
    gluTessCallback(tess, GLU_TESS_VERTEX, (void (CALLBACK *)())tessVertexCB);

    glNewList(id, GL_COMPILE);
    glColor3f(1,1,1);
    gluTessBeginPolygon(tess, 0);   // with NULL data
    gluTessBeginContour(tess);
    gluTessVertex(tess, quad1[0], quad1[0]);
    gluTessVertex(tess, quad1[1], quad1[1]);
    gluTessVertex(tess, quad1[2], quad1[2]);
    gluTessVertex(tess, quad1[3], quad1[3]);
    gluTessEndContour(tess);
    gluTessEndPolygon(tess);
    glEndList();

    gluDeleteTess(tess);    // delete after tessellation
    glDisable(GL_TEXTURE_2D);
    setCamera(0, 0, 5, 0, 0, 0);

    return id;  // return handle ID of a display list
}
Here's the tessellation vertex callback function:
void CALLBACK tessVertexCB(void *data)
{
    // cast back to double type
    const GLdouble *ptr = (const GLdouble*)data;
    double dImageX = -1, dImageY = -1;

    // hardcoded extents of the polygon for the purposes of testing
    int minX = 607011, maxX = 616590;
    int minY = 4918219, maxY = 4923933;

    // get the % coord of the texture for a poly vertex. Assumes image and
    // poly bounds are the same for the purposes of testing
    dImageX = (ptr[0] - minX) / (maxX - minX);
    dImageY = (ptr[1] - minY) / (maxY - minY);

    glTexCoord2d(dImageX, dImageY);
    glVertex2d(ptr[0], ptr[1]);
}
And here's the display callback:
void displayCB()
{
    // clear buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

    // save the initial ModelView matrix before modifying ModelView matrix
    glPushMatrix();

    // transform camera
    glTranslatef(0, 0, cameraDistance);
    glRotatef(cameraAngleX, 1, 0, 0);   // pitch
    glRotatef(cameraAngleY, 0, 1, 0);   // heading

    // draw meshes
    glCallList(listId1);    // id of the tessellated poly

    // draw info messages
    showInfo();

    glPopMatrix();
    glutSwapBuffers();
}
The result of this is a correctly drawn polygon with no texture applied.
// init
glGenTextures( 1, &texture );
// vertex callback
glBindTexture(GL_TEXTURE_2D, 1);
I don't think the first ID returned by glGenTextures() is required to be 1.
Try using texture instead of 1 in your glBindTexture() call.
Also, there's really no reason to enable texturing and re-bind the texture for every vertex. Just do it once before you call into the tessellator.
You're not capturing the texture binding and Enable inside the display list, so they're not going to be taken into account when you replay it. So, either:
Capture the BindTexture and Enable inside the display list, or
BindTexture and Enable(TEXTURE_2D) before calling CallList (a sketch of this option follows).
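A minimal sketch of the second option, assuming the handle returned by LoadTextureRAW was kept in a variable (texId here is hypothetical):

glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texId);   // handle saved from LoadTextureRAW
glCallList(listId1);                   // replay the tessellated polygon
glDisable(GL_TEXTURE_2D);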
The problem was the glDisable(GL_TEXTURE_2D) call in the tessellation function. After removing it, the texture was applied correctly.

SDL Surface Pixel Format Conversion

I want to convert an SDL_Surface, which was loaded by IMG_Load(), to another pixel format (RGBA8) for an OpenGL texture. How can I do that?
I've read about SDL_ConvertSurface() in the documentation, but I can't figure out how to put it together.
Give "How To Load an OpenGL Texture from an SDL_Surface" a shot:
GLuint texture;         // This is a handle to our texture object
SDL_Surface *surface;   // This surface will tell us the details of the image
GLenum texture_format;
GLint nOfColors;

if( (surface = SDL_LoadBMP("image.bmp")) )
{
    // Check that the image's width is a power of 2
    if( (surface->w & (surface->w - 1)) != 0 )
    {
        printf("warning: image.bmp's width is not a power of 2\n");
    }
    // Also check if the height is a power of 2
    if( (surface->h & (surface->h - 1)) != 0 )
    {
        printf("warning: image.bmp's height is not a power of 2\n");
    }

    // get the number of channels in the SDL surface
    nOfColors = surface->format->BytesPerPixel;
    if( nOfColors == 4 )    // contains an alpha channel
    {
        if(surface->format->Rmask == 0x000000ff)
            texture_format = GL_RGBA;
        else
            texture_format = GL_BGRA;
    }
    else if( nOfColors == 3 )   // no alpha channel
    {
        if(surface->format->Rmask == 0x000000ff)
            texture_format = GL_RGB;
        else
            texture_format = GL_BGR;
    }
    else
    {
        printf("warning: the image is not truecolor.. this will probably break\n");
        // this error should not go unhandled
    }

    // Have OpenGL generate a texture object handle for us
    glGenTextures( 1, &texture );
    // Bind the texture object
    glBindTexture( GL_TEXTURE_2D, texture );
    // Set the texture's stretching properties
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    // Edit the texture object's image data using the information SDL_Surface gives us
    glTexImage2D
    (
        GL_TEXTURE_2D,
        0,
        nOfColors,
        surface->w,
        surface->h,
        0,
        texture_format,
        GL_UNSIGNED_BYTE,
        surface->pixels
    );
}
else
{
    printf("SDL could not load image.bmp: %s\n", SDL_GetError());
    SDL_Quit();
    return 1;
}

// Free the SDL_Surface only if it was successfully created
if( surface )
{
    SDL_FreeSurface( surface );
}
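If you specifically want the SDL_ConvertSurface() route the question mentions, a sketch for SDL 1.2 (matching the API used above) is to describe the target RGBA8 layout in an SDL_PixelFormat and convert into it. The mask values below assume the R,G,B,A byte order that GL_RGBA with GL_UNSIGNED_BYTE expects on a little-endian machine:

// Describe a 32-bit RGBA8 target format (byte order R,G,B,A on little-endian).
SDL_PixelFormat fmt;
memset(&fmt, 0, sizeof(fmt));
fmt.BitsPerPixel  = 32;
fmt.BytesPerPixel = 4;
fmt.Rmask = 0x000000ff;
fmt.Gmask = 0x0000ff00;
fmt.Bmask = 0x00ff0000;
fmt.Amask = 0xff000000;

// Convert the loaded surface; the original surface must still be freed separately.
SDL_Surface *converted = SDL_ConvertSurface(surface, &fmt, SDL_SWSURFACE);
if (converted)
{
    // converted->pixels can now always be uploaded as GL_RGBA,
    // regardless of what format IMG_Load produced.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, converted->w, converted->h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, converted->pixels);
    SDL_FreeSurface(converted);
}

This removes the need for the per-format branching above, at the cost of one extra copy of the pixel data during loading.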