Why can I not load a texture into my app? - opengl

I think texture mapping is a really easy task. Actually, I have implemented it many times before, but this time it fails and I don't know why. I can guarantee that the path used to load the texture is right. What other reasons could there be?
Here is my code:
GLuint mytexture;
// the two functions below come from NeHe's tutorial; I think they work well
AUX_RGBImageRec *LoadBMP(CHAR *Filename)
{
    FILE *File = NULL;
    if (!Filename)
    {
        return NULL;
    }
    File = fopen(Filename, "r");
    if (File)
    {
        fclose(File);
        return auxDIBImageLoadA(Filename);
    }
    return NULL;
}
int LoadGLTextures()
{
    int Status = FALSE;
    AUX_RGBImageRec *TextureImage[1];
    memset(TextureImage, 0, sizeof(void *) * 1);
    if ((TextureImage[0] = LoadBMP("NeHe.bmp")))
    {
        Status = TRUE;
        glGenTextures(1, &mytexture);
        glBindTexture(GL_TEXTURE_2D, mytexture);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, TextureImage[0]->sizeX, TextureImage[0]->sizeY, 0,
                     GL_RGB, GL_UNSIGNED_BYTE, TextureImage[0]->data);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    }
    if (TextureImage[0])
    {
        if (TextureImage[0]->data)
        {
            free(TextureImage[0]->data);
        }
        free(TextureImage[0]);
    }
    return Status;
}
//next is my Init() code:
bool DemoInit( void )
{
    if (!LoadGLTextures())
    {
        return FALSE;
    }
    glEnable(GL_TEXTURE_2D);
    ........ // other init is ok
}
bool DemoRender()
{
    ... /// render other things
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, mytexture);
    glColor3f(0, 0, 1);
    glBegin(GL_QUADS);
        glTexCoord2f(0, 0); glVertex2f(0, 0);
        glTexCoord2f(1, 0); glVertex2f(200, 0);
        glTexCoord2f(1, 1); glVertex2f(200, 200);
        glTexCoord2f(0, 1); glVertex2f(0, 200);
    glEnd();
    glDisable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, 0);
}
Pretty clear, huh? However, the final result is only a blue rectangle without the texture. Could anybody give me a hint?

Assuming TextureImage[0]->data is correctly populated:
However, the final result only has a blue rectangle without the texture.
You're using the default GL_MODULATE texture environment. Either switch glColor3f(0,0,1) to glColor3f(1,1,1) or use GL_DECAL.
You might also try a glPixelStorei(GL_UNPACK_ALIGNMENT, 1) before your glTexImage2D() since you're using GL_RGB for format.
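A minimal sketch of both suggestions (width, height, and pixels stand in for the question's TextureImage[0] fields):

// Option 1: keep the default GL_MODULATE but draw with white,
// so the texel colors pass through unmodified.
glColor3f(1.0f, 1.0f, 1.0f);
// Option 2: switch the texture environment to GL_DECAL,
// which ignores the vertex color for an RGB texture.
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// In either case, relax the row alignment before the upload, since
// tightly packed GL_RGB rows are usually not 4-byte aligned:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
             GL_RGB, GL_UNSIGNED_BYTE, pixels);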

The problem was that I had set GL_LINE polygon mode before loading the texture and failed to notice it. After switching back to GL_FILL mode, everything was fine!
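For anyone hitting the same thing, the polygon mode switch in question is a single call (a sketch; GL_FRONT_AND_BACK assumed):

// In wireframe mode only the quad's outline is rasterized,
// so the textured interior never appears.
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
// Filled mode rasterizes the interior and samples the texture normally.
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);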

Related

SDL2 (C++) not all .png images render correctly

Note: I am using Legacy OpenGL
So I have encountered a problem where only a few .png images get rendered correctly.
Here is an example screenshot of the rendered game (left: enemyRed.png, right: ship.png):
Here is the code I use to load images:
SpriteImage* SDLOpenGL::loadImage(std::string path) {
    SDL_Surface *surface = IMG_Load(path.c_str());
    SpriteImage *image = new SpriteImage(surface);
    return image;
}
Here is the SpriteImage class:
SpriteImage::SpriteImage(SDL_Surface *surface) {
    this->surface = surface;
    this->TextureID = 0;
}
void SpriteImage::bind() {
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 this->mode,
                 this->surface->w,
                 this->surface->h,
                 0,
                 this->mode,
                 GL_UNSIGNED_BYTE,
                 this->surface->pixels);
    glBindTexture(GL_TEXTURE_2D, this->TextureID);
}
int SpriteImage::getWidth() {
    return this->surface->w;
}
int SpriteImage::getHeight() {
    return this->surface->h;
}
Here is where I render the images:
(note, this->getCurrentImage() returns a "SpriteImage")
void Sprite::draw(float delta) {
    this->getCurrentImage()->bind();
    glBegin(GL_QUADS);
        glTexCoord2f(0, 0); glVertex2f(0.0f, 0.0f);
        glTexCoord2f(0, 1); glVertex2f(0.0f, this->getHeight());
        glTexCoord2f(1, 1); glVertex2f(this->getWidth(), this->getHeight());
        glTexCoord2f(1, 0); glVertex2f(this->getWidth(), 0.0f);
    glEnd();
    this->next();
}
The image that doesn't render has a width indivisible by four. You should use glPixelStorei with GL_UNPACK_ALIGNMENT before glTexImage2D to specify alignment of your rows in memory -- which are probably unaligned (OpenGL assumes by default that they are four-byte aligned).
You should first glBindTexture and then upload the data with glTexImage2D.
Do you ever call glGenTextures? You should initialize the TextureID with glGenTextures.
You should not upload the texture each and every time you bind the texture. Instead upload it in the constructor, then to switch textures you only need to glBindTexture your TextureID.
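Putting those points together, a sketch of how the class could look (the choice of mode from BytesPerPixel is an assumption for this sketch, since that member's initialization is not shown in the question):

SpriteImage::SpriteImage(SDL_Surface *surface) {
    this->surface = surface;
    // Assumed: pick the format from the surface layout.
    this->mode = (surface->format->BytesPerPixel == 4) ? GL_RGBA : GL_RGB;
    // Create and fill the texture object once, up front.
    glGenTextures(1, &this->TextureID);
    glBindTexture(GL_TEXTURE_2D, this->TextureID);
    // SDL_image rows are tightly packed, not 4-byte aligned.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, this->mode,
                 surface->w, surface->h, 0,
                 this->mode, GL_UNSIGNED_BYTE, surface->pixels);
    // Without these, the default mipmapped min filter leaves the
    // texture incomplete.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
void SpriteImage::bind() {
    // Binding is now all that is needed per draw.
    glBindTexture(GL_TEXTURE_2D, this->TextureID);
}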

OpenGL texture appears too big on the screen

Okay, so I am working on a toy 2D engine. I initially used regular SDL_Surfaces for rendering and the built-in SDL_Renderer. But I thought, why not use OpenGL and get some experience with that?
But I am stuck now. I have a context and things are rendered to the screen, but it looks like the textures I am trying to display are way too big to fit on the screen, as if I only see a couple of their pixels.
The texture class can be found here:
#include "texture.h"
Texture::Texture(std::string path, bool loadNow) {
//Initialize texture ID
mTextureID = 0;
//Initialize texture dimensions
width = 0;
height = 0;
this->path = path;
if(loadNow) {
loadTexture(path);
}
}
Texture::~Texture() {
freeTexture();
}
bool Texture::loadTexture(std::string path) {
    //Texture loading success
    loaded = false;
    SDL_Surface *image = IMG_Load(path.c_str());
    //Image loaded successfully
    if(image != NULL) {
        // Note: (w & (w - 1)) == 0 holds for powers of two, so the original
        // check was inverted; warn when the size is NOT a power of two.
        if((image->w & (image->w - 1)) != 0) {
            printf("Warning: image width not power of 2 -> %s\n", path.c_str());
        }
        if((image->h & (image->h - 1)) != 0) {
            printf("Warning: image height not power of 2 -> %s\n", path.c_str());
        }
        loaded = loadTextureFromPixels32(image, (GLuint)image->w, (GLuint)image->h);
    }
    //Report error
    if(!loaded) {
        printf("Unable to load %s\n", path.c_str());
    }
    return loaded;
}
bool Texture::loadTextureFromPixels32(SDL_Surface *image, GLuint width, GLuint height) {
    //Free texture if it exists
    freeTexture();
    //Get texture dimensions
    this->width = width;
    this->height = height;
    //Generate texture ID
    glGenTextures(1, &mTextureID);
    //Bind texture ID
    glBindTexture(GL_TEXTURE_2D, mTextureID);
    //Generate texture
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image->pixels);
    //Set texture parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    //Unbind texture
    glBindTexture(GL_TEXTURE_2D, 0);
    //Check for error
    GLenum error = glGetError();
    if(error != GL_NO_ERROR) {
        printf("Error loading texture from %p pixels!\n", image->pixels);
        return false;
    }
    return true;
}
void Texture::render(GLfloat x, GLfloat y) {
    if(loaded) {
        //If the texture exists
        if(mTextureID != 0) {
            GLfloat realX = x; // - (this->width / 2);
            GLfloat realY = y; // - (this->height / 2);
            //Remove any previous transformations
            glLoadIdentity();
            //Move to rendering point
            glTranslatef(realX, realY, 0.f);
            glClearDepth(1.0f);
            //Set texture ID
            glBindTexture(GL_TEXTURE_2D, mTextureID);
            //Render textured quad
            glBegin(GL_QUADS);
                glTexCoord2f(0.f, 0.f); glVertex2f(0.f, 0.f);
                glTexCoord2f(1.f, 0.f); glVertex2f(width, 0.f);
                glTexCoord2f(1.f, 1.f); glVertex2f(width, height);
                glTexCoord2f(0.f, 1.f); glVertex2f(0.f, height);
            glEnd();
        }
    } else {
        // do nothing
    }
}
GLuint Texture::getWidth() {
    return this->width;
}
GLuint Texture::getHeight() {
    return this->height;
}
void Texture::freeTexture() {
    //Delete texture
    if(mTextureID != 0) {
        glDeleteTextures(1, &mTextureID);
        mTextureID = 0;
    }
    width = 0;
    height = 0;
}
I am guessing the problem is here, but it could also be in how I initialize OpenGL, so here is that:
void Main::initGL() {
    /* Request opengl 3.2 context.
     * SDL doesn't have the ability to choose which profile at this time of writing,
     * but it should default to the core profile */
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
    /* Turn on double buffering with a 24bit Z buffer.
     * You may need to change this to 16 or 32 for your system */
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32);
    glContext = SDL_GL_CreateContext(this->window);
    glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
    glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0);
    SDL_GL_SetSwapInterval(0);
    //Initialize clear color
    glClearColor(0.f, 0.f, 0.f, 1.f);
    //Enable texturing
    glEnable(GL_TEXTURE_2D);
    //Check for error
    GLenum error = glGetError();
    if(error != GL_NO_ERROR) {
        printf("Error initializing OpenGL!\n");
    }
}
SDL is correctly initialized, otherwise there wouldn't be anything on the screen at all. I am completely new to OpenGL, so any help would be appreciated.
You mix ordinary GL_TEXTURE_2D stuff with GL_TEXTURE_RECTANGLE, and enabling both is a very bad idea. You are using texcoords in the range [0,1], so you actually seem to want GL_TEXTURE_2D. You should rewrite your texture code to use that and drop the rectangle textures entirely.
The next thing is that your projection setup is wrong. Your glOrtho call has no effect, since you completely overwrite it by loading the identity matrix a few lines later. You should make yourself familiar with the state machine approach the GL uses. As your matrices are currently set up, you draw a huge quad with most of it completely out of the screen.
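A sketch of a projection setup that survives a later glLoadIdentity on the modelview stack (names follow the question's initGL):

// Once, after context creation:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0);
// Leave modelview as the active stack; a per-frame
// glLoadIdentity in render() then resets only this matrix.
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();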
Now that part is completely strange:
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
This code will never create a core profile, because core profiles didn't even exist in GL 2.1; they were introduced in GL 3.2. It is unclear what SDL version you are using, but modern SDL is capable of selecting the profile.
However, your code is using completely outdated and deprecated OpenGL; there is no way this will work with a core profile. If you are learning OpenGL in this decade, I'd strongly suggest that you forget about all that, start with some documentation/tutorial on modern GL, and actually use a core profile.

Incorrect output texture on quad

I'm trying to display text in my application using FreeType. At first I expected a built-in function for this (which would be quite natural for a library intended to draw text), but there is only a function to render a single glyph. So I decided to render the characters one by one into textures. Here again I was disappointed: every guide uses a single image per texture (perhaps glTexSubImage2D can help me?). For now I render one glyph per texture and map each texture onto an OpenGL quad. Here's my code (it's quite messy, but for now I'm just trying to understand how it works):
//init:
if (FT_Init_FreeType(&ft)) {
    fprintf(stderr, "Could not init freetype library\n");
    return 0;
}
if (FT_New_Face(ft, fontfilename, 0, &face)) {
    fprintf(stderr, "Could not open font %s\n", fontfilename);
    return 0;
}
FT_Set_Pixel_Sizes(face, 0, 48);
FT_GlyphSlot g = face->glyph;
and from display():
void display()
{
    glClear(GL_COLOR_BUFFER_BIT);
    glClearColor(1.0, 1.0, 1.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity(); // load identity matrix
    std::string s = "QWERTYOG0l ";
    for(int i = 0; i < s.size(); i++){
        FT_Load_Char(face, s[i], FT_LOAD_RENDER);
        FT_GlyphSlot g = face->glyph;
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // Linear Filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Linear Filtering
        //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        gluBuild2DMipmaps(GL_TEXTURE_2D,
                          GL_RED,
                          g->bitmap.width,
                          g->bitmap.rows,
                          GL_RED,
                          GL_UNSIGNED_BYTE,
                          g->bitmap.buffer);
        glBegin(GL_QUADS);
            glTexCoord2f(0.0f, 0.0f); glVertex3f(0.1f*i-0.1, 0.07f, 0.0f);  //top left
            glTexCoord2f(0.0f, 1.0f); glVertex3f(0.1f*i, 0.07f, 0.0f);      //top right
            glTexCoord2f(1.0f, 1.0f); glVertex3f(0.1f*i, -0.07f, 0.0f);     //bottom right
            glTexCoord2f(1.0f, 0.0f); glVertex3f(0.1f*i-0.1, -0.07f, 0.0f); //bottom left
        glEnd();
    }
As you can see, the "O" and "T" are correct (if I swap the bottom left and top right corners of the texture it will be absolutely correct). But the other symbols seem shifted (for example, "E" is shifted to the left, from top to bottom).
The full code:
#include <math.h>
#include <iostream>
#include <GL/glew.h>
#include <GL/glut.h>
#include <ft2build.h>
#include FT_FREETYPE_H
FT_Library ft;
FT_Face face;
const char *fontfilename = "LucidaTypewriterBold.ttf";
GLuint texture[10];
GLint uniform_mytexture;
int setup() {
    if (FT_Init_FreeType(&ft)) {
        fprintf(stderr, "Could not init freetype library\n");
        return 0;
    }
    if (FT_New_Face(ft, fontfilename, 0, &face)) {
        fprintf(stderr, "Could not open font %s\n", fontfilename);
        return 0;
    }
    FT_Set_Pixel_Sizes(face, 0, 48);
    FT_Load_Char(face, 'O', FT_LOAD_RENDER);
    FT_GlyphSlot g = face->glyph;
    glGenTextures(1, &texture[0]); // Create The Texture
    glBindTexture(GL_TEXTURE_2D, texture[0]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // Linear Filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Linear Filtering
    //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, g->bitmap.width, g->bitmap.rows, GL_RED, GL_UNSIGNED_BYTE, g->bitmap.buffer);
    return 1;
}
void display()
{
    glClear(GL_COLOR_BUFFER_BIT);
    glClearColor(1.0, 1.0, 1.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity(); // load identity matrix
    std::string s = "QWERTYOG0l ";
    for(int i = 0; i < s.size(); i++){
        FT_Load_Char(face, s[i], FT_LOAD_RENDER);
        FT_GlyphSlot g = face->glyph;
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // Linear Filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Linear Filtering
        //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        gluBuild2DMipmaps(GL_TEXTURE_2D,
                          GL_RED,
                          g->bitmap.width,
                          g->bitmap.rows,
                          GL_RED,
                          GL_UNSIGNED_BYTE,
                          g->bitmap.buffer);
        glBegin(GL_QUADS);
            glTexCoord2f(0.0f, 0.0f); glVertex3f(0.1f*i-0.1, 0.07f, 0.0f);  //top left
            glTexCoord2f(0.0f, 1.0f); glVertex3f(0.1f*i, 0.07f, 0.0f);      //top right
            glTexCoord2f(1.0f, 1.0f); glVertex3f(0.1f*i, -0.07f, 0.0f);     //bottom right
            glTexCoord2f(1.0f, 0.0f); glVertex3f(0.1f*i-0.1, -0.07f, 0.0f); //bottom left
        glEnd();
    }
    //glActiveTexture(GL_TEXTURE0);
    //glBindTexture(GL_TEXTURE_2D, texture[0]); // Select Our Texture
    //glUniform1i(uniform_mytexture, /*GL_TEXTURE*/0);
    glutPostRedisplay();
    glutSwapBuffers();
}
void TimerFunction(int value)
{
}
int main(int argc, char *argv[])
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello World");
    //glutTimerFunc(30, TimerFunction, 1);
    glewInit();
    glEnable(GL_TEXTURE_2D);
    setup();
    glutDisplayFunc(display);
    glutMainLoop();
    return 0;
}
I have been looking into this for a bit, and while this answer is possibly incomplete, maybe it can help you figure it out.
Preliminary Note
Before I get to what I have found, I need to point out a problem with your texture coordinates. You have this:
glTexCoord2f(0.0f, 0.0f);glVertex3f(0.1f*i-0.1,0.07f,0.0f); //top left
glTexCoord2f(0.0f, 1.0f);glVertex3f(0.1f*i,0.07f,0.0f); //top right
glTexCoord2f(1.0f, 1.0f);glVertex3f(0.1f*i,-0.07f,0.0f); // bottom right
glTexCoord2f(1.0f, 0.0f);glVertex3f(0.1f*i-0.1,-0.07f,0.0f); //bottom left
when it should look like this:
glTexCoord2f(0.0f, 0.0f);glVertex3f(0.1f*i-0.1,0.07f,0.0f); //top left
glTexCoord2f(1.0f, 0.0f);glVertex3f(0.1f*i,0.07f,0.0f); //top right
glTexCoord2f(1.0f, 1.0f);glVertex3f(0.1f*i,-0.07f,0.0f); // bottom right
glTexCoord2f(0.0f, 1.0f);glVertex3f(0.1f*i-0.1,-0.07f,0.0f); //bottom left
note how the top left corresponds to 0, 0 in texture coordinates, and 1, 1 corresponds to the bottom right. This is because (kind of guessing here) FreeType treats the top left as its origin.
The Stuff That May Help
FreeType will not necessarily generate a bitmap whose dimensions are powers of two, which is often required for mipmapping (see: https://gamedev.stackexchange.com/a/7929 ).
So if you want to test this (note: do not actually use this in your code; this is only for illustration), you can replace your gluBuild2DMipmaps call in display with the following (be sure to #include <cstring>):
int pitch = g->bitmap.pitch;
if (pitch < 0) {
    pitch = -pitch;
}
unsigned char data[4096] = {0};
for (int row = 0; row < g->bitmap.rows; ++row) {
    std::memcpy(data + 64 * row, g->bitmap.buffer + pitch * row, pitch);
}
gluBuild2DMipmaps(
    GL_TEXTURE_2D,
    GL_RGBA,
    64,
    64,
    GL_RED,
    GL_UNSIGNED_BYTE,
    data
);
What it does is copy the bitmap buffer to the upper left corner of a different 64x64-byte buffer, and then builds the mipmaps from that. This is the result:
Further Notes
My illustration code is bad because it copies the bitmap data for each glyph every redraw, and it does not take into account the actual size of the bitmap buffer, or if pitch is greater than 64. You also probably do not want to be (re)generating your mipmaps every redraw, either, but if you are just trying to learn how to get words into OpenGL do not worry about it :)
Edit: I had to use a different font than you because I do not have yours.
As tecu said, the correct solution is using textures with power-of-two sizes.
Also, before that answer, I found another solution:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); before gluBuild2DMipmaps. But this brings more problems, like a gray border around the texture.
For those pursuing similar goals, I want to share my experience:
Make black on a transparent background:
// Swizzle parameters are enums, so use the integer variant.
GLint swizzleMask[] = { GL_ZERO, GL_ZERO, GL_ZERO, GL_RED };
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzleMask);
UPD: there is a simpler and more obvious solution without using an OpenGL extension.
gluBuild2DMipmaps(GL_TEXTURE_2D,
                  GL_ALPHA,
                  g->bitmap.width,
                  g->bitmap.rows,
                  GL_ALPHA, // the glyph buffer is one byte per pixel
                  GL_UNSIGNED_BYTE,
                  g->bitmap.buffer);
Connect all the letters into a single texture
I think this is better for performance, but I am not sure I am doing it the right way.
if (text[i] == ' ') {
    left += 20;
} else {
    for (int row = 0; row < g->bitmap.rows; ++row) {
        std::memcpy(data + left + 64 * (strSize * (row + 64 - g->bitmap_top)),
                    g->bitmap.buffer + pitch * row, pitch);
    }
}
left += g->advance.x >> 6;
It will be better if you calculate the width and height (and round them up to powers of two) before combining the glyphs into the data array.
If you want kerning, you should write your own slower version of memcpy that adds to (rather than fully overwrites) the destination value and checks for exceeding UCHAR_MAX, as sketched below.
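A sketch of such an additive copy, saturating at UCHAR_MAX instead of wrapping (the name memcpy_add is made up here; dst and src mirror the data and bitmap buffers above):

#include <climits>
#include <cstddef>
static void memcpy_add(unsigned char *dst, const unsigned char *src, std::size_t n) {
    for (std::size_t i = 0; i < n; ++i) {
        // Add instead of overwriting, so overlapping glyph edges accumulate.
        unsigned int sum = dst[i] + src[i];
        dst[i] = (sum > UCHAR_MAX) ? UCHAR_MAX : (unsigned char)sum;
    }
}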
My final result:

Wrong OpenGL initialization

My program displays video frames as OpenGL textures.
I have problems with OpenGL initialization. To see video I need to start the rendering thread, stop it, and start it again. I think I am missing something in the CRenderThread::InitOpenGL() function. What should I do for correct OpenGL initialization?
My environment:
Windows 7 x64
Microsoft Visual Studio 2008 x64
Here is the code:
#include "RenderThread.h"
#include <QtDebug>
#include <vm_time.h>
static Ipp32u UMCToInternalFormat(UMC::ColorFormat format)
{
    switch(format)
    {
    case UMC::BGR24: return GL_BGR;
    case UMC::BGR32: return GL_BGRA;
    case UMC::RGB24: return GL_RGB;
    case UMC::RGB32: return GL_RGBA;
    }
    return 0;
}
CRenderThread::CRenderThread(const WId& rnWindowHandle)
    : m_bInitialized(false)
    , m_WindowHandle(rnWindowHandle)
    , m_Texture(0)
    , m_fTextureWidth(0.0f)
    , m_fTextureHeight(0.0f)
    , m_nFrameWidth(0)
    , m_nFrameHeight(0)
    , m_nWindowWidth(0)
    , m_nWindowHeight(0)
{
}
void CRenderThread::PrepareWork()
{
    // Wait until first frame comes
    if(!m_bAbort)
        Suspend();
}
void CRenderThread::DoOnStop()
{
    if(m_WindowGLResourceContext)
    {
        wglDeleteContext(m_WindowGLResourceContext);
        m_WindowGLResourceContext = 0;
    }
    ReleaseDC(m_WindowHandle, m_WindowDC);
    if(m_Texture)
    {
        glDeleteTextures(1, &m_Texture);
        m_Texture = 0;
    }
}
void CRenderThread::InitOpenGL()
{
    PIXELFORMATDESCRIPTOR pfd = {
        sizeof(PIXELFORMATDESCRIPTOR), 1, PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
        PFD_TYPE_RGBA, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, PFD_MAIN_PLANE, 0, 0, 0, 0
    };
    m_WindowDC = GetDC(m_WindowHandle);
    if(!m_WindowDC)
        return;
    if(!SetPixelFormat(m_WindowDC, ChoosePixelFormat(m_WindowDC, &pfd), &pfd))
        return;
    m_WindowGLResourceContext = wglCreateContext(m_WindowDC); // create rendering context
    if(!m_WindowGLResourceContext)
        return;
    if(!wglMakeCurrent(m_WindowDC, m_WindowGLResourceContext)) // set it as current
        return;
    // The OpenGL context is now tied to the output window; disable all
    // slow GL components (not mandatory if we have an accelerated card)
    glClearColor(0.0f, 170.0f, 255.0f, 1.0f);
    glClearDepth(1.0);
    glDepthFunc(GL_NEVER);
    // disable slow GL extensions
    glDisable(GL_DEPTH_TEST); glDisable(GL_ALPHA_TEST); glDisable(GL_BLEND);
    glDisable(GL_DITHER); glDisable(GL_FOG); glDisable(GL_STENCIL_TEST);
    glDisable(GL_LIGHTING); glDisable(GL_LOGIC_OP); glDisable(GL_TEXTURE_1D);
    glDisable(GL_TEXTURE_2D);
    glPixelTransferi(GL_MAP_COLOR, GL_FALSE);
    glPixelTransferi(GL_RED_SCALE, 1); glPixelTransferi(GL_RED_BIAS, 0);
    glPixelTransferi(GL_GREEN_SCALE, 1); glPixelTransferi(GL_GREEN_BIAS, 0);
    glPixelTransferi(GL_BLUE_SCALE, 1); glPixelTransferi(GL_BLUE_BIAS, 0);
    glPixelTransferi(GL_ALPHA_SCALE, 1); glPixelTransferi(GL_ALPHA_BIAS, 0);
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &m_Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glViewport(0, 0, m_nWindowWidth, m_nWindowHeight);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glRasterPos2i(-1, 1);   // move to the upper left corner
    glPixelZoom(1.0, -1.0); // top to bottom
    SwapBuffers(m_WindowDC);
    m_bInitialized = true;
}
void CRenderThread::SetRenderFrame(PVideoData pFrame)
{
    Q_ASSERT(pFrame.get());
    {
        //boost::mutex::scoped_lock Lock(m_FrameMutex);
        m_pFrameToRender = pFrame;
    }
    // Resume thread to render current frame
    Resume();
}
void CRenderThread::DoWork()
{
    IppiSize CurWinSize;
    UMC::Status nStatus = UMC::UMC_OK;
    ::RECT rect;
    GetClientRect(m_WindowHandle, &rect);
    CurWinSize.height = rect.bottom;
    CurWinSize.width = rect.right;
    if(!m_bInitialized)
        InitOpenGL();
    if(CurWinSize.width > IPP_MAX_16S || CurWinSize.height > IPP_MAX_16S) // window seems to be destroyed
        return;
    // reinit buffers if window size has been changed
    if(CurWinSize.height != m_nWindowHeight || CurWinSize.width != m_nWindowWidth)
    {
        m_nWindowWidth = CurWinSize.width;
        m_nWindowHeight = CurWinSize.height;
        glViewport(0, 0, m_nWindowWidth, m_nWindowHeight);
    }
    // Render frame
    {
        //boost::mutex::scoped_lock Lock(m_FrameMutex);
        if(m_pFrameToRender.get())
        {
            if(m_nWindowWidth && m_nWindowHeight)
            {
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                m_nFrameWidth = m_pFrameToRender->GetWidth();
                m_nFrameHeight = m_pFrameToRender->GetHeight();
                m_nRenderFormat = UMCToInternalFormat(m_pFrameToRender->GetColorFormat());
                glTexImage2D(GL_TEXTURE_2D, 0, 3, m_nFrameWidth, m_nFrameHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, m_pFrameToRender->GetBufferPointer());
                //glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_nFrameWidth, m_nFrameHeight, m_nRenderFormat, GL_UNSIGNED_BYTE, m_pFrameToRender->GetBufferPointer());
                glBegin(GL_POLYGON);
                    glTexCoord2i(0, 0); glVertex2f(-1.0,  1.0);
                    glTexCoord2i(1, 0); glVertex2f( 1.0,  1.0);
                    glTexCoord2i(1, 1); glVertex2f( 1.0, -1.0);
                    glTexCoord2i(0, 1); glVertex2f(-1.0, -1.0);
                glEnd();
                glFlush();
                SwapBuffers(m_WindowDC); // to draw on physical screen
            }
        }
    }
    // Wait for next frame to render
    if(!m_bAbort)
        Suspend();
}
A couple of thoughts, not all necessarily related to your problem:
glClearColor(0.0f, 170.0f, 255.0f, 1.0f);
Clear color is clamped to the range of [0,1], not [0,255].
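If the intent was the RGB color (0, 170, 255), the components need to be normalized to that range first, for example:

// 170/255 and 255/255 expressed in the [0,1] range glClearColor expects.
glClearColor(0.0f, 170.0f / 255.0f, 1.0f, 1.0f);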
// disable slow GL extensions
glDisable(GL_DEPTH_TEST); glDisable(GL_ALPHA_TEST); glDisable(GL_BLEND);
glDisable(GL_DITHER); glDisable(GL_FOG); glDisable(GL_STENCIL_TEST);
glDisable(GL_LIGHTING); glDisable(GL_LOGIC_OP); glDisable(GL_TEXTURE_1D);
glDisable(GL_TEXTURE_2D);
These (and almost all OpenGL settings) are disabled by default, so these calls are doing nothing. They are not hurting anything, though.
glPixelTransferi(GL_MAP_COLOR, GL_FALSE);
glPixelTransferi(GL_RED_SCALE, 1); glPixelTransferi(GL_RED_BIAS, 0);
glPixelTransferi(GL_GREEN_SCALE, 1); glPixelTransferi(GL_GREEN_BIAS, 0);
glPixelTransferi(GL_BLUE_SCALE, 1); glPixelTransferi(GL_BLUE_BIAS, 0);
glPixelTransferi(GL_ALPHA_SCALE, 1); glPixelTransferi(GL_ALPHA_BIAS, 0);
Again, these are all the defaults.
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &m_Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
This might actually be a real problem. glTexParameter only affects the currently bound texture, but you're calling it here with no texture bound, so these calls are doing nothing. When you actually do use a texture later, it will still have the default mipmap-based min filter, which could cause it not to be displayed. Move your glTexParameter calls to after you have bound the texture that you want them to affect.
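A sketch of the corrected ordering (same calls as in the question, just with the texture bound first):

glGenTextures(1, &m_Texture);
glBindTexture(GL_TEXTURE_2D, m_Texture); // bind first...
// ...so the parameters apply to m_Texture rather than to no texture at all.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);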

rendering SDL_TTF text onto openGL Red Square instead of text

I've been attempting to render text onto an openGL window using SDL and the SDL_TTF library on windows XP, VS2010.
Versions:
SDL version 1.2.14
SDL TTF devel 1.2.10
openGL (version is at least 2-3 years old).
I have successfully created an openGL window using SDL / SDL_image and can render lines / polygons onto it with no problems.
However, moving on to text, it appears that there is some flaw in my current program; I am getting the following result when trying this code here
For those not willing to visit the pastebin, here are only the crucial code segments:
void drawText(char * text) {
    glLoadIdentity();
    SDL_Color clrFg = {0, 0, 255, 0}; // set colour to blue (or 'red' for BGRA)
    SDL_Surface *sText = TTF_RenderUTF8_Blended(fntCourier, text, clrFg);
    GLuint *texture = create_texture(sText);
    glBindTexture(GL_TEXTURE_2D, *texture);
    // draw a polygon and map the texture to it, may be the source of error
    glBegin(GL_QUADS); {
        glTexCoord2i(0, 0); glVertex3f(0, 0, 0);
        glTexCoord2i(1, 0); glVertex3f(0 + sText->w, 0, 0);
        glTexCoord2i(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
        glTexCoord2i(0, 1); glVertex3f(0, 0 + sText->h, 0);
    } glEnd();
    // free the surface and texture, removing this code has no effect
    SDL_FreeSurface(sText);
    glDeleteTextures(1, texture);
}
segment 2:
// create GLTexture out of SDL_Surface
GLuint * create_texture(SDL_Surface *surface) {
    GLuint texture = 0;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    // The SDL_Surface appears to have BGR_A formatting, however this ends up with a
    // white rectangle no matter which colour i set in the previous code.
    int Mode = GL_RGB;
    if(surface->format->BytesPerPixel == 4) {
        Mode = GL_RGBA;
    }
    glTexImage2D(GL_TEXTURE_2D, 0, Mode, surface->w, surface->h, 0, Mode,
                 GL_UNSIGNED_BYTE, surface->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    return &texture;
}
Is there an obvious bit of code I am missing?
Thank you for any help on this subject.
I've been trying to learn openGL and SDL for 3 days now, so please forgive any misinformation on my part.
EDIT:
I notice that using
TTF_RenderUTF8_Shaded
TTF_RenderUTF8_Solid
throws a null pointer exception, meaning that there is an error within the actual text rendering function (I suspect). I do not know how that relates to TTF_RenderUTF8_Blended returning a red square, but I suspect all troubles hinge on this.
I think the problem is in the glEnable(GL_TEXTURE_2D) and glDisable(GL_TEXTURE_2D) calls, which must be made every time the text is painted on the screen. And maybe the color conversion between the SDL and GL surfaces is not right either.
I have combined create_texture and drawText into a single function that displays the text properly. Here is the code:
void drawText(char * text, TTF_Font* tmpfont) {
    SDL_Rect area;
    SDL_Color clrFg = {0, 0, 255, 0};
    SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Blended(tmpfont, text, clrFg));
    area.x = 0; area.y = 0; area.w = sText->w; area.h = sText->h;
    // Note: the alpha mask must not collide with the red mask; 0xff000000
    // is the mask for the high byte of a 32-bit RGBA surface.
    SDL_Surface* temp = SDL_CreateRGBSurface(SDL_HWSURFACE | SDL_SRCALPHA, sText->w, sText->h, 32,
                                             0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000);
    SDL_BlitSurface(sText, &area, temp, NULL);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sText->w, sText->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, temp->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glEnable(GL_TEXTURE_2D);
    glBegin(GL_QUADS); {
        glTexCoord2d(0, 0); glVertex3f(0, 0, 0);
        glTexCoord2d(1, 0); glVertex3f(0 + sText->w, 0, 0);
        glTexCoord2d(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
        glTexCoord2d(0, 1); glVertex3f(0, 0 + sText->h, 0);
    } glEnd();
    glDisable(GL_TEXTURE_2D);
    SDL_FreeSurface(sText);
    SDL_FreeSurface(temp);
}
screenshot
I'm initializing OpenGL as follows:
int Init(){
    glClearColor(0.1, 0.2, 0.2, 1);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 600, 300, 0, -1, 1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if(glGetError() != GL_NO_ERROR){
        return false;
    }
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA);
}
I think you should just add glEnable(GL_BLEND), because the text surface comes from TTF_RenderUTF8_Blended(fntCourier, text, clrFg) and you have to enable the blending abilities of OpenGL to see it.
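A sketch of the usual setup for alpha-blended text (this assumes GL_SRC_ALPHA, the more common source factor than the GL_SRC_COLOR used in the Init above):

glEnable(GL_BLEND);
// Weight incoming fragments by their alpha, the background by the remainder.
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);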
EDIT
Okay, I finally took the time to put your code through a compiler. Most importantly, I compiled with -Werror so that warnings turn into errors:
GLuint * create_texture(SDL_Surface *surface) {
    GLuint texture = 0;
    /*...*/
    return &texture;
}
I didn't see it at first, because it's something like C coder's 101 and quite unexpected: you must not return pointers to local variables! Once the function goes out of scope, the returned pointer points to nonsense. Why do you return a pointer at all? Just return an integer:
GLuint create_texture(SDL_Surface *surface) {
    GLuint texture = 0;
    /*...*/
    return texture;
}
Because of this, you're also never going to delete the texture afterwards. You upload it to OpenGL, but then lose the reference to it.
Your code also misses a glEnable(GL_TEXTURE_2D); that's why you can't see any effect of the texture. However, your use of textures is suboptimal. The way you did it, you recreate a whole new texture each time you're about to draw that text. If that happens in an animation loop, you'll:
1. run out of texture memory rather soon
2. slow it down significantly
(1) can be addressed by not generating a new texture name each redraw.
(2) can be addressed by uploading new texture data only when the text changes, and by using glTexSubImage2D rather than glTexImage2D (of course, if the dimensions of the texture change, it must be glTexImage2D), as in the sketch below.
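A sketch of that update path, assuming the texture object already has storage of the right size from an initial glTexImage2D (texture here is the value returned by the fixed create_texture):

glBindTexture(GL_TEXTURE_2D, texture);
// Re-upload into the existing storage instead of reallocating it.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, sText->w, sText->h,
                GL_RGBA, GL_UNSIGNED_BYTE, sText->pixels);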
EDIT, found another possible issue, but first fix your pointer issue.
You should make sure that you're using the GL_REPLACE or GL_MODULATE texture environment mode. If you use GL_DECAL or GL_BLEND, you end up with red text on a red quad.
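For example (a sketch; GL_REPLACE shown):

// Show the texel colors unchanged, ignoring the current vertex color.
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);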
The function in my previous post was leaking memory, and the program was crashing after some time...
I improved this by separating the texture loading and displaying:
The first function must be called before the SDL loop. It loads a text string into memory:
Every string loaded must have a different txtNum parameter.
GLuint texture[100];
SDL_Rect area[100];
void Load_string(char * text, SDL_Color clr, int txtNum, const char* file, int ptsize){
    TTF_Font* tmpfont;
    tmpfont = TTF_OpenFont(file, ptsize);
    SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Solid(tmpfont, text, clr));
    area[txtNum].x = 0; area[txtNum].y = 0; area[txtNum].w = sText->w; area[txtNum].h = sText->h;
    glGenTextures(1, &texture[txtNum]);
    glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, sText->w, sText->h, 0, GL_BGRA, GL_UNSIGNED_BYTE, sText->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    SDL_FreeSurface(sText);
    TTF_CloseFont(tmpfont);
}
The second one displays the string and must be called in the SDL loop:
void drawText(float coords[3], int txtNum) {
    glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
    glEnable(GL_TEXTURE_2D);
    glBegin(GL_QUADS); {
        glTexCoord2f(0, 0); glVertex3f(coords[0], coords[1], coords[2]);
        glTexCoord2f(1, 0); glVertex3f(coords[0] + area[txtNum].w, coords[1], coords[2]);
        glTexCoord2f(1, 1); glVertex3f(coords[0] + area[txtNum].w, coords[1] + area[txtNum].h, coords[2]);
        glTexCoord2f(0, 1); glVertex3f(coords[0], coords[1] + area[txtNum].h, coords[2]);
    } glEnd();
    glDisable(GL_TEXTURE_2D);
}