I am trying to draw a box in OpenGL (the top is open). I gave it textures, but they render very strangely: the side texture sometimes appears on the bottom when I rotate the box.
This is the front view:
After rotating it a bit:
Clearly, something is wrong here. The blue texture belongs to the side panel, but after rotating it appears on the floor. The grey one is the texture for the front panel, so in the front view the back panel shouldn't be visible at all, yet it can be seen.
Code snippets:
Method for drawing the container (lid not included):
void box::drawContainer() {
GLuint tex = loadTexture("wood.bmp");
glPushMatrix();
glBindTexture(GL_TEXTURE_2D, tex);
drawBlock(vertex(0, 0, 0), length + 0.2, length + 0.2, thickness); // bottom
tex = loadTexture("tiles.bmp");
glBindTexture(GL_TEXTURE_2D, tex);
drawBlock(
vertex((length - thickness) / 2.0,
(containerHeight + thickness) / 2.0, 0), thickness, breadth,
containerHeight); // right
tex = loadTexture("ocean.bmp");
glBindTexture(GL_TEXTURE_2D, tex);
drawBlock(
vertex((thickness - length) / 2.0,
(containerHeight + thickness) / 2.0, 0), thickness, breadth,
containerHeight); // left
tex = loadTexture("smoke.bmp");
glBindTexture(GL_TEXTURE_2D, tex);
drawBlock(
vertex(0, (containerHeight + thickness) / 2.0,
(breadth - thickness) / 2.0), (length - 2.0 * thickness),
thickness, containerHeight); // front
tex = loadTexture("lightning.bmp");
glBindTexture(GL_TEXTURE_2D, tex);
drawBlock(
vertex(0, (containerHeight + thickness) / 2.0,
(thickness - breadth) / 2.0), (length - 2.0 * thickness),
thickness, containerHeight); // back
glPopMatrix();
}
The drawBlock method:
void object::drawBlock(vertex center, float length, float breadth,
float height) {
glPushMatrix();
glTranslatef(center.x, center.y, center.z);
glScalef(length, height, breadth);
glBegin(GL_QUADS);
drawPrimitive(vertex(-0.5, 0.5, 0.5), vertex(-0.5, -0.5, 0.5),
vertex(0.5, -0.5, 0.5), vertex(0.5, 0.5, 0.5),
vertex(-0.5, 0.5, -0.5), vertex(-0.5, -0.5, -0.5),
vertex(0.5, -0.5, -0.5), vertex(0.5, 0.5, -0.5));
glEnd();
glPopMatrix();
}
drawPrimitive method:
void object::drawPrimitive(vertex v1, vertex v2, vertex v3, vertex v4,
vertex v5, vertex v6, vertex v7, vertex v8) {
drawFace(v1, v2, v3, v4);
drawFace(v5, v6, v7, v8);
drawFace(v1, v5, v6, v2);
drawFace(v4, v3, v7, v8);
drawFace(v1, v4, v8, v5);
drawFace(v2, v6, v7, v3);
}
And, drawFace method:
void object::drawFace(vertex v1, vertex v2, vertex v3, vertex v4) {
glTexCoord2f(0, 1);
glVertex3f(v1.x, v1.y, v1.z);
glTexCoord2f(0, 0);
glVertex3f(v2.x, v2.y, v2.z);
glTexCoord2f(1, 0);
glVertex3f(v3.x, v3.y, v3.z);
glTexCoord2f(1, 1);
glVertex3f(v4.x, v4.y, v4.z);
}
The main function:
int main(int argc, char** argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGBA);
glutInitWindowSize(640, 480);
glutInitWindowPosition(50, 50);
glutCreateWindow(title);
glutDisplayFunc(display);
glutKeyboardFunc(processKey);
glutReshapeFunc(reshape);
initGL();
glutMainLoop();
return 0;
}
initGL method:
void initGL() {
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Set background color to black and opaque
glClearDepth(1.0f); // Set background depth to farthest
glEnable(GL_DEPTH_TEST); // Enable depth testing for z-culling
glDepthFunc(GL_LEQUAL); // Set the type of depth-test
glShadeModel(GL_FLAT);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Nice perspective corrections
}
loadTexture method:
GLuint inline loadTexture(const char* fName) {
// Data read from the header of the BMP file
unsigned char header[54]; // Each BMP file begins by a 54-bytes header
unsigned int dataPos; // Position in the file where the actual data begins
unsigned char * data;
unsigned int imageSize;
unsigned int width, height;
// Actual RGB data
// Open the file
FILE * file = fopen(fName, "rb");
if (!file) {
printf("Image could not be opened\n");
return 0; // bail out instead of reading from a null FILE*
}
if (fread(header, 1, 54, file) != 54) { // If not 54 bytes read : problem
printf("Not a correct BMP file\n");
return 0;
}
if (header[0] != 'B' || header[1] != 'M') {
printf("Not a correct BMP file\n");
return 0;
}
// Read ints from the byte array
dataPos = *(int*) &(header[0x0A]);
imageSize = *(int*) &(header[0x22]);
width = *(int*) &(header[0x12]);
height = *(int*) &(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize == 0)
imageSize = width * height * 3; // 3 : one byte for each Red, Green and Blue component
if (dataPos == 0)
dataPos = 54;
// Create a buffer
data = new unsigned char[imageSize];
// Read the actual data from the file into the buffer
fread(data, 1, imageSize, file);
//Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR,
GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
The draw method which calls drawContainer:
void box::draw()
{
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
drawContainer();
glDisable(GL_TEXTURE_2D);
}
Please tell me if any more code needs to be inserted or anything needs to be removed.
As already said in another answer, it looks like a problem with GL_DEPTH_TEST not being enabled.
That said, I do see that your code contains the following:
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
Context Problem
I think the problem is that you haven't actually allocated any memory for the depth buffer, so the depth test can't be performed.
I can see, though, that you do request one:
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGBA);
Remember: you need to pass GLUT_DEPTH and not GL_DEPTH, etc. (I'm saying this in case anybody else faces the same problem; it is a common mistake.)
Have you tried using:
glutInitDisplayMode(GLUT_DEPTH | GLUT_SINGLE | GLUT_RGBA);
Hardware Problem
The problem might also be caused by the hardware in your computer: some hardware doesn't support 32-bit depth buffers, in which case you can fall back to a 24-bit (or smaller) depth buffer.
Perspective Problem
I don't see anywhere in your code where you actually set up the projection; this might also be the reason the depth testing is messed up, since using 0.0 for the near plane of a perspective projection ruins the depth buffer's precision.
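For reference, a typical GLUT reshape callback with a nonzero near plane could look like this (just a sketch; the field of view and clip distances are assumptions you would tune to your scene):
void reshape(GLsizei width, GLsizei height) {
    if (height == 0) height = 1; // avoid a divide by zero
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // the near plane must be greater than 0 for a perspective projection
    gluPerspective(45.0, (GLfloat) width / (GLfloat) height, 0.1, 100.0);
    glMatrixMode(GL_MODELVIEW);
}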
I apologize for the rather open-ended answer, but I'm not able to test this right now. Also, if it is caused by your hardware, then I/we can't test it at all.
You seem to be missing a
glEnable(GL_DEPTH_TEST);
Also, make sure that your OpenGL context has a depth buffer (this is platform-dependent; since you don't show how you create your context window, I cannot show you how to do it).
And finally, make sure to pass GL_DEPTH_BUFFER_BIT to glClear wherever you call it.
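For example, a typical display callback would start like this (a minimal sketch; the function name matches the one registered with glutDisplayFunc):
void display() {
    // clear both the color and the depth buffer every frame
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // ... draw the scene ...
    glutSwapBuffers();
}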
Your code is not complete, so this is just a guess (you didn't show loadTexture).
I do not see where you enable and disable texturing, so the last bound texture will be used for the rest of the rendering, possibly causing problems.
The proper order when using a texture is to enable texturing, bind the texture, render, and then disable texturing. Something like this:
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureID);
// render here
glDisable(GL_TEXTURE_2D);
OpenGL common mistakes may help you solve your problem.
If you call box::drawContainer() like this to render the image, then you are doing it wrong, since you create new textures every time the window is rendered. You need to load them once and then just use them; when the textures are no longer needed, you can release them.
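A minimal sketch of that idea, assuming you add member variables such as woodTex and tilesTex to the box class (these names are made up here):
// called once at startup, e.g. right after initGL()
void box::loadTextures() {
    woodTex  = loadTexture("wood.bmp");
    tilesTex = loadTexture("tiles.bmp");
    // ... and so on for the other faces ...
}

// drawContainer() then only binds the already-created textures
glBindTexture(GL_TEXTURE_2D, woodTex);
drawBlock(vertex(0, 0, 0), length + 0.2, length + 0.2, thickness); // bottom

// on shutdown, release them
glDeleteTextures(1, &woodTex);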
You never seem to clear your color/depth buffers. You need to do that before each frame. Also, make sure that your depth test is still enabled when you draw your container:
glIsEnabled( GL_DEPTH_TEST );
If not, look in the code you haven't posted.
Hope this helps
Turns out I had set up the z range of the perspective projection wrong. Thanks for all the help.
I'm a little bit confused.
I want to texture a "cylinder", so my first approach was this (where n is the number of faces and k the iteration index):
void cylindrer(double r, int n,double h){
double x[n],y[n];
for(int k=0; k<n; k++){
x[k]=r*cos(2*k*M_PI/n);
y[k]=r*sin(2*k*M_PI/n);
}
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texObject[1]);
for(int k=0; k<n ;k++){
int m= (k+1)%n;
glBegin(GL_POLYGON);
glTexCoord2f(1.0/n*(k),0.0); glVertex3f( x[k], y[k], h/2);
glTexCoord2f(1.0/n*(k),1); glVertex3f( x[k], y[k], -h/2);
glTexCoord2f(1.0/n*(k+1),1); glVertex3f( x[m], y[m], -h/2);
glTexCoord2f(1.0/n*(k+1),0.0); glVertex3f( x[m], y[m], h/2);
glEnd();
}
glDisable(GL_TEXTURE_2D);
}
The texture was applied, but reversed, so I changed it to:
glBegin(GL_POLYGON);
glTexCoord2f(1.0/n*(n-k), 0.0); glVertex3f( x[k], y[k], h/2);
glTexCoord2f(1.0/n*(n-k), 1.0); glVertex3f( x[k], y[k], -h/2);
glTexCoord2f(1.0/n*(n-k-1), 1.0); glVertex3f( x[m], y[m], -h/2);
glTexCoord2f(1.0/n*(n-k-1), 0.0); glVertex3f( x[m], y[m], h/2);
glEnd();
And it works and looks like this:
when I use this texture:
But now I want to rotate the texture by 90 degrees, so I created a new JPEG file with the rotated image.
The texture now looks like this:
But this is the result when I use it:
The texture is twisted around the cylinder and I don't understand why.
Any idea?
How I load the texture:
#include <math.h>
#include <jpeglib.h>
#include <jerror.h>
GLuint texObject[2]; // textures
int main(int argc,char **argv){
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowPosition(200,200);
glutInitWindowSize(1366,768);
glutCreateWindow("");
glClearColor(0.0,0.0,0.0,0.0);
//glColor3f(1.0,1.0,1.0);
glShadeModel(GL_FLAT);
glPointSize(2.0);
glEnable(GL_DEPTH_TEST);
glGenTextures(2, texObject);
loadJpegImage("./textureCorps5.jpg", 1);
glutMainLoop();
return 0;
}
void loadJpegImage(char *fichier, int i)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
FILE *file;
unsigned char *ligne;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
#ifdef __WIN32
if (fopen_s(&file,fichier,"rb") != 0)
{
fprintf(stderr,"Error\n");
exit(1);
}
#elif __GNUC__
if ((file = fopen(fichier,"rb")) == 0)
{
fprintf(stderr,"Error\n");
exit(1);
}
#endif
jpeg_stdio_src(&cinfo, file);
jpeg_read_header(&cinfo, TRUE);
unsigned char image[cinfo.image_width*cinfo.image_height*3];
jpeg_start_decompress(&cinfo);
ligne=image;
while (cinfo.output_scanline<cinfo.output_height)
{
ligne=image+3*cinfo.image_width*cinfo.output_scanline;
jpeg_read_scanlines(&cinfo,&ligne,1);
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
glBindTexture(GL_TEXTURE_2D, texObject[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, cinfo.image_width, cinfo.image_height, 0,
GL_RGB, GL_UNSIGNED_BYTE, image);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
}
GL_UNPACK_ALIGNMENT specifies the alignment requirement for the start of each pixel row in memory. By default, GL_UNPACK_ALIGNMENT is 4.
This means each row of the texture is expected to have a length of 4*N bytes.
Your texture is an RGB texture, which needs 24 bits (3 bytes) per texel, and the texels, and thus the rows of the texture, are tightly packed. This means a row may violate the required alignment of 4 (unless 3 times the width of the texture happens to be divisible by 4).
To deal with that, change the alignment to 1: set glPixelStorei(GL_UNPACK_ALIGNMENT, 1); before glTexImage2D, so that a tightly packed texture is read correctly.
Otherwise you will get an offset of 0 to 3 bytes per row when the texture is read, which is what causes the continuously twisted texture.
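In your loadJpegImage this would look roughly like the following (only the relevant part is shown):
glBindTexture(GL_TEXTURE_2D, texObject[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// tell OpenGL the rows are tightly packed (no 4-byte row alignment)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, cinfo.image_width, cinfo.image_height, 0,
    GL_RGB, GL_UNSIGNED_BYTE, image);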
Alternatively, you can take care of the alignment yourself and use a properly aligned row length when you create the rotated texture:
int bytesPerLine = cinfo.image_width * 3;
bytesPerLine += (4 - bytesPerLine % 4) % 4; // pad each row up to the next multiple of 4 bytes
unsigned char image[bytesPerLine * cinfo.image_height];
jpeg_start_decompress(&cinfo);
while (cinfo.output_scanline<cinfo.output_height)
{
ligne = image + bytesPerLine*cinfo.output_scanline;
jpeg_read_scanlines(&cinfo,&ligne,1);
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
Okay, so I am working on a toy 2D engine. I initially used regular SDL_Surfaces for rendering and the built-in SDL_Renderer, but I thought: why not use OpenGL and get some experience with that?
But I am stuck now. I have a context and things are rendered to the screen, but the textures I am trying to display look way too big to fit on the screen; I only seem to be seeing a couple of their pixels.
The texture class can be found here:
#include "texture.h"
Texture::Texture(std::string path, bool loadNow) {
//Initialize texture ID
mTextureID = 0;
//Initialize texture dimensions
width = 0;
height = 0;
this->path = path;
if(loadNow) {
loadTexture(path);
}
}
Texture::~Texture() {
freeTexture();
}
bool Texture::loadTexture(std::string path) {
//Texture loading success
loaded = false;
SDL_Surface *image = IMG_Load(path.c_str());
//Image loaded successfully
if(image != NULL) {
if((image->w & (image->w - 1)) != 0) { // non-zero means the width is NOT a power of 2
printf("Warning: image width not power of 2 -> %s\n", path.c_str());
}
if((image->h & (image->h - 1)) != 0) { // non-zero means the height is NOT a power of 2
printf("Warning: image height not power of 2 -> %s\n", path.c_str());
}
loaded = loadTextureFromPixels32(image, (GLuint)image->w, (GLuint)image->h);
}
//Report error
if(!loaded) {
printf( "Unable to load %s\n", path.c_str() );
}
return loaded;
}
bool Texture::loadTextureFromPixels32(SDL_Surface *image, GLuint width, GLuint height ) {
//Free texture if it exists
freeTexture();
//Get texture dimensions
this->width = width;
this->height = height;
//Generate texture ID
glGenTextures(1, &mTextureID);
//Bind texture ID
glBindTexture(GL_TEXTURE_2D, mTextureID);
//Generate texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image->pixels);
//Set texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//Unbind texture
glBindTexture(GL_TEXTURE_2D, 0);
//Check for error
GLenum error = glGetError();
if(error != GL_NO_ERROR) {
printf("Error loading texture from %p pixels!\n", image->pixels);
return false;
}
return true;
}
void Texture::render(GLfloat x, GLfloat y) {
if(loaded) {
//If the texture exists
if(mTextureID != 0) {
GLfloat realX = x;// - (this->width / 2);
GLfloat realY = y;// - (this->height / 2);
//Remove any previous transformations
glLoadIdentity();
//Move to rendering point
glTranslatef(realX, realY, 0.f);
glClearDepth(1.0f);
//Set texture ID
glBindTexture(GL_TEXTURE_2D, mTextureID);
//Render textured quad
glBegin(GL_QUADS);
glTexCoord2f( 0.f, 0.f ); glVertex2f(0.f, 0.f);
glTexCoord2f( 1.f, 0.f ); glVertex2f(width, 0.f);
glTexCoord2f( 1.f, 1.f ); glVertex2f(width, height);
glTexCoord2f( 0.f, 1.f ); glVertex2f(0.f, height);
glEnd();
}
} else {
// do nothing
}
}
GLuint Texture::getWidth() {
return this->width;
}
GLuint Texture::getHeight() {
return this->height;
}
void Texture::freeTexture() {
//Delete texture
if(mTextureID != 0) {
glDeleteTextures(1, &mTextureID);
mTextureID = 0;
}
width = 0;
height = 0;
}
I am guessing the problem is here, but it could also be in how I initialize OpenGL, so here is that code:
void Main::initGL() {
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
/* Turn on double buffering with a 24bit Z buffer.
* You may need to change this to 16 or 32 for your system */
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32);
glContext = SDL_GL_CreateContext(this->window);
glViewport(0.0, 0.0, SCREEN_WIDTH, SCREEN_HEIGHT);
glOrtho( 0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0 );
SDL_GL_SetSwapInterval(0);
//Initialize clear color
glClearColor( 0.f, 0.f, 0.f, 1.f );
//Enable texturing
glEnable( GL_TEXTURE_2D );
//Check for error
GLenum error = glGetError();
if(error != GL_NO_ERROR) {
printf("Error initializing OpenGL!\n");
}
}
SDL is correctly initialized, otherwise there wouldn't be anything on the screen at all. I am completely new to OpenGL, so any help would be appreciated.
You mix ordinary GL_TEXTURE_2D usage with GL_TEXTURE_RECTANGLE, and enabling both is a very bad idea. You are using texcoords in the range [0,1], so you actually seem to want GL_TEXTURE_2D. You should rewrite your texture code to use that and drop the rectangle textures entirely.
The next thing is that your projection setup is wrong. Your glOrtho call has no effect: it is applied to the modelview matrix (you never switch to GL_PROJECTION), and you then overwrite it by loading the identity matrix in your render function. You should make yourself familiar with the state machine approach the GL is using. As your matrices are currently set up, you draw a huge quad with most of it completely out of the screen.
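A minimal sketch of a corrected setup (staying with the fixed-function pipeline for now): build the ortho matrix on the projection stack in initGL, so that the per-draw glLoadIdentity only resets the modelview matrix:
// in Main::initGL()
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();

// in Texture::render() this now only resets the modelview matrix,
// the projection set up above stays intact
glLoadIdentity();
glTranslatef(realX, realY, 0.f);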
Now that part is completely strange:
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
This code will never create a core profile, because core profiles didn't even exist in GL 2.1; they were introduced in GL 3.2. It is unclear what SDL version you are using, but modern SDL is capable of selecting the profile.
However, your code uses completely outdated and deprecated OpenGL; there is no way this will work with a core profile. If you're learning OpenGL in this decade, I'd strongly suggest that you forget about all that, start with documentation/tutorials on modern GL, and actually use a core profile.
So I've recently been learning some OpenGL. Initially I was using the SDL library to draw images on screen, but I figured it would be interesting to try to achieve something similar with OpenGL, which would also let me apply shaders to my images for neat effects such as lighting and day/night cycles. What I'm doing right now is simply loading a texture and applying it to a quad of the same size as the texture. This works well.
Now I want to apply some shaders. This is an example of a vertex and fragment shader that I could apply to one of my textured quads:
in vec2 LVertexPos2D;
void main()
{
gl_Position = vec4( LVertexPos2D.x, LVertexPos2D.y, 0, 1);
}
which does nothing, then my fragment shader:
out vec4 LFragment;
void main()
{
LFragment = vec4(1.0, 1.0, 1.0, 1.0);
}
Which obviously just turns the quad I'm applying it to into a white block, which isn't exactly what I want. Somehow I need to retrieve the current texel data so I can modify it instead of simply replacing it.
I've read that a call to texture2D is supposed to return a vec4 with the current texel data, but I haven't gotten this to work (I'm having a hard time finding a good explanation of its inputs and how it works). Furthermore, texture2D is supposedly deprecated, but I can't get its replacement, texture(), to work either. Any nudges in the right direction would be greatly appreciated!
Edit: I'll throw in some more info on how I'm doing things. This is the function that loads my textures:
texture makeTexture(std::string fileLocation)
{
texture tempTexture;
SDL_Surface *mySurface = IMG_Load(fileLocation.c_str());
if (mySurface == NULL)
{
std::cout << "Error in loading image at: " << fileLocation << std::endl;
return tempTexture;
}
GLuint myTexture;
glGenTextures(1, &myTexture);
glBindTexture(GL_TEXTURE_2D, myTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, mySurface->w, mySurface->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, mySurface->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
tempTexture.texture_id = myTexture;
tempTexture.h = mySurface->h;
tempTexture.w = mySurface->w;
SDL_FreeSurface(mySurface); // free the surface only after its size has been read
return tempTexture;
}
Where this is my texture struct:
struct texture
{
int w;
int h;
GLuint texture_id;
};
and this function draws any texture to a given x and y coordinate:
void draw(int y, int x, texture &tempTexture)
{
glBindTexture(GL_TEXTURE_2D, tempTexture.texture_id);
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom left
glTexCoord2f(1, 1);
glVertex2f(-1 + ((float)(x + tempTexture.w)/SCREEN_WIDTH)*2, 1 - ((float)(y + tempTexture.h) / SCREEN_HEIGHT) * 2); //Bottom right?
glTexCoord2f(1, 0);
glVertex2f(-1 + ((float)(x + tempTexture.w) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //top right
glTexCoord2f(0, 0);
glVertex2f(-1 + ((float)(x) / SCREEN_WIDTH) * 2, 1.0 - ((float)y / SCREEN_HEIGHT) * 2); //Top left (notification: Coordinates are (x,y), not (y,x).
glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
}
then in my main render function I'm now doing:
draw(0, 0, myTexture);
glUseProgram(gProgramID);
glUniform1i(baseImageLoc, myTexture2.texture_id);
draw(100, 100, myTexture2);
glUseProgram(NULL);
where myTexture is just a meadow of grass and myTexture2 is a player character that I want to apply some shading shenanigans to. gProgramID is the program that has my two aforementioned shaders attached to it.
In order to access texture data in a shader you have to do the following:
First you need to bind your texture to a specific texture unit (change the active texture unit using glActiveTexture).
Pass the texture unit index as a uniform sampler to the shader.
Access the texture in the shader like the following:
// tex holds the value of the texture unit to be used (not the texture)
uniform sampler2D tex;
void main()
{
vec4 color = texture(tex,texCoord);
LFragment = color;
}
You also need to pass texCoord to the shader as a vertex attribute.
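On the C++ side this could look roughly like the following (a sketch; baseImageLoc and gProgramID are the names from your code, and note that the sampler uniform receives the texture unit index, not the texture ID):
glUseProgram(gProgramID);
glActiveTexture(GL_TEXTURE0);                        // select texture unit 0
glBindTexture(GL_TEXTURE_2D, myTexture2.texture_id); // bind your texture to that unit
glUniform1i(baseImageLoc, 0);                        // pass the unit index (0), not texture_id
draw(100, 100, myTexture2);
glUseProgram(0);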
I want to draw a 2D array of pixel data (RGB / grayscale values) on the screen as fast as possible, using OpenGL. The pixel data changes frequently.
I had hoped that I would find a simple function that would let me push in a pointer to an array representing the pixel data, since this is probably the fastest approach. Unfortunately, I have found no such function.
What is the best way to accomplish this task?
Maybe glDrawPixels is the function you are looking for? Though if the data is static it would be better to create a texture with it, and then draw that each frame.
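A minimal sketch of the glDrawPixels approach (width, height and pixels stand in for your own data):
// pixels points to width*height tightly packed RGB triplets
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glRasterPos2i(0, 0); // lower-left corner of the image
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);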
I recently had a similar problem, as I am trying to render video to the screen (i.e. repeatedly upload pixel data to VRAM). My approach is:
use glTexImage2D and glTexSubImage2D to upload the data to the texture (i.e. bind the texture, and the texture unit if applicable, before calling them); see the sketch after this list
in my case, as the video frame rate (usually about 24 fps) is lower than the frame rate of my application (aimed at 60 fps), I use a framebuffer object (check out glGenFramebuffers/glBindFramebuffer/glDeleteFramebuffers) and attach my texture to it (glFramebufferTexture2D) in order to avoid uploading the same data again. I then upload the texture once and draw the same frame multiple times (just normal texture access with glBindTexture)
I don't know which platform you are using, but as I am targeting Mac I use some Apple extensions to ensure the data transfer to VRAM happens through DMA (i.e. make glTexSubImage2D return immediately to let the CPU do other work) - please feel free to ask me for more info if you are using Mac too
also, as you are using just grayscale, you might want to consider a GL_LUMINANCE texture (i.e. 1 byte per pixel) rather than an RGB-based format to make the upload faster (but that depends on the size of your texture data; I was streaming HD 1920x1080 video, so I needed to keep it down)
also be aware of the format your hardware uses natively, to avoid unnecessary data conversions (i.e. it is normally better to use BGRA data than, for example, plain RGB)
finally, in my code I replaced all the fixed pipeline functionality with shaders (in particular the conversion of the data from grayscale or YUV format to RGB), but again all that depends on the size of your data, and the workload of your CPU or GPU
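A rough sketch of the allocate-once / update-per-frame pattern described above (videoTex, width, height and framePixels are placeholders for your own data):
// once, at startup: create the texture and allocate its storage
GLuint videoTex = 0;
glGenTextures(1, &videoTex);
glBindTexture(GL_TEXTURE_2D, videoTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);

// whenever a new frame arrives: replace the texel data in place
glBindTexture(GL_TEXTURE_2D, videoTex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, framePixels);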
Hope this helps, feel free to message me if you need further info
I would think the fastest way would be to draw a screen-sized quad with an ortho projection and use a pixel shader and a texture buffer object to write to the texture directly in the pixel shader. Due to the latency of transferring to/from the TBO, you may want to see if double buffering would help.
If speed isn't much of a concern (you just need fairly interactive framerates) glDrawPixels is easy to use and works well enough for many purposes.
My solution for getting dynamically changing image data to the screen in OpenGL:
#define WIN32_LEAN_AND_MEAN
#include "wx/wx.h"
#include "wx/sizer.h"
#include "wx/glcanvas.h"
#include "BasicGLPane.h"
// include OpenGL
#ifdef __WXMAC__
#include "OpenGL/glu.h"
#include "OpenGL/gl.h"
#else
#include <GL/glu.h>
#include <GL/gl.h>
#endif
#include "ORIScanMainFrame.h"
BEGIN_EVENT_TABLE(BasicGLPane, wxGLCanvas)
EVT_MOTION(BasicGLPane::mouseMoved)
EVT_LEFT_DOWN(BasicGLPane::mouseDown)
EVT_LEFT_UP(BasicGLPane::mouseReleased)
EVT_RIGHT_DOWN(BasicGLPane::rightClick)
EVT_LEAVE_WINDOW(BasicGLPane::mouseLeftWindow)
EVT_SIZE(BasicGLPane::resized)
EVT_KEY_DOWN(BasicGLPane::keyPressed)
EVT_KEY_UP(BasicGLPane::keyReleased)
EVT_MOUSEWHEEL(BasicGLPane::mouseWheelMoved)
EVT_PAINT(BasicGLPane::render)
END_EVENT_TABLE()
// Test data for image generation. floats range 0.0 to 1.0, in RGBRGBRGB... order.
// Array is 1024 * 3 long. Note that 32 * 32 is 1024 and is the largest image we can randomly generate.
float* randomFloatRGB;
float* randomFloatRGBGrey;
BasicGLPane::BasicGLPane(wxFrame* parent, int* args) :
wxGLCanvas(parent, wxID_ANY, args, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
{
m_context = new wxGLContext(this);
randomFloatRGB = new float[1024 * 3];
randomFloatRGBGrey = new float[1024 * 3];
// In GL images 0,0 is in the lower left corner so the draw routine does a vertical flip to get 'regular' images right side up.
for (int i = 0; i < 1024; i++) {
// Red
randomFloatRGB[i * 3] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Green
randomFloatRGB[i * 3 + 1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Blue
randomFloatRGB[i * 3 + 2] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Telltale 2 white pixels in 0,0 corner.
if (i < 2) {
randomFloatRGB[i * 3] = randomFloatRGB[i * 3 + 1] = randomFloatRGB[i * 3 + 2] = 1.0f;
}
randomFloatRGBGrey[i * 3] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 1] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 2] = randomFloatRGB[i * 3];
}
// To avoid flashing on MSW
SetBackgroundStyle(wxBG_STYLE_CUSTOM);
}
BasicGLPane::~BasicGLPane()
{
delete m_context;
}
void BasicGLPane::resized(wxSizeEvent& evt)
{
// wxGLCanvas::OnSize(evt);
Refresh();
}
int BasicGLPane::getWidth()
{
return GetSize().x;
}
int BasicGLPane::getHeight()
{
return GetSize().y;
}
void BasicGLPane::render(wxPaintEvent& evt)
{
assert(GetParent());
assert(GetParent()->GetParent());
ORIScanMainFrame* mf = dynamic_cast<ORIScanMainFrame*>(GetParent()->GetParent());
assert(mf);
switch (mf->currentMainView) {
case ORIViewSelection::ViewCamera:
renderCamera(evt);
break;
case ORIViewSelection::ViewDepth:
renderDepth(evt);
break;
case ORIViewSelection::ViewPointCloud:
renderPointCloud(evt);
break;
case ORIViewSelection::View3DModel:
render3DModel(evt);
break;
default:
renderNone(evt);
}
}
void BasicGLPane::renderNone(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopAttrib();
}
GLuint makeOpenGlTextureFromDataLuninanceFloats(int width, int height, float* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_FLOAT, f); // luminance data stored as floats: format GL_LUMINANCE, type GL_FLOAT
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
GLuint makeOpenGlTextureFromRGBInts(int width, int height, unsigned int* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
/// <summary>
/// Range of each float is 0.0f to 1.0f
/// </summary>
/// <param name="width"></param>
/// <param name="height"></param>
/// <param name="floatRGB"></param>
/// <returns></returns>
GLuint makeOpenGlTextureFromRGBFloats(int width, int height, float* floatRGB) {
GLuint textureID;
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, floatRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
void BasicGLPane::DrawTextureToScreenFloat(int w, int h, float* floatDataPtr, GLuint (*textureFactory)(int width, int height, float* floatRGB)) {
if (w <= 0 || h <= 0 || floatDataPtr == NULL || w > 5000 || h > 5000) {
assert(false);
return;
}
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS);
glClearColor(0.15f, 0.11f, 0.02f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
float onePixelW = (float)getWidth() / (float)w;
float onePixelH = (float)getHeight() / (float)h;
float orthoW = w;
float orthoH = h;
if (onePixelH > onePixelW) {
orthoH = h * onePixelH / onePixelW;
}
else {
orthoW = w * onePixelW / onePixelH;
}
// We want the image at the top of the window, not the bottom if the window is too tall.
int topOfScreen = (float)getHeight() / onePixelH;
// If the window resizes after creation you need to change the viewport.
glViewport(0, 0, getWidth(), getHeight());
gluOrtho2D(0.0, orthoW, (double)topOfScreen - (double)orthoH, topOfScreen);
GLuint myTextureName = textureFactory(w, h, floatDataPtr);
glBegin(GL_QUADS);
{
// This order of UV coords and verticies will do the vertical flip of the image to get the 'regular' image 0,0
// in the top left corner.
glTexCoord2f(0.0f, 1.0f); glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(0.0f + w, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(0.0f + w, 0.0f + h, 0.0f);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0.0f, 0.0f + h, 0.0f);
}
glEnd();
glDeleteTextures(1, &myTextureName);
glFlush();
SwapBuffers();
glPopClientAttrib();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::DrawTextureToScreenMat(wxPaintEvent& evt, cv::Mat m, float brightness) {
m.type();
if (m.empty()) {
renderNone(evt);
return;
}
if (m.type() == CV_32FC1) { // Grey scale.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromDataLuninanceFloats);
}
else if (m.type() == CV_32FC3) { // Color. ("else if" so a grey-scale image isn't immediately overwritten by renderNone below)
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromRGBFloats);
}
else {
renderNone(evt);
}
}
void BasicGLPane::renderCamera(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->im_white);
}
void BasicGLPane::renderDepth(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->depth_map);
}
void BasicGLPane::render3DModel(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::renderPointCloud(wxPaintEvent& evt) {
if (!IsShown())
return;
boost::unique_lock<boost::mutex> lk(ORITopControl::Instance->pointCloudCacheMutex);
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, getWidth(), getHeight());
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (ORITopControl::Instance->pointCloudCache.size() > 0) {
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 500.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(100, 70, 200, // Eye
25, 25, 25, // Look at pt
0, 0, 1); // Up Vector
glPointSize(2.0);
glBegin(GL_POINTS);
// Use explicit for loop because pointCloudFragments can grow asynchronously.
for (int i = 0; i < ORITopControl::Instance->pointCloudCache.size(); i++) {
auto frag = ORITopControl::Instance->pointCloudCache[i];
auto current_point_cloud_ptr = frag->cloud;
glPushMatrix();
// glMultMatrixf(frag->xform.data());
for (size_t n = 0; n < current_point_cloud_ptr->size(); n++) {
glColor3ub(255, 255, 255);
glVertex3d(current_point_cloud_ptr->points[n].x, current_point_cloud_ptr->points[n].y, current_point_cloud_ptr->points[n].z);
}
glPopMatrix();
}
glEnd();
}
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}
I've been attempting to render text onto an OpenGL window using SDL and the SDL_TTF library, on Windows XP with VS2010.
Versions:
SDL version 1.2.14
SDL TTF devel 1.2.10
OpenGL (version is at least 2-3 years old).
I have successfully created an OpenGL window using SDL / SDL_image and can render lines / polygons onto it with no problems.
However, moving on to text, it appears there is some flaw in my current program; I am getting the following result when trying this code here:
For those not willing to open the pastebin, here are only the crucial code segments:
void drawText(char * text) {
glLoadIdentity();
SDL_Color clrFg = {0,0,255,0}; // set colour to blue (or 'red' for BGRA)
SDL_Surface *sText = TTF_RenderUTF8_Blended( fntCourier, text, clrFg );
GLuint * texture = create_texture(sText);
glBindTexture(GL_TEXTURE_2D, *texture);
// draw a polygon and map the texture to it, may be the source of error
glBegin(GL_QUADS); {
glTexCoord2i(0, 0); glVertex3f(0, 0, 0);
glTexCoord2i(1, 0); glVertex3f(0 + sText->w, 0, 0);
glTexCoord2i(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
glTexCoord2i(0, 1); glVertex3f(0, 0 + sText->h, 0);
} glEnd();
// free the surface and texture, removing this code has no effect
SDL_FreeSurface( sText );
glDeleteTextures( 1, texture );
}
segment 2:
// create GLTexture out of SDL_Surface
GLuint * create_texture(SDL_Surface *surface) {
GLuint texture = 0;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// The SDL_Surface appears to have BGR_A formatting, however this ends up with a
// white rectangle no matter which colour i set in the previous code.
int Mode = GL_RGB;
if(surface->format->BytesPerPixel == 4) {
Mode = GL_RGBA;
}
glTexImage2D(GL_TEXTURE_2D, 0, Mode, surface->w, surface->h, 0, Mode,
GL_UNSIGNED_BYTE, surface->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
return &texture;
}
Is there an obvious bit of code I am missing?
Thank you for any help on this subject.
I've been trying to learn OpenGL and SDL for 3 days now, so please forgive any misinformation on my part.
EDIT:
I notice that using
TTF_RenderUTF8_Shaded
TTF_RenderUTF8_Solid
throw a null-pointer exception, which suggests the error is within the actual text-rendering call. I don't know how that explains TTF_RenderUTF8_Blended returning a red square, but I suspect all the trouble hinges on this.
I think the problem is the glEnable(GL_TEXTURE_2D) and glDisable(GL_TEXTURE_2D) calls, which must be made every time the text is painted on the screen. Maybe the color conversion between the SDL surface and the GL texture is not right either.
I have combined create_texture and drawText into a single function that displays the text properly. Here is the code:
void drawText(char * text, TTF_Font* tmpfont) {
SDL_Rect area;
SDL_Color clrFg = {0,0,255,0};
SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Blended( tmpfont, text, clrFg ));
area.x = 0;area.y = 0;area.w = sText->w;area.h = sText->h;
SDL_Surface* temp = SDL_CreateRGBSurface(SDL_HWSURFACE|SDL_SRCALPHA,sText->w,sText->h,32,0x000000ff,0x0000ff00,0x00ff0000,0xff000000); // alpha mask is the top byte, distinct from the red mask
SDL_BlitSurface(sText, &area, temp, NULL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sText->w, sText->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, temp->pixels);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS); {
glTexCoord2d(0, 0); glVertex3f(0, 0, 0);
glTexCoord2d(1, 0); glVertex3f(0 + sText->w, 0, 0);
glTexCoord2d(1, 1); glVertex3f(0 + sText->w, 0 + sText->h, 0);
glTexCoord2d(0, 1); glVertex3f(0, 0 + sText->h, 0);
} glEnd();
glDisable(GL_TEXTURE_2D);
SDL_FreeSurface( sText );
SDL_FreeSurface( temp );
}
screenshot
I'm initializing OpenGL as follows:
int Init(){
glClearColor( 0.1, 0.2, 0.2, 1);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho( 0, 600, 300, 0, -1, 1 );
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
if( glGetError() != GL_NO_ERROR ){
return false;
}
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_COLOR, GL_ONE_MINUS_SRC_ALPHA);
return true;
}
I think you should just add glEnable(GL_BLEND), because the code for the text surface says TTF_RenderUTF8_Blended( fntCourier, text, clrFg ) and you have to enable the blending abilities of OpenGL.
EDIT
Okay, I finally took the time to put your code through a compiler. Most importantly, compiled with -Werror so that warnings turn into errors:
GLuint * create_texture(SDL_Surface *surface) {
GLuint texture = 0;
/*...*/
return &texture;
}
I didn't see it at first, because this is C coding 101 and quite unexpected: you must not return pointers to local variables! Once the function goes out of scope, the returned pointer points to nonsense. Why do you return a pointer at all? Just return an integer:
GLuint create_texture(SDL_Surface *surface) {
GLuint texture = 0;
/*...*/
return texture;
}
Because of this you're also never actually deleting the texture afterwards: you upload it to OpenGL, but then lose the reference to it.
Your code is missing a glEnable(GL_TEXTURE_2D); that's why you can't see any effect of the texture. However, your use of textures is suboptimal. The way you did it, you recreate a whole new texture each time you're about to draw that text. If that happens in an animation loop, you'll
1. run out of texture memory rather soon
2. slow it down significantly
(1) can be addressed by not generating a new texture name each redraw
(2) can be addressed by uploading new texture data only when the text changes, and by using glTexSubImage2D instead of glTexImage2D (of course, if the dimensions of the texture change, it must be glTexImage2D).
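A rough sketch of that idea (the statics are only for illustration; run the upload part only when the text actually changed):
static GLuint textTex = 0;
static int texW = 0, texH = 0;
if (textTex == 0)
    glGenTextures(1, &textTex); // create the texture name only once
glBindTexture(GL_TEXTURE_2D, textTex);
if (sText->w != texW || sText->h != texH) {
    // dimensions changed: (re)allocate the storage with glTexImage2D
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sText->w, sText->h, 0,
        GL_RGBA, GL_UNSIGNED_BYTE, sText->pixels);
    texW = sText->w; texH = sText->h;
} else {
    // same size: just replace the texel data
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, sText->w, sText->h,
        GL_RGBA, GL_UNSIGNED_BYTE, sText->pixels);
}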
EDIT: found another possible issue, but first fix your pointer issue.
You should make sure that you're using the GL_REPLACE or GL_MODULATE texture environment mode. With GL_DECAL or GL_BLEND you end up with red text on a red quad.
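For example (assuming modulation with the current color is what you want):
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);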
The function in my previous post was leaking memory and the program was crashing after some time...
I improved this by separating the texture loading and displaying:
The first function must be called before the SDL loop. It loads a text string into memory.
Every string loaded must have a different txtNum parameter:
GLuint texture[100];
SDL_Rect area[100];
void Load_string(char * text, SDL_Color clr, int txtNum, const char* file, int ptsize){
TTF_Font* tmpfont;
tmpfont = TTF_OpenFont(file, ptsize);
SDL_Surface *sText = SDL_DisplayFormatAlpha(TTF_RenderUTF8_Solid( tmpfont, text, clr ));
area[txtNum].x = 0;area[txtNum].y = 0;area[txtNum].w = sText->w;area[txtNum].h = sText->h;
glGenTextures(1, &texture[txtNum]);
glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, sText->w, sText->h, 0, GL_BGRA, GL_UNSIGNED_BYTE, sText->pixels);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
SDL_FreeSurface( sText );
TTF_CloseFont(tmpfont);
}
The second one displays the string and must be called inside the SDL loop:
void drawText(float coords[3], int txtNum) {
glBindTexture(GL_TEXTURE_2D, texture[txtNum]);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS); {
glTexCoord2f(0, 0); glVertex3f(coords[0], coords[1], coords[2]);
glTexCoord2f(1, 0); glVertex3f(coords[0] + area[txtNum].w, coords[1], coords[2]);
glTexCoord2f(1, 1); glVertex3f(coords[0] + area[txtNum].w, coords[1] + area[txtNum].h, coords[2]);
glTexCoord2f(0, 1); glVertex3f(coords[0], coords[1] + area[txtNum].h, coords[2]);
} glEnd();
glDisable(GL_TEXTURE_2D);
}