OpenGL drawing meshes incorrectly - c++

I'm attempting to make an OpenGL engine in C++, but I cannot render meshes correctly. When rendered, meshes contain faces that connect two random points on the mesh, or connect a random point on the mesh to the origin (0,0,0).
The problem can be seen here:
(I made it a wireframe to see the problem more clearly)
Code:
// Render all meshes (Graphics.cpp)
for( int curMesh = 0; curMesh < numMesh; curMesh++ ) {
// Save pointer of buffer
meshes[curMesh]->updatebuf();
Buffer buffer = meshes[curMesh]->buffer;
// Update model matrix
glm::mat4 mvp = Proj*View*(meshes[curMesh]->model);
// Initialize vertex array
glBindBuffer( GL_ARRAY_BUFFER, vertbuffer );
glBufferData( GL_ARRAY_BUFFER, sizeof(GLfloat)*buffer.numcoords*3, meshes[curMesh]->verts, GL_STATIC_DRAW );
// Pass information to shader
GLuint posID = glGetAttribLocation( shader, "s_vPosition" );
glVertexAttribPointer( posID, 3, GL_FLOAT, GL_FALSE, 0, (void*)0 );
glEnableVertexAttribArray( posID );
// Check if texture applicable
if( meshes[curMesh]->texID != NULL && meshes[curMesh]->uvs != NULL ) {
// Initialize uv array
glBindBuffer( GL_ARRAY_BUFFER, uvbuffer );
glBufferData( GL_ARRAY_BUFFER, sizeof(GLfloat)*buffer.numcoords*2, meshes[curMesh]->uvs, GL_STATIC_DRAW );
// Pass information to shader
GLuint uvID = glGetAttribLocation( shader, "s_vUV" );
glVertexAttribPointer( uvID, 2, GL_FLOAT, GL_FALSE, 0, (void*)(0) );
glEnableVertexAttribArray( uvID );
// Set mesh texture
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, meshes[curMesh]->texID );
GLuint texID = glGetUniformLocation( shader, "Sampler" );
glUniform1i( texID, 0 );
}
// Activate shader
glUseProgram( shader );
// Set MVP matrix
GLuint mvpID = glGetUniformLocation( shader, "MVP" );
glUniformMatrix4fv( mvpID, 1, GL_FALSE, &mvp[0][0] );
// Draw vertices on screen
bool wireframe = true;
if( wireframe )
for(int i = 0; i < buffer.numcoords; i += 3)
glDrawArrays(GL_LINE_LOOP, i, 3);
else
glDrawArrays( GL_TRIANGLES, 0, buffer.numcoords );
}
// Mesh Class (Graphics.h)
class mesh {
public:
mesh();
void updatebuf();
Buffer buffer;
GLuint texID;
bool updated;
GLfloat* verts;
GLfloat* uvs;
glm::mat4 model;
};
My Obj loading code is here: https://www.dropbox.com/s/tdcpg4vok11lf9d/ObjReader.txt (It's pretty crude and isn't organized, but should still work)

This looks like a primitive restart issue to me. It's hard to tell what exactly the problem is without seeing some code. It would help a lot to see roughly the 20 lines above and below, and including, the drawing calls that render the teapot, i.e. the 20 lines before the corresponding glDrawArrays, glDrawElements or glBegin call and the 20 lines after.

Subtract 1 from the OBJ indices before you use them: OBJ files use 1-based indices, and you will almost certainly need 0-based indices.

This is because your triangles are not connected, so the wireframe cannot look perfect.
If the triangles are not connected, you should construct an index buffer, as sketched below.
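A minimal sketch of that, assuming the OBJ loader produces a flat list of 1-based face indices (objFaceIndices, indexBuffer and the surrounding names are placeholders, not from the question's code):
// Convert the 1-based OBJ indices to the 0-based indices OpenGL expects
std::vector<GLuint> indices;
indices.reserve(objFaceIndices.size());
for (unsigned int objIndex : objFaceIndices)
    indices.push_back(objIndex - 1);
// Upload them once to an element array buffer
GLuint indexBuffer;
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(GLuint),
             indices.data(), GL_STATIC_DRAW);
// Draw indexed triangles instead of assuming the vertex array is already expanded
glDrawElements(GL_TRIANGLES, (GLsizei)indices.size(), GL_UNSIGNED_INT, (void*)0);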

Related

glVertexAttribPointer() - when should this be called?

I have some example code that calls glVertexAttribPointer() in 2 places. Is this necessary or can it just be done once?
First time - associating the vertex buffer data:
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, sizeof( COLVERTEX ), 0 );
glBufferData( GL_ARRAY_BUFFER, sizeof( v ), v, GL_STATIC_DRAW );
Second time - in the rendering callback function:
glEnableVertexAttribArray(0);
glBindBuffer( GL_ARRAY_BUFFER, vboQuad );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, sizeof( COLVERTEX ), 0 );
glDrawArrays( GL_QUADS, 0, 4 );
glDisableVertexAttribArray(0);
Is it really necessary to describe the vertex attributes twice?
glBufferData fills the VBO with raw data. At this point it doesn't matter how the data is supposed to be interpreted when drawing (e.g. the same data may be interpreted as vertex positions in one draw but as normals in another). So yes, you can remove the first call.
If you use vertex array objects, you can set the vertex attribute pointers only once (by binding the VBO, enabling the vertex attribute, and setting the vertex attribute pointer) and then just call glBindVertexArray before drawing to have all the recorded vertex attributes set up (you don't even need to bind the VBO containing the vertex attributes before the draw call), as in the sketch below.
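A minimal sketch of that VAO approach, reusing the COLVERTEX layout, vboQuad buffer and vertex data v from the question (vao is an assumed name):
// One-time setup: the VAO records the attribute state
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vboQuad);
glBufferData(GL_ARRAY_BUFFER, sizeof(v), v, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(COLVERTEX), 0);
glEnableVertexAttribArray(0);
glBindVertexArray(0);

// Per frame: just bind the VAO and draw
glBindVertexArray(vao);
glDrawArrays(GL_QUADS, 0, 4);
glBindVertexArray(0);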

Using Vertex Buffer in OpenGL

I'm drawing many polygons from arrays in RAM, and it's very slow. Please tell me: how do I use a vertex buffer in OpenGL? (Which functions, etc.?)
(Programming language: C++)
You should really read a tutorial about this like this one:
http://www.opengl-tutorial.org/beginners-tutorials/tutorial-2-the-first-triangle/#The_VAO
I can give you this code snippet:
float vertices[] = {
1, 0, 0, 1,
0, 1, 0, 1,
0, 0, 1, 1,
};
GLuint vertexnumber = 3; //Number of vertices in your array
int VertexStrideSize = 4*sizeof(float); //How many values you give for one vertex
// Create the vertex buffer object
GLuint buf;
glGenBuffers( 1, &buf ); //Create the buffer
glBindBuffer( GL_ARRAY_BUFFER, buf ); //Binding the buffer
glBufferData( GL_ARRAY_BUFFER, VertexStrideSize*vertexnumber, vertices, GL_STATIC_DRAW ); //Fill the buffer
// For your vertex shader
GLuint posLoc = glGetAttribLocation(shadername, "aPosition"); //In your shader you can now use the variable aPosition as an input with "in vec4 aPosition"
glVertexAttribPointer(posLoc, 4, GL_FLOAT, GL_FALSE, VertexStrideSize, 0);
glEnableVertexAttribArray(posLoc);
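To actually draw with that buffer, a call along these lines would follow the setup above (assuming shadername is the linked program object, as in the snippet):
glUseProgram(shadername);                    // activate the shader program
glDrawArrays(GL_TRIANGLES, 0, vertexnumber); // draw the three vertices as a single triangle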

glVertexAttribPointer() working only with the first stream

I am trying to use glVertexAttribPointer() to give some data to my vertex shader. The thing is that it's working only with the FIRST attribute...
Here is my OpenGL code:
struct Flag_vertex
{
GLfloat position_1[ 8 ];
GLfloat position_2[ 8 ];
};
Flag_vertex flag_vertex;
... // fill some data to flag_vertex
GLuint vertexbuffer_id;
glGenBuffers( 1, &vertexbuffer_id );
glBindBuffer( GL_ARRAY_BUFFER, vertexbuffer_id );
glBufferData( GL_ARRAY_BUFFER, sizeof(flag_vertex), &flag_vertex, GL_STATIC_DRAW );
glEnableVertexAttribArray( 0 );
glEnableVertexAttribArray( 1 );
glBindBuffer( GL_ARRAY_BUFFER, vertexbuffer_id );
glVertexAttribPointer( 0, 2, GL_FLOAT, GL_FALSE, 0, (void*)offsetof(Flag_vertex, position_1) );
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 0, (void*)offsetof(Flag_vertex, position_2) );
and my shader is something like:
#version 420 core
layout(location = 0) in vec2 in_position_1;
layout(location = 1) in vec2 in_position_2;
out vec2 texcoord;
void main()
{
gl_Position = vec4(in_position_X, 0.0, 1.0);
texcoord = in_position_X * vec2(0.5) + vec2(0.5);
}
If I use "in_position_1" my texture RENDERS PERFECTLY, but if I use in_position_2 nothing happens...
Tip: before link my shaders I am doing:
glBindAttribLocation( programID, 0, "in_position_1");
glBindAttribLocation( programID, 1, "in_position_2");
Why does it work only with the first stream? I need more data going to my vertex shader... I need to send color, etc. Any hint?
glVertexAttribPointer( 0, 2, GL_FLOAT, GL_FALSE, 0, (void*)offsetof(Flag_vertex, position_1) );
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 0, (void*)offsetof(Flag_vertex, position_2) );
These lines don't make sense. At least, not with how Flag_vertex is defined. If Flag_vertex is really supposed to be a vertex (and not a quad), then it makes no sense for it to have 8 floats. If each Flag_vertex defines a full quad, then you named it wrong; it's not a vertex at all, it's Flag_quad.
So it's hard to know what you're even trying to accomplish here.
Also:
If I use "in_position_1" my texture RENDERS PERFECTLY, but if I use in_position_2 nothing happens...
Of course it does. Your position data is in attribute 0. Your position data is therefore not in attribute 1. If you pretend attribute 1 has your position data when it clearly doesn't, you will not get reasonable results.
Your problem is that you're always using attribute 0 when you should be using both of them. You shouldn't be picking one or the other. Use in_position_1 for the position and in_position_2 for the texture coordinate. And try to name them reasonably, based on what they do (like position and texture_coord or something). Don't use numbers for them.
Tip: before link my shaders I am doing:
That is the exact same thing as the layout(location=#) setting in the shader. If you want it in the shader, then put it in the shader. If you want it in your OpenGL code, then put it in your OpenGL code. Don't put it in both places.
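To make that concrete, here is a minimal sketch of an interleaved vertex with a position and a texture coordinate, plus matching shader inputs (the struct layout and variable names are illustrative, not taken from the question):
struct Flag_vertex
{
    GLfloat position[2];
    GLfloat texcoord[2];
};

std::vector<Flag_vertex> vertices;  // one entry per vertex, filled by the application

glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer_id);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(Flag_vertex),
             vertices.data(), GL_STATIC_DRAW);

glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Flag_vertex),
                      (void*)offsetof(Flag_vertex, position));
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Flag_vertex),
                      (void*)offsetof(Flag_vertex, texcoord));
And the shader then uses both attributes instead of picking one:
#version 420 core
layout(location = 0) in vec2 position;
layout(location = 1) in vec2 texture_coord;
out vec2 texcoord;
void main()
{
    gl_Position = vec4(position, 0.0, 1.0);
    texcoord = texture_coord;
}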

Why won't my openGL vertex buffer object draw anything?

I am trying to render a model in OpenGL 4 using the glDrawElements/Arrays functions. I am currently reading in files with vertex data and indices for polygons. I cannot get anything to display on screen, though, except the axes I'm using for reference. I have used this general approach a bunch of times with success. I don't know what I could be doing wrong and have been stuck for almost a day.
This is where I buffer my data
// get position of "in vec4 vPosition;" from shader program
// displays axes properly
vPosition = glGetAttribLocation( program, "vPosition" );
// create buffers
glGenVertexArrays(2, vao);
glGenBuffers( 3, vbo );
// here I bind vao[0] and vbo[0] then buffer
// a set of XYZ axes which display correctly
// buffer model
glBindVertexArray( vao[1] );
glBindBuffer( GL_ARRAY_BUFFER, vbo[1] );
glBufferData( GL_ARRAY_BUFFER, sizeof(board->verts),
board->verts, GL_STATIC_DRAW );
glEnableVertexAttribArray( vPosition );
glVertexAttribPointer( vPosition, 4, GL_FLOAT, GL_FALSE, 0,
BUFFER_OFFSET(0));
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER , vbo[2] );
glBufferData(
GL_ELEMENT_ARRAY_BUFFER,
sizeof(board->indices),
board->indices.data(),
GL_STATIC_DRAW);
FYI, board is a model of a surfboard into which the vertices and indices for glDrawElements are read. These vertices and indices are printed out correctly if the following code is included just above the call to glBufferData:
for( int i = 0; i < board->numVerts; i++ ){
std::cerr << i << ": "<<board->verts[i].x << " "<<board->verts[i].y<<" "<<
board->verts[i].z << " " << board->verts[i].w << std::endl;
}
The same goes for the indices if a similar print loop is put in. Here is where I attempt to draw the model:
void draw_model(){
glUniform4fv( color_loc, 1, glm::value_ptr(blue));
glBindVertexArray( vao[1] );
glDrawArrays( GL_LINE_STRIP, 0, 600 );
//glDrawElements(
// GL_LINES,
// board->indices.size(),
// GL_UNSIGNED_INT,
// NULL
//);
}
The call to DrawElements (commented out) does not display anything. The call to DrawArrays was a troubleshooting effort to see if I was just using DrawElements incorrectly (I have only used DrawArrays in the past). board is defined as model3d *board. Here is its class:
class model3d{
public:
glm::vec4 *verts;
std::vector<int> indices;
int numVerts;
int numPolys;
model3d( const char*, const char* );
private:
void load_coords( const char* );
void load_polys( const char* );
};
Why does my data not seem to buffer?
Don't use sizeof(board->verts) as it'll only return the size of the pointer.
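A sketch of the fix using the members the model3d class already exposes; note that sizeof(board->indices) has the same problem, since it returns the size of the std::vector object itself, not of its contents:
glBufferData( GL_ARRAY_BUFFER,
              board->numVerts * sizeof(glm::vec4),   // actual size of the vertex data
              board->verts, GL_STATIC_DRAW );

glBufferData( GL_ELEMENT_ARRAY_BUFFER,
              board->indices.size() * sizeof(int),   // actual size of the index data
              board->indices.data(), GL_STATIC_DRAW );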

Many quads but not many FPS despite VBOs

I got some time and I've read about VBOs, and here's what I got:
http://img64.imageshack.us/img64/5733/fps8.jpg
OK, it's much better than before. It's compiled in Release. It uses VBOs (probably, if everything's OK) and glDrawArrays to draw.
Here's the drawing code. Please give me advice on how to optimize it. I was hoping for a few thousand FPS with the terrain. Is that realistic?
void DrawVBO (void)
{
int i;
CVert* m_pVertices;
CTexCoord* m_pTexCoords;
unsigned int m_nTextureId;
m_pVertices = NULL;
m_pTexCoords = NULL;
m_nVertexCount = 0;
m_nVBOVertices = m_nVBOTexCoords = m_nTextureId = 0;
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Get pointers to the OpenGL functions
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)
wglGetProcAddress("glDeleteBuffersARB");
}
todrawquads=0;
nIndex=0;
// my function counting how many quads I will draw
for (i=0;i<MAX_CHUNKS_LOADED;i++)
{
if (chunks_loaded[i].created==1)
{
countquads(i);
}
}
m_nVertexCount=4*todrawquads;
m_pVertices = new CVec[m_nVertexCount];
m_pTexCoords = new CTexCoord[m_nVertexCount];
// another of my functions, adding every quad I'm going to draw (its vertices) to the array
for (i=0;i<MAX_CHUNKS_LOADED;i++)
{
if (chunks_loaded[i].created==1)
{
addchunktodraw(i,m_pVertices,m_pTexCoords);
}
}
glClearColor (1,1,1, 0.0);
glColor3f(1,1,1);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear Screen And Depth Buffer
glLoadIdentity (); // Reset The Modelview Matrix
fps++;
// Camera settings.
//gluLookAt (zom, zom, zom, 0.0, 0.0, 0.0, 0, 0, 1);
gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
//m_nTextureId = t_terrain;
// Generate And Bind The Vertex Buffer
glGenBuffersARB( 1, &m_nVBOVertices ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*3*sizeof(float), m_pVertices, GL_STATIC_DRAW_ARB );
// Generate And Bind The Texture Coordinate Buffer
glGenBuffersARB( 1, &m_nVBOTexCoords ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*2*sizeof(float), m_pTexCoords, GL_STATIC_DRAW_ARB );
// Enable Pointers
glEnableClientState( GL_VERTEX_ARRAY ); // Enable Vertex Arrays
glEnableClientState( GL_TEXTURE_COORD_ARRAY ); // Enable Texture Coord Arrays
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glVertexPointer( 3, GL_FLOAT, 0, (char *) NULL ); // Set The Vertex Pointer To The Vertex Buffer
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glTexCoordPointer( 2, GL_FLOAT, 0, (char *) NULL ); // Set The TexCoord Pointer To The TexCoord Buffer
glDrawArrays( GL_QUADS, 0, m_nVertexCount); // Draw All Of The Triangles At Once
glDisableClientState( GL_VERTEX_ARRAY ); // Disable Vertex Arrays
glDisableClientState( GL_TEXTURE_COORD_ARRAY ); // Disable Texture Coord Arrays
liniergb();
glutSwapBuffers();
delete [] m_pVertices; m_pVertices = NULL;
delete [] m_pTexCoords; m_pTexCoords = NULL;
}
So what I can do with it ?
(Code above is main draw function)
edit
I've moved this :
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Get pointers to the OpenGL functions
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)
wglGetProcAddress("glDeleteBuffersARB");
}
to my main function. Probably no improvement.
edit2
Now I can also see that it's eating memory. Every few seconds the program's memory usage rises more and more... What's wrong? What am I not deleting?
edit3
OK, thanks so much. I've moved some code outside the draw function and... much more FPS!
Thanks so much!
http://img197.imageshack.us/img197/5193/fpsfinal.jpg
It's a 640x640-block map (so 40 times bigger) with 650,000 quads (about 70 times more), and still ~170 FPS. Great! And no memory leaks. Thanks again!
You're reloading all your buffers and freeing them again in every single frame? Stop doing that and your frame rate will go up.
Note that your current code will eventually run out of VBO identifiers, since you never delete the VBOs you create.
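If you ever do need to rebuild the buffers (for example when the terrain changes), delete the old ones first; a minimal sketch using the same member names:
if (m_nVBOVertices)  glDeleteBuffersARB(1, &m_nVBOVertices);
if (m_nVBOTexCoords) glDeleteBuffersARB(1, &m_nVBOTexCoords);
// ...then glGenBuffersARB / glBufferDataARB again with the new data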
Linking extension functions definitely doesn't need to be done every frame either.
Your DrawVBO function should contain only:
glClearColor (1,1,1, 0.0);
glColor3f(1,1,1);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear Screen And Depth Buffer
glLoadIdentity (); // Reset The Modelview Matrix
fps++;
// Camera settings.
//gluLookAt (zom, zom, zom, 0.0, 0.0, 0.0, 0, 0, 1);
gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
// Enable Pointers
glEnableClientState( GL_VERTEX_ARRAY ); // Enable Vertex Arrays
glEnableClientState( GL_TEXTURE_COORD_ARRAY ); // Enable Texture Coord Arrays
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glVertexPointer( 3, GL_FLOAT, 0, (char *) NULL ); // Set The Vertex Pointer To The Vertex Buffer
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glTexCoordPointer( 2, GL_FLOAT, 0, (char *) NULL ); // Set The TexCoord Pointer To The TexCoord Buffer
glDrawArrays( GL_QUADS, 0, m_nVertexCount); // Draw All Of The Triangles At Once
glDisableClientState( GL_VERTEX_ARRAY ); // Disable Vertex Arrays
glDisableClientState( GL_TEXTURE_COORD_ARRAY ); // Disable Texture Coord Arrays
liniergb();
glutSwapBuffers();
You need to move the rest to a separate function that is called only once at start-up (or when the terrain changes).
A few things stand out:
In your drawing function you're allocating memory for your geometry data
m_pVertices = new CVec[m_nVertexCount];
m_pTexCoords = new CTexCoord[m_nVertexCount];
Memory allocation is an extremely expensive operation; this is one of those things that should be done only once. OpenGL is not meant to be "initialized", but the data structures you're going to pass to it are!
Here you're copying the newly allocated buffers to OpenGL, again and again with each frame. This is exactly the opposite of what to do.
// Generate And Bind The Vertex Buffer
glGenBuffersARB( 1, &m_nVBOVertices ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*3*sizeof(float), m_pVertices, GL_STATIC_DRAW_ARB );
// Generate And Bind The Texture Coordinate Buffer
glGenBuffersARB( 1, &m_nVBOTexCoords ); // Get A Valid Name
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords ); // Bind The Buffer
// Load The Data
glBufferDataARB( GL_ARRAY_BUFFER_ARB, m_nVertexCount*2*sizeof(float), m_pTexCoords, GL_STATIC_DRAW_ARB );
The whole idea of VBOs is to load the data only once, copy it to OpenGL and then never reallocate it again. Note that this is not OpenGL initialization, it's data initialization, which is something totally reasonable. I see that you have named your variables m_pVertices and m_pTexCoords, indicating that they are class member variables. Then the solution is simple: move that whole initialization code into some loader function. Also, instead of naked C++ arrays I strongly suggest using std::vector.
So let's fix this:
// Load Extensions only once. Well, once per context actually, however
// why don't you just use an extension wrapper and forget about those
// gritty details? Google GLEW or GLee
void init_extensions()
{
if( IsExtensionSupported( "GL_ARB_vertex_buffer_object" ) )
{
// Get pointers to the OpenGL functions
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC) wglGetProcAddress("glDeleteBuffersARB");
}
}
class Block0r
{
protected:
GLuint m_nTextureId;
GLuint m_nVBOVertices;
GLuint m_nVBOTexCoords;
GLuint m_nVertexCount;
// Call this one time to load the data
void LoadVBO()
{
std::vector<CVert> vertices;
std::vector<CTexCoord> texCoords;
// my function counting how many quads I will draw
todrawquads = 0;
for(int i=0; i < MAX_CHUNKS_LOADED; i++) {
if( chunks_loaded[i].created == 1 ) {
countquads(i);
}
}
m_nVertexCount = 4*todrawquads;
vertices.resize(m_nVertexCount);
texCoords.resize(m_nVertexCount);
for (int i=0; i < MAX_CHUNKS_LOADED; i++) {
if (chunks_loaded[i].created==1) {
addchunktodraw(i, &vertices[0], &texCoords[0]);
}
}
glGenBuffersARB( 1, &m_nVBOVertices );
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glBufferDataARB( GL_ARRAY_BUFFER_ARB, vertices.size()*sizeof(CVert), &vertices[0], GL_STATIC_DRAW_ARB );
glGenBuffersARB( 1, &m_nVBOTexCoords );
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glBufferDataARB( GL_ARRAY_BUFFER_ARB, texCoords.size()*sizeof(CTexCoord), &texCoords[0], GL_STATIC_DRAW_ARB );
}
void DrawVBO()
{
glClearColor (1,1,1, 0.0);
glColor3f(1,1,1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
setup_projection(); // this should really be done in the drawing handler
glMatrixMode(GL_MODELVIEW); // don't assume a certain matrix is active!
glLoadIdentity();
fps++;
gluLookAt (zoom, zoom, zoom, 0.0, 0.0, 0.0, 0, 0, 1);
glRotatef((rot_x / 180 * 3.141592654f),1,0,0);
glRotatef((rot_y / 180 * 3.141592654f),0,1,0);
glRotatef((rot_z / 180 * 3.141592654f),0,0,1);
glEnableClientState( GL_VERTEX_ARRAY );
glEnableClientState( GL_TEXTURE_COORD_ARRAY );
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOVertices );
glVertexPointer( 3, GL_FLOAT, 0, (GLvoid *) 0 );
glBindBufferARB( GL_ARRAY_BUFFER_ARB, m_nVBOTexCoords );
glTexCoordPointer( 2, GL_FLOAT, 0, (GLvoid *) 0 );
glDrawArrays( GL_QUADS, 0, m_nVertexCount);
glDisableClientState( GL_VERTEX_ARRAY );
glDisableClientState( GL_TEXTURE_COORD_ARRAY );
liniergb();
glutSwapBuffers();
}
}
On a side note: comments like // Generate And Bind The Vertex Buffer or // Set The Vertex Pointer To The Vertex Buffer are not very useful. Such comments just redundantly state what one can read from the code anyway.
Comments should be added to code whose inner workings are not immediately understandable, or where you had to do some kind of hack to fix something and that hack would puzzle someone else, or yourself, in a few months' time.