I've got a problem with a Vertex Buffer Object: it doesn't seem to work properly.
Nothing shows up on screen.
Here is my code:
void gl::glRecti(int x, int y, int w, int h, glColor *color)
{
    GLuint vbo = 0;
    GLfloat verts[] =
    {
        x, y,
        x, y + h,
        x + w, y + h,
        x + w, y,
    };
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    glEnableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexPointer(4, GL_FLOAT, sizeof(float) * 8, NULL);
    glDrawArrays(GL_QUADS, 0, 4);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDisableClientState(GL_VERTEX_ARRAY);
}
PS: I'm very new to OpenGL programming. Any help would be appreciated.
Your vertex pointer does not make sense:
glVertexPointer( 4 , GL_FLOAT , sizeof(float) * 8, NULL );
You are telling the GL that each vertex position is specified as a 4-dimensional vector, and that the offset between two consecutive vertices is 8 floats.
What you supply is a tightly packed array of 2-dimensional positions, so you should use 2 as the size parameter and 2 * sizeof(float) for the stride (or 0, which is a shortcut for tightly packed arrays).
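For your tightly packed 2D positions, the call would therefore look like this (a minimal sketch, assuming the rest of your function stays as posted):

glVertexPointer(2, GL_FLOAT, 0, NULL); // 2 floats per vertex; stride 0 = tightly packed

With that change, GL_QUADS gets the four 2D corners it expects.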
The Description
I'm currently learning OpenGL and want to try out direct state access (DSA) from OpenGL 4.5. Thus I set up a simple triangle render example (in 3D) which should render my triangle. The code below is executed each frame and draws a triangle in 3D space.
void Geometry::draw(glm::vec3 pos, glm::vec3 pos1, glm::vec3 pos2) {
    float vertices[9] = {
        pos.x, pos.y, pos.z,
        pos1.x, pos1.y, pos1.z,
        pos2.x, pos2.y, pos2.z
    };
    unsigned int m_VaoID;
    glGenVertexArrays(1, &m_VaoID);
    glBindVertexArray(m_VaoID);

    unsigned int m_VboID;
    glGenBuffers(1, &m_VboID);
    glBindBuffer(GL_ARRAY_BUFFER, m_VboID);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, 0);

    Shader shader = Shader("../../shader.glsl");
    shader.compileShader();
    shader.useShader();

    glDrawArrays(GL_TRIANGLES, 0, 3);

    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDeleteVertexArrays(1, &m_VaoID);
    glDeleteBuffers(1, &m_VboID);
}
However, as soon as I change the code to make use of the direct state access extension, it no longer renders a triangle and instead produces error messages.
void Geometry::drawDSA(glm::vec3 pos, glm::vec3 pos1, glm::vec3 pos2) {
    float vertices[9] = {
        pos.x, pos.y, pos.z,
        pos1.x, pos1.y, pos1.z,
        pos2.x, pos2.y, pos2.z
    };
    unsigned int m_VaoID;
    glCreateVertexArrays(1, &m_VaoID);
    glVertexArrayAttribFormat(m_VaoID, 0, 3, GL_FLOAT, GL_FALSE, 0);

    unsigned int m_VboID;
    glCreateBuffers(1, &m_VboID);
    glNamedBufferData(m_VboID, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glVertexArrayAttribBinding(m_VaoID, 0, m_VboID);

    Shader shader = Shader("../../shader.glsl");
    shader.compileShader();
    shader.useShader();

    glBindVertexArray(m_VaoID);
    glBindBuffer(GL_ARRAY_BUFFER, m_VboID);
    glDrawArrays(GL_TRIANGLES, 0, 3);

    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDeleteVertexArrays(1, &m_VaoID);
    glDeleteBuffers(1, &m_VboID);
}
Both methods get called in my update loop with the same values (between -1.f and 0.f).
The error output:
Here is the console output of the error I'm getting:
[OpenGL Debug HIGH] GL_INVALID_VALUE in glVertexArrayAttribBinding(bindingindex=16 >= GL_MAX_VERTEX_ATTRIB_BINDINGS)
[OpenGL Debug HIGH] GL_INVALID_VALUE in glVertexArrayAttribBinding(bindingindex=17 >= GL_MAX_VERTEX_ATTRIB_BINDINGS)
[OpenGL Debug HIGH] GL_INVALID_VALUE in glVertexArrayAttribBinding(bindingindex=18 >= GL_MAX_VERTEX_ATTRIB_BINDINGS)
[OpenGL Debug HIGH] GL_INVALID_VALUE in glVertexArrayAttribBinding(bindingindex=19 >= GL_MAX_VERTEX_ATTRIB_BINDINGS)
What I tried so far
I looked through the Khronos Wiki and through the docs.gl documentation, but I could not find out why my code produces the above error instead of rendering my triangle.
The 3rd parameter of glVertexArrayAttribBinding is not the vertex buffer object; it is a binding index, an arbitrary id which you have to choose.
The following instruction associates a binding index with an attribute index (0) of the specified VAO, so instead of
glVertexArrayAttribBinding(m_VaoID, 0, m_VboID);
it has to be
glVertexArrayAttribBinding(m_VaoID, 0, binding_index);
Furthermore, you have to use glVertexArrayVertexBuffer to associate a buffer object with a binding index of a specified VAO:
glVertexArrayVertexBuffer(m_VaoID, binding_index, m_VboID, 0, sizeof(float) * 3);
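Putting it together, a minimal corrected sketch of the DSA setup (binding_index = 0 is an arbitrary choice here; note that the attribute also has to be enabled, e.g. with glEnableVertexArrayAttrib, which the posted drawDSA never does):

const GLuint binding_index = 0;

glCreateVertexArrays(1, &m_VaoID);
glCreateBuffers(1, &m_VboID);
glNamedBufferData(m_VboID, sizeof(vertices), vertices, GL_STATIC_DRAW);

// Associate the buffer with the binding index: offset 0, stride of 3 floats.
glVertexArrayVertexBuffer(m_VaoID, binding_index, m_VboID, 0, sizeof(float) * 3);

// Describe attribute 0, enable it, and route it through the binding index.
glEnableVertexArrayAttrib(m_VaoID, 0);
glVertexArrayAttribFormat(m_VaoID, 0, 3, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(m_VaoID, 0, binding_index);

// With DSA no glBindBuffer(GL_ARRAY_BUFFER, ...) is needed before drawing;
// the VAO already records which buffer feeds the attribute.
glBindVertexArray(m_VaoID);
glDrawArrays(GL_TRIANGLES, 0, 3);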
I am studying the source code of an open source project, and they have a use of the function glDrawElements which I don't understand. While I am a programmer, I am quite new to the GL API, so I would appreciate it if someone could tell me how this works.
Let's start with the drawing part. The code looks like this:
for (int i = 0; i < numObjs; i++) {
    glDrawElements(GL_TRIANGLES, vboIndexSize(i), GL_UNSIGNED_INT, (void*)(UPTR)vboIndexOffset(i));
}
vboIndexSize(i) returns the number of indices for the current object, and vboIndexOffset(i) returns the offset in bytes, in a flat memory array in which vertex data AND the indices of the objects are stored.
The part I don't understand is the (void*)(UPTR)vboIndexOffset(i). I looked at the code many times: the function vboIndexOffset returns an int32, and UPTR also casts the returned value to an int32. So how can you cast an int32 to a void* and expect this to work? But let's assume I made a mistake there and that it actually returns a pointer to this variable instead. The 4th argument of the glDrawElements call is an offset in bytes within a memory block. Here is how the data is actually stored on the GPU:
int ofs = m_vertices.getSize();
for (int i = 0; i < numObj; i++)
{
    obj[i].ofsInVBO = ofs;
    obj[i].sizeInVBO = obj[i].indices->getSize() * 3;
    ofs += obj[i].indices->getNumBytes();
}

vbo.resizeDiscard(ofs);
memcpy(vbo.getMutablePtr(), vertices.getPtr(), vertices.getSize());
for (int i = 0; i < numObj; i++)
{
    memcpy(
        m_vbo.getMutablePtr(obj[i].ofsInVBO),
        obj[i].indices->getPtr(),
        obj[i].indices->getNumBytes());
}
So all they do is calculate the number of bytes needed to store the vertex data, then add to this the number of bytes needed to store the indices of all the objects we want to draw. Then they allocate memory of that size and copy the data into this memory: first the vertex data and then the indices. Once this is done, they push it to the GPU using:
glGenBuffers(1, &glBuffer);
glBindBuffer(GL_ARRAY_BUFFER, glBuffer);
checkSize(size, sizeof(GLsizeiptr) * 8 - 1, "glBufferData");
glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr)size, data, GL_STATIC_DRAW);
What's interesting is that they store everything in the GL_ARRAY_BUFFER. They never store the vertex data in a GL_ARRAY_BUFFER and then the indices using a GL_ELEMENT_ARRAY_BUFFER.
But to go back to the code where the drawing is done, they first do the usual stuff to declare vertex attribute. For each attribute:
glBindBuffer(GL_ARRAY_BUFFER, glBuffer);
glEnableVertexAttribArray(loc);
glVertexAttribPointer(loc, size, type, GL_FALSE, stride, pointer);
This makes sense and is just standard. And then the code I already mentioned:
for (int i = 0; i < numObjs; i++) {
    glDrawElements(GL_TRIANGLES, vboIndexSize(i), GL_UNSIGNED_INT, (void*)(UPTR)vboIndexOffset(i));
}
So the question: even if (UPTR) actually returns the pointer to a variable (the code doesn't indicate this, but I may be mistaken, it's a large project), I didn't know it was possible to store all vertex and index data in the same memory block using GL_ARRAY_BUFFER and then call glDrawElements with the 4th argument being the byte offset from the start of this memory block to the first index of the current object. I thought you needed to use GL_ARRAY_BUFFER and GL_ELEMENT_ARRAY_BUFFER to declare the vertex data and the indices separately, and I can't get the single-buffer approach to work on my side anyway.
Has anyone seen this working before? I haven't had a chance to get it working yet, and wonder if someone could tell me if there's something specific I need to be aware of to get it to work. I tested with a simple triangle with position, normal and texture coordinate data, thus I have 8 * 3 floats for the vertex data, and I have an array of 3 integers for the indices: 0, 1, 2. I then copy everything into a memory block, initialize the glBufferData with this, then try to draw the triangle with:
int n = 96; // offset in bytes into the memory block, first int in the index list
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, (void*)(&n));
It doesn't crash but I can't see the triangle.
EDIT:
Adding the code that doesn't seem to work for me (crashes).
float vertices[] = {
     0,  1, 0, // Vertex 1 (X, Y, Z)
     2, -1, 0, // Vertex 2 (X, Y, Z)
    -1, -1, 0, // Vertex 3 (X, Y, Z)
     3,  1, 0, // Vertex 4 (X, Y, Z)
};
U8 *ptr = (U8*)malloc(4 * 3 * sizeof(float) + 6 * sizeof(unsigned int));
memcpy(ptr, vertices, 4 * 3 * sizeof(float));
unsigned int indices[6] = { 0, 1, 2, 0, 3, 1 };
memcpy(ptr + 4 * 3 * sizeof(float), indices, 6 * sizeof(unsigned int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 4 * 3 * sizeof(float) + 6 * sizeof(unsigned int), ptr, GL_STATIC_DRAW);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
free(ptr);
Then when it comes to draw:
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// see stackoverflow.com/questions/8283714/what-is-the-result-of-null-int/
typedef void (*TFPTR_DrawElements)(GLenum, GLsizei, GLenum, uintptr_t);
TFPTR_DrawElements myGlDrawElements = (TFPTR_DrawElements)glDrawElements;
myGlDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, uintptr_t(4 * 3 * sizeof(float)));
This crashes the app.
see answer below for solution
This is due to OpenGL re-using fixed-function pipeline calls. When you bind a GL_ARRAY_BUFFER VBO, a subsequent call to glVertexAttribPointer expects an offset into the VBO (in bytes), which is then cast to a (void *). The GL_ARRAY_BUFFER binding remains in effect until another buffer is bound, just as the GL_ELEMENT_ARRAY_BUFFER binding remains in effect until another 'index' buffer is bound.
You can encapsulate the buffer binding and attribute pointer (offset) states using a Vertex Array Object.
The address in your example isn't valid: &n passes the address of the local variable, not the offset value. Cast offsets with: (void *) n
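For instance, the last snippet of the question could be written like this (a sketch; n holds the byte offset itself, which is then carried in the pointer-typed argument):

// NOT &n: the 4th parameter is a byte offset into the bound buffer,
// passed through a pointer-typed argument for historical reasons.
uintptr_t n = 96;
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, (void *)n);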
Thanks for the answers. After doing some research on the web, I think that:
First, you should be using glGenVertexArrays. It seems that this is THE standard now for OpenGL 4.x, so rather than calling glVertexAttribPointer before drawing the geometry, it seems like best practice to create a VAO when the data is pushed to the GPU buffers.
I actually was able to combine the vertex data and the indices within the SAME buffer (a GL_ARRAY_BUFFER) and then draw the primitive using glDrawElements (see below). The standard way, though, is to push the vertex data to a GL_ARRAY_BUFFER and the indices to a GL_ELEMENT_ARRAY_BUFFER separately. So if that's the standard way of doing it, it's probably better not to try to be too smart and just use these functions.
Example:
glGenBuffers(1, &vbo);
// push the data using GL_ARRAY_BUFFER
glGenBuffers(1, &vio);
// push the indices using GL_ELEMENT_ARRAY_BUFFER
...
glGenVertexArrays(1, &vao);
// do calls to glVertexAttribPointer
...
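For reference, a filled-in sketch of that skeleton (assuming the vertices and indices arrays from above and a single position attribute at location 0):

GLuint vao, vbo, vio;

glGenVertexArrays(1, &vao);
glBindVertexArray(vao);

// push the data using GL_ARRAY_BUFFER
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void *)0);
glEnableVertexAttribArray(0);

// push the indices using GL_ELEMENT_ARRAY_BUFFER (the bound VAO records this binding)
glGenBuffers(1, &vio);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vio);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);

// draw
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void *)0);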
Please correct me if I am wrong, but that seems the correct (and only) way to go.
EDIT:
However, it is actually possible to "pack" the vertex data and the indices together into a GL_ARRAY_BUFFER, as long as a call to glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo) is done prior to calling glDrawElements.
Working code (compared with code in original post):
float vertices[] = {
     0,  1, 0, // Vertex 1 (X, Y, Z)
     2, -1, 0, // Vertex 2 (X, Y, Z)
    -1, -1, 0, // Vertex 3 (X, Y, Z)
     3,  1, 0, // Vertex 4 (X, Y, Z)
};
U8 *ptr = (U8*)malloc(4 * 3 * sizeof(float) + 6 * sizeof(unsigned int));
memcpy(ptr, vertices, 4 * 3 * sizeof(float));
unsigned int indices[6] = { 0, 1, 2, 0, 3, 1 };
memcpy(ptr + 4 * 3 * sizeof(float), indices, 6 * sizeof(unsigned int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 4 * 3 * sizeof(float) + 6 * sizeof(unsigned int), ptr, GL_STATIC_DRAW);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
free(ptr);
Then when it comes to draw:
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // << THIS IS ACTUALLY NOT NECESSARY
// VVVV THIS WILL MAKE IT WORK VVVV
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo);
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// see stackoverflow.com/questions/8283714/what-is-the-result-of-null-int/
typedef void (*TFPTR_DrawElements)(GLenum, GLsizei, GLenum, uintptr_t);
TFPTR_DrawElements myGlDrawElements = (TFPTR_DrawElements)glDrawElements;
myGlDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, uintptr_t(4 * 3 * sizeof(float)));
I have a working Vertex-Buffer-Object but I need to add the normals.
The normals are stored in the same array as the vertex positions; they are interleaved:
Vx Vy Vz Nx Ny Nz
This is my code so far:
GLfloat values[NUM_POINTS*3 + NUM_POINTS*3];

void initScene() {
    for (int i = 0; i < (NUM_POINTS); i = i+6) {
        values[i+0] = bunny[i];
        values[i+1] = bunny[i+1];
        values[i+2] = bunny[i+2];
        values[i+3] = normals[i];
        values[i+4] = normals[i+1];
        values[i+5] = normals[i+2];
    }
    glGenVertexArrays(1, &bunnyVAO);
    glBindVertexArray(bunnyVAO);

    glGenBuffers(1, &bunnyVBO);
    glBindBuffer(GL_ARRAY_BUFFER, bunnyVBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(bunny), bunny, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(0);

    glGenBuffers(1, &bunnyIBO);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bunnyIBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(triangles), triangles, GL_STATIC_DRAW);

    // unbind active buffers //
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
void renderScene() {
    if (bunnyVBO != 0) {
        // x: bind VAO //
        glEnableClientState(GL_VERTEX_ARRAY);
        glBindVertexArray(bunnyVAO);
        glDrawElements(GL_TRIANGLES, NUM_TRIANGLES, GL_UNSIGNED_INT, NULL);
        glDisableClientState(GL_VERTEX_ARRAY);
        // unbind active buffers //
        glBindVertexArray(0);
    }
}
I can see something on the screen, but it is not right, as the normals are not used correctly...
How can I hook the values array up correctly with my code so far?
You need to call glVertexAttribPointer two times: once for the vertices and once for the normals. This is how you tell OpenGL how your data is laid out inside your vertex buffer.
// Vertices consist of 3 floats, occurring every 24 bytes (6 floats),
// starting at byte 0.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, (void *)0);
// Normals consist of 3 floats, occurring every 24 bytes, starting at byte 12.
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, (void *)12);
This is assuming that your normal attribute in your shader has an index of 1.
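Hooked into your initScene, that could look like this (a sketch; note it uploads the interleaved values array rather than bunny, which your posted glBufferData call still does):

glBufferData(GL_ARRAY_BUFFER, sizeof(values), values, GL_STATIC_DRAW);

// Attribute 0: positions, attribute 1: normals, interleaved with a 6-float stride.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void *)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat),
                      (void *)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);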
In my project I want to display many objects (spheres) using VBOs.
I manage to display one object without a problem, but when it comes to two or more, all objects (VBOs) are replaced by the last defined object (VBO).
CosmicBody(int x)
{
    this->verticesSize = 0;
    //this->VaoId=x;
    //this->VaoId=1;
    this->VboId = x;
};

void CosmicBody::InitShape(unsigned int uiStacks, unsigned int uiSlices, float fA, float fB, float fC)
{
    float tStep = (Pi) / (float)uiSlices;
    float sStep = (Pi) / (float)uiStacks;
    float SlicesCount = (Pi + 0.0001) / tStep;
    float StackCount = (2*Pi + 0.0001) / sStep;
    this->verticesSize = ((int)(SlicesCount + 1) * (int)(StackCount + 1)) * 2;

    glm::vec4 *vertices = NULL;
    vertices = new glm::vec4[verticesSize];

    int count = 0;
    for (float t = -Pi/2; t <= (Pi/2) + .0001; t += tStep)
    {
        for (float s = -Pi; s <= Pi + .0001; s += sStep)
        {
            vertices[count++] = glm::vec4(fA * cos(t) * cos(s), fB * cos(t) * sin(s), fC * sin(t), 1.0f);
            vertices[count++] = glm::vec4(fA * cos(t+tStep) * cos(s), fB * cos(t+tStep) * sin(s), fC * sin(t+tStep), 1.0f);
        }
    }

    glGenBuffers(1, &VboId);
    glBindBuffer(GL_ARRAY_BUFFER, VboId);
    glBufferData(GL_ARRAY_BUFFER, 16 * verticesSize, vertices, GL_STATIC_DRAW);

    glGenVertexArrays(1, &VaoId);
    glBindVertexArray(VaoId);
    glBindBuffer(GL_ARRAY_BUFFER, VboId);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);

    delete[] vertices;
}

void CosmicBody::Draw()
{
    glBindBuffer(GL_ARRAY_BUFFER, this->VboId);
    glEnableVertexAttribArray(0);
    // When I replace this->verticesSize with the number of vertices of the last
    // object, instead of getting x different objects I get instances of the last one.
    glDrawArrays(GL_TRIANGLE_STRIP, 0, this->verticesSize);
    glDisableVertexAttribArray(0);
}
When you use VAOs, you should bind the VAO for drawing, not the VBO:
void CosmicBody::Draw()
{
    glBindVertexArray(this->VaoId); // <-- this is the difference
    glDrawArrays(GL_TRIANGLE_STRIP, 0, this->verticesSize);
    glBindVertexArray(0); // unbind the VAO
}
And move the glEnableVertexAttribArray(0); into CosmicBody::InitShape, after you bind the VAO; there is no need to enable/disable it on every draw:
...
glGenVertexArrays(1, &VaoId);
glBindVertexArray(VaoId);
glEnableVertexAttribArray(0); // <-- here
glBindBuffer(GL_ARRAY_BUFFER, VboId);
...
I decided to import the Wavefront .OBJ format into a test scene that I'm working on. I get the model (vertices) in the right place and it displays fine. When I then apply a texture, a lot of things look distorted. I checked my Maya scene (there it looks good), and the object has many more UV coordinates than vertex positions (this is what makes the scene look weird in OpenGL, is my guess).
How would I go about loading a scene like that? Do I need to duplicate vertices, and how do I store them in the vertex buffer object?
You are right that you have to duplicate the vertices.
In addition to that you have to sort them in draw order, meaning that you have to order the vertices with the same offsets as the texture coordinates and normals.
Basically, you'll need this kind of structure:
float verts[]     = {v1_x, v1_y, v1_z, v1_w, v2_x, v2_y, v2_z, v2_w, ...};
float normals[]   = {n1_x, n1_y, n1_z, n2_x, n2_y, n2_z, ...};
float texcoords[] = {t1_u, t1_v, t1_w, t2_u, t2_v, t2_w, ...};
This however would mean that you have at least 108 bytes per triangle:
  3 (vert, norm, tex)
* 3 (xyz/uvw)
* 3 (points in tri)
* 4 (bytes in a float)
-----------------------
= 108
You can significantly reduce that number by only duplicating the vertices that actually are duplicates (identical position, texture coordinate and normal, meaning: smoothed normals and no UV borders) and using an Index Buffer Object to set the draw order.
I faced the same problem recently in a small project, and I just split the models along the hard edges and UV-shell borders, thereby creating only the necessary duplicate vertices. Then I used the glm.h and glm.cpp from Nate Robins and copied/sorted the normals and texture coordinates into the same order as the vertices.
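A sketch of that deduplication step (a hypothetical helper, not from glm.h; it keys a map on the whole position/normal/texcoord combination and reuses the index of any combination already seen; the unique vertices can afterwards be split back into the separate verts/normals/texcoords arrays shown above, or uploaded interleaved):

#include <cstring>
#include <map>
#include <vector>

struct Vertex {
    float px, py, pz;  // position
    float nx, ny, nz;  // normal
    float tu, tv;      // texture coordinate
    bool operator<(const Vertex &o) const {
        // Bitwise ordering is enough here; we only need map lookups, not geometry.
        return std::memcmp(this, &o, sizeof(Vertex)) < 0;
    }
};

std::map<Vertex, unsigned int> cache;   // combination -> index
std::vector<Vertex> uniqueVerts;        // goes into the VBO
std::vector<unsigned int> indices;      // goes into the IBO

// Call once per face corner while parsing the OBJ file.
unsigned int addVertex(const Vertex &v) {
    auto it = cache.find(v);
    if (it != cache.end())
        return it->second;              // duplicate combination: reuse its index
    unsigned int idx = (unsigned int)uniqueVerts.size();
    cache[v] = idx;
    uniqueVerts.push_back(v);
    return idx;
}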
Then setup the VBO and IBO:
//this is for Data that does not change dynamically
//GL_DYNAMIC_DRAW and others are available
GLuint mDrawMode = GL_STATIC_DRAW;
//////////////////////////////////////////////////////////
//Setup the VBO
//////////////////////////////////////////////////////////
GLuint mId;
glGenBuffers(1, &mId);
glBindBuffer(GL_ARRAY_BUFFER, mId);
glBufferData(GL_ARRAY_BUFFER,
             mMaxNumberOfVertices * (mVertexBlockSize + mNormalBlockSize + mColorBlockSize + mTexCoordBlockSize),
             0,
             mDrawMode);
glBufferSubData(GL_ARRAY_BUFFER, mVertexOffset, numberOfVertsToStore * mVertexBlockSize, vertices);
glBufferSubData(GL_ARRAY_BUFFER, mNormalOffset, numberOfVertsToStore * mNormalBlockSize, normals);
glBufferSubData(GL_ARRAY_BUFFER, mColorOffset, numberOfVertsToStore * mColorBlockSize, colors);
glBufferSubData(GL_ARRAY_BUFFER, mTexCoordOffset, numberOfVertsToStore * mTexCoordBlockSize, texCoords);
//////////////////////////////////////////////////////////
//Setup the IBO
//////////////////////////////////////////////////////////
GLuint IBOId;
glGenBuffers(1, &IBOId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBOId);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, mMaxNumberOfIndices * sizeof(GLuint), 0, mDrawMode);
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, numberOfIndicesToStore * sizeof(GLuint), indices);
//////////////////////////////////////////////////////////
//This is how to draw the object
//////////////////////////////////////////////////////////
glBindBuffer(GL_ARRAY_BUFFER, mId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBOId);
//Enables and Disables are only necessary each draw
//when they change between objects
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(mVertexComponents, GL_FLOAT, 0, (void*)mVertexOffset);
if (mNormalBlockSize) {
    glEnableClientState(GL_NORMAL_ARRAY);
    glNormalPointer(GL_FLOAT, 0, (void*)mNormalOffset);
}
if (mColorBlockSize) {
    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(mColorComponents, GL_FLOAT, 0, (void*)mColorOffset);
}
if (mTexCoordBlockSize) {
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glTexCoordPointer(mTexCoordComponents, GL_FLOAT, 0, (void*)mTexCoordOffset);
}

glDrawRangeElements(primMode,
                    idFirstVertex,
                    idLastVertex,
                    idLastVertex - idFirstVertex + 1,
                    mAttachedIndexBuffer->getDataType(),
                    0);

if (mTexCoordBlockSize)
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
if (mColorBlockSize)
    glDisableClientState(GL_COLOR_ARRAY);
if (mNormalBlockSize)
    glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);