I'm attempting to add a vertex buffer to the Mesh.cpp file of the pixel city procedural city-generating program. Part of the current code looks like this:
for (qsi = _quad_strip.begin(); qsi < _quad_strip.end(); ++qsi) {
    glBegin (GL_QUAD_STRIP);
    for (n = qsi->index_list.begin(); n < qsi->index_list.end(); ++n) {
        glTexCoord2fv (&_vertex[*n].uv.x);
        glVertex3fv (&_vertex[*n].position.x);
    }
    glEnd ();
}
This draws textures onto the rectangular sides of some of the buildings. Just going off the VBO tutorials I've found online, I attempt to convert this to use a vertex buffer like so (I store vboId in Mesh.h):
for (qsi = _quad_strip.begin(); qsi < _quad_strip.end(); ++qsi) {
    void * varray = (char *) malloc(sizeof(GLfloat)*5*qsi->index_list.size());
    GLfloat *p = (GLfloat*)varray;
    int counter = 0;
    for (n = qsi->index_list.begin(); n < qsi->index_list.end(); ++n) {
        memcpy(&p[counter+0],&_vertex[*n].uv.x, sizeof(GLfloat)*2);
        memcpy(&p[counter+2],&_vertex[*n].position.x, sizeof(GLfloat)*3);
        counter+=5;
    }
    glGenBuffersARB(1, &vboId);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vboId);
    glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(GLfloat)*5*qsi->index_list.size(), p, GL_STATIC_DRAW_ARB);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnableClientState(GL_VERTEX_ARRAY);
    glTexCoordPointer(2, GL_FLOAT, sizeof(GLfloat)*5, (GLfloat*)0);
    glVertexPointer(3, GL_FLOAT, sizeof(GLfloat)*5, (GLfloat*)2);
    glDrawArrays(GL_QUAD_STRIP, 0, qsi->index_list.size());
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
    free(varray);
}
However, this code simply doesn't work. Nothing is rendered. Sometimes, when I mess with the parameters to glTexCoordPointer or glVertexPointer, I get really skewed/garbage data drawn on the screen (or the program crashes), but nothing that comes even remotely close to working.
Your vertex pointer is wrong. When using VBOs, the pointer is interpreted as a byte offset relative to the currently bound buffer. So you need sizeof(GLfloat)*2 bytes as the offset:
glVertexPointer(3, GL_FLOAT, sizeof(GLfloat)*5, (char*)0 + sizeof(GLfloat)*2);
As a side note: you could save the additional data copy if you created the VBO with the correct size but NULL as the data pointer (which just allocates the data storage) and then memory-mapped it with glMapBuffer(), completely avoiding the malloc()ed temporary buffer you use.
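For illustration, here is a minimal, untested sketch of that approach, reusing the names from the question (vboId, qsi, _vertex) and the same ARB entry points the original code already uses:
glGenBuffersARB(1, &vboId);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vboId);
// Passing NULL only allocates the storage; the contents are left undefined.
glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(GLfloat)*5*qsi->index_list.size(), NULL, GL_STATIC_DRAW_ARB);
// Map the buffer and write the interleaved uv/position data straight into it.
GLfloat *p = (GLfloat*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
for (n = qsi->index_list.begin(); n < qsi->index_list.end(); ++n) {
    memcpy(p + 0, &_vertex[*n].uv.x,       sizeof(GLfloat)*2);
    memcpy(p + 2, &_vertex[*n].position.x, sizeof(GLfloat)*3);
    p += 5;
}
glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
// Client state setup stays as before; the pointer offsets match the corrected call above:
glTexCoordPointer(2, GL_FLOAT, sizeof(GLfloat)*5, (char*)0);
glVertexPointer(3, GL_FLOAT, sizeof(GLfloat)*5, (char*)0 + sizeof(GLfloat)*2);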
I am using the following library to render text in OpenGL: fontstash. I have another header file which adds support for OpenGL 3.0+. The question is: why is the core profile render implementation so much slower than the immediate mode one?
Here is the render code with immediate mode:
static void glfons__renderDraw(void* userPtr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts)
{
    GLFONScontext* gl = (GLFONScontext*)userPtr;
    if (gl->tex == 0) return;
    glBindTexture(GL_TEXTURE_2D, gl->tex);
    glEnable(GL_TEXTURE_2D);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glVertexPointer(2, GL_FLOAT, sizeof(float)*2, verts);
    glTexCoordPointer(2, GL_FLOAT, sizeof(float)*2, tcoords);
    glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(unsigned int), colors);
    glDrawArrays(GL_TRIANGLES, 0, nverts);
    glDisable(GL_TEXTURE_2D);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
}
Here is the core profile render code:
static void gl3fons__renderDraw(void* userPtr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts)
{
    GLFONScontext* gl = (GLFONScontext*)userPtr;
    if (gl->tex == 0) return;
    if (gl->shader == 0) return;
    if (gl->vao == 0) return;
    if (gl->vbo == 0) return;
    // init shader
    glUseProgram(gl->shader);
    // init texture
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, gl->tex);
    glUniform1i(gl->texture_uniform, 0);
    // init our projection matrix
    glUniformMatrix4fv(gl->projMat_uniform, 1, false, gl->projMat);
    // bind our vao
    glBindVertexArray(gl->vao);
    // setup our buffer
    glBindBuffer(GL_ARRAY_BUFFER, gl->vbo);
    glBufferData(GL_ARRAY_BUFFER, (2 * sizeof(float) * 2 * nverts) + (sizeof(int) * nverts), NULL, GL_DYNAMIC_DRAW);
    glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 2 * nverts, verts);
    glBufferSubData(GL_ARRAY_BUFFER, sizeof(float) * 2 * nverts, sizeof(float) * 2 * nverts, tcoords);
    glBufferSubData(GL_ARRAY_BUFFER, 2 * sizeof(float) * 2 * nverts, sizeof(int) * nverts, colors);
    // setup our attributes
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, (void *) (sizeof(float) * 2 * nverts));
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(int), (void *) (2 * sizeof(float) * 2 * nverts));
    glDrawArrays(GL_TRIANGLES, 0, nverts);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
    glUseProgram(0);
}
I made a small test for each implementation and the results show that immediate mode is significantly faster than core.
Both tests fill the screen with AAA... and I log the time it took to do this for each frame. This is the loop:
// Create GL stash for 512x512 texture, our coordinate system has zero at top-left.
struct FONScontext* fs = glfonsCreate(512, 512, FONS_ZERO_TOPLEFT);
// Add font to stash.
int fontNormal = fonsAddFont(fs, "sans", "fontstash/example/DroidSerif-Regular.ttf");
// Render some text
float dx = 10, dy = 10;
unsigned int white = glfonsRGBA(255,255,255,255);
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
fonsSetFont(fs, fontNormal);
fonsSetSize(fs, 20.0f);
fonsSetColor(fs, white);
for(int i = 0; i < 90; i++){
    for( int j = 0; j < 190; j++){
        dx += 10;
        fonsDrawText(fs, dx, dy, "A", NULL);
    }
    dy += 10;
    dx = 10;
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> time_span = t2 - t1;
std::cout<<"Time to render: "<<time_span.count()<<"ms"<<std::endl;
And the results show more than 400ms difference between the two:
[Screenshot: Core profile (left) vs Immediate mode (right)]
What should be changed in order to speed up performance?
I don't know exactly what gl is in this program, but it's pretty clear that, every time you want to render a piece of text, you perform the following operations:
1. Allocate storage for a buffer, reallocating whatever storage had been created from the last time this was called.
2. Perform three separate uploads of data to that buffer.
These are not good ways to stream vertex data to the GPU. There are specific techniques for doing this well, but this is not one of them. In particular, the fact that you are constantly reallocating the same buffer is going to kill performance.
The most effective way to deal with this is to have a single buffer with a fixed amount of storage. It gets allocated exactly once and never again. Ideally, whatever API you're getting vertex data from would provide it in an interleaved format, so that you would only need to perform one upload rather than three. But apparently Fontstash is not so generous.
In any case, the main idea is to avoid reallocation and synchronization. The latter means never trying to write over data that has been written to recently. So your buffer needs to be sufficiently large to hold twice the number of font vertices you ever expect to render. Essentially, you double-buffer the vertex data: writing to one set of data while the other set is being read from.
So at the beginning of the frame, you figure out the byte offset you will be writing to. This will either be the start of the buffer or halfway through it. Then, for each blob of text, you write vertex data to this offset and increment the offset accordingly.
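As a rough sketch of that bookkeeping (the names here - MAX_VERTS_PER_FRAME, frameParity, interleavedVerts and the interleaved Vertex struct - are illustrative assumptions, not Fontstash API):
struct Vertex { float x, y, u, v; unsigned int rgba; };   // assumed interleaved layout

const GLsizeiptr halfSize    = MAX_VERTS_PER_FRAME * sizeof(Vertex);
GLintptr         writeOffset = frameParity ? halfSize : 0;             // this frame's half
GLint            baseVertex  = frameParity ? MAX_VERTS_PER_FRAME : 0;

// For each blob of text:
glBufferSubData(GL_ARRAY_BUFFER, writeOffset, nverts * sizeof(Vertex), interleavedVerts);
glDrawArrays(GL_TRIANGLES, baseVertex, nverts);
writeOffset += nverts * sizeof(Vertex);
baseVertex  += nverts;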
And to avoid having to change VAO state, you should interleave the vertex data manually. Instead of uploading three separate arrays, combine them so that you're effectively building one gigantic array of interleaved vertices. That way you never need to call glVertexAttribPointer in the middle of this function; you just use the parameters to glDraw* to draw the part of the array you want.
This also means you only need one glBufferSubData call. But if you have access to persistent mapped buffers, you don't even need that, since you can just write to the memory directly while using the other portion of it. Though if you use persistent mapping, you will need to use a fence sync object when you switch buffer regions to make sure that you're not writing to vertex data that is still being read by the GPU.
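A sketch of the persistent-mapping variant, assuming GL 4.4 / ARB_buffer_storage and the same hypothetical Vertex, halfSize and MAX_VERTS_PER_FRAME names as above (illustrative only, not a drop-in implementation):
// Allocate immutable storage for two halves and keep it mapped for the program's lifetime.
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferStorage(GL_ARRAY_BUFFER, 2 * halfSize, NULL,
                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
Vertex* mapped = (Vertex*) glMapBufferRange(GL_ARRAY_BUFFER, 0, 2 * halfSize,
                GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);

// When switching to the other half, wait until the GPU has finished reading it...
if (fence[half]) {
    glClientWaitSync(fence[half], GL_SYNC_FLUSH_COMMANDS_BIT, UINT64_MAX);
    glDeleteSync(fence[half]);
}
// ...write vertices into mapped[half * MAX_VERTS_PER_FRAME + i], issue the draw calls,
// then fence this half so the next time it is reused we can wait on it:
fence[half] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);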
I have a bunch of code (copied from various tutorials) that is supposed to draw a random color-changing cube, with the camera shifting around it every second or so (controlled by a variable, not using timers yet). It worked in the past, before I moved my code out of my main function and into distinct classes, but now I can't see anything on the main window other than a blank background. I cannot pinpoint any particular issue: I am getting no errors or exceptions, and my own code checks out; when I debugged, every variable had a value I expected, and the shaders I used (in string form) worked before I re-organized my code. I can print out the vertices of the cube in the same scope as the glDrawArrays() call, and they have the correct values too. Basically, I have no idea what's wrong with my code that is causing nothing to be drawn.
My best guess is that I called some OpenGL function improperly, with the wrong data, or forgot to call one entirely, in one of the three methods of my Model class. In my program, I create a Model object (after GLFW and glad are initialized), which calls the Model constructor, update it every once in a while (timing doesn't matter) through the update() function, then draw it to my screen every time my main loop runs through the draw() function.
Possible locations of code faults:
Model::Model(std::vector<GLfloat> vertexBufferData, std::vector<GLfloat> colorBufferData) {
    mVertexBufferData = vertexBufferData;
    mColorBufferData = colorBufferData;
    // Generate 1 buffer, put the resulting identifier in vertexbuffer
    glGenBuffers(1, &VBO);
    // The following commands will talk about our 'vertexbuffer' buffer
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    // Give our vertices to OpenGL.
    glBufferData(GL_ARRAY_BUFFER, sizeof(mVertexBufferData), &mVertexBufferData.front(), GL_STATIC_DRAW);
    glGenBuffers(1, &CBO);
    glBindBuffer(GL_ARRAY_BUFFER, CBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(mColorBufferData), &mColorBufferData.front(), GL_STATIC_DRAW);
    // Create and compile our GLSL program from the shaders
    programID = loadShaders(zachos::DATA_DEF);
    glUseProgram(programID);
}
void Model::update() {
    for (int v = 0; v < 12 * 3; v++) {
        mColorBufferData[3 * v + 0] = (float)std::rand() / RAND_MAX;
        mColorBufferData[3 * v + 1] = (float)std::rand() / RAND_MAX;
        mColorBufferData[3 * v + 2] = (float)std::rand() / RAND_MAX;
    }
    glBufferData(GL_ARRAY_BUFFER, sizeof(mColorBufferData), &mColorBufferData.front(), GL_STATIC_DRAW);
}
void Model::draw() {
    // Setup some 3D stuff
    glm::mat4 mvp = Mainframe::projection * Mainframe::view * model;
    GLuint MatrixID = glGetUniformLocation(programID, "MVP");
    glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &mvp[0][0]);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glVertexAttribPointer(
        0,        // attribute 0. No particular reason for 0, but must match the layout in the shader.
        3,        // size
        GL_FLOAT, // type
        GL_FALSE, // normalized?
        0,        // stride
        (void*)0  // array buffer offset
    );
    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, CBO);
    glVertexAttribPointer(
        1,        // attribute. No particular reason for 1, but must match the layout in the shader.
        3,        // size
        GL_FLOAT, // type
        GL_FALSE, // normalized?
        0,        // stride
        (void*)0  // array buffer offset
    );
    // Draw the array
    glDrawArrays(GL_TRIANGLES, 0, mVertexBufferData.size() / 3);
    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
};
My question is simple: why won't my program draw a cube on my screen? Is the issue within these three functions or elsewhere? I can provide more general information about the drawing process if needed, though I believe the code I provided is enough, since I literally just call model.draw().
sizeof(std::vector) will usually just be 24 bytes (the struct typically contains 3 pointers). So both of your buffers have only 6 floats loaded into them, which is not enough vertices for a single triangle, let alone a cube!
You should instead be using size() on the vector when loading the data into the vertex buffers:
glBufferData(GL_ARRAY_BUFFER,
             mVertexBufferData.size() * sizeof(float), ///< this!
             mVertexBufferData.data(),                 ///< prefer calling data() here!
             GL_STATIC_DRAW);
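The same sizeof() mistake appears wherever the vectors are uploaded, so presumably the color buffer needs the matching fix, both in the constructor and in Model::update() (the explicit bind is included here because update() may run while a different buffer is bound):
glBindBuffer(GL_ARRAY_BUFFER, CBO);
glBufferData(GL_ARRAY_BUFFER,
             mColorBufferData.size() * sizeof(GLfloat),
             mColorBufferData.data(),
             GL_STATIC_DRAW);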
I'm trying to implement a batch-rendering system using OpenGL, but the triangle I'm trying to render doesn't show up.
In the constructor of my Renderer class, I initialize the VBO and VAO and also load my shader program (this does work, so the error can't be there). The VBO is supposed to be capable of holding the maximum number of vertices I'll permit, which is defined in the header as 30000. The VAO contains the information about how the data stored in that buffer is laid out - in this case I use a struct called VertexData which only contains a 3D vector ('vertex'), but will also contain stuff like colors etc. later on. So I create the buffer with the size I already stated, don't fill in any content yet, and provide the layout using 'glVertexAttribPointer'. The '_vertexCount', as the name implies, counts the number of vertices currently stored inside that buffer for drawing purposes.
The constructor of my Renderer-class (note that every private member variable defined in the header file starts with an _ ):
Renderer::Renderer(std::string vertexShaderPath, std::string fragmentShaderPath) {
    _shaderProgram = ShaderLoader::createProgram(vertexShaderPath, fragmentShaderPath);
    glGenBuffers(1, &_vbo);
    glGenVertexArrays(1, &_vao);
    glBindVertexArray(_vao);
    glBindBuffer(GL_ARRAY_BUFFER, _vbo);
    glEnableVertexAttribArray(0);
    glBufferData(GL_ARRAY_BUFFER, RENDERER_MAX_VERTICES * sizeof(VertexData), NULL, GL_DYNAMIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) 0);
    glDisableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
    _vertexCount = 0;
}
Once the initialization is done, to render anything, the 'begin' procedure has to be called during the main loop. This maps the current buffer with write permissions to fill in the vertices that should be rendered in the current frame:
void Renderer::begin() {
    glBindBuffer(GL_ARRAY_BUFFER, _vbo);
    _buffer = (VertexData*) glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
}
After beginning, the 'submit' procedure can be called to add vertices and their corresponding data to the buffer. I add the data at the location in memory the buffer pointer currently points to, then advance the pointer and increase the vertex count:
void Renderer::submit(VertexData* data) {
    _buffer = data;
    _buffer++;
    _vertexCount++;
}
Finally, once all vertices are pushed to the buffer, the 'end' procedure will unmap the buffer to enable the actual rendering of the vertices, bind the VAO, use the shader program, render the provided vertices as triangles, unbind the VAO and reset the vertex count:
void Renderer::end() {
    glUnmapBuffer(GL_ARRAY_BUFFER);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glBindVertexArray(_vao);
    glUseProgram(_shaderProgram);
    glDrawArrays(GL_TRIANGLES, 0, _vertexCount);
    glBindVertexArray(0);
    _vertexCount = 0;
}
In the main loop I'm beginning the rendering, submitting three vertices to render a simple triangle and ending the rendering process. This is the most important part of that file:
Renderer renderer("../sdr/basicVertex.glsl", "../sdr/basicFragment.glsl");
Renderer::VertexData one;
one.vertex = glm::vec3(-1.0f, 1.0f, 0.0f);
Renderer::VertexData two;
two.vertex = glm::vec3( 1.0f, 1.0f, 0.0f);
Renderer::VertexData three;
three.vertex = glm::vec3( 0.0f,-1.0f, 0.0f);
...
while (running) {
    ...
    renderer.begin();
    renderer.submit(&one);
    renderer.submit(&two);
    renderer.submit(&three);
    renderer.end();
    SDL_GL_SwapWindow(mainWindow);
}
This may not be the most efficient way of doing this and I'm open to criticism, but my biggest problem is that nothing appears at all. The problem has to lie within those code snippets, but I can't find it - I'm a newbie when it comes to OpenGL, so help is greatly appreciated. If the full source code is required, I'll post it using pastebin, but I'm about 99% sure that I did something wrong in those code snippets.
Thank you very much!
You have the vertex attribute disabled when you make the draw call. This part of the setup code looks fine:
glBindVertexArray(_vao);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glEnableVertexAttribArray(0);
glBufferData(GL_ARRAY_BUFFER, RENDERER_MAX_VERTICES * sizeof(VertexData), NULL, GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) 0);
At this point, the attribute is set up and enabled. But this is followed by:
glDisableVertexAttribArray(0);
Now the attribute is disabled, and there's nothing else in the posted code that enables it again. So when you make the draw call, you don't have a vertex attribute that is actually enabled.
You can simply remove the glDisableVertexAttribArray() call to fix this.
Another problem in your code is the submit() method:
void Renderer::submit(VertexData* data) {
    _buffer = data;
    _buffer++;
    _vertexCount++;
}
Both _buffer and data are pointers to a VertexData structure. So the assignment:
_buffer = data;
is a pointer assignment. Instead of copying the data into the buffer, it modifies the buffer pointer. This should be:
*_buffer = *data;
This will copy the vertex data into the buffer, and leave the buffer pointer unchanged until you explicitly increment it in the next statement.
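With that change applied, submit() would look like this:
void Renderer::submit(VertexData* data) {
    *_buffer = *data;   // copy the vertex into the mapped buffer
    _buffer++;          // advance the write pointer
    _vertexCount++;
}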
I've been trying to use Vertex Buffer Objects to save vertex data on the GPU and reduce the overhead, but I cannot get it to work. The code is below.
From what I understand, you generate the buffer with glGenBuffers, bind it with glBindBuffer so it's ready to be used, then write data to it with glBufferData; after that it's done and can be unbound, ready for use later by simply binding it again.
However, the last part is where I'm having trouble: when I bind the buffer later, after creating it and loading data into it, and try to draw using it, I get lots of GL Error: Out of Memory.
I doubt that I am running out of memory for my simple mesh, so I must be doing something very wrong.
Thanks.
EDIT 1: I call glGetError after every frame, but since this is the only OpenGL I do in the entire program it shouldn't be a problem
//when loading the mesh we create the VBO
void createBuffer()
{
    GLuint buf;
    glGenBuffers(1, &buf);
    glBindBuffer(GL_ARRAY_BUFFER, buf);
    glBufferData(GL_ARRAY_BUFFER, vertNormalBuffer->size() * sizeof(GLfloat), (GLvoid*) bufferData, GL_STATIC_DRAW);
    //EDIT 1: forgot to show how I handle the buffer
    model->vertexNormalBuffer = &buf;
    //Unbinds it
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Fighter::doRedraw(GLuint shaderProgram)
{
    glm::mat4 transformationMatrix = getTransform();
    GLuint loc = glGetUniformLocation(shaderProgram, "modelviewMatrix");
    glUniformMatrix4fv(loc, 1, GL_FALSE, (GLfloat*) &transformationMatrix);
    glBindBuffer(GL_ARRAY_BUFFER, *model->vertexNormalBuffer);
    //If I uncomment the line below, everything works wonderfully, but isn't the purpose of a VBO to avoid uploading the same data again and again?
    //glBufferData(GL_ARRAY_BUFFER, model->vertAndNormalArraySize * sizeof(GLfloat), model->vertAndNormalArray, GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(2);
    renderChild(model, model);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Fighter::renderChild(ModelObject* model, ModelObject* parent)
{
    //Recursively render the mesh children
    for(int i = 0; i < model->nChildren; i++)
    {
        renderChild( dynamic_cast<ModelObject*>(model->children[i]), parent);
    }
    //Vertex and normal data are interleaved
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 8*sizeof(GLfloat), (void*)(model->vertexDataPosition*sizeof(GLfloat)));
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 8*sizeof(GLfloat), (void*)((model->vertexDataPosition + 4)*sizeof(GLfloat)));
    //Draws using two sets of indices
    glDrawElements(GL_QUADS, model->nQuads * 4, GL_UNSIGNED_INT, (void*) model->quadsIndices);
    glDrawElements(GL_TRIANGLES, model->nTriangles * 3, GL_UNSIGNED_INT, (void*) model->trisIndices);
}
This is your problem:
model->vertexNormalBuffer = &buf;
/* ... */
glBindBuffer(GL_ARRAY_BUFFER, *model->vertexNormalBuffer);
You're storing the address of your local buf variable rather than its contents; buf then goes out of scope when createBuffer returns and its memory is most likely overwritten with other data, so when you render later, you're using an uninitialized buffer. Just store the contents of buf in your vertexNormalBuffer field instead.
I'll admit I don't know why OpenGL thinks it proper to say that it's "out of memory" just because of that, but perhaps you're just invoking undefined behavior. It does explain, however, why it starts working when you re-fill the buffer with data after you rebind it, because you then implicitly initialize the buffer that you just bound.
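A sketch of that fix, assuming model->vertexNormalBuffer is changed from a GLuint* to a plain GLuint so it can hold the buffer name by value:
void createBuffer()
{
    GLuint buf;
    glGenBuffers(1, &buf);
    glBindBuffer(GL_ARRAY_BUFFER, buf);
    glBufferData(GL_ARRAY_BUFFER, vertNormalBuffer->size() * sizeof(GLfloat), (GLvoid*) bufferData, GL_STATIC_DRAW);
    model->vertexNormalBuffer = buf;   // store the buffer name itself, not &buf
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
The later bind then becomes glBindBuffer(GL_ARRAY_BUFFER, model->vertexNormalBuffer); with no dereference.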
I recently switched from immediate mode and have a new rendering process. There must be something I am not understanding. I think it has something to do with the indices.
Here is my diagram: Region->Mesh->Polygon Array->3 vertex indices, which reference the master list of vertices.
Here is my render code:
// Render the mesh
void WLD::render(GLuint* textures, long curRegion, CFrustum cfrustum)
{
    int num = 0;
    // Set up rendering states
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    // Set up my indices
    GLuint indices[3];
    // Cycle through the PVS
    while(num < regions[curRegion].visibility.size())
    {
        int i = regions[curRegion].visibility[num];
        // Make sure the region is not "dead"
        if(!regions[i].dead && regions[i].meshptr != NULL)
        {
            // Check to see if the mesh is in the frustum
            if(cfrustum.BoxInFrustum(regions[i].meshptr->min[0], regions[i].meshptr->min[2], regions[i].meshptr->min[1], regions[i].meshptr->max[0], regions[i].meshptr->max[2], regions[i].meshptr->max[1]))
            {
                // Cycle through every polygon in the mesh and render it
                for(int j = 0; j < regions[i].meshptr->polygonCount; j++)
                {
                    // Assign the index for the polygon to the index in the huge vertex array
                    // This I think, is redundant
                    indices[0] = regions[i].meshptr->poly[j].vertIndex[0];
                    indices[1] = regions[i].meshptr->poly[j].vertIndex[1];
                    indices[2] = regions[i].meshptr->poly[j].vertIndex[2];
                    // Enable texturing and bind the appropriate texture
                    glEnable(GL_TEXTURE_2D);
                    glBindTexture(GL_TEXTURE_2D, textures[regions[i].meshptr->poly[j].tex]);
                    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
                    glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
                    // Draw
                    glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, indices);
                }
            }
        }
        num++;
    }
    // End of rendering - disable states
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
Sorry if I left anything out. I really appreciate feedback and help with this; I would even consider paying someone who is good with OpenGL and optimization to help me.
There is no point in using array rendering if you're only rendering 3 vertices at a time. The idea is to send thousands through with a single call. That is, you render a single "Polygon Array" or "Mesh" with one call.
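For illustration, a rough sketch of what that could look like for the mesh above: gather all the polygon indices of a mesh into one array and issue a single glDrawElements per mesh instead of one per triangle. This reuses the Vertex, vertices and poly structures from the question and glosses over per-polygon texture switching, which you would handle by sorting or grouping polygons by texture first:
std::vector<GLuint> meshIndices;
meshIndices.reserve(regions[i].meshptr->polygonCount * 3);
for (int j = 0; j < regions[i].meshptr->polygonCount; j++) {
    meshIndices.push_back(regions[i].meshptr->poly[j].vertIndex[0]);
    meshIndices.push_back(regions[i].meshptr->poly[j].vertIndex[1]);
    meshIndices.push_back(regions[i].meshptr->poly[j].vertIndex[2]);
}
// Pointers can be set once per mesh (or once per frame) rather than once per polygon.
glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
// One draw call submits every triangle of the mesh.
glDrawElements(GL_TRIANGLES, (GLsizei)meshIndices.size(), GL_UNSIGNED_INT, meshIndices.data());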