I have a Model class that contains the buffers for the model to draw; its implementation looks like this:
Model::Model(std::vector<Vertex> vertices, std::vector<short> indices)
{
mVertices = vertices;
mIndices = indices;
mMatrix = glm::mat4(1.0f);
mIsTextured = false;
Initialize();
}
Model::~Model()
{
glDeleteBuffers(1, &mVertexBuffer);
glDeleteBuffers(1, &mIndiceBuffer);
}
void Model::Initialize()
{
glGenBuffers(1, &mVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*mVertices.size(), &mVertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &mIndiceBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndiceBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(short)*mIndices.size(), &mIndices[0], GL_STATIC_DRAW);
}
Now I've encountered a very weird problem with the destructor. I use this class like this:
Renderer *renderer = new Renderer();
Model m = parseSKNFromFile("model.skn");
m.ApplyTexture(textureID);
while (!glfwWindowShouldClose(window))
{
update();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
renderer->RenderModel(&m);
glfwSwapBuffers(window);
glfwPollEvents();
}
Using the Model class this way triggers a runtime "Access Violation Reading Location" error, but if I comment out the glDeleteBuffers calls inside the destructor, everything works fine.
It looks like those delete functions are somehow being called out of nowhere, and I can't figure out how or why.
Here's the RenderModel function too:
mShader.bind();
glm::mat4 MVP = mProjection * glm::lookAt(glm::vec3(0, 100, 200), glm::vec3(0, 100, 0), glm::vec3(0, 1, 0)) * model->GetMatrix();
glUniformMatrix4fv(mShader.getUniformLocation("MVP"), 1, GL_FALSE, &MVP[0][0]);
if (model->IsTextured())
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, model->GetTexture());
glUniform1i(model->GetTexture(), 0);
}
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glBindBuffer(GL_ARRAY_BUFFER, model->GetVertexBuffer());
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)0); //float position[3]
glVertexAttribPointer(1, 1, GL_INT, GL_FALSE, sizeof(Vertex), (void*)12); //char boneIndex[4]
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)16); //float weights[4]
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)32); //float normals[3]
glVertexAttribPointer(4, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)44); //float textureCords[2]
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, model->GetIndiceBuffer());
glDrawElements(GL_TRIANGLES, model->GetIndiceSize(), GL_UNSIGNED_SHORT, (void*)0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
An assumption: you do not have (and cannot have) a proper copy constructor/assignment operator.
Hence:
private:
Model(const Model&); // No copy
Model& operator = (const Model&); // No copy
Since you are creating m as an automatic (inline) object, it will be destroyed automatically when it goes out of scope, and that scope ends at the end of the function where you instantiate the Renderer. Any copies made along the way (for example, when parseSKNFromFile returns the Model by value) also run the destructor, so glDeleteBuffers gets called on the same buffer IDs that the surviving copy still uses.
I would suggest changing the Model to a heap-allocated object and destroying it manually before you free the Renderer and before you destroy the window.
Model* m = parseSKNFromFile( "model.skn" );
There are some other suggestions I would make, such as taking the arrays of indices and vertices as const references in the Model constructor. I also don't think it is a great idea to call GL commands from the object's destructor; it would be better to move that functionality into separate Model member functions, and likewise not to initialize the OpenGL buffers in the constructor. The reason is that you may want to separate object allocation from renderer initialization, so that models can be loaded on a separate thread from the rendering thread.
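A rough sketch of what that separation could look like, keeping the member names from the question (CreateBuffers/DestroyBuffers are illustrative names, and deleting the copy operations is one way to turn accidental copies into compile-time errors rather than runtime double deletes):
// Sketch only: assumes the Vertex type, GL headers, and glm from the question.
class Model
{
public:
    Model(const std::vector<Vertex>& vertices, const std::vector<short>& indices)
        : mVertices(vertices), mIndices(indices), mMatrix(1.0f), mIsTextured(false) {}
    Model(const Model&) = delete;            // copies would delete the shared buffer IDs twice
    Model& operator=(const Model&) = delete;
    void CreateBuffers()                     // call once a GL context is current
    {
        glGenBuffers(1, &mVertexBuffer);
        glBindBuffer(GL_ARRAY_BUFFER, mVertexBuffer);
        glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * mVertices.size(), mVertices.data(), GL_STATIC_DRAW);
        glGenBuffers(1, &mIndiceBuffer);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndiceBuffer);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(short) * mIndices.size(), mIndices.data(), GL_STATIC_DRAW);
    }
    void DestroyBuffers()                    // call explicitly, before the context is destroyed
    {
        glDeleteBuffers(1, &mVertexBuffer);
        glDeleteBuffers(1, &mIndiceBuffer);
    }
private:
    std::vector<Vertex> mVertices;
    std::vector<short>  mIndices;
    glm::mat4           mMatrix;
    bool                mIsTextured;
    GLuint              mVertexBuffer = 0;
    GLuint              mIndiceBuffer = 0;
};
With the copy operations deleted, any accidental copy becomes a compile-time error instead of a silent double delete at runtime.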
Related
Is there a way to share data between shaders?
Do I just create a VAO and then switch shader programs as needed?
I guess the shaders would probably need to have the same attribute layout then.
Or do I create VBOs and, per object-shader pair, create a VAO that binds the data to the shader variables?
This should upload most data to OpenGL only once, and I could reuse quite a lot.
I tried the second approach, but I'm not sure whether it has actually reduced memory usage. The relevant code pieces are below; I hope it's enough.
Or is there some other, even better way?
The idea is to reduce memory usage and generally speed up the process.
void Mesh::bind()
{
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, verticesCount * sizeof(GLfloat), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, nbo);
glBufferData(GL_ARRAY_BUFFER, normalCount * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, uvbo);
glBufferData(GL_ARRAY_BUFFER, uvCount * sizeof(GLfloat), UVs, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indicesCount * sizeof(GLuint), indices, GL_STATIC_DRAW);
}
void MeshRenderer::init()
{
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, mesh->getVertexBufferId());
GLuint posId = shaderProgram->getAttribute("LVertexPos2D");
glVertexAttribPointer(posId, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), NULL);
glEnableVertexAttribArray(posId);
glBindBuffer(GL_ARRAY_BUFFER, mesh->getNormalBufferId());
GLuint normId = shaderProgram->getAttribute("LVertexNorm");
glVertexAttribPointer(normId, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), NULL);
glEnableVertexAttribArray(normId);
glBindBuffer(GL_ARRAY_BUFFER, mesh->getUVBufferId());
GLuint uvId = shaderProgram->getAttribute("uv");
glVertexAttribPointer(uvId, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), NULL);
glEnableVertexAttribArray(uvId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->getIndexBufferId());
glBindVertexArray(0);
}
GameObject* GameObject::createCustom(Mesh* mesh)
{
Texture* texture = new Texture();
texture->loadFromFile("UV.png", NULL);
GenericShaderProgram* shader = new GenericShaderProgram("basic.vs", "basic.fs");
shader->loadProgram();
return new GameObject(texture, shader, mesh);
}
GameObject::GameObject(Texture* texture, ShaderProgram* program, Mesh* mesh)
{
this->texture = texture;
dir = new Vector3(1, 0, 0);
meshRenderer = new MeshRenderer(mesh, program);
transform.setRotation(1.57, 1, 0, 0);
transform.translate(Vector3(0, -2, 0));
transform.setScale(Vector3(1, 1, 1));
}
//In main.cpp
Mesh* mesh = new Mesh();
mesh->cubePrimitive();
mesh->init();
object = GameObject::createCustom(mesh);
object2 = GameObject::createCustom(mesh);
object3 = GameObject::createCustom(mesh);
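For reference, a draw call under this second approach would then look roughly like this (a sketch only; MeshRenderer::render, the shaderProgram->bind() helper, and mesh->getIndicesCount() are assumptions, not part of the code above):
// Sketch: vao is the member set up in MeshRenderer::init() above;
// bind() is assumed to wrap glUseProgram, and getIndicesCount() is an
// assumed accessor for the mesh's indicesCount.
void MeshRenderer::render()
{
    shaderProgram->bind();
    glBindVertexArray(vao);   // restores the attribute and element-buffer bindings recorded in init()
    glDrawElements(GL_TRIANGLES, mesh->getIndicesCount(), GL_UNSIGNED_INT, nullptr);
    glBindVertexArray(0);
}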
I am attempting to take some OpenGL code that draws objects from a vertex array and move it into a class. However, the code currently only runs when I have it in my main.cpp file. I call init() from my main() function, before heading into my draw loop.
void init() {
GLuint containerVAO, VBO;
glGenVertexArrays(1, &containerVAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindVertexArray(containerVAO);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Normal attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat),(GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0);
}
The relevant code in my draw loop:
glUseProgram(noTextureShaderID);
glBindVertexArray(containerVAO);
///many different uniforms added here
glDrawArrays(GL_TRIANGLES, 0, 36);
This creates a cube no problem.
Now, when I replace the code inside my init() function (which initialises all objects, not just this one), I change it to this:
void init() {
square.init(noTextureShaderID, vertices, NULL, 36);
//Square is a global variable within my main.cpp file
}
And then I use this function:
void Mesh::init(const GLuint& shaderid, GLfloat vertices[], const char* tex_file, int num_vertices)
{
GLuint VBO;
vao = NULL; //This is a variable within the Mesh class
g_point_count = num_vertices;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindVertexArray(vao);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Normal attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0);
}
Then, in my draw function I call this instead:
glUseProgram(noTextureShaderID);
glBindVertexArray(square.vao);
///many different uniforms added here
glDrawArrays(GL_TRIANGLES, 0, g_point_count);
But even though both programs seem to have the same code, only the first version generates a cube. What am I missing in this regard?
Your code is not identical in both cases, and this has nothing to do with OpenGL:
void Mesh::init(const GLuint& shaderid, GLfloat vertices[], const char* tex_file, int num_vertices)
{
// ...
glBufferData(..., sizeof(vertices), ...);
}
vertices decays to a plain pointer here (array parameters are passed as pointers), so the inner function never sees the array itself, and sizeof(vertices) is identical to sizeof(GLfloat*), which is typically 4 or 8 on today's machines. Hence, your buffer contains just the first one or two floats.
You either have to explicitly provide the array size as an additional parameter, or use (a reference to) a higher-level object like std::vector, which manages the array internally and allows you to query its size.
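For instance, a version of Mesh::init taking a std::vector could look like this (a sketch under the same member names as above; the rest of the setup is unchanged):
// Sketch: a std::vector carries its own element count, so the byte size
// passed to glBufferData is the real data size, not sizeof(GLfloat*).
void Mesh::init(const GLuint& shaderid, const std::vector<GLfloat>& vertices,
                const char* tex_file, int num_vertices)
{
    GLuint VBO;
    g_point_count = num_vertices;
    glGenVertexArrays(1, &vao);
    glGenBuffers(1, &VBO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER,
                 vertices.size() * sizeof(GLfloat),   // actual byte size
                 vertices.data(), GL_STATIC_DRAW);
    glBindVertexArray(vao);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
    glEnableVertexAttribArray(1);
    glBindVertexArray(0);
}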
I am trying to draw two different meshes using OpenGL 4.1. At any given time only one of the meshes will be drawn, and my goal is to switch between them. I am storing the vertex and index data for the meshes in an object, which below is called g_obj. The data for the first mesh is accessed via g_obj->vertices_ and g_obj->indices_, and the data for the second mesh via g_obj->vertices_linear_ and g_obj->indices_linear_.
My horrible idea is to have distinct VBOs and VAOs for each mesh. Then, on the draw call, I would simply bind the appropriate VAO and do glDraw*. However, my code segfaults on the first draw call (see below).
enum VAO_ID
{
VAO,
VAO_LINEAR,
NUM_VAOS
};
enum BUFFER_ID
{
VERTEX_BUFFER,
VERTEX_BUFFER_LINEAR,
INDEX_BUFFER,
INDEX_BUFFER_LINEAR,
NUM_BUFFERS
};
enum ATTRIBUTE_ID
{
VERTEX_POSITION,
VERTEX_COLOR
};
GLuint g_vaos[NUM_VAOS];
GLuint g_buffers[NUM_BUFFERS];
Further down, I create the vertex/index buffers and vertex array objects:
void bufferGeometry()
{
// create vertex buffers and vertex array object
glGenVertexArrays(NUM_VAOS, g_vaos);
glGenBuffers(NUM_BUFFERS, g_buffers);
// rotational grating
glBindBuffer(GL_ARRAY_BUFFER, g_buffers[VERTEX_BUFFER]);
glBufferData(GL_ARRAY_BUFFER,
g_obj->num_vertices_ * sizeof(vertex2D),
g_obj->vertices_,
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_buffers[INDEX_BUFFER]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
g_obj->num_indices_ * sizeof(GLushort),
g_obj->indices_,
GL_STATIC_DRAW);
glBindVertexArray(g_vaos[VAO]);
glEnableVertexAttribArray(VERTEX_POSITION);
glVertexAttribPointer(VERTEX_POSITION, 2, GL_FLOAT, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, position));
glEnableVertexAttribArray(VERTEX_COLOR);
glVertexAttribPointer(VERTEX_COLOR, 4, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, color));
glBindVertexArray(0);
// linear grating
glBindBuffer(GL_ARRAY_BUFFER, g_buffers[VERTEX_BUFFER_LINEAR]);
glBufferData(GL_ARRAY_BUFFER,
g_obj->num_vertices_linear_ * sizeof(vertex2D),
g_obj->vertices_linear_,
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_buffers[INDEX_BUFFER_LINEAR]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
g_obj->num_indices_linear_ * sizeof(GLushort),
g_obj->indices_linear_,
GL_STATIC_DRAW);
glBindVertexArray(g_vaos[VAO_LINEAR]);
glEnableVertexAttribArray(VERTEX_POSITION);
glVertexAttribPointer(VERTEX_POSITION, 2, GL_FLOAT, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, position));
glEnableVertexAttribArray(VERTEX_COLOR);
glVertexAttribPointer(VERTEX_COLOR, 4, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, color));
glBindVertexArray(0);
}
Then, here is the draw call:
glBindVertexArray(g_vaos[VAO]);
g_obj = &g_grating_uno;
glUseProgram(g_program);
glUniform3fv(g_trans_scale_location, 1, g_obj->trans_scale_);
glDrawElements(GL_TRIANGLES, g_obj->num_indices_,
GL_UNSIGNED_SHORT, (const GLvoid *)0);
Which segfaults at the glDrawElements line.
I know that the general "performant" strategy is to pack the data from both meshes into single VBOs, and since the data format is the same between the two meshes, I would then only need one VAO. But since I am lazy, I was wondering if anyone can see why my existing strategy does not work, or whether you think the problem lies elsewhere. My guess is that it has to do with my bufferGeometry() function. I am a little weak in my understanding of how VAOs are linked to specific VBOs, if they are at all. Thanks, and sorry for being dumb!
You should repeat the call that binds the element buffer in between the glBindVertexArray calls:
glBindVertexArray(g_vaos[VAO]);
glEnableVertexAttribArray(VERTEX_POSITION);
glVertexAttribPointer(VERTEX_POSITION, 2, GL_FLOAT, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, position));
glEnableVertexAttribArray(VERTEX_COLOR);
glVertexAttribPointer(VERTEX_COLOR, 4, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, color));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_buffers[INDEX_BUFFER]);
glBindVertexArray(0);
The element array buffer binding is part of the VAO state and gets reset when a new VAO is bound, so binding the index buffer before glBindVertexArray means the VAO never records it.
Ah, fixed it (I think) by binding the VAOs before binding the buffers:
void bufferGeometry()
{
// create vertex buffers and vertex array object
glGenVertexArrays(NUM_VAOS, g_vaos);
glGenBuffers(NUM_BUFFERS, g_buffers);
// rotational grating
glBindVertexArray(g_vaos[VAO]);
glBindBuffer(GL_ARRAY_BUFFER, g_buffers[VERTEX_BUFFER]);
glBufferData(GL_ARRAY_BUFFER,
g_obj->num_vertices_ * sizeof(vertex2D),
g_obj->vertices_,
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_buffers[INDEX_BUFFER]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
g_obj->num_indices_ * sizeof(GLushort),
g_obj->indices_,
GL_STATIC_DRAW);
glEnableVertexAttribArray(VERTEX_POSITION);
glVertexAttribPointer(VERTEX_POSITION, 2, GL_FLOAT, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, position));
glEnableVertexAttribArray(VERTEX_COLOR);
glVertexAttribPointer(VERTEX_COLOR, 4, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, color));
glBindVertexArray(0);
// linear grating
glBindVertexArray(g_vaos[VAO_LINEAR]);
glBindBuffer(GL_ARRAY_BUFFER, g_buffers[VERTEX_BUFFER_LINEAR]);
glBufferData(GL_ARRAY_BUFFER,
g_obj->num_vertices_linear_ * sizeof(vertex2D),
g_obj->vertices_linear_,
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_buffers[INDEX_BUFFER_LINEAR]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
g_obj->num_indices_linear_ * sizeof(GLushort),
g_obj->indices_linear_,
GL_STATIC_DRAW);
glEnableVertexAttribArray(VERTEX_POSITION);
glVertexAttribPointer(VERTEX_POSITION, 2, GL_FLOAT, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, position));
glEnableVertexAttribArray(VERTEX_COLOR);
glVertexAttribPointer(VERTEX_COLOR, 4, GL_UNSIGNED_BYTE, GL_FALSE,
sizeof(vertex2D),
(const GLvoid *)offsetof(vertex2D, color));
glBindVertexArray(0);
}
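With the VAOs built this way, switching between the meshes at draw time should only require binding the other VAO before the draw (a sketch based on the draw call above; which grating object g_obj points to in the linear case is an assumption):
// Sketch: drawing the linear grating instead of the rotational one.
glUseProgram(g_program);
glBindVertexArray(g_vaos[VAO_LINEAR]);   // restores that VAO's attribute and index-buffer state
glUniform3fv(g_trans_scale_location, 1, g_obj->trans_scale_);
glDrawElements(GL_TRIANGLES, g_obj->num_indices_linear_,
               GL_UNSIGNED_SHORT, (const GLvoid *)0);
glBindVertexArray(0);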
I'm attempting to copy two vertex buffer objects from one Mesh object to another, through the copy assignment operator. Initially, my Vertex Array Object and the Buffers are initialized as follows:
void Mesh::construct(Vertex* vertices, unsigned int nVerts) {
vertexCount = nVerts;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
std::vector<glm::vec3> positions;
std::vector<glm::vec2> texCoords;
positions.reserve(nVerts);
texCoords.reserve(nVerts);
for (unsigned int i = 0; i < nVerts; i++) {
positions.push_back(vertices[i].getPosition());
texCoords.push_back(vertices[i].getTexCoord());
}
for (int i = 0; i < NUM_BUFFERS; i++) {
glGenBuffers(1, &vab[i]);
}
glBindBuffer(GL_ARRAY_BUFFER, vab[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (positions[0]), &positions[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vab[TEXCOORD_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (texCoords[0]), &texCoords[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
This works fine when instantiating a Mesh object, and calling:
void Mesh::render() {
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, vertexCount);
glBindVertexArray(0);
}
However, when I try to copy the mesh into another and render it, I get a segmentation fault on the glDrawArrays(GL_TRIANGLES, 0, vertexCount); line. This is my copy assignment operator:
Mesh& Mesh::operator=(const Mesh& param) {
if (this == &param) {
return *this;
} else {
GLint size = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
for (int i = 0; i < NUM_BUFFERS; i++) {
glGenBuffers(1, &vab[i]);
}
// Vertices
// Bind Buffers
glBindBuffer(GL_COPY_READ_BUFFER, param.vab[POSITION_VB]);
glGetBufferParameteriv(GL_COPY_READ_BUFFER, GL_BUFFER_SIZE, &size);
glBindBuffer(GL_COPY_WRITE_BUFFER, vab[POSITION_VB]);
glBufferData(GL_COPY_WRITE_BUFFER, size, nullptr, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Copy Data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, size);
// Texture Coords
// Bind Buffers
glBindBuffer(GL_COPY_READ_BUFFER, param.vab[TEXCOORD_VB]);
glGetBufferParameteriv(GL_COPY_READ_BUFFER, GL_BUFFER_SIZE, &size);
glBindBuffer(GL_COPY_WRITE_BUFFER, vab[TEXCOORD_VB]);
glBufferData(GL_COPY_WRITE_BUFFER, size, nullptr, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
// Copy Data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, size);
// Unbind buffers
glBindVertexArray(0);
this->vertexCount = param.vertexCount;
return *this;
}
}
Can anyone see any problems with this? I've checked that the size returned by glGetBufferParameteriv(GL_COPY_READ_BUFFER, GL_BUFFER_SIZE, &size); is correct in both cases (for both the position and texture coordinate buffers). I've also checked glGetError() after both calls to glCopyBufferSubData; both return 0. I'm not sure what to try next. My error may be elsewhere, but this is the first time I have tried copying buffers, so I want to check that I'm doing that part correctly. If it helps, my Mesh destructor is:
Mesh::~Mesh() {
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(NUM_BUFFERS, vab);
}
Through a debugger I can see that this is, of course, being called once, after the line:
this->mesh = Mesh(*texture);
Which is simply constructing a mesh, then assigning it (the texture just sizes the quad to the size of the texture, and calls the constructor shown at the start with the correct vertex positions).
You copy the arrays, but you never bind the copied versions to GL_ARRAY_BUFFER, meaning your glVertexAttribPointer calls are pointing to nothing.
I'm also a little wary of this code:
glBindBuffer(GL_ARRAY_BUFFER, vab[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (positions[0]), &positions[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vab[TEXCOORD_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (texCoords[0]), &texCoords[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
It seems like the position vertex pointer will be referring to the texture data, because that's the currently bound vertex buffer when you call glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
I'd think you'd want the order to be like so:
glBindBuffer(GL_ARRAY_BUFFER, vab[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (positions[0]), &positions[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vab[TEXCOORD_VB]);
glBufferData(GL_ARRAY_BUFFER, nVerts * sizeof (texCoords[0]), &texCoords[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
But I'm not certain. I usually interleave all my vertex attributes into a single vertex buffer.
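In other words, while the new VAO is bound, the destination buffer also needs to be bound to GL_ARRAY_BUFFER at the moment glVertexAttribPointer is called. A sketch of the position-buffer part of the assignment operator under the question's member names (a buffer object may legally be bound to GL_ARRAY_BUFFER and used as the copy-write target at the same time):
// Sketch: bind the destination buffer as the array buffer so the attribute
// pointer recorded in the new VAO refers to the copied data.
glBindBuffer(GL_COPY_READ_BUFFER, param.vab[POSITION_VB]);
glGetBufferParameteriv(GL_COPY_READ_BUFFER, GL_BUFFER_SIZE, &size);
glBindBuffer(GL_ARRAY_BUFFER, vab[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, size, nullptr, GL_STATIC_DRAW);
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_ARRAY_BUFFER, 0, 0, size);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
The texture-coordinate buffer would be handled the same way with attribute index 1.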
I have some code that loops through a set of objects and renders instances of those objects. The list of objects to be rendered is stored as a std::map<MeshResource*, std::vector<MeshRenderer*>>, where an object of class MeshResource contains the vertices and indices with the actual data, and an object of class MeshRenderer defines the point in space the mesh is to be rendered at.
My rendering code is as follows:
glDisable(GL_BLEND);
glEnable(GL_CULL_FACE);
glDepthMask(GL_TRUE);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
for (std::map<MeshResource*, std::vector<MeshRenderer*> >::iterator it = renderables.begin(); it != renderables.end(); it++)
{
it->first->setupBeforeRendering();
cout << "<";
for (unsigned long i =0; i < it->second.size(); i++)
{
//Pass in an identity matrix to the vertex shader- used here only for debugging purposes; the real code correctly inputs any matrix.
uniformizeModelMatrix(Matrix4::IDENTITY);
/**
* StartHere fix rendering problem.
* Ruled out:
* Vertex buffers correctly.
* Index buffers correctly.
* Matrices correct?
*/
it->first->render();
}
it->first->cleanupAfterRendering();
}
geometryPassShader->disable();
glDepthMask(GL_FALSE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
The function in MeshResource that handles setting up the uniforms is as follows:
void MeshResource::setupBeforeRendering()
{
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0); // Vertex position
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 12); // Vertex normal
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 24); // UV layer 0
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 32); // Vertex color
glVertexAttribPointer(4, 1, GL_UNSIGNED_SHORT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 44); //Material index
}
The code that renders the object is this:
void MeshResource::render()
{
glDrawElements(GL_TRIANGLES, geometry->numIndices, GL_UNSIGNED_SHORT, 0);
}
And the code that cleans up is this:
void MeshResource::cleanupAfterRendering()
{
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
}
The end result of this is that I get a black screen, although the part of my rendering pipeline that runs after this code (essentially just drawing axes and lines on the screen) works properly, so I'm fairly sure it's not an issue with passing the uniforms.
void MeshResource::render()
{
setupBeforeRendering();
glDrawElements(GL_TRIANGLES, geometry->numIndices, GL_UNSIGNED_SHORT, 0);
}
The program then works as desired. I don't want to have to do this, though, as my aim is to set up the vertex, material, etc. data once per object type and then render each instance, updating only the transformation information.
The uniformizeModelMatrix works as follows:
void RenderManager::uniformizeModelMatrix(Matrix4 matrix)
{
glBindBuffer(GL_UNIFORM_BUFFER, globalMatrixUBOID);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(Matrix4), matrix.ptr());
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
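As a general note on the stated aim: the per-object setup that setupBeforeRendering() performs (the attribute pointers plus the element buffer binding) is exactly the state a vertex array object can record once at load time and restore with a single bind. A minimal sketch, assuming a new vaoID member on MeshResource (not part of the code above):
// Sketch only: vaoID is an assumed new GLuint member of MeshResource.
void MeshResource::createVAO()
{
    glGenVertexArrays(1, &vaoID);
    glBindVertexArray(vaoID);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iboID);   // recorded in the VAO
    glBindBuffer(GL_ARRAY_BUFFER, vboID);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 12);
    // ... remaining attributes exactly as in setupBeforeRendering() ...
    glBindVertexArray(0);
}
// setupBeforeRendering() then reduces to a single bind per object type:
void MeshResource::setupBeforeRendering()
{
    glBindVertexArray(vaoID);
}
Note that the enabled-attribute flags are also VAO state, so cleanupAfterRendering() would then just call glBindVertexArray(0) instead of disabling each attribute.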