Drawing multiple objects with textures doesn't work - C++

I'm working on a 3D model loader. It loads my own format, for which I wrote an exporter for Blender that works well. The model format can handle multiple meshes (objects) in one model, and each mesh can have a different texture. I had already written a loader that works well, but it doesn't use shaders.
I switched to shaders, but something is going wrong: the textures don't appear correctly. Here is my drawing function; I'm sure the mistake is somewhere in it:
void CModel::draw(glm::mat4 &MVP)
{
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
for(unsigned int i=0; i<m_numObjects; i++)
{
glUseProgram(m_programID);
glUniformMatrix4fv(m_matrixUniform, 1, GL_FALSE, &MVP[0][0]);
glBindTexture(GL_TEXTURE_2D, m_textureIDs[i]);
glUniform1i(m_textureUniform, 0);
glBindBuffer(GL_ARRAY_BUFFER, m_bufferIDs[i]);
// vertices
glEnableVertexAttribArray(0);
glVertexAttribPointer(
0, // must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
sizeof(float)*8, // stride
(void*)0 // array buffer offset
);
// UVs
glEnableVertexAttribArray(1);
glVertexAttribPointer(
1,
2,
GL_FLOAT,
GL_FALSE,
sizeof(float)*8,
(void*)6
);
glDrawArrays(GL_TRIANGLES, 0, m_numElements[i]);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
}
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisable(GL_TEXTURE_2D);
}
The vertex and fragment shaders are also working well.
The previous version (without shaders), which worked well, was this:
void CModel::draw()
{
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glClientActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
for(unsigned int i=0; i<m_numObjects; i++)
{
glBindTexture(GL_TEXTURE_2D, m_textureIDs[i]);
glBindBuffer(GL_ARRAY_BUFFER, m_bufferIDs[i]);
glTexCoordPointer(2, GL_FLOAT, sizeof(float)*8, (float*)(sizeof(float)*6));
glNormalPointer( GL_FLOAT, sizeof(float)*8, (float*)(sizeof(float)*3));
glVertexPointer( 3, GL_FLOAT, sizeof(float)*8, 0);
glDrawArrays(GL_TRIANGLES, 0, m_numElements[i]);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisable(GL_TEXTURE_2D);
}
Here is the entire class:
#include "global.h"
#include "CModel.h"
#include "CModelLoader.h"
#include "CShaderLoader.h"
CModel::CModel(){}
CModel::~CModel()
{
// TODO free memory
}
/** @brief Load mdm file
*
* @param fileName mdm model file name we want to load and draw
*/
void CModel::init(char *fileName)
{
CModelLoader loader;
CShaderLoader shaders;
loader.loadData(fileName);
loader.createDataArray( m_dataArray,
m_dataArraySizes,
m_numObjects,
m_numElements,
m_textureIDs );
glGenVertexArrays(1, &m_vertexArrayID);
glBindVertexArray(m_vertexArrayID);
m_programID = shaders.loadShaders("vertexshader.vert", "fragmentshader.frag");
m_bufferIDs = new unsigned int[m_numObjects];
glGenBuffers(m_numObjects, m_bufferIDs);
for(unsigned int i=0; i<m_numObjects; i++)
{
glBindBuffer(GL_ARRAY_BUFFER, m_bufferIDs[i]);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*m_dataArraySizes[i], m_dataArray[i], GL_STATIC_DRAW);
}
}
/** @brief Drawing the mdm model
*
*/
void CModel::draw(glm::mat4 &MVP)
{
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0);
for(unsigned int i=0; i<m_numObjects; i++)
{
glUseProgram(m_programID);
unsigned int matrixUniform = glGetUniformLocation(m_programID, "MVP");
unsigned int textureUniform = glGetUniformLocation(m_programID, "myTextureSampler");
glUniformMatrix4fv(matrixUniform, 1, GL_FALSE, &MVP[0][0]);
glUniform1i(textureUniform, 0);
glBindTexture(GL_TEXTURE_2D, m_textureIDs[i]);
glBindBuffer(GL_ARRAY_BUFFER, m_bufferIDs[i]);
// vertices
glEnableVertexAttribArray(0);
glVertexAttribPointer(
0, // must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
sizeof(float)*8, // stride
(void*)0 // array buffer offset
);
// UVs
glEnableVertexAttribArray(1);
glVertexAttribPointer(
1,
2,
GL_FLOAT,
GL_FALSE,
sizeof(float)*8,
(void*)6
);
glDrawArrays(GL_TRIANGLES, 0, m_numElements[i]);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
}
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisable(GL_TEXTURE_2D);
}

Finally I solved it. The problem was that I used one array for vertices, normals, and texture coordinates instead of storing them in separate arrays and binding them separately.
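For reference, a minimal sketch of that layout inside the draw loop, assuming each mesh now has separate, tightly packed VBOs for positions and UVs (the member names m_posBufferIDs and m_uvBufferIDs are illustrative, not taken from the class above). It is also worth noting that the last argument of glVertexAttribPointer is a byte offset, so the interleaved version would have needed (void*)(sizeof(float)*6) for the UVs rather than (void*)6:
glUseProgram(m_programID);
glUniformMatrix4fv(m_matrixUniform, 1, GL_FALSE, &MVP[0][0]);
glUniform1i(m_textureUniform, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_textureIDs[i]);
// positions: 3 floats per vertex, tightly packed in their own buffer
glBindBuffer(GL_ARRAY_BUFFER, m_posBufferIDs[i]);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// UVs: 2 floats per vertex, tightly packed in their own buffer
glBindBuffer(GL_ARRAY_BUFFER, m_uvBufferIDs[i]);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawArrays(GL_TRIANGLES, 0, m_numElements[i]);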

Related

Why is glReadPixels only working in certain cases?

If I try to read the screen with glReadPixels and then draw the same thing again using glDrawPixels, it works, but only if I draw with anything other than glDrawArrays and glDrawElements (gluSphere/gluCylinder work just fine). I'm trying to draw an object to the screen and save the pixels in an array.
I tried reading/writing to the front/back buffers and reading after I swap buffers, all to no avail.
Here is the code that I use to draw the object (please note that I do not use any kind of buffers outside of this scope):
void CCssample5View::DrawCylinder(Model obj)
{
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
Shader castingShader("casting.vs", "casting.fs");
Shader lightShader("light.vs", "light.fs");
GLuint MatrixID = glGetUniformLocation(castingShader.ID, "MVP");
GLuint ViewMatrixID = glGetUniformLocation(castingShader.ID, "V");
GLuint ModelMatrixID = glGetUniformLocation(castingShader.ID, "M");
GLuint textur = loadBMP_custom("flower.bmp");
GLuint TextureID = glGetUniformLocation(castingShader.ID , "myTextureSampler");
indexVBO(obj.vertices, obj.uvs, obj.normals, obj.indices, obj.indexed_vertices, obj.indexed_uvs, obj.indexed_normals);
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, obj.indexed_vertices.size() * sizeof(glm::vec3), &obj.indexed_vertices[0], GL_STATIC_DRAW);
GLuint uvbuffer;
glGenBuffers(1, &uvbuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glBufferData(GL_ARRAY_BUFFER, obj.indexed_uvs.size() * sizeof(glm::vec2), &obj.indexed_uvs[0], GL_STATIC_DRAW);
GLuint normalbuffer;
glGenBuffers(1, &normalbuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glBufferData(GL_ARRAY_BUFFER, obj.indexed_normals.size() * sizeof(glm::vec3), &obj.indexed_normals[0], GL_STATIC_DRAW);
GLuint elementbuffer;
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, obj.indices.size() * sizeof(unsigned short), &obj.indices[0], GL_STATIC_DRAW);
castingShader.use();
GLuint LightID = glGetUniformLocation(castingShader.ID, "LightPosition_worldspace");
computeMatricesFromInputs();
GLfloat gProjectionMatrix[16];
glGetFloatv(GL_PROJECTION_MATRIX, gProjectionMatrix);
glm::mat4 ProjectionMatrix = glm::make_mat4(gProjectionMatrix);// = glm::mat4(gProjectionMatrix);
GLfloat gViewMatrix[16];
glGetFloatv(GL_MODELVIEW_MATRIX, gViewMatrix);
glm::mat4 ViewMatrix = glm::make_mat4(gViewMatrix);// = glm::mat4(gProjectionMatrix);
glm::vec3 lightPos = glm::vec3(4, 4, 4);
glUniform3f(LightID, lightPos.x, lightPos.y, lightPos.z);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glm::mat4 ModelMatrix1 = glm::mat4(1.0);
glm::mat4 MVP1 = ProjectionMatrix * ViewMatrix * ModelMatrix1;
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textur);
glUniform1i(TextureID, 0);
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP1[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &ModelMatrix1[0][0]);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer : UVs
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glVertexAttribPointer(
1, // attribute
2, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 3rd attribute buffer : normals
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
2, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
// Draw the triangles !
glDrawElements(
GL_TRIANGLES, // mode
obj.indices.size(), // count
GL_UNSIGNED_SHORT, // type
(void*)0 // element array buffer offset
);
//glFlush(); glFinish(); readScreen(screen, GL_RGB, true);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDeleteBuffers(1, &vertexbuffer);
glDeleteBuffers(1, &uvbuffer);
glDeleteBuffers(1, &normalbuffer);
glDeleteBuffers(1, &elementbuffer);
glDeleteProgram(castingShader.ID);
glDeleteTextures(1, &textur);
glDeleteVertexArrays(1, &VertexArrayID);
}
These are my read and draw screen functions:
void CCssample5View::readScreen(GLubyte* screen, GLenum format, bool back)
{
check();
if (format == GL_RGB) {
check();
if (back) {
glReadBuffer(GL_BACK);
check();
}
else {
glReadBuffer(GL_FRONT);
check();
}
}
//glRasterPos2i(00, 00);
//glDisable(GL_DEPTH_TEST);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, w, h, format, GL_UNSIGNED_BYTE, screen);
glFlush();
glFinish();
bool found = false;
for (size_t u = 0; u <= w * h * 4; u++) {
if (screen[u] != 0) {
found = true;
break;
}
}
assert(found);
check();
}
void CCssample5View::drawScreen(GLubyte *screen, GLenum format, bool back)
{
glClear(GL_COLOR_BUFFER_BIT);
//glGetIntegerv(GL_CURRENT_RASTER_POSITION, rasterpos);
if (back) {
glDrawBuffer(GL_BACK);
}
else {
glDrawBuffer(GL_FRONT);
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glDrawPixels(w, h, format, GL_UNSIGNED_BYTE, screen);
check();
glFlush();
glFinish();
}
I can't seem to figure out what's wrong: the drawing is perfect, but reading the screen isn't working.
I figured it out. All I had to do was add glUseProgram(0) at the very end of the DrawCylinder function. After more than three weeks of looking into this.
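In other words, the tail of DrawCylinder ends up looking roughly like this (the same cleanup as above, with the one added call):
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glUseProgram(0); // release the shader so later glDrawPixels / fixed-function drawing is not run through it
glDeleteBuffers(1, &vertexbuffer);
glDeleteBuffers(1, &uvbuffer);
glDeleteBuffers(1, &normalbuffer);
glDeleteBuffers(1, &elementbuffer);
glDeleteProgram(castingShader.ID);
glDeleteTextures(1, &textur);
glDeleteVertexArrays(1, &VertexArrayID);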

Can't make OpenGL Buffer Array work - glVertex works

I have an Eigen::MatrixXf with vertices (N*3) and faces (N*3) in the variables mVTemp2 and mF.
If I draw my vertices directly, I get the correct output with the code below:
while(1){
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glEnable(GL_LIGHTING);
glShadeModel( GL_SMOOTH );
glEnable( GL_TEXTURE_2D );
glViewport( 0, 0, (float)renderWidth/1, (float)renderHeight/1. );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluPerspective( 60, (float)renderWidth/(float)renderHeight, 0.1, 10000. );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE );
glPushMatrix();
glTranslatef(0,-0.5,-0.5);
// glBindVertexArray(vao);
// glDrawElements(GL_TRIANGLES, indexdata.size(), GL_UNSIGNED_BYTE, (void*)0);
for(int i=0; i<smpl.mF.rows(); i++){
glBegin(GL_POLYGON);
for(int j=0; j<smpl.mF.cols(); j++){
indexdata.push_back(smpl.mF(i,j)+1);
}
glVertex3fv((const GLfloat*)&vertexdata[smpl.mF(i,0)].position);
glVertex3fv((const GLfloat*)&vertexdata[smpl.mF(i,1)].position);
glVertex3fv((const GLfloat*)&vertexdata[smpl.mF(i,2)].position);
glEnd();
}
glPopMatrix();
glutSwapBuffers();
glutPostRedisplay();
std::this_thread::sleep_for(std::chrono::milliseconds(10));
glutMainLoopEvent();
}
However, if I use vertex buffers, I get rubbish output:
// Iterate v
for(int i=0; i<smpl.mVTemp2.rows(); i++){
Vertex v;
for(int j=0; j<smpl.mVTemp2.cols(); j++){
v.position[j] = smpl.mVTemp2(i,j);
}
vertexdata.push_back(v);
}
std::vector<GLubyte> indexdata;
// Iterate f
for(int i=0; i<smpl.mF.rows(); i++){
for(int j=0; j<smpl.mF.cols(); j++){
indexdata.push_back(smpl.mF(i,j));
}
}
// Create and bind a VAO
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// Create and bind a BO for vertex data
GLuint vbuffer;
glGenBuffers(1, &vbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vbuffer);
// copy data into the buffer object
glBufferData(GL_ARRAY_BUFFER, vertexdata.size() * sizeof(Vertex), &vertexdata[0], GL_STATIC_DRAW);
// set up vertex attributes
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, position)); // vertices
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_TRUE, sizeof(Vertex), (void*)offsetof(Vertex, normal)); // normals
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texcoord)); // texture coordinates
// Create and bind a BO for index data
GLuint ibuffer;
glGenBuffers(1, &ibuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibuffer);
// copy data into the buffer object
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexdata.size() * sizeof(GLubyte), &indexdata[0], GL_STATIC_DRAW);
glBindVertexArray(0);
while(1){
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glEnable(GL_LIGHTING);
glShadeModel( GL_SMOOTH );
glEnable( GL_TEXTURE_2D );
glViewport( 0, 0, (float)renderWidth/1, (float)renderHeight/1. );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluPerspective( 60, (float)renderWidth/(float)renderHeight, 0.1, 10000. );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE );
glPushMatrix();
glTranslatef(0,-0.5,-0.5);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, indexdata.size(), GL_UNSIGNED_BYTE, (void*)0);
glPopMatrix();
glutSwapBuffers();
glutPostRedisplay();
std::this_thread::sleep_for(std::chrono::milliseconds(10));
glutMainLoopEvent();
}
I can't understand what I am doing wrong. I am able to draw the faces with regular old glVertex calls and it works, but it is too slow. So I want to use vertex buffers, but the code always gives junk output. Why is this the case?
EDIT:
I have solved the problem; however, my normals are now wrong. If I draw normally, with glNormal and glVertex calls, it works:
std::vector<Vertex> vertexdata;
for(int i=0; i<object.vertices.size(); i++){
Vertex v;
v.position[0] = object.vertices.at(i).coords[0];
v.position[1] = object.vertices.at(i).coords[1];
v.position[2] = object.vertices.at(i).coords[2];
v.normal[0] = object.computedNormals.at(i).coords[0];
v.normal[1] = object.computedNormals.at(i).coords[1];
v.normal[2] = object.computedNormals.at(i).coords[2];
vertexdata.push_back(v);
}
std::vector<GLushort> indexdata;
for(int i=0; i<object.faces.size(); i++){
OBJFace& face = object.faces.at(i);
indexdata.push_back(face.items[0].vertexIndex);
indexdata.push_back(face.items[1].vertexIndex);
indexdata.push_back(face.items[2].vertexIndex);
glBegin(GL_POLYGON);
glNormal3fv(vertexdata[face.items[0].vertexIndex].normal);
glVertex3fv(vertexdata[face.items[0].vertexIndex].position);
glNormal3fv(vertexdata[face.items[1].vertexIndex].normal);
glVertex3fv(vertexdata[face.items[1].vertexIndex].position);
glNormal3fv(vertexdata[face.items[2].vertexIndex].normal);
glVertex3fv(vertexdata[face.items[2].vertexIndex].position);
glEnd();
}
However, if I use it with Vertex Buffers, it does not:
std::vector<Vertex> vertexdata;
for(int i=0; i<object.vertices.size(); i++){
Vertex v;
v.position[0] = object.vertices.at(i).coords[0];
v.position[1] = object.vertices.at(i).coords[1];
v.position[2] = object.vertices.at(i).coords[2];
v.normal[0] = object.computedNormals.at(i).coords[0];
v.normal[1] = object.computedNormals.at(i).coords[1];
v.normal[2] = object.computedNormals.at(i).coords[2];
vertexdata.push_back(v);
}
std::vector<GLushort> indexdata;
for(int i=0; i<object.faces.size(); i++){
OBJFace& face = object.faces.at(i);
indexdata.push_back(face.items[0].vertexIndex);
indexdata.push_back(face.items[1].vertexIndex);
indexdata.push_back(face.items[2].vertexIndex);
}
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbuffer);
// copy data into the buffer object
glBufferData(GL_ARRAY_BUFFER, vertexdata.size() * sizeof(Vertex), &vertexdata[0], GL_STATIC_DRAW);
// set up vertex attributes
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, position)); // vertices
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal)); // normals
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texcoord)); // texture coordinates
// Create and bind a BO for index data
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibuffer);
// copy data into the buffer object
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexdata.size() * sizeof(GLushort), &indexdata[0], GL_STATIC_DRAW);
glBindVertexArray(0);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, indexdata.size(), GL_UNSIGNED_SHORT, (void*)0);
In addition to the answer of Nicol Bolas:
It looks like the number of indices is too large to encode in bytes (GLubyte). A byte can store values in the range [0, 255], so only 256 distinct indices can be encoded as bytes. Use GLushort instead; the range of GLushort is [0, 65535]:
std::vector<GLushort> indexdata;
....
GLuint ibuffer;
glGenBuffers(1, &ibuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexdata.size()*sizeof(GLushort),
indexdata.data(), GL_STATIC_DRAW);
....
glDrawElements(GL_TRIANGLES, indexdata.size(), GL_UNSIGNED_SHORT, (void*)0);
You are not using shaders for vertex processing. As such, generic vertex attributes (as provided by the glVertexAttrib* family of functions) cannot be used. So you can either switch to using shaders, or use the fixed-function attributes:
// set up vertex attributes
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, position)); // vertices
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, normal)); // normals
glClientActiveTexture(GL_TEXTURE0);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, texcoord)); // texture coordinates

Why doesn't a texture (GL_TEXTURE_2D) work with a vertex array object (VAO)?

I am working on a project with an OpenGL part. The scene graph part is based on OpenGL 1.0 and still uses glBegin and glEnd.
Now I am adding new features and have to use a VAO. I am a beginner and use QOpenGLFunctions_3_0 because I cannot find gl3.h in the OpenGL registry.
The problem is that when I use GL_TEXTURE_2D with glBegin(GL_QUADS), it works,
and when I draw a single glDrawElements( GL_QUADS, 4, GL_UNSIGNED_BYTE, 0); it works too.
But when I use GL_TEXTURE_2D together with glDrawElements( GL_QUADS, ... ), the texture is not applied correctly. Please tell me the right way to use them.
Here is the code:
/** draws Texture */
class TextureNode
: public Node
{
public:
TextureNode( const cv::Mat& mat )
{
m_mat = mat;
}
TextureNode( const std::string& id, const cv::Mat& mat )
: Node( id )
{
m_mat = mat;
}
~TextureNode()
{
//glDeleteTextures( 1, &m_texture[0] );
}
void init( TraversalContext& context )
{
initializeOpenGLFunctions();
struct Vertex {
GLfloat position[3];
GLfloat texcoord[2];
GLfloat normal[3];
};
const int NUM_VERTS = 4;
const int NUM_INDICES = 4;
Vertex vertexdata[NUM_VERTS] = {
{{0, 0, 0}, {0,0}, {0,0,1}},
{{0, 100, 0}, {0,1}, {0,0,1}},
{{100, 100, 0}, {1,1}, {0,0,1}},
{{100, 0, 0}, {1,0}, {0,0,1}},
};
GLubyte indexdata[NUM_INDICES] = { 0, 1, 2, 3 };
// Create and bind a VAO
glGenVertexArrays(1, &m_quadVAO);
glBindVertexArray(m_quadVAO);
glGenBuffers(1, &m_quadPositionVBO);
glBindBuffer(GL_ARRAY_BUFFER, m_quadPositionVBO);
// copy data into the buffer object
glBufferData(GL_ARRAY_BUFFER, NUM_VERTS * sizeof(Vertex), vertexdata, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, position));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texcoord));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal));
glGenBuffers(1, &m_quadIndexVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_quadIndexVBO);
// copy data into the buffer object
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NUM_INDICES * sizeof(GLubyte), indexdata, GL_STATIC_DRAW);
// Create and bind a texture
glGenTextures(1, &m_texture); // Create The Texture
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_mat.cols, m_mat.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, m_mat.data);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP);
//////// At this point the VAO is set up with two vertex attributes
//////// referencing the same buffer object, and another buffer object
//////// as source for index data. We can now unbind the VAO, go do
//////// something else, and bind it again later when we want to render
//////// with it.
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(NULL);
//////glBindBuffer(GL_ARRAY_BUFFER, NULL);
//glBindTexture( GL_TEXTURE_2D, NULL );
//////glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, NULL);
}
/** apply transformation */
void doWork( TraversalContext& context )
{
//QTime time;
//time.start();
initializeOpenGLFunctions();
glDisable( GL_LIGHTING );
glEnable(GL_TEXTURE_2D);
glBindVertexArray( m_quadVAO );
//glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glActiveTexture(GL_TEXTURE0 + m_texture - 1 );
glBindTexture(GL_TEXTURE_2D, m_texture);
glDrawElements( GL_QUADS, 4, GL_UNSIGNED_BYTE, 0);
//glBegin(GL_QUADS);
// glColor3d( 0,0,0 );
// glTexCoord2f(1.0f, 0.0f); glVertex2d( 0, 0 );
// glTexCoord2f(1.0f, 1.0f); glVertex2d( 0, m_mat.rows );
// glTexCoord2f(0.0f, 1.0f); glVertex2d( m_mat.cols, m_mat.rows );
// glTexCoord2f(0.0f, 0.0f); glVertex2d( m_mat.cols, 0 );
//glEnd();
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
glBindVertexArray(0);
glEnable( GL_LIGHTING );
}
void destroy( TraversalContext& context )
{
glDeleteVertexArrays( 1, &m_quadVAO );
glDeleteTextures(1,&m_texture);
glDeleteBuffers(1, &m_quadPositionVBO);
glDeleteBuffers(1, &m_quadTexcoordVBO);
glDeleteBuffers(1, &m_quadIndexVBO);
}
protected:
GLuint m_quadVAO;
GLuint m_quadPositionVBO;
GLuint m_quadTexcoordVBO;
GLuint m_quadIndexVBO;
GLuint m_texture;
///** list of vertices */
GLfloat m_vertices[4][2];
cv::Mat m_mat;
};
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, position));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, texcoord));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, normal));
This is using generic vertex attributes, but those only make sense when using shaders, and as far as I know they don't work with the fixed-function pipeline. Either create a shader that uses them, or use glVertexPointer, glTexCoordPointer, and/or glNormalPointer instead.
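As a rough sketch (assuming a context where the fixed-function pipeline is still available, as with OpenGL 3.0), the fixed-function equivalent of the attribute setup in init() would be:
glBindBuffer(GL_ARRAY_BUFFER, m_quadPositionVBO);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, position));
glClientActiveTexture(GL_TEXTURE0);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, texcoord));
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, sizeof(Vertex), (void*)offsetof(Vertex, normal));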
glActiveTexture(GL_TEXTURE0 + m_texture - 1 );
This is incorrect. glActiveTexture selects which texture unit to bind to; it does not use texture names at all. Since you're not using shaders, this should just be glActiveTexture(GL_TEXTURE0).
Side note: You don't need to use an index buffer for drawing a single quad; you can use glDrawArrays instead.
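Putting both corrections together, doWork() could draw the quad roughly like this (a sketch; it assumes the fixed-function pointers above, since no shader is bound):
glDisable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glBindVertexArray(m_quadVAO);
glActiveTexture(GL_TEXTURE0);         // select texture unit 0, not GL_TEXTURE0 + m_texture - 1
glBindTexture(GL_TEXTURE_2D, m_texture);
glDrawArrays(GL_QUADS, 0, 4);         // a single quad needs no index buffer
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
glDisable(GL_TEXTURE_2D);
glEnable(GL_LIGHTING);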

glDrawElements gives EXC_BAD_ACCESS with VBO

I've just started learning OpenGL and I was trying to implement VBOs.
This is what I'm doing:
In the main method, before the render loop, I initialize each mesh's VBOs.
The mesh->pos and mesh->norm contain the points and the normals of the triangles and the quads of the mesh.
The mesh->triangle and mesh->quad contain the indices.
for (auto mesh : scene->meshes)
{
mesh->vboPos = 0;
glGenBuffers (1, &mesh->vboPos);
glBindBuffer (GL_ARRAY_BUFFER, mesh->vboPos);
glBufferData (GL_ARRAY_BUFFER, 3 * mesh->pos.size() * sizeof(GL_FLOAT), mesh->pos.data(), GL_STATIC_DRAW);
mesh->vboNorm = 0;
glGenBuffers (1, &mesh->vboNorm);
glBindBuffer (GL_ARRAY_BUFFER, mesh->vboNorm);
glBufferData (GL_ARRAY_BUFFER, 3 * mesh->norm.size() * sizeof(GL_FLOAT), mesh->norm.data(), GL_STATIC_DRAW);
mesh->vbiTriangle = 0;
glGenBuffers (1, &mesh-> vbiTriangle);
glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, mesh-> vbiTriangle);
glBufferData (GL_ELEMENT_ARRAY_BUFFER, 3 * mesh->triangle.size() * sizeof(GL_UNSIGNED_INT), mesh->triangle.data(), GL_STATIC_DRAW);
mesh->vbiQuad = 0;
glGenBuffers (1, &mesh->vbiQuad);
glBindBuffer (GL_ELEMENT_ARRAY_BUFFER, mesh->vbiQuad);
glBufferData (GL_ELEMENT_ARRAY_BUFFER, 4 * mesh->quad.size() * sizeof(GL_UNSIGNED_INT), mesh->quad.data(), GL_STATIC_DRAW);
}
while(not glfwWindowShouldClose(window))
{
...
}
After this, inside the shade method, I call draw for each mesh I have. This is what the mesh code looks like:
struct Mesh {
frame3f frame = identity_frame3f; // frame
vector<vec3f> pos; // vertex position
vector<vec3f> norm; // vertex normal
vector<vec3i> triangle; // triangle
vector<vec4i> quad; // quad
Material* mat = new Material(); // material
GLuint vboPos = 0;
GLuint vboNorm = 0;
GLuint vbiTriangle =0;
GLuint vbiQuad =0;
int count = 0;
void draw()
{
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, vboPos);
glBindBuffer(GL_ARRAY_BUFFER, vboNorm);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbiTriangle);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbiQuad);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawElements(GL_TRIANGLES, triangle.size() * 3, GL_UNSIGNED_INT, 0);
glDrawElements(GL_QUADS, quad.size() * 4, GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
}
};
I don't really know what is causing this, but I'm pretty sure I misunderstood something about how VBOs work. Can you enlighten me?
You are trying to do too many things at once here, and I have a feeling the driver is overrunning an array by trying to read from the wrong element array.
Your draw function needs to be re-written:
void draw()
{
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
/* Setup your Position array */
glBindBuffer(GL_ARRAY_BUFFER, vboPos);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
/* Then setup your Normal array */
glBindBuffer(GL_ARRAY_BUFFER, vboNorm);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
/* DO NOT DO THIS -- you are using generic vertex attributes. */
//glEnableClientState(GL_VERTEX_ARRAY);
/* First the Triangles */
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbiTriangle);
glDrawElements(GL_TRIANGLES, triangle.size() * 3, GL_UNSIGNED_INT, 0);
/* Now the Quads */
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbiQuad);
glDrawElements(GL_QUADS, quad.size() * 4, GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
}

OpenGL Pseudo-Instanced Rendering shows Black Screen

I have some code that loops through a set of objects and renders instances of those objects. The list of objects to be rendered is stored as a std::map<MeshResource*, std::vector<MeshRenderer*>>, where an object of class MeshResource contains the vertices and indices with the actual data, and an object of class MeshRenderer defines the point in space the mesh is to be rendered at.
My rendering code is as follows:
glDisable(GL_BLEND);
glEnable(GL_CULL_FACE);
glDepthMask(GL_TRUE);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
for (std::map<MeshResource*, std::vector<MeshRenderer*> >::iterator it = renderables.begin(); it != renderables.end(); it++)
{
it->first->setupBeforeRendering();
cout << "<";
for (unsigned long i =0; i < it->second.size(); i++)
{
//Pass in an identity matrix to the vertex shader- used here only for debugging purposes; the real code correctly inputs any matrix.
uniformizeModelMatrix(Matrix4::IDENTITY);
/**
* StartHere fix rendering problem.
* Ruled out:
* Vertex buffers correctly.
* Index buffers correctly.
* Matrices correct?
*/
it->first->render();
}
it->first->cleanupAfterRendering();
}
geometryPassShader->disable();
glDepthMask(GL_FALSE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
The function in MeshResource that handles setting up the uniforms is as follows:
void MeshResource::setupBeforeRendering()
{
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0); // Vertex position
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 12); // Vertex normal
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 24); // UV layer 0
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 32); // Vertex color
glVertexAttribPointer(4, 1, GL_UNSIGNED_SHORT, GL_FALSE, sizeof(Vertex), (const GLvoid*) 44); //Material index
}
The code that renders the object is this:
void MeshResource::render()
{
glDrawElements(GL_TRIANGLES, geometry->numIndices, GL_UNSIGNED_SHORT, 0);
}
And the code that cleans up is this:
void MeshResource::cleanupAfterRendering()
{
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
}
The end result is a black screen, although the part of my rendering pipeline that runs after this code (essentially just drawing axes and lines on the screen) works properly, so I'm fairly sure it isn't an issue with passing the uniforms. If, however, I change the code slightly so that the rendering code calls the setup immediately before drawing, like so:
void MeshResource::render()
{
setupBeforeRendering();
glDrawElements(GL_TRIANGLES, geometry->numIndices, GL_UNSIGNED_SHORT, 0);
}
The program works as desired. I don't want to have to do this, though, as my aim is to set up the vertex, material, etc. data once per object type and then render each instance, updating only the transformation information (a sketch of that pattern follows the code below).
The uniformizeModelMatrix works as follows:
void RenderManager::uniformizeModelMatrix(Matrix4 matrix)
{
glBindBuffer(GL_UNIFORM_BUFFER, globalMatrixUBOID);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(Matrix4), matrix.ptr());
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
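For reference, the stated aim of setting up vertex data once per object type and then drawing every instance is commonly handled by capturing the attribute configuration in a per-mesh VAO, so that setupBeforeRendering() only needs to bind it. A sketch of that pattern, assuming an OpenGL 3.x context and a hypothetical vaoID member of MeshResource; it illustrates the idea rather than a verified fix for the black screen:
// Once, at mesh load time
glGenVertexArrays(1, &vaoID);
glBindVertexArray(vaoID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iboID);   // the element buffer binding is stored in the VAO
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
// ... attributes 1 to 4 exactly as in setupBeforeRendering() ...
glBindVertexArray(0);
// Per frame, per object type
void MeshResource::setupBeforeRendering() { glBindVertexArray(vaoID); }
void MeshResource::render() { glDrawElements(GL_TRIANGLES, geometry->numIndices, GL_UNSIGNED_SHORT, 0); }
void MeshResource::cleanupAfterRendering() { glBindVertexArray(0); }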