Using vertex attributes for GLSL with LibiGL - c++

I am doing some computationally heavy stuff on meshes with libiGL(c++) and am trying to move these computations over to the GPU. To start off, I am trying to get a basic piece of code running but the attribute values do not seem to get passed on to the shaders.
int m = V.rows();//vertices
P.resize(m);
for (int i = 0; i < m; i++)
{
P(i) = i / m;
}
std::map<std::string, GLuint> attrib;
attrib.insert({ "concentration", 0 });
igl::opengl::create_shader_program(
mesh_vertex_shader_string,
mesh_fragment_shader_string,
attrib,
v.data().meshgl.shader_mesh);
GLuint prog_id = v.data().meshgl.shader_mesh;
const int num_vert = m;
GLuint vao[37 * 37];//37*37 vertices
glGenVertexArrays(m, vao);
GLuint buffer;
for (int i = 0; i < m; i++)
{
glGenBuffers(1, &buffer);
glBindVertexArray(vao[i]);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float), P.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, (void*)(i*sizeof(float)));
}
The red color of each vertex should be dependent on its ordering (from 0 red to 1 red), but instead the mesh is colored with no red at all.
I am basing myself off of this code :https://github.com/libigl/libigl/issues/657 and http://www.lighthouse3d.com/cg-topics/code-samples/opengl-3-3-glsl-1-5-sample/.
I modified the shader strings accordingly by adding the following lines:
Vertex Shader
in float concentration;
out float c;
c = concentration;
where the last line is in main();
Fragment Shader
in float c;
outColor.x = c;
where the last line is in main().
What am I doing wrong?
Edit: OpenGL version used is actually 3.2

Related

Rendering multiple meshes (same VBO) + all indices in same EBO (OpenGL)

I'm trying to minimize memory fragmentation on the GPU by storing all vertex data in the same VBO.
After parsing the .obj nodes with Assimp and extracting all the mesh data, I've alloc'd two large buffers that can contain everything and dumped everything in them (one vertex array and another indices array). Rendering all the data works fine with glDrawArrays, but this is not the result I'm trying to obtain.
struct vertex_data
{
glm::vec3 Position;
glm::vec3 Normal;
glm::vec2 TexCoords;
};
uint32_t TotalVertices = 0;
uint32_t TotalIndices = 0;
uint32_t TotalMeshes = Model->NextMeshToLoad;
if (TotalMeshes != 0)
{
for (uint32_t MeshOffset = 0;
MeshOffset < TotalMeshes;
++MeshOffset)
{
mesh *Mesh = Model->Meshes + MeshOffset;
uint32_t MeshTotalVertices = Mesh->NextVertexData;
uint32_t MeshTotalIndices = Mesh->NextIndice;
TotalVertices += MeshTotalVertices;
TotalIndices += MeshTotalIndices;
}
vertex_data *CombinedVertexData = PushArray(Arena, TotalVertices, vertex_data);
uint32_t *CombinedIndices = PushArray(Arena, TotalIndices, uint32_t);
uint32_t CombinedVertexDataOffset = 0;
uint32_t CombinedIndicesOffset = 0;
for (uint32_t MeshOffset = 0;
MeshOffset < TotalMeshes;
++MeshOffset)
{
mesh *Mesh = Model->Meshes + MeshOffset;
uint32_t MeshTotalVertices = Mesh->NextVertexData;
memcpy_s(CombinedVertexData + CombinedVertexDataOffset, TotalVertices * sizeof(vertex_data),
Mesh->VertexData, MeshTotalVertices * sizeof(vertex_data));
CombinedVertexDataOffset += MeshTotalVertices;
uint32_t MeshTotalIndices = Mesh->NextIndice;
memcpy_s(CombinedIndices + CombinedIndicesOffset, TotalIndices * sizeof(uint32_t),
Mesh->Indices, MeshTotalIndices * sizeof(uint32_t));
CombinedIndicesOffset += MeshTotalIndices;
}
Model->CombinedVertexData = CombinedVertexData;
Model->CombinedIndicesData = CombinedIndices;
Model->CombinedVertexDataSize = TotalVertices;
Model->CombinedIndicesDataSize = TotalIndices;
Model->DataStatus = model_data_status::PENDING_UPLOAD;
Model->NextMeshToLoad stores the next empty array location in which it can store a mesh. It also represents the current size of stored meshes - not to be confused with the maximum size alloc'd for the array.
Mesh->NextVertexData and Mesh->NextIndice work the same, but for each mesh in part.
Uploading the data to the GPU:
if (Model->DataStatus == model_data_status::PENDING_UPLOAD)
{
glGenVertexArrays(1, &Model->VertexArrayObject);
glGenBuffers(1, &Model->VertexBufferObject);
glGenBuffers(1, &Model->ElementBufferObject);
glBindVertexArray(Model->VertexArrayObject);
glBindBuffer(GL_ARRAY_BUFFER, Model->VertexBufferObject);
glBufferData(GL_ARRAY_BUFFER, Model->CombinedVertexDataSize * sizeof(vertex_data), Model->CombinedVertexData, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, Model->ElementBufferObject);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, Model->CombinedIndicesDataSize * sizeof(uint32_t), &Model->CombinedIndicesData[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_data), 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_data), (void *)(sizeof(glm::vec3)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(vertex_data), (void *)(2 * sizeof(glm::vec3)));
glBindVertexArray(0);
}
The problem is that each mesh has a different/separate material. This forces me to issue separate draw calls for each mesh (need to set different uniform properties + bind different textures). I know the vertex and indices size for each mesh in part, so I figured I could just call glDrawElements and use the 4th parameter as an offset into the ELEMENT_ARRAY_BUFFER:
glBindVertexArray(Model->VertexArrayObject);
uint32_t IndicesOffset = 0;
uint32_t MeshesSize = Model->NextMeshToLoad;
for (uint32_t MeshIndex = 0;
MeshIndex < MeshesSize;
++MeshIndex)
{
mesh *Mesh = Model->Meshes + MeshIndex;
uint32_t IndicesDataSize = Mesh->NextIndice;
glDrawElements(GL_TRIANGLES, IndicesDataSize, GL_UNSIGNED_INT, (void *)(IndicesOffset * sizeof(uint32_t)));
IndicesOffset += Mesh->NextIndice;
}
glBindVertexArray(0);
Unfortunately this doesn't seem to work... My data is only partially rendered and I can't understand why this is happening. I've checked the data loaded from the .obj file and it's fine. I've also checked the combined vertex data & the combined indices data... it's all there. But my meshes aren't all there on the screen :( (I've enabled wireframe mode so I can better visualize the issue)
If I try to call glDrawArrays on the combined vertex data, it's all working fine and dandy:
At this moment, both the combined vertex and indices arrays register a size of 57174 elements each. Is it not possible for OpenGL to properly map the indices offset to the vertices data internally and properly render? If I use one VBO + EBO per mesh, everything renders fine, so I know the data I'm receiving is not corrupted (further evidenced by glDrawArrays on the combined vertex data). I've been reading and trying all sorts of solutions without anything working... Pretty much at my wits end. Would really appreciate some help! Thanks!

When loading multiple indexed VAOs, their geometry seems to merge

I've written a very basic .obj file loader (just loads the vertex coords and triangular face indices) and renderer. When I only load one model at a time, it works great. When I load a 2nd model (or more), each one draws itself and part of the others, and I've wracked my brain trying to figure out why this is.
Some relevant code snippets:
This is where I open the .obj file and read the vertex coords and triangle face indices from the file.
bool Model::loadFromFile(string fileName)
{
vector<GLfloat> vertices;
vector<GLuint> indices;
fstream file(fileName);
string line = "";
while (getline(file, line))
{
istringstream lineReader(line);
char c;
//read the first letter of the line
//The first letter designates the meaning of the rest of the line
lineReader >> c;
vec3 vertex; //if we need to read .x, .y, and .z from the file
GLuint index[3]; //if we need to read the 3 indices on a triangle
switch (c)
{
case '#':
//we do nothing with comment lines
break;
case 'v': //V means vertex coords
case 'V': //so Load it into a vec3
lineReader >> vertex.x >> vertex.y >> vertex.z;
vertices.push_back(vertex.x);
vertices.push_back(vertex.y);
vertices.push_back(vertex.z);
break;
case 'vt':
case 'VT':
//no tex coords yet either
break;
case 'f': //F means indices of a face
case 'F':
lineReader >> index[0] >> index[1] >> index[2];
//we subtract one from each index in the file because
//openGL indices are 0-based, but .obj has them as
//1-based
indices.push_back(index[0] - 1);
indices.push_back(index[1] - 1);
indices.push_back(index[2] - 1);
cout << "Indices: " << index[0] << ", " << index[1] << ", " << index[2] << endl;
break;
}
}
return this->load(vertices, indices);
}
At the end of the loadFromFile method, the we pass in the vector of indices and vertices to the model::load() method, which creates and binds the VAO, VBO, and IBO.
bool Model::load(vector<float>& vertices, vector<GLuint>& indices)
{
//create the VAO
glGenVertexArrays(1, &handle);
glBindVertexArray(getHandle());
//and enable vertexPositionAttribute
glEnableVertexAttribArray(pShader->getPositionAttribIndex());
//Populate the position portion of the
//VAO
GLuint vboID = 0;
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glVertexAttribPointer(pShader->getPositionAttribIndex(), 3, GL_FLOAT, GL_FALSE, 0, nullptr);
//Populate the indices portion of the
//VAO
GLuint iboID = 0;
glGenBuffers(1, &iboID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iboID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint) * indices.size(), &indices[0], GL_STATIC_DRAW);
//bind nothing to avoid any mistakes or loading
//of random data to my VAO
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
this->numVertices = vertices.size() / 3;
this->numIndices = indices.size();
cout << "Num Indices: " << numIndices << endl;
return true;
}
Once the model is loaded, it's VAO id is stored in a class variable. The VAO is loaded in the model::bind() method.
void Model::bind()
{
glBindVertexArray(getHandle());
}
And drawn with a call to glDrawElements()
void Model::draw()
{
//and draw me, bitchez!
glDrawElements(GL_TRIANGLES, this->numIndices, GL_UNSIGNED_INT, nullptr);
}
The models are drawn inside the level::draw() method:
void Level::draw()
{
pShader->setViewMatrix(camera.getViewMatrix());
for (int modelIndex = 0; modelIndex < models.size(); modelIndex++)
{
models[modelIndex]->bind();
for (int drawableIndex = 0; drawableIndex < drawables.size(); drawableIndex++)
drawables[drawableIndex]->draw();
}
}
There are no OpenGL errors, and the file parsing churns out the right values when I print it to the console.
If anyone can point out how/why this is happening, I would greatly appreciate it.
Well, after a few more hours of digging through code, I noted inside the level::draw method:
for (int modelIndex = 0; modelIndex < models.size(); modelIndex++)
{
models[modelIndex]->bind();
for (int drawableIndex = 0; drawableIndex < drawables.size(); drawableIndex++)
drawables[drawableIndex]->draw();
}
I bind the VAO of every model for every entity I load. Quite literally I was drawing every loaded model for everything on screen. Sorry to waste screen space on the OpenGL section with this!

OpenGL Model/Texture rendering using VAO/VBO

I am trying to render 3D models with textures using Assimp. The conversion goes perfect, all textures positions and what not gets loaded. I have tested the texture images by drawing them to the screen in 2D.
For some reason it does not render the textures to the model.
I am a beginner in OpenGL, so forgive me if I don't explain it right.
The tutorial I have based the code on is from here, but i stripped a big part since I have my own camera/movement system.
The model renders like this: http://i.stack.imgur.com/5sK9K.png
whilest the texture in use looks like this: http://i.stack.imgur.com/sWGp7.jpg
The relevant rendering code is the following:
Generating textures from data file:
int Mesh::LoadGLTextures(const aiScene* scene){
if (scene->HasTextures()) return -1; //yes this is correct
/* getTexture Filenames and Numb of Textures */
for (unsigned int m = 0; m<scene->mNumMaterials; m++){
int texIndex = 0;
aiReturn texFound;
aiString path; // filename
while ((texFound = scene->mMaterials[m]->GetTexture(aiTextureType_DIFFUSE, texIndex, &path)) == AI_SUCCESS){
textureIdMap[path.data] = NULL; //fill map with textures, pointers still NULL yet
texIndex++;
}
}
int numTextures = textureIdMap.size();
/* create and fill array with GL texture ids */
GLuint* textureIds = new GLuint[numTextures];
/* get iterator */
std::map<std::string, GLuint>::iterator itr = textureIdMap.begin();
std::string basepath = getBasePath(path);
ALLEGRO_BITMAP *image;
for (int i = 0; i<numTextures; i++){
std::string filename = (*itr).first; // get filename
(*itr).second = textureIds[i]; // save texture id for filename in map
itr++; // next texture
std::string fileloc = basepath + filename; /* Loading of image */
image = al_load_bitmap(fileloc.c_str());
if (image) /* If no error occured: */{
GLuint texId = al_get_opengl_texture(image);
//glGenTextures(numTextures, &textureIds[i]); /* Texture name generation */
glBindTexture(GL_TEXTURE_2D, texId); /* Binding of texture name */
//redefine standard texture values
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear
interpolation for magnification filter */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear
interpolation for minifying filter */
textureIdMap[filename] = texId;
} else {
/* Error occured */
std::cout << "Couldn't load Image: " << fileloc.c_str() << "\n";
}
}
//Cleanup
delete[] textureIds;
//return success
return true;
}
Generating VBO/VAO:
void Mesh::genVAOsAndUniformBuffer(const aiScene *sc) {
struct MyMesh aMesh;
struct MyMaterial aMat;
GLuint buffer;
// For each mesh
for (unsigned int n = 0; n < sc->mNumMeshes; ++n){
const aiMesh* mesh = sc->mMeshes[n];
// create array with faces
// have to convert from Assimp format to array
unsigned int *faceArray;
faceArray = (unsigned int *)malloc(sizeof(unsigned int) * mesh->mNumFaces * 3);
unsigned int faceIndex = 0;
for (unsigned int t = 0; t < mesh->mNumFaces; ++t) {
const aiFace* face = &mesh->mFaces[t];
memcpy(&faceArray[faceIndex], face->mIndices, 3 * sizeof(unsigned int));
faceIndex += 3;
}
aMesh.numFaces = sc->mMeshes[n]->mNumFaces;
// generate Vertex Array for mesh
glGenVertexArrays(1, &(aMesh.vao));
glBindVertexArray(aMesh.vao);
// buffer for faces
glGenBuffers(1, &buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * mesh->mNumFaces * 3, faceArray, GL_STATIC_DRAW);
// buffer for vertex positions
if (mesh->HasPositions()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mVertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(vertexLoc);
glVertexAttribPointer(vertexLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex normals
if (mesh->HasNormals()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mNormals, GL_STATIC_DRAW);
glEnableVertexAttribArray(normalLoc);
glVertexAttribPointer(normalLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex texture coordinates
if (mesh->HasTextureCoords(0)) {
float *texCoords = (float *)malloc(sizeof(float) * 2 * mesh->mNumVertices);
for (unsigned int k = 0; k < mesh->mNumVertices; ++k) {
texCoords[k * 2] = mesh->mTextureCoords[0][k].x;
texCoords[k * 2 + 1] = mesh->mTextureCoords[0][k].y;
}
glGenBuffers(1, &buffer);
glEnableVertexAttribArray(texCoordLoc);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 2 * mesh->mNumVertices, texCoords, GL_STATIC_DRAW);
glVertexAttribPointer(texCoordLoc, 2, GL_FLOAT, GL_FALSE, 0, 0);
}
// unbind buffers
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// create material uniform buffer
aiMaterial *mtl = sc->mMaterials[mesh->mMaterialIndex];
aiString texPath; //contains filename of texture
if (AI_SUCCESS == mtl->GetTexture(aiTextureType_DIFFUSE, 0, &texPath)){
//bind texture
unsigned int texId = textureIdMap[texPath.data];
aMesh.texIndex = texId;
aMat.texCount = 1;
} else {
aMat.texCount = 0;
}
float c[4];
set_float4(c, 0.8f, 0.8f, 0.8f, 1.0f);
aiColor4D diffuse;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_DIFFUSE, &diffuse))
color4_to_float4(&diffuse, c);
memcpy(aMat.diffuse, c, sizeof(c));
set_float4(c, 0.2f, 0.2f, 0.2f, 1.0f);
aiColor4D ambient;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_AMBIENT, &ambient))
color4_to_float4(&ambient, c);
memcpy(aMat.ambient, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D specular;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_SPECULAR, &specular))
color4_to_float4(&specular, c);
memcpy(aMat.specular, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D emission;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_EMISSIVE, &emission))
color4_to_float4(&emission, c);
memcpy(aMat.emissive, c, sizeof(c));
float shininess = 0.0;
unsigned int max;
aiGetMaterialFloatArray(mtl, AI_MATKEY_SHININESS, &shininess, &max);
aMat.shininess = shininess;
glGenBuffers(1, &(aMesh.uniformBlockIndex));
glBindBuffer(GL_UNIFORM_BUFFER, aMesh.uniformBlockIndex);
glBufferData(GL_UNIFORM_BUFFER, sizeof(aMat), (void *)(&aMat), GL_STATIC_DRAW);
myMeshes.push_back(aMesh);
}
}
Rendering model:
void Mesh::recursive_render(const aiScene *sc, const aiNode* nd){
// draw all meshes assigned to this node
for (unsigned int n = 0; n < nd->mNumMeshes; ++n){
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, myMeshes[nd->mMeshes[n]].texIndex);
// bind VAO
glBindVertexArray(myMeshes[nd->mMeshes[n]].vao);
// draw
glDrawElements(GL_TRIANGLES, myMeshes[nd->mMeshes[n]].numFaces * 3, GL_UNSIGNED_INT, 0);
}
// draw all children
for (unsigned int n = 0; n < nd->mNumChildren; ++n){
recursive_render(sc, nd->mChildren[n]);
}
}
Any other relevant code parts can be found in my open github project https://github.com/kwek20/StrategyGame/tree/master/Strategy
Mesh.cpp is relevant, as well as main.cpp and Camera.cpp.
As far as I understand, I followed the guidelines well: created a VAO, created VBOs, added data and enabled the proper vertex array attribute to render the scene with.
I have checked all the data variables and everything is filled according to plan
Could anyone here spot the mistake I have made and or explain it?
Some links are typed weird because of the limit I have :(
It would help if you posted your shaders also.
I can post some rendering code with textures if that helps you out:
Generating the texture for opengl and loading a grayscale (UC8) image with width and height into the GPU
void GLRenderer::getTexture(unsigned char * image, int width, int height)
{
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &mTextureID);
glBindTexture(GL_TEXTURE_2D, mTextureID);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, width, height);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGR, GL_UNSIGNED_BYTE, image);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
glBindTexture(GL_TEXTURE_2D, 0);
}
Loading the vertices from assimp onto the gpu
//** buffer a obj file-style model, initialize the VAO
void GLRenderer::bufferModel(float* aVertexArray, int aNumberOfVertices, float* aNormalArray, int aNumberOfNormals, float* aUVList, int aNumberOfUVs, unsigned int* aIndexList, int aNumberOfIndices)
{
//** just to be sure we are current
glfwMakeContextCurrent(mWin);
//** Buffer all data in VBOs
glGenBuffers(1, &mVertex_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfVertices * 3, aVertexArray, GL_STATIC_DRAW);
glGenBuffers(1, &mNormal_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfNormals * 3, aNormalArray, GL_STATIC_DRAW);
glGenBuffers(1, &mUV_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfUVs * 2, aUVList, GL_STATIC_DRAW);
glGenBuffers(1, &mIndex_buffer_object);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * aNumberOfIndices, aIndexList, GL_STATIC_DRAW);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
//** VAO tells our shaders how to match up data from buffer to shader input variables
glGenVertexArrays(1, &mVertex_array_object);
glBindVertexArray(mVertex_array_object);
//** vertices first
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
//** normals next
if (aNumberOfNormals > 0){
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** UVs last
if (aNumberOfUVs > 0){
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** indexing for reusing vertices in triangle-meshes
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
//** check errors and store the number of vertices
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
mNumVert = aNumberOfVertices;
mNumNormals = aNumberOfNormals;
mNumUVs = aNumberOfUVs;
mNumIndices = aNumberOfIndices;
}
The code above is called like:
//read vertices from file
std::vector<float> vertex, normal, uv;
std::vector<unsigned int> index;
//assimp-wrapping function to load obj to vectors
aux::loadObjToVectors("Resources\\vertices\\model.obj", vertex, normal, index, uv);
mPtr->bufferModel(&vertex[0], static_cast<int>(vertex.size()) / 3, &normal[0], static_cast<int>(normal.size()) / 3, &uv[0], static_cast<int>(uv.size()) / 2, &index[0], static_cast<int>(index.size()));
Then comes the shader-part:
In the vertex shader you just hand-through the UV-coordinate layer
#version 400 core
layout (location = 0) in vec3 vertexPosition_modelspace;
layout (location = 1) in vec3 vertexNormal_modelspace;
layout (location = 2) in vec2 vertexUV;
out vec2 UV;
[... in main then ...]
UV = vertexUV;
While in the fragment shader you assign the value to the pixel:
#version 400 core
in vec2 UV;
uniform sampler2D textureSampler;
layout(location = 0) out vec4 outColor;
[... in main then ...]
// you probably want to calculate lighting here then too, so its just the simplest way to get the texture inside
outColor = vec4(texture2D(textureSampler, UV).rgb, cosAngle);
//you can also check whether the UV coords are correctly bound by using:
outColor = vec4(UV.x, UV.y,1,1);
//and then checking the pixel-values in the resulting image (e.g. render it to a PBO and then download it onto the CPU for)
In the rendering loop also make sure that all the uniforms are correctly bound (especially texture related ones) and that the texture is active and bound
if (mTextureID != -1) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mTextureID);
}
GLint textureLocation = glGetUniformLocation(mShaderProgram, "textureSampler");
glUniform1i(textureLocation, 0);
//**set the poligon mode
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
//**drawElements because of indexing
glDrawElements(GL_TRIANGLES, mNumIndices, GL_UNSIGNED_INT, 0);
I hope I could help you!
Kind regards,
VdoP

OpenGL VBO Drawing

I seem to be having some trouble drawing objects in OpenGL using VBOs. I've attempted to copy the example from: http://www.opengl.org/wiki/VBO_-_just_examples (number 2) but I can't get a plane to appear on screen.
Vertex.h:
#include <freeglut>
struct Vertex {
GLfloat position[3];
GLfloat normal[3];
GLfloat *uvs[2];
unsigned short uvCount;
};
Triangles.h:
#include <GL/glew.h>
#include "Vertex.h"
class Triangles {
public:
Triangles(GLuint program, Vertex *vertices, unsigned int vertexCount, unsigned int *indices[3], unsigned int indiceCount);
~Triangles();
void Draw();
private:
GLuint program;
GLuint VertexVBOID;
GLuint IndexVBOID;
GLuint VaoID;
unsigned int *indices[3];
unsigned int indiceCount;
};
Triangles.cpp:
#include "Triangles.h"
#include <stdio.h>
#include <stddef.h>
Triangles::Triangles(GLuint program, unsigned int *indices[3], unsigned int indiceCount) {
memcpy(this->indices, indices, sizeof(int) * indiceCount * 3);
this->indiceCount = indiceCount;
this->program = program;
glGenVertexArrays(1, &VaoID);
glBindVertexArray(VaoID);
glGenBuffers(1, &VertexVBOID);
glBindBuffer(GL_ARRAY_BUFFER, VertexVBOID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * vertexCount, vertices, GL_STATIC_DRAW);
GLuint attributeLocation = glGetAttribLocation(program, "position");
glEnableVertexAttribArray(attributeLocation);
glVertexAttribPointer(attributeLocation, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid *)(offsetof(Vertex, position)));
attributeLocation = glGetAttribLocation(program, "normal");
glEnableVertexAttribArray(attributeLocation);
glVertexAttribPointer(attributeLocation, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid *)(offsetof(Vertex, normal)));
glGenBuffers(1, &IndexVBOID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * 3 * indiceCount, indices, GL_STATIC_DRAW);
};
Triangles::~Triangles() {
glDisableVertexAttribArray(glGetAttribLocation(program, "position"));
glDisableVertexAttribArray(glGetAttribLocation(program, "normal"));
glDeleteBuffers(1, &VertexVBOID);
glDeleteBuffers(1, &IndexVBOID);
glDeleteVertexArrays(1, &VaoID);
}
void Triangles::Draw() {
glBindVertexArray(VaoID);
glDrawElements(GL_TRIANGLES, indiceCount, GL_UNSIGNED_INT, 0);
};
Excerpt from main.cpp (creating triagle object):
Vertex vertices[4];
vertices[0].position[0] = -1;
vertices[0].position[1] = 1;
vertices[0].position[2] = 0;
vertices[0].normal[0] = 0;
vertices[0].normal[0] = 0;
vertices[0].normal[0] = 1;
vertices[0].uvCount = 0;
vertices[1].position[0] = 1;
vertices[1].position[1] = 1;
vertices[1].position[2] = 0;
vertices[1].normal[0] = 0;
vertices[1].normal[0] = 0;
vertices[1].normal[0] = 1;
vertices[1].uvCount = 0;
vertices[2].position[0] = 1;
vertices[2].position[1] = -1;
vertices[2].position[2] = 0;
vertices[2].normal[0] = 0;
vertices[2].normal[0] = 0;
vertices[2].normal[0] = 1;
vertices[2].uvCount = 0;
vertices[3].position[0] = -1;
vertices[3].position[1] = -1;
vertices[3].position[2] = 0;
vertices[3].normal[0] = 0;
vertices[3].normal[0] = 0;
vertices[3].normal[0] = 1;
vertices[3].uvCount = 0;
unsigned int **indices;
indices = new unsigned int*[2];
indices[0] = new unsigned int[3];
indices[0][0] = 0;
indices[0][1] = 1;
indices[0][2] = 2;
indices[1] = new unsigned int[3];
indices[1][0] = 2;
indices[1][1] = 3;
indices[1][2] = 0;
Triangles *t = new Triangles(program, vertices, 4 indices, 2);
createShader(GLenum , char *):
GLuint createShader(GLenum type, char *file) {
GLuint shader = glCreateShader(type);
const char *fileData = textFileRead(file);
glShaderSource(shader, 1, &fileData, NULL);
glCompileShader(shader);
return shader;
}
Shader loading:
GLuint v = createShader(GL_VERTEX_SHADER);
GLuint f = createShader(GL_FRAGMENT_SHADER, "fragmentShader.frag");
program = glCreateProgram();
glAttachShader(program, v);
glAttachShader(program, f);
glLinkProgram(program);
glUseProgram(program);
vertexShader.vert:
in vec3 position;
in vec3 normal;
out vec3 a_normal;
void main() {
gl_Position = vec4(position, 1.0);
}
fragmentShader.frag:
in vec3 a_normal;
out vec4 out_color;
void main() {
out_color = vec4(1.0, 1.0, 1.0, 1.0);
}
Please let me know if more code is needed. As a side note everything compiles just fine, I just don't see the plane that I have constructed on screen (maybe because I didn't use colors?)
My OpenGL information is as follows:
Vendor: ATI Technologies Inc.
Renderer: ATI Radeon HD 5700 Series
Version: 3.2.9756 Compatibility Profile Context
Extensions: extensions = GL_AMDX_name_gen_delete GL_AMDX_random_access_target GL_AMDX_vertex_shader_tessellator GL_AMD_conservative_depth GL_AMD_draw_buffers_blend GL_AMD_performance_monitor GL_AMD_seamless_cubemap_per_texture GL_AMD_shader_stencil_export GL_AMD_texture
In response to your comments:
Unfortunately I do not do error checking
You should always add some OpenGL error checking, it will save you from so many problems. It should look something like the following:
int err = glGetError();
if(err != 0) {
//throw exception or log message or die or something
}
I used matrix functions because I didn't realize the vertex shader would effect that. I assumed the matrix set to the matrix at the top of the stack (the one I pushed before drawing.)
This is an incorrect assumption. The only variable which references the deprecated matrix stack is the special (though deprecated) variable gl_ModelViewProjectionMatrix. What you currently have there is just an unused, uninitialized matrix, which is totally ignoring your matrix stack.
As for indices, I'm not exactly sure what you mean. I just drew the vertices on paper and decided the indices based on that.
I'm not referring to the indices of the triangle in your index buffer, but rather the first parameter to your glAttrib* functions. I suppose 'attribute location' is a more correct term than index.
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, ... //attrib location 0
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, ... //attrib location 1
You seem to just be randomly assuming that "0" and "1" map to "position" and "normal". This is not a safe assumption to make. You should be querying the attribute location values for "position" and "normal" with glGetAttribLocation, and then using that value to glEnableVertexAttribArray and glVertexAttribPointer.

OpenGL 3.x: Access violation when using vertex buffer object and glDrawElements(...)

I have trouble rendering some geometry by using a vertex buffer object. I intend to draw a plane of points, so basically one vertex at every discrete position in my space. However, I cannot render that plane, as every time I call glDrawElements(...), application crashes returning an access violation exception. There must be some mistake while initialization, I guess.
This is what I have so far:
#define SPACE_X 512
#define SPACE_Z 512
typedef struct{
GLfloat x, y, z; // position
GLfloat nx, ny, nz; // normals
GLfloat r, g, b, a; // colors
} Vertex;
typedef struct{
GLuint i; // index
} Index;
// create vertex buffer
GLuint vertexBufferObject;
glGenBuffers(1, &vertexBufferObject);
// create index buffer
GLuint indexBufferObject;
glGenBuffers(1, &indexBufferObject);
// determine number of vertices / primitives
const int numberOfVertices = SPACE_X * SPACE_Z;
const int numberOfPrimitives = numberOfVertices; // As I'm going to render GL_POINTS, number of primitives is the same as number of vertices
// create vertex array
Vertex* vertexArray = new Vertex[numberOfVertices];
// create index array
Index* indexArray = new Index[numberOfPrimitives];
// create planes (vertex array)
// color of the vertices is red for now
int index = -1;
for(GLfloat x = -SPACE_X / 2; x < SPACE_X / 2; x++) {
for(GLfloat z = -SPACE_Z / 2; z < SPACE_Z / 2; z++) {
index++;
vertexArray[index].x = x;
vertexArray[index].y = 0.0f;
vertexArray[index].z = z;
vertexArray[index].nx = 0.0f;
vertexArray[index].ny = 0.0f;
vertexArray[index].nz = 1.0f;
vertexArray[index].r = 1.0;
vertexArray[index].g = 0.0;
vertexArray[index].b = 0.0;
vertexArray[index].a = 1.0;
}
}
// bind vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
// buffer vertex array
glBufferData(GL_ARRAY_BUFFER, numberOfVertices * sizeof(Vertex), vertexArray, GL_DTREAM_DRAW);
// bind vertex buffer again
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
// enable attrib index 0 (positions)
glEnableVertexAttribArray(0);
// pass positions in
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), vertexArray);
// enable attribute index 1 (normals)
glEnableVertexAttribArray(1);
// pass normals in
glVertexAttribPointer((GLuint)1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].nx);
// enable attribute index 2 (colors)
glEnableVertexAttribArray(2);
// pass colors in
glVertexAttribPointer((GLuint)2, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].r);
// create index array
for(GLunit i = 0; i < numberOfPrimitives; i++) {
indexArray[i].i = i;
}
// bind buffer
glBindBuffer(GL_ELEMENET_ARRAY_BUFFER, indexBufferObject);
// buffer indices
glBufferData(GL_ELEMENET_ARRAY_BUFFER, numberOfPrimitives * sizeof(Index), indexArray, GL_STREAM_DRAW);
// bind buffer again
glBindBuffer(GL_ELEMENET_ARRAY_BUFFER, indexBufferObject);
// AND HERE IT CRASHES!
// draw plane of GL_POINTS
glDrawElements(GL_POINTS, numberOfPrimitives, GL_UNSIGNED_INT, indexArray);
// bind default buffers
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// delete vertex / index buffers
glDeleteBuffers(1, &vertexBufferObject);
glDeleteBuffers(1, &indexBufferObject);
delete[] vertexArray;
vertexArray = NULL;
delete[] indexArray;
indexArray = NULL;
When you are using buffer objects, the last parameters in the gl*Pointer and 4th parameter in glDrawElements are no longer addresses in main memory (yours still are!), but offsets into the buffer objects. Make sure to compute these offsets in bytes! The "offsetof" macro is very helpful there.
Look at the second example on this page and compare it to what you did: http://www.opengl.org/wiki/VBO_-_just_examples
And you have one typo: GL_DTREAM_DRAW.
The method glEnableClientState(...) is deprecated! Sorry, for some reason I had overlooked that fact.