I followed a tutorial to build a .obj model loader with OpenGL.
I have only one problem: at the end, we have a std::vector<glm::vec3> to draw.
In the tutorial they said to use glBufferData(), so I made this:
float* _vertices = new float[vertices.size() * 3];
for (int i = 0; i < vertices.size(); ++i)
{
    float* _t = glm::value_ptr(vertices[i]);
    for (int j = 0; j < 3; ++j)
        _vertices[i + j*(vertices.size() - 1)] = _t[j];
}
(I converted my vector into a float*.)
Then I load it:
mat4 projection;
mat4 modelview;
projection = perspective(70.0, (double)800 / 600, 1.0, 100.0);
modelview = mat4(1.0);
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(_vertices), _vertices, GL_DYNAMIC_DRAW);
And I finally draw it in my main loop:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
modelview = lookAt(vec3(3, 1, 3), vec3(0, 0, 0), vec3(0, 1, 0));
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
glDisableVertexAttribArray(0);
But it does not work... (I have a black screen)
sizeof(_vertices) does not give you what you expect: it returns the size of a float*, which is a pointer, not the number of bytes of the data behind the pointer.
Use vertices.data() for the pointer to the first element of the std::vector, and 3 * vertices.size() * sizeof(float) as the number of bytes, since each glm::vec3 contains 3 tightly packed floats.
Together:
glBufferData(GL_ARRAY_BUFFER, 3 * vertices.size() * sizeof(float), vertices.data(), GL_DYNAMIC_DRAW);
You can also replace 3 * sizeof(float) with sizeof(glm::vec3).
Also check whether your glm::perspective function expects the field of view in degrees or radians; you currently pass 70.0, which newer GLM versions interpret as radians.
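For example, with GLM 0.9.6 or newer (where angles are radians by default), the projection setup could look like this; glm::radians is the only addition to the question's code:
projection = glm::perspective(glm::radians(70.0f), 800.0f / 600.0f, 1.0f, 100.0f);
and the upload, written with sizeof(glm::vec3), becomes:
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), vertices.data(), GL_DYNAMIC_DRAW);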
I am trying to render 3D models with textures using Assimp. The conversion goes perfectly; all texture positions and so on get loaded. I have tested the texture images by drawing them to the screen in 2D.
For some reason the textures do not get rendered onto the model.
I am a beginner in OpenGL, so forgive me if I don't explain it right.
The tutorial I based the code on is from here, but I stripped a big part since I have my own camera/movement system.
The model renders like this: http://i.stack.imgur.com/5sK9K.png
while the texture in use looks like this: http://i.stack.imgur.com/sWGp7.jpg
The relevant rendering code is the following:
Generating textures from data file:
int Mesh::LoadGLTextures(const aiScene* scene){
    if (scene->HasTextures()) return -1; //yes this is correct

    /* getTexture Filenames and Numb of Textures */
    for (unsigned int m = 0; m<scene->mNumMaterials; m++){
        int texIndex = 0;
        aiReturn texFound;
        aiString path; // filename
        while ((texFound = scene->mMaterials[m]->GetTexture(aiTextureType_DIFFUSE, texIndex, &path)) == AI_SUCCESS){
            textureIdMap[path.data] = NULL; //fill map with textures, pointers still NULL yet
            texIndex++;
        }
    }

    int numTextures = textureIdMap.size();

    /* create and fill array with GL texture ids */
    GLuint* textureIds = new GLuint[numTextures];

    /* get iterator */
    std::map<std::string, GLuint>::iterator itr = textureIdMap.begin();

    std::string basepath = getBasePath(path);
    ALLEGRO_BITMAP *image;
    for (int i = 0; i<numTextures; i++){
        std::string filename = (*itr).first; // get filename
        (*itr).second = textureIds[i];       // save texture id for filename in map
        itr++;                               // next texture

        std::string fileloc = basepath + filename; /* Loading of image */
        image = al_load_bitmap(fileloc.c_str());

        if (image) /* If no error occured: */ {
            GLuint texId = al_get_opengl_texture(image);
            //glGenTextures(numTextures, &textureIds[i]); /* Texture name generation */
            glBindTexture(GL_TEXTURE_2D, texId); /* Binding of texture name */
            //redefine standard texture values
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear interpolation for magnification filter */
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear interpolation for minifying filter */
            textureIdMap[filename] = texId;
        } else {
            /* Error occured */
            std::cout << "Couldn't load Image: " << fileloc.c_str() << "\n";
        }
    }

    //Cleanup
    delete[] textureIds;

    //return success
    return true;
}
Generating VBO/VAO:
void Mesh::genVAOsAndUniformBuffer(const aiScene *sc) {
struct MyMesh aMesh;
struct MyMaterial aMat;
GLuint buffer;
// For each mesh
for (unsigned int n = 0; n < sc->mNumMeshes; ++n){
const aiMesh* mesh = sc->mMeshes[n];
// create array with faces
// have to convert from Assimp format to array
unsigned int *faceArray;
faceArray = (unsigned int *)malloc(sizeof(unsigned int) * mesh->mNumFaces * 3);
unsigned int faceIndex = 0;
for (unsigned int t = 0; t < mesh->mNumFaces; ++t) {
const aiFace* face = &mesh->mFaces[t];
memcpy(&faceArray[faceIndex], face->mIndices, 3 * sizeof(unsigned int));
faceIndex += 3;
}
aMesh.numFaces = sc->mMeshes[n]->mNumFaces;
// generate Vertex Array for mesh
glGenVertexArrays(1, &(aMesh.vao));
glBindVertexArray(aMesh.vao);
// buffer for faces
glGenBuffers(1, &buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * mesh->mNumFaces * 3, faceArray, GL_STATIC_DRAW);
// buffer for vertex positions
if (mesh->HasPositions()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mVertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(vertexLoc);
glVertexAttribPointer(vertexLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex normals
if (mesh->HasNormals()) {
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 3 * mesh->mNumVertices, mesh->mNormals, GL_STATIC_DRAW);
glEnableVertexAttribArray(normalLoc);
glVertexAttribPointer(normalLoc, 3, GL_FLOAT, 0, 0, 0);
}
// buffer for vertex texture coordinates
if (mesh->HasTextureCoords(0)) {
float *texCoords = (float *)malloc(sizeof(float) * 2 * mesh->mNumVertices);
for (unsigned int k = 0; k < mesh->mNumVertices; ++k) {
texCoords[k * 2] = mesh->mTextureCoords[0][k].x;
texCoords[k * 2 + 1] = mesh->mTextureCoords[0][k].y;
}
glGenBuffers(1, &buffer);
glEnableVertexAttribArray(texCoordLoc);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * 2 * mesh->mNumVertices, texCoords, GL_STATIC_DRAW);
glVertexAttribPointer(texCoordLoc, 2, GL_FLOAT, GL_FALSE, 0, 0);
}
// unbind buffers
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// create material uniform buffer
aiMaterial *mtl = sc->mMaterials[mesh->mMaterialIndex];
aiString texPath; //contains filename of texture
if (AI_SUCCESS == mtl->GetTexture(aiTextureType_DIFFUSE, 0, &texPath)){
//bind texture
unsigned int texId = textureIdMap[texPath.data];
aMesh.texIndex = texId;
aMat.texCount = 1;
} else {
aMat.texCount = 0;
}
float c[4];
set_float4(c, 0.8f, 0.8f, 0.8f, 1.0f);
aiColor4D diffuse;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_DIFFUSE, &diffuse))
color4_to_float4(&diffuse, c);
memcpy(aMat.diffuse, c, sizeof(c));
set_float4(c, 0.2f, 0.2f, 0.2f, 1.0f);
aiColor4D ambient;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_AMBIENT, &ambient))
color4_to_float4(&ambient, c);
memcpy(aMat.ambient, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D specular;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_SPECULAR, &specular))
color4_to_float4(&specular, c);
memcpy(aMat.specular, c, sizeof(c));
set_float4(c, 0.0f, 0.0f, 0.0f, 1.0f);
aiColor4D emission;
if (AI_SUCCESS == aiGetMaterialColor(mtl, AI_MATKEY_COLOR_EMISSIVE, &emission))
color4_to_float4(&emission, c);
memcpy(aMat.emissive, c, sizeof(c));
float shininess = 0.0;
unsigned int max;
aiGetMaterialFloatArray(mtl, AI_MATKEY_SHININESS, &shininess, &max);
aMat.shininess = shininess;
glGenBuffers(1, &(aMesh.uniformBlockIndex));
glBindBuffer(GL_UNIFORM_BUFFER, aMesh.uniformBlockIndex);
glBufferData(GL_UNIFORM_BUFFER, sizeof(aMat), (void *)(&aMat), GL_STATIC_DRAW);
myMeshes.push_back(aMesh);
}
}
Rendering model:
void Mesh::recursive_render(const aiScene *sc, const aiNode* nd){
// draw all meshes assigned to this node
for (unsigned int n = 0; n < nd->mNumMeshes; ++n){
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, myMeshes[nd->mMeshes[n]].texIndex);
// bind VAO
glBindVertexArray(myMeshes[nd->mMeshes[n]].vao);
// draw
glDrawElements(GL_TRIANGLES, myMeshes[nd->mMeshes[n]].numFaces * 3, GL_UNSIGNED_INT, 0);
}
// draw all children
for (unsigned int n = 0; n < nd->mNumChildren; ++n){
recursive_render(sc, nd->mChildren[n]);
}
}
Any other relevant code parts can be found in my open github project https://github.com/kwek20/StrategyGame/tree/master/Strategy
Mesh.cpp is relevant, as well as main.cpp and Camera.cpp.
As far as I understand, I followed the guidelines well: created a VAO, created VBOs, added data, and enabled the proper vertex attribute arrays to render the scene with.
I have checked all the data variables and everything is filled according to plan.
Could anyone here spot the mistake I have made and/or explain it?
Some links are typed weird because of the limit I have :(
It would help if you posted your shaders also.
I can post some rendering code with textures if that helps you out:
Generating the texture for OpenGL and loading a grayscale (UC8) image with width and height into the GPU:
void GLRenderer::getTexture(unsigned char * image, int width, int height)
{
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &mTextureID);
glBindTexture(GL_TEXTURE_2D, mTextureID);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGB8, width, height);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGR, GL_UNSIGNED_BYTE, image);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
glBindTexture(GL_TEXTURE_2D, 0);
}
Loading the vertices from Assimp onto the GPU:
//** buffer a obj file-style model, initialize the VAO
void GLRenderer::bufferModel(float* aVertexArray, int aNumberOfVertices, float* aNormalArray, int aNumberOfNormals, float* aUVList, int aNumberOfUVs, unsigned int* aIndexList, int aNumberOfIndices)
{
//** just to be sure we are current
glfwMakeContextCurrent(mWin);
//** Buffer all data in VBOs
glGenBuffers(1, &mVertex_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfVertices * 3, aVertexArray, GL_STATIC_DRAW);
glGenBuffers(1, &mNormal_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfNormals * 3, aNormalArray, GL_STATIC_DRAW);
glGenBuffers(1, &mUV_buffer_object);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * aNumberOfUVs * 2, aUVList, GL_STATIC_DRAW);
glGenBuffers(1, &mIndex_buffer_object);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * aNumberOfIndices, aIndexList, GL_STATIC_DRAW);
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
//** VAO tells our shaders how to match up data from buffer to shader input variables
glGenVertexArrays(1, &mVertex_array_object);
glBindVertexArray(mVertex_array_object);
//** vertices first
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, mVertex_buffer_object);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
//** normals next
if (aNumberOfNormals > 0){
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, mNormal_buffer_object);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** UVs last
if (aNumberOfUVs > 0){
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, mUV_buffer_object);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, NULL);
}
//** indexing for reusing vertices in triangle-meshes
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndex_buffer_object);
//** check errors and store the number of vertices
if (aux::checkGlErrors(__LINE__, __FILE__))assert(false);
mNumVert = aNumberOfVertices;
mNumNormals = aNumberOfNormals;
mNumUVs = aNumberOfUVs;
mNumIndices = aNumberOfIndices;
}
The code above is called like:
//read vertices from file
std::vector<float> vertex, normal, uv;
std::vector<unsigned int> index;
//assimp-wrapping function to load obj to vectors
aux::loadObjToVectors("Resources\\vertices\\model.obj", vertex, normal, index, uv);
mPtr->bufferModel(&vertex[0], static_cast<int>(vertex.size()) / 3, &normal[0], static_cast<int>(normal.size()) / 3, &uv[0], static_cast<int>(uv.size()) / 2, &index[0], static_cast<int>(index.size()));
Then comes the shader-part:
In the vertex shader you just hand the UV coordinates through:
#version 400 core
layout (location = 0) in vec3 vertexPosition_modelspace;
layout (location = 1) in vec3 vertexNormal_modelspace;
layout (location = 2) in vec2 vertexUV;
out vec2 UV;
[... in main then ...]
UV = vertexUV;
While in the fragment shader you assign the value to the pixel:
#version 400 core
in vec2 UV;
uniform sampler2D textureSampler;
layout(location = 0) out vec4 outColor;
[... in main then ...]
// you probably want to calculate lighting here too; this is just the simplest way to get the texture in
outColor = vec4(texture(textureSampler, UV).rgb, cosAngle);
//you can also check whether the UV coords are correctly bound by using:
outColor = vec4(UV.x, UV.y, 1, 1);
//and then checking the pixel values in the resulting image (e.g. render it to a PBO and download it onto the CPU for inspection)
In the rendering loop, also make sure that all the uniforms are correctly bound (especially the texture-related ones) and that the texture is active and bound:
if (mTextureID != -1) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mTextureID);
}
GLint textureLocation = glGetUniformLocation(mShaderProgram, "textureSampler");
glUniform1i(textureLocation, 0);
//** set the polygon mode
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
//**drawElements because of indexing
glDrawElements(GL_TRIANGLES, mNumIndices, GL_UNSIGNED_INT, 0);
I hope I could help you!
Kind regards,
VdoP
I'm having a really weird issue with depth testing here.
I'm rendering a simple mesh in an OpenGL 3.3 core profile context on Windows, with depth testing enabled and glDepthFunc set to GL_LESS. On my machine (a laptop with an NVIDIA GeForce GTX 660M) everything works as expected: the depth test is working, and this is what it looks like:
Now, if I run the program on a different PC, a tower with a Radeon R9 280, it looks more like this:
Strangely enough, when I call glEnable(GL_DEPTH_TEST) every frame before drawing, the result is correct on both machines.
Since it works when I do that, I figure the depth buffer is correctly created on both machines; it just seems that the depth test is somehow being disabled before rendering when I enable it only once at initialization.
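A quick way to tell the two cases apart (standard GL 3.3 calls, not part of my current code) is to query the depth-test state and the actual depth buffer size right before the draw call:
GLboolean depthTestEnabled = glIsEnabled(GL_DEPTH_TEST);
GLint depthBits = 0;
glGetFramebufferAttachmentParameteriv(GL_DRAW_FRAMEBUFFER, GL_DEPTH,
    GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE, &depthBits);
printf("depth test enabled: %d, depth bits: %d\n", depthTestEnabled, depthBits);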
Here's the minimum code that could somehow be part of the problem:
Code called at initialization, after a context is created and made current:
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
Code called every frame before the buffer swap:
glClearColor(0.4f, 0.6f, 0.8f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// mShaderProgram->getID() simply returns the handle of a simple shader program
glUseProgram(mShaderProgram->getID());
glm::vec3 myColor = glm::vec3(0.7f, 0.5f, 0.4f);
GLuint colorLocation = glGetUniformLocation(mShaderProgram->getID(), "uColor");
glUniform3fv(colorLocation, 1, glm::value_ptr(myColor));
glm::mat4 modelMatrix = glm::mat4(1.0f);
glm::mat4 viewMatrix = glm::lookAt(glm::vec3(0.0f, 3.0f, 5.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::mat4 projectionMatrix = glm::perspectiveFov(60.0f, (float)mWindow->getProperties().width, (float)mWindow->getProperties().height, 1.0f, 100.0f);
glm::mat4 inverseTransposeMVMatrix = glm::inverseTranspose(viewMatrix*modelMatrix);
GLuint mMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uModelMatrix");
GLuint vMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uViewMatrix");
GLuint pMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uProjectionMatrix");
GLuint itmvMatrixLocation = glGetUniformLocation(mShaderProgram->getID(), "uInverseTransposeMVMatrix");
glUniformMatrix4fv(mMatrixLocation, 1, GL_FALSE, glm::value_ptr(modelMatrix));
glUniformMatrix4fv(vMatrixLocation, 1, GL_FALSE, glm::value_ptr(viewMatrix));
glUniformMatrix4fv(pMatrixLocation, 1, GL_FALSE, glm::value_ptr(projectionMatrix));
glUniformMatrix4fv(itmvMatrixLocation, 1, GL_FALSE, glm::value_ptr(inverseTransposeMVMatrix));
// Similiar to the shader program, mMesh.gl_vaoID is simply the handle of a vertex array object
glBindVertexArray(mMesh.gl_vaoID);
glDrawArrays(GL_TRIANGLES, 0, mMesh.faces.size()*3);
With the above code, I'll get the wrong output on the Radeon.
Note: I'm using GLFW3 for context creation and GLEW for the function pointers (and obviously GLM for the math).
The vertex array object contains three attribute array buffers, for positions, uv coordinates and normals. Each of these should be correctly configured and sent to the shaders, as everything works fine when the depth test is enabled every frame.
I should also mention that the Radeon machine runs Windows 8 while the nVidia machine runs Windows 7.
Edit: By request, here's the code used to load the mesh and create the attribute data. I do not create any element buffer objects as I am not using indexed draw calls.
std::vector<glm::vec3> positionData;
std::vector<glm::vec2> uvData;
std::vector<glm::vec3> normalData;
std::vector<meshFaceIndex> faces;
std::ifstream fileStream(path);
if (!fileStream.is_open()){
    std::cerr << "ERROR: Could not open file '" << path << "'!\n";
    return;
}
std::string lineBuffer;
while (std::getline(fileStream, lineBuffer)){
    std::stringstream lineStream(lineBuffer);
    std::string typeString;
    lineStream >> typeString; // Get line token
    if (typeString == TOKEN_VPOS){ // Position
        glm::vec3 pos;
        lineStream >> pos.x >> pos.y >> pos.z;
        positionData.push_back(pos);
    }
    else if (typeString == TOKEN_VUV){ // UV coord
        glm::vec2 UV;
        lineStream >> UV.x >> UV.y;
        uvData.push_back(UV);
    }
    else if (typeString == TOKEN_VNORMAL){ // Normal
        glm::vec3 normal;
        lineStream >> normal.x >> normal.y >> normal.z;
        normalData.push_back(normal);
    }
    else if (typeString == TOKEN_FACE){ // Face
        meshFaceIndex faceIndex;
        char interrupt;
        for (int i = 0; i < 3; ++i){
            lineStream >> faceIndex.positionIndex[i] >> interrupt
                       >> faceIndex.uvIndex[i] >> interrupt
                       >> faceIndex.normalIndex[i];
        }
        faces.push_back(faceIndex);
    }
}
fileStream.close();
fileStream.close();
std::vector<glm::vec3> packedPositions;
std::vector<glm::vec2> packedUVs;
std::vector<glm::vec3> packedNormals;
for (auto f : faces){
    Face face; // Derp derp;
    for (auto i = 0; i < 3; ++i){
        if (!positionData.empty()){
            face.vertices[i].position = positionData[f.positionIndex[i] - 1];
            packedPositions.push_back(face.vertices[i].position);
        }
        else
            face.vertices[i].position = glm::vec3(0.0f);
        if (!uvData.empty()){
            face.vertices[i].uv = uvData[f.uvIndex[i] - 1];
            packedUVs.push_back(face.vertices[i].uv);
        }
        else
            face.vertices[i].uv = glm::vec2(0.0f);
        if (!normalData.empty()){
            face.vertices[i].normal = normalData[f.normalIndex[i] - 1];
            packedNormals.push_back(face.vertices[i].normal);
        }
        else
            face.vertices[i].normal = glm::vec3(0.0f);
    }
    myMesh.faces.push_back(face);
}
glGenVertexArrays(1, &(myMesh.gl_vaoID));
glBindVertexArray(myMesh.gl_vaoID);
GLuint positionBuffer; // positions
glGenBuffers(1, &positionBuffer);
glBindBuffer(GL_ARRAY_BUFFER, positionBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3)*packedPositions.size(), &packedPositions[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
GLuint uvBuffer; // uvs
glGenBuffers(1, &uvBuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2)*packedUVs.size(), &packedUVs[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
GLuint normalBuffer; // normals
glGenBuffers(1, &normalBuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3)*packedNormals.size(), &packedNormals[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
The .obj loading routine is mostly adapted from this one:
http://www.limegarden.net/2010/03/02/wavefront-obj-mesh-loader/
This doesn't look like a depth testing issue to me, but more like misalignment in the vertex / index array data. Please show us the code in which you load the vertex buffer objects and the element buffer objects.
It is because of the function ChoosePixelFormat.
In my case, ChoosePixelFormat returned a pixel format ID with the value 8, which provides a depth buffer with 16 bits instead of the required 24 bits.
One simple fix was to set the ID manually to 11 instead of 8 to get a suitable pixel format for the application with 24 bits of depth buffer.
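For completeness, a hedged sketch of requesting an explicit depth-buffer size when the context is created through GLFW3 (as in the question); GLFW_DEPTH_BITS already defaults to 24, this just makes the requirement explicit:
glfwWindowHint(GLFW_DEPTH_BITS, 24); // ask for a 24-bit depth buffer before creating the window
GLFWwindow* window = glfwCreateWindow(800, 600, "depth test", NULL, NULL);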
I have written a basic program that loads a model and renders it to the screen. I'm using GLSL to transform the model appropriately, but the normals always seem to be incorrect after rotating them, with every combination of model matrix, view matrix, inverse, transpose, etc. that I could think of. The model matrix is just a rotation around the y-axis using glm:
angle += deltaTime;
modelMat = glm::rotate(glm::mat4(), angle, glm::vec3(0.f, 1.f, 0.f));
My current vertex shader code (I've modified the normal line many many times):
#version 150 core
uniform mat4 projMat;
uniform mat4 viewMat;
uniform mat4 modelMat;
in vec3 inPosition;
in vec3 inNormal;
out vec3 passColor;
void main()
{
gl_Position = projMat * viewMat * modelMat * vec4(inPosition, 1.0);
vec3 normal = normalize(mat3(inverse(modelMat)) * inNormal);
passColor = normal;
}
And my fragment shader:
#version 150 core
in vec3 passColor;
out vec4 outColor;
void main()
{
outColor = vec4(passColor, 1.0);
}
I know for sure that the uniform variables are being passed to the shader properly, as the model itself gets transformed properly, and the initial normals are correct if I do calculations such as directional lighting.
I've created a GIF of the rotating model, sorry about the low quality:
http://i.imgur.com/LgLKHCb.gif?1
What confuses me the most is how the normals appear to rotate on multiple axes, which I don't think should happen when they are multiplied by a simple rotation matrix about one axis.
Edit:
I've added some more of the client code below.
This is where the buffers get bound for the model, in the Mesh class (vao is a GLuint defined in the class):
GLuint vbo[3];
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(normals? (uvcoords? 3 : 2) : (uvcoords? 2 : 1), vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, vcount * 3 * sizeof(GLfloat), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
if(normals)
{
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, vcount * 3 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_TRUE, 0, 0);
glEnableVertexAttribArray(1);
}
if(uvcoords)
{
glBindBuffer(GL_ARRAY_BUFFER, vbo[2]);
glBufferData(GL_ARRAY_BUFFER, vcount * 2 * sizeof(GLfloat), uvcoords, GL_STATIC_DRAW);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(2);
}
glBindVertexArray(0);
glGenBuffers(1, &ib);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ib);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, icount * sizeof(GLushort), indices, GL_STATIC_DRAW);
This is where the shaders are compiled after being loaded into memory with a simple readf(), in the Material class:
u32 vertexShader = glCreateShader(GL_VERTEX_SHADER);
u32 fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(vertexShader, 1, (const GLchar**)&vsContent, 0);
glCompileShader(vertexShader);
if(!validateShader(vertexShader)) return false;
glShaderSource(fragmentShader, 1, (const GLchar**)&fsContent, 0);
glCompileShader(fragmentShader);
if(!validateShader(fragmentShader)) return false;
programHandle = glCreateProgram();
glAttachShader(programHandle, vertexShader);
glAttachShader(programHandle, fragmentShader);
glBindAttribLocation(programHandle, 0, "inPosition");
glBindAttribLocation(programHandle, 1, "inNormal");
//glBindAttribLocation(programHandle, 2, "inUVCoords");
glLinkProgram(programHandle);
if(!validateProgram()) return false;
And the validateShader(GLuint) and validateProgram() functions:
bool Material::validateShader(GLuint shaderHandle)
{
char buffer[2048];
memset(buffer, 0, 2048);
GLsizei len = 0;
glGetShaderInfoLog(shaderHandle, 2048, &len, buffer);
if(len > 0)
{
Logger::log("ve::Material::validateShader: Failed to compile shader - %s", buffer);
return false;
}
return true;
}
bool Material::validateProgram()
{
char buffer[2048];
memset(buffer, 0, 2048);
GLsizei len = 0;
glGetProgramInfoLog(programHandle, 2048, &len, buffer);
if(len > 0)
{
Logger::log("ve::Material::validateProgram: Failed to link program - %s", buffer);
return false;
}
glValidateProgram(programHandle);
GLint status;
glGetProgramiv(programHandle, GL_VALIDATE_STATUS, &status);
if(status == GL_FALSE)
{
Logger::log("ve::Material::validateProgram: Failed to validate program");
return false;
}
return true;
}
Each Material instance has a std::map of Meshes, which get rendered like so:
void Material::render()
{
if(loaded)
{
glUseProgram(programHandle);
for(auto it = mmd->uniforms.begin(); it != mmd->uniforms.end(); ++it)
{
GLint loc = glGetUniformLocation(programHandle, (const GLchar*)it->first);
switch(it->second.type)
{
case E_UT_FLOAT3: glUniform3fv(loc, 1, it->second.f32ptr); break;
case E_UT_MAT4: glUniformMatrix4fv(loc, 1, GL_FALSE, it->second.f32ptr); break;
default: break;
}
}
for(Mesh* m : mmd->objects)
{
GLint loc = glGetUniformLocation(programHandle, "modelMat");
glUniformMatrix4fv(loc, 1, GL_FALSE, &m->getTransform()->getTransformMatrix()[0][0]);
m->render();
}
}
}
it->second.f32ptr would be a float pointer to &some_vec3[0] or &some_mat4[0][0].
I manually upload the model's transformation matrix before rendering, however (it is only a rotation matrix; the Transform class returned by Mesh::getTransform() only does a glm::rotate(), since I was trying to figure out the problem).
Lastly, the Mesh render code:
if(loaded)
{
glBindVertexArray(vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ib);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_SHORT, 0);
}
I think this is all the necessary code, but I can post more if needed.
Your normal matrix calculation is just wrong. The correct normal matrix is the transpose of the inverse of the upper-left 3x3 submatrix of the model or modelview matrix (depending on the space in which you want to do your lighting calculations).
What you do is invert the full 4x4 matrix and take the upper-left 3x3 submatrix of that, which is just totally wrong.
You should calculate transpose(inverse(mat3(modelMat))), but you really shouldn't do this in the shader; calculate it together with the model matrix on the CPU, to avoid letting the GPU perform a quite expensive matrix inversion per vertex.
As long as your transformations consist of only rotations, translations, and uniform scaling, you can simply apply the rotation part of your transformations to the normals.
In general, it's the transposed inverse matrix that needs to be applied to the normals, using only the regular 3x3 linear transformation matrix, without the translation part that extends the matrix to 4x4.
For pure rotations, the inverse-transpose is identical to the original matrix, and for uniform scaling it differs only by a constant factor, which is irrelevant once the normals are re-normalized. So the matrix operations to invert and transpose matrices are only needed if you apply other types of transformations, like non-uniform scaling or shear transforms.
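A minimal sketch of that CPU-side calculation with GLM (uNormalMatrix is an illustrative uniform name, not one from the code above):
#include <glm/gtc/matrix_inverse.hpp> // glm::inverseTranspose
#include <glm/gtc/type_ptr.hpp>       // glm::value_ptr

glm::mat3 normalMatrix = glm::inverseTranspose(glm::mat3(modelMat));
glUniformMatrix3fv(glGetUniformLocation(programHandle, "uNormalMatrix"),
                   1, GL_FALSE, glm::value_ptr(normalMatrix));
// in the vertex shader: passColor = normalize(uNormalMatrix * inNormal);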
Apparently, if the vertex normals of a mesh are incorrect, then strange rotation artifacts will occur. In my case, I had transformed the mesh in my 3D modelling program (Blender) by 90 degrees on the X axis, as Blender uses the z-axis as its vertical axis, whereas my program uses the y-axis as the vertical axis. However, the method I used to transform/rotate the mesh in Blender in my export script did not properly transform the normals, but only the positions of the vertices. Without any prior transformations, the program works as expected. I initially found out that the normals were incorrect by comparing the normalized positions and normals in a symmetrical object (I used a cube with smoothed normals), and saw that the normals were rotated. Thank you to #derhass and #Solkar for guiding me to the answer.
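For illustration, a hedged sketch (using GLM; the exact angle and sign depend on the exporter) of baking the same fixed rotation into the normals that gets baked into the positions:
glm::mat4 bake = glm::rotate(glm::mat4(1.0f), glm::radians(-90.0f), glm::vec3(1.0f, 0.0f, 0.0f));
glm::mat3 bakeNormal = glm::mat3(bake); // pure rotation, so the inverse-transpose equals the matrix itself
position = glm::vec3(bake * glm::vec4(position, 1.0f));
normal   = glm::normalize(bakeNormal * normal);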
However, if anyone still wants to contribute, I would like to know why the normals don't rotate about just one axis when multiplied by a single-axis rotation matrix, even if they are incorrect.
When I use immediate mode it draws correctly, but when I pass the same vertices to the GPU, or even just point to them, it doesn't work.
There is a position buffer holding the vertices:
std::vector<glm::vec3> posbuf;
and indices for it.
Immediate mode:
for (unsigned int i = 0; i < indices.size(); i++) {
    glBegin(GL_TRIANGLES);
    glVertex3f(posbuf[indices[i].index[0]].x, posbuf[indices[i].index[0]].y, posbuf[indices[i].index[0]].z);
    glVertex3f(posbuf[indices[i].index[1]].x, posbuf[indices[i].index[1]].y, posbuf[indices[i].index[1]].z);
    glVertex3f(posbuf[indices[i].index[2]].x, posbuf[indices[i].index[2]].y, posbuf[indices[i].index[2]].z);
    glEnd();
}
And this is the vertex attribute code:
glEnableVertexAttribArray(GL_ATTRIB_POS);
glVertexAttribPointer(GL_ATTRIB_POS, 3, GL_FLOAT, GL_FALSE, 0,&posbuf[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(GL_ATTRIB_POS);
And a shader for it:
#version 120
attribute vec3 position;
void main()
{
vec4 finalv=vec4(position, 1.0);
gl_Position = gl_ModelViewProjectionMatrix * vec4(finalv.xyz,1.0);
}
[frag]
#version 120
void main()
{
gl_FragColor = vec4(1.0,0.0,0.0,1.0);
}
Immediate result:
Shader result:
I don't know what's wrong; I also tried to pass posbuf using glm::value_ptr, and they all give the same result. I am on Fedora 18, supporting GLSL up to #version 140, OpenGL 3.3.
EDIT:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * indices.size(), &indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
struct Triangle {
int index[3];
};
It seems your indices vector contains complete triangles consisting of 3 indices each, but then in glDrawElements you use indices.size() as the number of elements to draw. glDrawElements doesn't take the number of primitives, it takes the number of indices, so you're missing 2/3 of your triangles. It should rather be
glDrawElements(GL_TRIANGLES, indices.size() * 3, GL_UNSIGNED_INT, 0);
(And indices should be renamed to triangles to avoid further confusion. By the way, index should probably be unsigned int[3] as promised to OpenGL. On any reasonable system there won't be a representational difference between ints and unsigned ints, but it is a bit inconsistent to use int while telling OpenGL it's an unsigned int.)
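A short sketch of the upload and draw call made consistent with each other (the renamed identifiers follow the suggestion above and are illustrative):
struct Triangle {
    unsigned int index[3]; // matches GL_UNSIGNED_INT in glDrawElements
};
std::vector<Triangle> triangles; // formerly "indices"

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * triangles.size(), triangles.data(), GL_STATIC_DRAW);
// ...
glDrawElements(GL_TRIANGLES, triangles.size() * 3, GL_UNSIGNED_INT, 0);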