I am using Assimp to load models to render in OpenGL but am running into an issue where chunks/pieces of a mesh don't render.
Example:
What the model is supposed to look like:
What I end up rendering:
As you can see, some of the model is rendering properly, but not all.
I have verified multiple times that the meshes loaded from Assimp contain the correct vertices and indices in my "Mesh" class. Here is my code for loading a model:
This function will recursively call itself for all child nodes and load each mesh inside the node. Each mesh will then be transformed into my own "Mesh" class by creating a vector of vertices and faces.
void Model::LoadAssimpNode(aiNode* node, const aiScene* scene)
{
    // Process assimp meshes
    for (unsigned int i = 0; i < node->mNumMeshes; i++)
    {
        aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];
        this->meshes.push_back(this->LoadAssimpMesh(mesh, scene));
    }
    // Recursively process child nodes
    for (unsigned int i = 0; i < node->mNumChildren; i++)
    {
        this->LoadAssimpNode(node->mChildren[i], scene);
    }
}
Mesh Model::LoadAssimpMesh(aiMesh* mesh, const aiScene* scene)
{
    std::vector<sVertex> vertices;
    for (unsigned int i = 0; i < mesh->mNumVertices; i++)
    {
        sVertex vertex;
        vertex.x = mesh->mVertices[i].x;
        vertex.y = mesh->mVertices[i].y;
        vertex.z = mesh->mVertices[i].z;
        vertex.nx = mesh->mNormals[i].x;
        vertex.ny = mesh->mNormals[i].y;
        vertex.nz = mesh->mNormals[i].z;
        if (mesh->mTextureCoords[0])
        {
            vertex.u0 = mesh->mTextureCoords[0][i].x;
            vertex.v0 = mesh->mTextureCoords[0][i].y;
        }
        vertices.push_back(vertex);
    }
    std::vector<sTriangle> faces;
    for (unsigned int i = 0; i < mesh->mNumFaces; i++)
    {
        sTriangle face;
        aiFace assimpFace = mesh->mFaces[i];
        if (assimpFace.mNumIndices != 3)
        {
            std::cout << "Face is not a triangle!" << std::endl;
        }
        for (unsigned int j = 0; j < assimpFace.mNumIndices; j++)
        {
            face.vertIndex[j] = assimpFace.mIndices[j];
        }
        faces.push_back(face);
    }
    std::vector<Texture> textures;
    if (mesh->mMaterialIndex >= 0)
    {
        aiMaterial* material = scene->mMaterials[mesh->mMaterialIndex];
        // Sampler names should adhere to the following convention:
        // Diffuse: texture_diffuseN
        // Specular: texture_specularN
        // Normal: texture_normalN
        // Where N = texture number
        for (Texture texture : this->LoadAssimpMaterialTextures(material, aiTextureType_DIFFUSE, "texture_diffuse"))
        {
            this->loadedTextures.insert(std::make_pair(texture.path.C_Str(), texture));
            textures.push_back(texture);
        }
        for (Texture texture : this->LoadAssimpMaterialTextures(material, aiTextureType_SPECULAR, "texture_specular"))
        {
            this->loadedTextures.insert(std::make_pair(texture.path.C_Str(), texture));
            textures.push_back(texture);
        }
    }
    return Mesh(vertices, faces, textures);
}
The sVertex and sTriangle structs are defined as:
struct sVertex
{
    float x, y, z;
    float nx, ny, nz;
    float u0, v0;
};
struct sTriangle
{
    unsigned int vertIndex[3];
};
Now that the model is loaded from Assimp, we call the SetupMesh() function, which sets up each mesh's VAO, VBO and EBO:
void Mesh::SetupMesh()
{
    // Generate IDs for our VAO, VBO and EBO
    glGenVertexArrays(1, &this->VAO);
    glGenBuffers(1, &this->VBO);
    glGenBuffers(1, &this->EBO);
    glBindVertexArray(this->VAO);
    // Now ANY state that is related to vertex or index buffer
    // and vertex attribute layout, is stored in the 'state'
    // of the VAO...
    // Tell OpenGL where to look for vertex data
    glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
    glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(sVertex), &this->vertices[0], GL_STATIC_DRAW);
    // Tell OpenGL where our index buffer begins (AKA: where to look for faces)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->faces.size() * sizeof(sTriangle), &this->faces[0], GL_STATIC_DRAW);
    // Set the vertex attributes for this shader
    // Layout information can be found in the vertex shader, currently:
    // 0 = position
    // 1 = normals
    // 2 = texture coordinates
    glEnableVertexAttribArray(0); // position
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*) offsetof(sVertex, x));
    glEnableVertexAttribArray(1); // normal
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*) offsetof(sVertex, nx));
    glEnableVertexAttribArray(2); // textureCoordinates
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*) offsetof(sVertex, u0));
    // Now that all the parts are set up, unbind buffers
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
}
Once this is all set up, I call the Draw method for each mesh in my render loop:
void Mesh::Draw(const CompiledShader& shader)
{
    glm::mat4 matModel = glm::mat4(1.0f);
    glm::mat4 matTranslate = glm::translate(glm::mat4(1.0f), this->positionXYZ); // Translation matrix
    glm::mat4 rotateX = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.x, glm::vec3(1.0f, 0.0f, 0.0f)); // X axis rotation
    glm::mat4 rotateY = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.y, glm::vec3(0.0f, 1.0f, 0.0f)); // Y axis rotation
    glm::mat4 rotateZ = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.z, glm::vec3(0.0f, 0.0f, 1.0f)); // Z axis rotation
    glm::mat4 matScale = glm::scale(glm::mat4(1.0f), glm::vec3(this->scale, this->scale, this->scale)); // Scale the mesh
    glm::mat4 matInvTransposeModel = glm::inverse(glm::transpose(matModel));
    // Apply all the transformations to our matrix
    matModel = matModel * matTranslate;
    matModel = matModel * rotateZ;
    matModel = matModel * rotateY;
    matModel = matModel * rotateX;
    matModel = matModel * matScale;
    glUseProgram(shader.ID);
    glUniformMatrix4fv(glGetUniformLocation(shader.ID, "matModel"), 1, GL_FALSE, glm::value_ptr(matModel)); // Tell shader the model matrix (AKA: Position orientation and scale)
    glUniformMatrix4fv(glGetUniformLocation(shader.ID, "matModelInverseTranspose"), 1, GL_FALSE, glm::value_ptr(matInvTransposeModel));
    // Draw the mesh
    glBindVertexArray(this->VAO);
    glDrawElements(GL_TRIANGLES, this->faces.size(), GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);
}
My shaders are very simple: the color of a pixel is just the vertex's normal.
Vertex Shader:
#version 420
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 textureCoordinates;
uniform mat4 matModel;
uniform mat4 matView;
uniform mat4 matProjection;
uniform mat4 matModelInverseTranspose; // For normal calculation
out vec4 fVertWorldLocation;
out vec4 fNormal;
out vec2 TextureCoordinates;
void main()
{
    mat4 MVP = matProjection * matView * matModel;
    gl_Position = MVP * vec4(position, 1.0f);
    TextureCoordinates = textureCoordinates;
    // The location of the vertex in "world" space (not screen space)
    fVertWorldLocation = matModel * vec4(position, 1.0f);
    // Calculate the normal based on any rotation we've applied.
    // This inverse transpose removes scaling and translation (movement)
    // from the matrix.
    fNormal = matModelInverseTranspose * vec4(normal, 1.0f);
}
Fragment Shader:
#version 420
in vec2 TextureCoordinates;
in vec4 fNormal;
out vec4 Color;
uniform sampler2D texture_diffuse;
void main()
{
    //Color = vec4(texture(texture_diffuse, TextureCoordinates));
    //Color = vec4(TextureCoordinates, 1.0f, 1.0f);
    Color = fNormal;
}
Sorry for the insane length of this post, but I feel that all of it was necessary to get my point across.
If anyone could point out what I am doing wrong here it would be greatly appreciated! I feel like I need an extra pair of eyes here because I have read my code over countless times and can't seem to come up with anything.
I made a stupid mistake: I was under the impression that the "count" argument of glDrawElements() wanted the number of faces, NOT the number of indices.
The problem was fixed by changing my glDrawElements call from:
glDrawElements(GL_TRIANGLES, this->faces.size(), GL_UNSIGNED_INT, 0);
To this:
glDrawElements(GL_TRIANGLES, this->faces.size() * 3, GL_UNSIGNED_INT, 0);
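For reference, one way to make that relationship explicit and avoid repeating the multiplication is to cache the index count when the buffers are built; a minimal sketch (numIndices is a hypothetical member, not part of the original code):

// In SetupMesh(), after uploading the EBO:
this->numIndices = static_cast<GLsizei>(this->faces.size() * 3); // each sTriangle contributes 3 indices

// In Draw():
glDrawElements(GL_TRIANGLES, this->numIndices, GL_UNSIGNED_INT, 0);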
Related
For everyone unfamiliar, OpenGL instanced drawing is where many objects are drawn with one draw call, so glDrawArrays is only called once for a thousand objects on the screen instead of once for every object.
Now the question is: how do I implement instanced rendering in OpenGL 3 for objects whose vertices are constantly changing? Creating an array, or specifying a position input in the vertex shader dedicated specifically to where the objects are, won't work, as I'm dealing with a constantly changing vector of objects that shift coordinates at different velocities every frame.
The header for the object class I'm working with, and the vertex shader I have, are described below for reference.
//CLASS
class Laser {
public:
GLfloat x, y, xVelocity, yVelocity;
GLuint texture;
GLfloat angle;
GLfloat velocity;
GLfloat width, height;
GLfloat drawWidth = 16;
GLfloat drawHeight = 16;
GLfloat damage;
GLint actsToDissapear = -1;
GLint actsExisting = 0;
GLboolean expired = false;
GLboolean isRotated = false;
GLboolean variableColor = false;
glm::vec3 color;
std::string type = "Laser";
Laser(GLfloat damage, GLfloat width, GLfloat height, GLuint texture, GLfloat x, GLfloat y, GLfloat xVelocity, GLfloat yVelocity, GLfloat drawWidth, GLfloat drawHeight, GLfloat actsToDissapear, GLboolean isRotated, GLfloat angle, GLboolean variableColor, glm::vec3 color);
virtual void draw(SpriteRenderer* s);
virtual void move(Rachel* player);
};
//VERTEX SHADER
#version 330 core
layout (location = 0) in vec4 vertex;
uniform mat4 model;
uniform mat4 projection;
out vec2 TexCoords;
void main() {
TexCoords = vec2(vertex.z, vertex.w);
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
The concept you look for is attribute divisor. See glVertexAttribDivisor.
In a few words: you change your model matrix from uniform to an instanced attribute that's read from a buffer. Each frame you update that buffer with the new positions of each instance. One thing to consider when implementing this is to use (vec3 offset, quat4 orientation) representation for the model matrix in order to reduce the number of consumed attributes by half. Also, depending on the exact problem you have at hand, you can update that buffer directly on the GPU with compute shaders.
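A minimal sketch of that setup using a per-instance 2D offset (instanceVBO, a std::vector<Laser> named lasers, attribute location 2, and a 6-vertex quad are assumptions, not names taken from the question):

// Once, with the quad's VAO bound: create the per-instance buffer and mark the attribute as instanced
GLuint instanceVBO;
glGenBuffers(1, &instanceVBO);
glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(glm::vec2), (void*)0);
glVertexAttribDivisor(2, 1); // advance this attribute once per instance, not once per vertex

// Every frame: re-upload the current positions and draw all lasers with a single call
std::vector<glm::vec2> offsets;
for (const Laser& l : lasers)
    offsets.push_back(glm::vec2(l.x, l.y));
glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);
glBufferData(GL_ARRAY_BUFFER, offsets.size() * sizeof(glm::vec2), offsets.data(), GL_STREAM_DRAW);
glDrawArraysInstanced(GL_TRIANGLES, 0, 6, (GLsizei)offsets.size());

In the vertex shader the offset then arrives as an ordinary input, e.g. layout (location = 2) in vec2 instanceOffset;, and gets added to vertex.xy before the projection, replacing the per-object model uniform.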
Here's a code example of what I think you're looking for. I used instanced rendering for my particle system; it supports textures, colors and movement, and works on both Android OpenGL ES and Windows OpenGL. This code requires some work to run, but it should be fairly easy to get going.
#include "ParticleSystem.h"
#include "Engine.h"
#include "Transform.h"
#include "Shader.h"
#include "Texture.h"
#include "Mesh.h"
#include "ShaderHandler.h"
ParticleSystem::ParticleSystem()
{
}
ParticleSystem::~ParticleSystem()
{
shader = nullptr;
texture = nullptr;
glDeleteVertexArrays(1, &vertexArrayObject);
}
void ParticleSystem::init(Engine * engine, float size, Texture * texture, float maxVelocity, bool gravity)
{
this->maxVelocity = maxVelocity;
this->gravity = gravity;
this->size = size;
vertex =
{
-size, -size, 0.0f,
-size, size, 0.0f,
size, size, 0.0f,
size, -size, 0.0f
};
indices =
{
1, 0, 2, 3
};
this->shader = engine->getShaderHandler()->loadShader("res/shaders/texturedInstancedShader");
this->texture = texture;
glGenVertexArrays(1, &this->vertexArrayObject);
glBindVertexArray(this->vertexArrayObject);
glGenBuffers(ParticleSystem::NUM_BUFFERS, this->vertexArrayBuffer);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->VERTEX_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * this->vertex.size(), &this->vertex[0], GL_STATIC_DRAW); //send model to GPU
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->TEXTURE_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2) * this->texCoords.size(), &this->texCoords[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->vertexArrayBuffer[this->INDEX_VB]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * indices.size(), &this->indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * this->positions.size(), NULL, GL_STREAM_DRAW); //NULL (empty) buffer
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->COLOR_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec4) * this->colors.size(), NULL, GL_STREAM_DRAW); //NULL (empty) buffer
glBindVertexArray(0);
}
void ParticleSystem::createPoint(float pps, float deltaTime, glm::vec3 position, float maxLife, glm::vec4 color, glm::vec3 velocity)
{
Particle particle;
float amountPerSecond = pps * deltaTime;
for (float i = 0; i < amountPerSecond; i++)
{
particle.life = (rand() % static_cast<int>(maxLife * 100)) / 100.f;
particle.velocity =
{
((rand() % 200 / 100.f) - 1.f) * velocity.x,
((rand() % 200 / 100.f) - 1.f) * velocity.y,
((rand() % 200 / 100.f) - 1.f) * velocity.z
};
particles.emplace_back(particle);
positions.emplace_back(position);
colors.emplace_back(color);
}
}
void ParticleSystem::draw(glm::mat4 view)
{
if (particles.size() > 0)
{
Transform transform;
this->shader->bind();
this->shader->loadTransform(transform, view);
this->shader->loadInt(U_TEXTURE0, 0);
this->texture->bind(0);
glBindVertexArray(vertexArrayObject);
glVertexAttribDivisor(0, 0);
glVertexAttribDivisor(1, 1);
glVertexAttribDivisor(2, 1);
glVertexAttribDivisor(3, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->VERTEX_VB]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * positions.size(), &positions[0], GL_STREAM_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->COLOR_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec4) * colors.size(), &colors[0], GL_STREAM_DRAW);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(3);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->TEXTURE_VB]);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawElementsInstanced(GL_TRIANGLE_STRIP, indices.size(), GL_UNSIGNED_INT, 0, positions.size());
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindVertexArray(0);
}
}
void ParticleSystem::update(float deltaTime)
{
for (std::size_t i = 0; i < particles.size(); i++)
{
particles[i].life -= (1.f * deltaTime); //decrease life with 1 per second
if (particles[i].life <= 0.f) //dead
{
particles.erase(particles.begin() + i);
colors.erase(colors.begin() + i);
positions.erase(positions.begin() + i);
continue;
}
if (this->gravity == true)
{
if (particles[i].velocity.y > -maxVelocity)
{
particles[i].velocity.y -= maxVelocity * deltaTime; //1 second to reach maximum velocity
}
else
{
particles[i].velocity.y = -maxVelocity;
}
}
positions[i] += (particles[i].velocity * deltaTime);
}
}
Here's the shader:
vertex shader:
#version 330 core
layout(location = 0) in vec3 vertex;
layout(location = 1) in vec3 positions;
layout(location = 2) in vec4 colors;
layout(location = 3) in vec2 texCoords;
out vec2 texCoord;
out vec4 color;
uniform mat4 transform;
void main()
{
color = colors;
texCoord = texCoords;
gl_Position = transform * vec4(vertex + positions, 1.0);
}
fragment shader:
#version 330 core
in vec4 color;
in vec2 texCoord;
out vec4 colors;
uniform sampler2D texture0;
void main()
{
vec4 texel = texture2D(texture0, texCoord);
if (texel.a <= 0.5)
{
discard;
}
colors = color * texel;
}
I am trying to create a program that shows a wave-like animation using Perlin Noise by creating many triangles.
This is the important part of my program:
class OGLT9_NOISE
{
//class for Perlin Noise (noise3d()) and Fractional Brownian Motion (fbm()) generation
};
glm::vec3 OGLT9_GRAPHICS::getNormal(glm::vec3 a, glm::vec3 b, glm::vec3 c)
{
return glm::normalize(glm::cross(c-a, b-a));
}
void generateTerrain(OGLT9_SHADER *oglt9Shader)
{
static OGLT9_NOISE noise;
static float yValue = 0;
int terrainRes = 7; //terrain's resolution
float terrainSpacing = 10.0f;
vector<glm::vec3> vertexData;
vector<glm::vec3> normalData;
multi_array<float, 2> terrain;
terrain.resize(extents[1<<terrainRes][1<<terrainRes]);
for(long z=-(1<<(terrainRes-1)); z<(1<<(terrainRes-1)); z++)
for(long x=-(1<<(terrainRes-1)); x<(1<<(terrainRes-1)); x++)
terrain[z+(1<<(terrainRes-1))][x+(1<<(terrainRes-1))] = (noise.fbm((double)x/16.0, yValue, (double)z/16.0, 2, 0.4, 1.2, 2.9, 1.1)/2.0+0.5)*100.0;
for(long z=0; z<(1<<terrainRes)-1; z++)
{
for(long x=0; x<(1<<terrainRes)-1; x++)
{
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z][x+1], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z+1][x], ((float)z+1.0f)*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
normalData.push_back(getNormal(vertexData[vertexData.size()-6], vertexData[vertexData.size()-5], vertexData[vertexData.size()-4]));
normalData.push_back(normalData[normalData.size()-1]);
normalData.push_back(normalData[normalData.size()-2]);
normalData.push_back(getNormal(vertexData[vertexData.size()-3], vertexData[vertexData.size()-2], vertexData[vertexData.size()-1]));
normalData.push_back(normalData[normalData.size()-1]);
normalData.push_back(normalData[normalData.size()-2]);
}
}
glUseProgram(oglt9Shader->program);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, vertexData.size()*3*sizeof(float), vertexData.data(), GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
glGenBuffers(1, &nbo);
glBindBuffer(GL_ARRAY_BUFFER, nbo);
glBufferData(GL_ARRAY_BUFFER, normalData.size()*3*sizeof(float), normalData.data(), GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
numVertices = vertexData.size()*3;
yValue += 0.01f;
}
void render()
{
//Clear screen and enable depth buffer
//Create and transmit matrices and light direction to shaders
generateTerrain(oglt9Shader);
glDrawArrays(GL_TRIANGLES, 0, numVertices);
glDeleteBuffers(1, &vbo);
glDeleteBuffers(1, &nbo);
//Swap buffers to window
}
And my vertex shader...
#version 430 core
layout (location = 0) in vec3 vPosition;
layout (location = 1) in vec3 vNormal;
uniform mat4 mMatrix;
uniform mat4 vMatrix;
uniform mat4 pMatrix;
out vec3 fPosition;
out vec3 fNormal;
void main(void)
{
gl_Position = pMatrix * vMatrix * mMatrix * vec4(vPosition, 1.0);
fPosition = vPosition;
fNormal = normalize(transpose(inverse(mat3(mMatrix))) * vNormal);
}
...and fragment shader.
#version 430 core
in vec3 fPosition;
in vec3 fNormal;
out vec4 outColor;
uniform vec3 lightDirection;
void main(void)
{
vec3 rawColor = vec3(1.0);
vec3 ambientColor = vec3(1.0, 1.0, 1.0);
float diffuseIntensity = max(0.0, dot(fNormal, lightDirection));
vec3 diffuseColor = diffuseIntensity * vec3(0.9, 0.9, 0.9);
outColor = vec4(rawColor*ambientColor*diffuseColor, 1.0);
}
This is the final image:
So, what can I do to make the triangles smooth so you can't see these hard edges anymore?
You're using the same normal for all 3 vertices of each triangle. This will essentially result in flat shading, meaning that the color of each triangle is constant.
What you need is normals that better approximate the actual normals of the surface, instead of calculating the normal of each triangle separately. To get a smooth looking surface, you need to have one normal per vertex, and then use that normal when specifying the vertex for all the triangles that share the vertex.
The most efficient way of doing this is to store each vertex/normal of your grid in the VBO only once. You can then use an index buffer to reference the vertices when defining the triangles. This means that you have an additional buffer of type GL_ELEMENT_ARRAY_BUFFER containing indices, and then draw with glDrawElements(). You should be able to find reference information and tutorials on how to do that.
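A sketch of that indexed layout for this particular grid (gridVertices, gridIndices and ebo are illustrative names, not part of the original code):

int n = 1 << terrainRes;
std::vector<glm::vec3> gridVertices(n * n); // each grid point is stored exactly once
std::vector<unsigned int> gridIndices;
for (long z = 0; z < n; z++)
    for (long x = 0; x < n; x++)
        gridVertices[z * n + x] = glm::vec3((float)x * terrainSpacing, terrain[z][x], (float)z * terrainSpacing);
for (long z = 0; z < n - 1; z++)
{
    for (long x = 0; x < n - 1; x++)
    {
        unsigned int i0 = (unsigned int)(z * n + x); // (x,   z)
        unsigned int i1 = i0 + 1;                    // (x+1, z)
        unsigned int i2 = i0 + (unsigned int)n;      // (x,   z+1)
        unsigned int i3 = i2 + 1;                    // (x+1, z+1)
        // two triangles per cell, referencing the shared vertices (same winding as the original loop)
        gridIndices.insert(gridIndices.end(), { i0, i3, i1, i0, i2, i3 });
    }
}
GLuint ebo;
glGenBuffers(1, &ebo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, gridIndices.size() * sizeof(unsigned int), gridIndices.data(), GL_STATIC_DRAW);
// later: glDrawElements(GL_TRIANGLES, (GLsizei)gridIndices.size(), GL_UNSIGNED_INT, NULL);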
To actually obtain the normals, one common approach is to average the normals of all adjacent triangles to calculate the normal at each vertex.
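A sketch of that averaging, building on the indexed layout above:

std::vector<glm::vec3> gridNormals(gridVertices.size(), glm::vec3(0.0f));
for (std::size_t i = 0; i + 2 < gridIndices.size(); i += 3)
{
    const glm::vec3& a = gridVertices[gridIndices[i]];
    const glm::vec3& b = gridVertices[gridIndices[i + 1]];
    const glm::vec3& c = gridVertices[gridIndices[i + 2]];
    glm::vec3 faceNormal = glm::normalize(glm::cross(c - a, b - a)); // same convention as getNormal()
    // accumulate the face normal on every vertex the triangle touches
    gridNormals[gridIndices[i]]     += faceNormal;
    gridNormals[gridIndices[i + 1]] += faceNormal;
    gridNormals[gridIndices[i + 2]] += faceNormal;
}
for (glm::vec3& normal : gridNormals)
    normal = glm::normalize(normal); // average direction of all adjacent faces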
I want to draw a simple square. First I used glDrawArrays, but now I want to change it to glDrawElements. I have read a bunch of tutorials, but for some reason it doesn't render anything.
Renderer class:
class Renderer_t {
private:
...
glm::mat4 projectionMatrix; // Store the projection matrix
glm::mat4 viewMatrix; // Store the view matrix
glm::mat4 modelMatrix; // Store the model matrix
unsigned int vaoID[1]; // Our Vertex Array Object
unsigned int vboID[3]; // Our Vertex Buffer Object
...
};
Initialize of scene:
Renderer_t::Renderer_t(SDL_Window* window): scene(nullptr), width(800), height(600) {
LOG(info) << "Renderer_t constructor";
gl = SDL_GL_CreateContext(window);
glbinding::Binding::initialize();
//Initialize scene
glClearColor(0.4f, 0.6f, 0.9f, 0.0f);
shader = new Shader("../assets/shader.vert", "../assets/shader.frag");
float ratio = width/height;
projectionMatrix = glm::perspective(60.0f, ratio, 0.1f, 100.f); // Create our perspective projection matrix
int vertnum = 4 * 3; //6x
//Create square
float* vertices = new float[vertnum]; // Vertices for our square
float* colors = new float[vertnum]; // Colors for our vertices
unsigned int* indices = new unsigned int[6];
indices[0] = 0; indices[0] = 1; indices[0] = 2;
indices[0] = 2; indices[0] = 3; indices[0] = 0;
vertices[0] = -0.5; vertices[1] = -0.5; vertices[2] = 0.0; // Bottom left corner
colors[0] = 1.0; colors[1] = 1.0; colors[2] = 1.0; // Bottom left corner
vertices[3] = -0.5; vertices[4] = 0.5; vertices[5] = 0.0; // Top left corner
colors[3] = 1.0; colors[4] = 0.0; colors[5] = 0.0; // Top left corner
vertices[6] = 0.5; vertices[7] = 0.5; vertices[8] = 0.0; // Top Right corner
colors[6] = 0.0; colors[7] = 1.0; colors[8] = 0.0; // Top Right corner
vertices[9] = 0.5; vertices[10] = -0.5; vertices[11] = 0.0; // Bottom right corner
colors[9] = 0.0; colors[10] = 0.0; colors[11] = 1.0; // Bottom right corner
/*
vertices[12] = -0.5; vertices[13] = -0.5; vertices[14] = 0.0; // Bottom left corner
colors[12] = 1.0; colors[13] = 1.0; colors[14] = 1.0; // Bottom left corner
vertices[15] = 0.5; vertices[16] = 0.5; vertices[17] = 0.0; // Top Right corner
colors[15] = 0.0; colors[16] = 1.0; colors[17] = 0.0; // Top Right corner
*/
glGenVertexArrays(1, &vaoID[0]); // Create our Vertex Array Object
glBindVertexArray(vaoID[0]); // Bind our Vertex Array Object so we can use it
glGenBuffers(3, &vboID[0]); // Generate our Vertex Buffer Objects
glBindBuffer(GL_ARRAY_BUFFER, vboID[0]); // Bind our Vertex Buffer Object
glBufferData(GL_ARRAY_BUFFER, vertnum * sizeof(GLfloat), vertices, GL_STATIC_DRAW); // Set the size and data of our VBO and set it to STATIC_DRAW
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0); // Enable the first vertex attribute array
glBindBuffer(GL_ARRAY_BUFFER, vboID[1]); // Bind our second Vertex Buffer Object
glBufferData(GL_ARRAY_BUFFER, vertnum * sizeof(GLfloat), colors, GL_STATIC_DRAW); // Set the size and data of our VBO and set it to STATIC_DRAW
glVertexAttribPointer((GLuint)1, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(1); // Enable the second vertex attribute array
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID[2]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 6 * sizeof(GLuint), indices, GL_STATIC_DRAW);
glVertexAttribPointer((GLuint)2, 3, GL_INT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(2);
glBindVertexArray(0); // Unbind our Vertex Array Object
delete[] vertices; // Delete our vertices from memory
delete[] colors; // Delete our vertices from memory
delete[] indices;
LOG(info) << "Renderer_t constructor done";
}
Rendering:
void Renderer_t::render() {
LOG(info) << "Renderer_t.render()";
glViewport(0, 0, width, height); // Set the viewport size to fill the window
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); // Clear required buffers
viewMatrix = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -5.f)); // Create our view matrix
modelMatrix = glm::scale(glm::mat4(1.0f), glm::vec3(1.0f)); // Create our model matrix
shader->bind(); // Bind our shader
int projectionMatrixLocation = glGetUniformLocation(shader->id(), "projectionMatrix"); // Get the location of our projection matrix in the shader
int viewMatrixLocation = glGetUniformLocation(shader->id(), "viewMatrix"); // Get the location of our view matrix in the shader
int modelMatrixLocation = glGetUniformLocation(shader->id(), "modelMatrix"); // Get the location of our model matrix in the shader
glUniformMatrix4fv(projectionMatrixLocation, 1, GL_FALSE, &projectionMatrix[0][0]); // Send our projection matrix to the shader
glUniformMatrix4fv(viewMatrixLocation, 1, GL_FALSE, &viewMatrix[0][0]); // Send our view matrix to the shader
glUniformMatrix4fv(modelMatrixLocation, 1, GL_FALSE, &modelMatrix[0][0]); // Send our model matrix to the shader
glBindVertexArray(vaoID[0]); // Bind our Vertex Array Object
// glDrawArrays(GL_TRIANGLES, 0, 6); // Draw our square
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID[2]);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, NULL);
glBindVertexArray(0); // Unbind our Vertex Array Object
shader->unbind(); // Unbind our shader
LOG(info) << "Renderer_t.render() done";
}
I also use shaders:
shader.vert
#version 130
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
in vec3 in_Position;
in vec3 in_Color;
out vec3 pass_Color;
void main(void)
{
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(in_Position, 1.0);
pass_Color = in_Color;
}
shader.fraq
#version 130
in vec3 pass_Color;
out vec4 out_Color;
void main(void)
{
out_Color = vec4(pass_Color, 1.0);
}
Just so you know, a triangle list with elements: 0,1,2 0,2,3 is the same thing as glDrawArrays(GL_TRIANGLE_FAN, 0, 4). You could just use a triangle fan here and not have to use indices at all.
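For this particular quad that would just be (a sketch reusing your existing vaoID[0]):

glBindVertexArray(vaoID[0]);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4); // vertices 0,1,2,3 in order form the same two triangles
glBindVertexArray(0);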
Your actual problem is that you keep writing each element in your element array to [0].
Right now your code reads:
indices[0] = 0; indices[0] = 1; indices[0] = 2;
indices[0] = 2; indices[0] = 3; indices[0] = 0;
However, to function correctly it needs to read:
indices[0] = 0; indices[1] = 1; indices[2] = 2;
indices[3] = 2; indices[4] = 3; indices[5] = 0; // This is equivalent to 0, 2, 3
Everything in BDL's answer is also important; you should take note of those points as well.
You try to set up the ELEMENT_ARRAY_BUFFER as a vertex attribute for your shader:
glVertexAttribPointer((GLuint)2, 3, GL_INT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(2);
Index buffers do not have to be bound to a shader, since they are not directly used as shader input. Besides this, your shader has only two in-variables (most probably numbered 0 and 1), so using 2 will never work.
Another hint:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID[2]);
in your render() is not necessary, since the binding is already stored in the VAO during initialization. But this should not affect the output.
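Putting those two points together, the index-buffer part of the setup would look something like this (a sketch against your existing vboID[2]/vaoID[0] names):

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID[2]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 6 * sizeof(GLuint), indices, GL_STATIC_DRAW);
// no glVertexAttribPointer / glEnableVertexAttribArray here: the index buffer
// is not a vertex attribute, the VAO simply remembers this binding
glBindVertexArray(0);

// ...and in render() the VAO restores that binding, so a plain draw is enough:
glBindVertexArray(vaoID[0]);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, NULL);
glBindVertexArray(0);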
Here is my shader program:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec3 vertexNormal_modelspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 blNormalMatrix;
uniform vec3 lightPos;
out vec4 forFragColor;
const vec3 diffuseColor = vec3(0.55, 0.09, 0.09);
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * diffuseColor;
// all following geometric computations are performed in the
// camera coordinate system (aka eye coordinates)
vec3 vertexNormal_cameraspace = (V*M*vec4(vertexNormal_modelspace,0)).xyz;
vec4 vertexPosition_cameraspace4 = V*M* vec4(vertexPosition_modelspace,1);
vec3 vertexPosition_cameraspace = vec3(vertexPosition_cameraspace4).xyz;
vec3 lightDir = normalize(lightPos - vertexPosition_cameraspace);
float lambertian = clamp(dot(lightDir,vertexNormal_cameraspace), 0.0,1.0);
forFragColor = vec4(lambertian*diffuseColor , 1.0);
}
My problem is that this "worked" in the older OpenGL profile (it didn't even have a version number; I think it was around OpenGL 2.1 or so). The key difference is that I originally had normal = gl_NormalMatrix * gl_Normal and things worked.
However, that was based on my professor's code, which I've updated to the 3.3+ core profile, and after (I think) fixing the deprecated functions I am now left with this:
https://drive.google.com/file/d/0B6oLZ_d7S-U7cVpkUXpVXzdaZEk/edit?usp=sharing is a link to the video of my program's behavior.
The light source should be a point light at roughly (0,0,3) that shouldn't move, but it's not following any particularly logical behavioral pattern, and I can't make sense of it.
I tried passing the inverse transpose of the model matrix and using it as a replacement normal matrix, but it wrecked my normals. So I don't know.
This was my normalMatrix:
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelMatrix)));
Edit: Here is my Display code:
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
// Use our shader
glUseProgram(programID);
// Get our transformations iff we move the camera around.
glm::mat4 MyModelMatrix = ModelMatrix * thisTran * ThisRot;
MVP = ProjectionMatrix * ViewMatrix * MyModelMatrix;
glm::mat4 ModelView = ViewMatrix * MyModelMatrix;
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelView)));
glm::vec3 newLightPos = lightPos;
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &MyModelMatrix[0][0]);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glUniformMatrix4fv(BlNormalMatrix,1,GL_FALSE, &MyNormalMatrix[0][0]);
glUniformMatrix4fv(BlRotations, 1, GL_FALSE, &ThisRot[0][0]);
glUniform3f(BlCamera, cameraLoc.x, cameraLoc.y, cameraLoc.z);
glUniform3f(lPosition, newLightPos.x,newLightPos.y,newLightPos.z);
// VBO buffer: vertices
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2rd attribute buffer : normals
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
1, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// draw the object using OpenGL 3.3
glDrawArrays(GL_TRIANGLES, 0, vertices.size() );
The problem ultimately turned out to be an issue with the model loader provided by my professor: it was somehow incompatible with modern OpenGL and would only "mostly" work, in that it was clearly missing the left/right normals or they had invalid values. I solved it by switching to an implementation based on Assimp.
The code, with Assimp linked, looks like this:
void blInitResWAssimp() {
cout << "blInitResWAssimp" << endl;
blCreateModelViewProjectionMatrix();
//loads object
bool res = loadAssImp("Resources/RCSS-subdiv.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
//bool res = loadAssImp("Resources/cheb.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Load it into a VBO
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_vertices.size() * sizeof(glm::vec3), &indexed_vertices[0], GL_STATIC_DRAW);
// Normal buffer
glGenBuffers(1, &normalbuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_normals.size() * sizeof(glm::vec3), &indexed_normals[0], GL_STATIC_DRAW);
// Generate a buffer for the indices as well
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW);
//ModelMatrix = ModelMatrix * glm::translate(glm::mat4(1.0f), glm::vec3(-0.5, -0.5, 0));
}
Assimp stuff
bool loadAssImp(
const char * path,
std::vector<unsigned short> & indices,
std::vector<glm::vec3> & vertices,
std::vector<glm::vec2> & uvs,
std::vector<glm::vec3> & normals
){
Assimp::Importer importer;
const aiScene* scene = importer.ReadFile(path, 0/*aiProcess_JoinIdenticalVertices | aiProcess_SortByPType*/);
if (!scene) {
fprintf(stderr, importer.GetErrorString());
getchar();
return false;
}
const aiMesh* mesh = scene->mMeshes[0]; // In this simple example code we always use the 1rst mesh (in OBJ files there is often only one anyway)
const aiMaterial* material = scene->mMaterials[0];
// Fill vertices positions
vertices.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D pos = mesh->mVertices[i];
vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
}
// Fill vertices texture coordinates
/*
uvs.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D UVW = mesh->mTextureCoords[0][i]; // Assume only 1 set of UV coords; AssImp supports 8 UV sets.
uvs.push_back(glm::vec2(UVW.x, UVW.y));
}*/
// Fill vertices normals
normals.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D n = mesh->mNormals[i];
//aiVector3D n = mesh->mVertices[i];
normals.push_back(glm::vec3(n.x, n.y, n.z));
}
// Fill face indices
indices.reserve(3 * mesh->mNumFaces);
for (unsigned int i = 0; i<mesh->mNumFaces; i++){
// Assume the model has only triangles.
indices.push_back(mesh->mFaces[i].mIndices[0]);
indices.push_back(mesh->mFaces[i].mIndices[1]);
indices.push_back(mesh->mFaces[i].mIndices[2]);
}
// The "scene" pointer will be deleted automatically by "importer"
return true;
}
OpenGL glm calculations don't seem to work in my program. Nothing moves, even when I use the glm translate function to translate the z axis with a variable every frame. Am I missing something?
main.cpp
#define GLEW_STATIC
#define NO_SDL_GLEXT
#include "glew.h"
#include <sdl.h>
#undef main
#include "SDL_opengl.h"
#include "timer.h"
#include <time.h>
#include <shader.h>
using namespace std;
#include <glm/gtc/matrix_projection.hpp>
#include <glm/gtc/matrix_transform.hpp>
using namespace glm;
unsigned int vaoID[1]; // Our Vertex Array Object
unsigned int vboID[1]; // Our Vertex Buffer Object
glm::mat4 projectionMatrix; // Store the projection matrix
glm::mat4 viewMatrix; // Store the view matrix
glm::mat4 modelMatrix; // Store the model matrix
Shader *shader; // Our GLSL shader
float ztransform(0);
bool exited(false);
SDL_Event event;
const int FRAMES_PER_SECOND = 60;
void createSquare(void) {
float* vertices = new float[18]; // Vertices for our square
vertices[0] = -0.5; vertices[1] = -0.5; vertices[2] = 0.0; // Bottom left corner
vertices[3] = -0.5; vertices[4] = 0.5; vertices[5] = 0.0; // Top left corner
vertices[6] = 0.5; vertices[7] = 0.5; vertices[8] = 0.0; // Top Right corner
vertices[9] = 0.5; vertices[10] = -0.5; vertices[11] = 0.0; // Bottom right corner
vertices[12] = -0.5; vertices[13] = -0.5; vertices[14] = 0.0; // Bottom left corner
vertices[15] = 0.5; vertices[16] = 0.5; vertices[17] = 0.0; // Top Right corner
glGenVertexArrays(1, &vaoID[0]); // Create our Vertex Array Object
glBindVertexArray(vaoID[0]); // Bind our Vertex Array Object so we can use it
glGenBuffers(1, vboID); // Generate our Vertex Buffer Object
glBindBuffer(GL_ARRAY_BUFFER, vboID[0]); // Bind our Vertex Buffer Object
glBufferData(GL_ARRAY_BUFFER, 18 * sizeof(GLfloat), vertices, GL_STATIC_DRAW); // Set the size and data of our VBO and set it to STATIC_DRAW
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, 0, 0); // Set up our vertex attributes pointer
glEnableVertexAttribArray(0); // Enable the first vertex attribute array
glBindVertexArray(0); // Unbind our Vertex Array Object
delete [] vertices; // Delete our vertices from memory
}
void startGL()
{
SDL_Init(SDL_INIT_EVERYTHING);
SDL_SetVideoMode(800, 600, 32, SDL_OPENGL);
glewInit();
glClearColor(0.4f, 0.0f, 1.0f, 0.0f);
projectionMatrix = glm::perspective(60.0f, (float)800 / (float)600, 0.1f, 100.f); // Create our perspective projection matrix
shader = new Shader("shader.vert", "shader.frag"); // Create our shader by loading our vertex and fragment shader
createSquare();
}
void drawstuff()
{
glViewport(0, 0, 800, 600); // Set the viewport size to fill the window
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); // Clear required buffers
viewMatrix = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, ztransform)); // Create our view matrix which will translate us back 5 units
modelMatrix = glm::scale(glm::mat4(1.0f), glm::vec3(0.5f)); // Create our model matrix which will halve the size of our model
shader->bind(); // Bind our shader
int projectionMatrixLocation = glGetUniformLocation(shader->id(), "projectionMatrix"); // Get the location of our projection matrix in the shader
int viewMatrixLocation = glGetUniformLocation(shader->id(), "viewMatrix"); // Get the location of our view matrix in the shader
int modelMatrixLocation = glGetUniformLocation(shader->id(), "modelMatrix"); // Get the location of our model matrix in the shader
glUniformMatrix4fv(projectionMatrixLocation, 1, GL_FALSE, &projectionMatrix[0][0]); // Send our projection matrix to the shader
glUniformMatrix4fv(viewMatrixLocation, 1, GL_FALSE, &viewMatrix[0][0]); // Send our view matrix to the shader
glUniformMatrix4fv(modelMatrixLocation, 1, GL_FALSE, &modelMatrix[0][0]); // Send our model matrix to the shader
glBindVertexArray(vaoID[0]); // Bind our Vertex Array Object
glDrawArrays(GL_TRIANGLES, 0, 6); // Draw our square
glBindVertexArray(0); // Unbind our Vertex Array Object
shader->unbind(); // Unbind our shader
}
int main (int argc, char* args[])
{
Timer fps;
startGL();
while(exited == false)
{
while( SDL_PollEvent(&event) )
{
if( event.type == SDL_QUIT )
exited = true;
}
drawstuff();
ztransform += .1f;
SDL_GL_SwapBuffers();
if( fps.get_ticks() < 1000 / FRAMES_PER_SECOND )
SDL_Delay( ( 1000 / FRAMES_PER_SECOND ) - fps.get_ticks() );
}
SDL_Quit();
return 0;
}
shader.frag
#version 150 core
in vec3 pass_Color;
out vec4 out_Color;
void main(void)
{
out_Color = vec4(pass_Color, 1.0);
}
shader.vert
#version 150 core
in vec3 in_Position;
in vec3 in_Color;
out vec3 pass_Color;
void main(void)
{
gl_Position = vec4(in_Position, 1.0);
pass_Color = in_Color;
}
You have to apply your transformations in your vertex shader.
You should define the following in your vertex shader:
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
And then apply these transformations to your input position (note: I may have gotten the order wrong):
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(in_position, 1.0);
Generally though, you would multiply the three matrices together in your C++ program and pass in a single modelViewProjection matrix.
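A minimal sketch of that last approach, reusing the uniform lookup pattern already in drawstuff() (the uniform name modelViewProjectionMatrix is an assumption, not something your code already declares):

// C++ side: build the combined matrix once per frame and upload it in one call
glm::mat4 mvp = projectionMatrix * viewMatrix * modelMatrix;
int mvpLocation = glGetUniformLocation(shader->id(), "modelViewProjectionMatrix");
glUniformMatrix4fv(mvpLocation, 1, GL_FALSE, &mvp[0][0]);

// shader.vert side:
// uniform mat4 modelViewProjectionMatrix;
// ...
// gl_Position = modelViewProjectionMatrix * vec4(in_Position, 1.0);

This also saves two matrix multiplications per vertex compared to multiplying the three uniforms in the shader.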