Can I somehow render more stuff (opengl) - c++

I'm trying to render lots of stuff with OpenGL 3.3 Am i missing some tricks to make this faster?
Does it matter if I use glBufferData or glBufferSubData?
I have coded OpenGL for 5 days now, so I know that there are lots of unknown unknowns to me. And those are what I'm looking for — can you point me to any ways of making this even faster?
I think i'm using what's called "Instanced Rendering". All my stuff is rendered via a single glDrawElementsInstancedBaseVertex call.
Did I miss any relevant code? There's so much of it that I can't really paste it all here.
I've gotten as far as 20000 objects with 24 vertices using the following code:
Called once per mesh at start, not during frames.
void Mesh::initMesh(IndexedModel const & p_model)
{
// One-time setup of the VAO, the static per-vertex buffers and the
// per-instance (divisor = 1) buffers. Called once per mesh, never per frame.
d->drawCount = p_model.indices.size();
glGenVertexArrays(1, &(d->vertexArrayObject));
glBindVertexArray(d->vertexArrayObject);
glGenBuffers(eNumBuffers, d->vertexArrayBuffers);
// location 0: vec3 position
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[ePosition_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.positions[0]) * p_model.positions.size(), p_model.positions.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// location 1: vec2 texture coordinate
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eTexCoord_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.texCoords[0]) * p_model.texCoords.size(), p_model.texCoords.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
// location 2: vec3 normal
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eNormal_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.normals[0]) * p_model.normals.size(), p_model.normals.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Index buffer (unsigned int indices, matching the draw call's GL_UNSIGNED_INT).
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, d->vertexArrayBuffers[eIndex_Vb]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * p_model.indices.size(), p_model.indices.data(), GL_STATIC_DRAW);
// Instanced attributes. Locations must match the vertex shader:
// mat4 model occupies locations 3-6, shininess is 7, specular color is 8.
GLint const mat4_pos0 = 3;
GLint const shinyPos = 7;
GLint const materialPos = 8;
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
for (unsigned int i = 0; i < 4; i++)
{
// A mat4 attribute is declared as four consecutive vec4 columns.
glEnableVertexAttribArray(mat4_pos0 + i);
glVertexAttribPointer(mat4_pos0 + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
(const GLvoid*)(sizeof(GLfloat) * i * 4));
glVertexAttribDivisor(mat4_pos0 + i, 1); // advance once per instance
}
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
glEnableVertexAttribArray(shinyPos);
glVertexAttribPointer(shinyPos, 1, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribDivisor(shinyPos, 1);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
glEnableVertexAttribArray(materialPos);
// BUG FIX: the shader declares materialSpecularColor as a vec3 and draw()
// uploads glm::vec3 data, so this attribute must read 3 floats per
// instance, not 1 (size 1 with stride 0 walked the buffer float-by-float).
glVertexAttribPointer(materialPos, 3, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribDivisor(materialPos, 1);
}
Called once per frame.
void Mesh::draw(std::vector<Object*> const & p_objects, GLuint p_program)
{
// Gathers per-instance data (model matrix, shininess, specular color),
// uploads it, and issues a single instanced draw for the whole batch.
// p_program is currently unused here.
std::vector<glm::mat4> models;
std::vector<glm::float32> shinies;
std::vector<glm::vec3> specularColors;
models.reserve(p_objects.size());
shinies.reserve(p_objects.size());
specularColors.reserve(p_objects.size());
// Range-for avoids the signed/unsigned comparison of the old int-indexed loop.
for (Object* object : p_objects)
{
models.push_back(object->getTransform());
shinies.push_back(object->getShininess());
specularColors.push_back(object->getSpecularColor());
}
// GLsizeiptr (not unsigned int) so large uploads do not truncate on 64-bit.
GLsizeiptr const bytesOfModels = models.size() * sizeof(models[0]);
GLsizeiptr const bytesOfShinies = shinies.size() * sizeof(shinies[0]);
GLsizeiptr const bytesOfSpecularColors = specularColors.size() * sizeof(specularColors[0]);
// Re-specifying the full store with glBufferData each frame "orphans" the
// previous storage, letting the driver pipeline the update without a stall.
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfModels, models.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfShinies, shinies.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfSpecularColors, specularColors.data(), GL_DYNAMIC_DRAW);
glDrawElementsInstancedBaseVertex(GL_TRIANGLES,
d->drawCount,
GL_UNSIGNED_INT,
0,
static_cast<GLsizei>(p_objects.size()),
0);
}
Called once per frame
void GenericRenderer::renderObjects(std::vector<Object*> p_objects)
{
// An empty batch needs no state changes at all.
if (p_objects.empty())
{
return;
}
// Bind the shared per-batch state once: texture unit 0, the shader
// program, its per-frame uniforms, and the mesh's VAO.
m_texture->bind(0);
m_shader->bind();
m_shader->updateCamera(m_camera);
m_shader->updateLightSource(*m_light);
m_shader->updateObjects(p_objects);
m_mesh->bind();
// Refresh each object's transform relative to the camera before drawing.
for (Object* object : p_objects)
{
object->setOrigin(m_camera);
object->updateTransform();
}
m_mesh->draw(p_objects, m_shader->getProgram());
m_mesh->unbind();
}
Vertex Shader
#version 330
// Combined view-projection matrix; identical for every instance.
uniform mat4 camera;
// Per-vertex inputs.
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
// Per-instance inputs (attribute divisor 1). The mat4 spans locations 3-6,
// which is why the next attributes sit at 7 and 8.
layout (location = 3) in mat4 model;
layout (location = 7) in float materialShininess;
layout (location = 8) in vec3 materialSpecularColor;
// Forwarded to the fragment shader. Note the whole mat4 is passed through
// as a varying, which costs four interpolator slots.
out vec3 fragVert;
out vec2 fragTexCoord;
out vec3 fragNormal;
out mat4 fragModel;
out float fragMaterialShininess;
out vec3 fragMaterialSpecularColor;
void main()
{
fragModel = model;
fragTexCoord = texCoord;
fragNormal = normal;
fragVert = position;
fragMaterialShininess = materialShininess;
fragMaterialSpecularColor = materialSpecularColor;
// Vertex position stays in object space for the varyings; the fragment
// shader applies the model matrix itself for lighting.
gl_Position = camera * model * vec4(position, 1);
}
Fragment Shader
#version 150
// NOTE(review): the vertex shader is #version 330 while this stage is 150;
// using the same version for both stages would be safer.
uniform vec3 cameraPosition;
uniform float exposure;               // exposure factor for tone mapping
uniform float lightDistanceModifier;  // scales distance before attenuation
uniform sampler2D tex;
uniform struct Light {
vec3 position;
vec3 intensities; //a.k.a the color of the light
float attenuation;
float ambientCoefficient;
} light;
in vec2 fragTexCoord;
in vec3 fragNormal;
in vec3 fragVert;
in mat4 fragModel;
in float fragMaterialShininess;
in vec3 fragMaterialSpecularColor;
out vec4 finalColor;
void main() {
// Normal matrix computed per fragment; hoisting this to the vertex shader
// (or the CPU) would be considerably cheaper.
vec3 normal = normalize(transpose(inverse(mat3(fragModel))) * fragNormal);
vec3 surfacePos = vec3(fragModel * vec4(fragVert, 1));
vec4 surfaceColor = texture(tex, fragTexCoord);
vec3 surfaceToLight = normalize(light.position - surfacePos);
vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
//ambient
vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
//diffuse
float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;
//specular
float specularCoefficient = 0.0;
if(diffuseCoefficient > 0.0)
specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), fragMaterialShininess);
vec3 specular = specularCoefficient * fragMaterialSpecularColor * light.intensities;
//attenuation
float distanceToLight = length(light.position - surfacePos);
distanceToLight *= lightDistanceModifier;
float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));
//linear color (color before gamma correction)
vec3 linearColor = ambient + attenuation*(diffuse + specular);
// Exposure tone mapping followed by gamma encoding.
// BUG FIX: the original set gamma = 1/2.2 and then raised to 1.0/gamma,
// i.e. an exponent of 2.2, which darkens the image instead of encoding it.
// Encoding for a ~2.2-gamma display uses an exponent of 1/2.2.
vec3 mapped = vec3(1.0) - exp(-linearColor * exposure);
mapped = pow(mapped, vec3(1.0 / 2.2));
finalColor = vec4(mapped, surfaceColor.a);
}

OpenGL state changes are very expensive. If you are rendering 20000 objects individually per frame then you're most likely CPU bound. Your goal should be to render as many vertices as possible with as few state changes as possible.
If your 20000 objects are all using the same model then your situation is a prime candidate for instanced rendering. Instanced rendering lets you render the same model thousands of times in one draw call. If you couple this with a separate vertex buffer that contains WVP matrices for each model then you can render each of those model instances at a unique location within the world.
Be warned though, instanced rendering isn't some sort of panacea to all your draw call woes. It has its own unique overhead with constructing a buffer of MVP matrices on the CPU each frame. If the number of instances you're rendering isn't at least in the hundreds you'll likely see worse performance than your current rendering method.
EDIT: You are already using instanced rendering; my apologies.
After reading your code more thoroughly you are likely right in your assumption that you're GPU bound. However, it's not currently clear why you are constructing specular and shininess buffers once per frame when these attributes tend to remain constant for a material.

Related

Issue with passing integer vertex attributes with "in" keyword

I'm working on bone animation. I have a vertex struct that basically looks like
// Interleaved vertex layout for skinned meshes. Offsets are taken with
// offsetof() when configuring the VAO, so the layout must stay stable.
struct MeshVertex
{
glm::vec3 pos;
glm::vec3 normal;
glm::vec2 tex;
glm::vec3 tangent;
glm::vec3 bitangent;
// Bone indices (integer attribute, location 5 in the shader).
uint32_t ids[4] = {};
// Bone weights; weights[0] == 0 is treated as "unskinned" by the shader.
float weights[4] = {};
void print() const;
};
The mesh is a basic cube with one bone. Therefore ids = {0,0,0,0} and weights = {1.0f,0.0f,0.0f,0.0f} for every single vertex. In my mesh class I have a static function Mesh::genFormat() that handles attributes. vao is a static int in the mesh class and for_i is just a convenient macro I use to do for loops. Note that I correctly use glVertexArrayAttribIFormat.
// Creates the vertex and index buffers with DSA calls
// (glCreateBuffers/glNamedBufferData); the VAO format is shared and set up
// separately in Mesh::genFormat().
Mesh::Mesh(const std::vector<MeshVertex>& vertices, const std::vector<uint>& indices, const std::vector<Texture>& textures)
{
m_textures = textures;
m_num_indices = indices.size();
// create vertex and index buffers
glCreateBuffers(1, &m_vbo);
glCreateBuffers(1, &m_ibo);
// Use .data() instead of &v[0]: dereferencing element 0 of an empty
// vector is undefined behavior, while .data() is always valid.
glNamedBufferData(m_vbo, sizeof(MeshVertex) * vertices.size(), vertices.data(), GL_STATIC_DRAW);
glNamedBufferData(m_ibo, sizeof(uint) * indices.size(), indices.data(), GL_STATIC_DRAW);
}
// Builds the shared VAO describing the MeshVertex layout (DSA style).
// Attributes 0-4 are floats; attribute 5 (bone ids) is an integer attribute
// and therefore must use glVertexArrayAttribIFormat (GL 4.5+).
void Mesh::genFormat()
{
glCreateVertexArrays(1, &vao);
for_i(7) { glEnableVertexArrayAttrib(vao, i); }
// BUG FIX: the original had unbalanced parentheses on the pos, ids and
// weights lines, which does not compile.
glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, false, offsetof(MeshVertex, pos));
glVertexArrayAttribFormat(vao, 1, 3, GL_FLOAT, false, offsetof(MeshVertex, normal));
glVertexArrayAttribFormat(vao, 2, 2, GL_FLOAT, false, offsetof(MeshVertex, tex));
glVertexArrayAttribFormat(vao, 3, 3, GL_FLOAT, false, offsetof(MeshVertex, tangent));
glVertexArrayAttribFormat(vao, 4, 3, GL_FLOAT, false, offsetof(MeshVertex, bitangent));
glVertexArrayAttribIFormat(vao, 5, 4, GL_UNSIGNED_INT, offsetof(MeshVertex, ids));
glVertexArrayAttribFormat(vao, 6, 4, GL_FLOAT, false, offsetof(MeshVertex, weights));
// All attributes read from binding point 0.
for_i(7) { glVertexArrayAttribBinding(vao, i, 0); }
glBindVertexArray(0);
}
The following GLSL won't render anything.
#version 460 core
layout(location = 0) in vec3 Pos;
layout(location = 1) in vec3 Normal;
layout(location = 2) in vec2 Tex;
layout(location = 3) in vec3 Tan;
layout(location = 4) in vec3 BiTan;
// Integer attribute: requires glVertexArrayAttribIFormat on the C++ side,
// which in turn needs GL 4.5+ driver support.
layout(location = 5) in uvec4 BoneIds;
layout(location = 6) in vec4 Weights;
// NOTE(review): 'normal' is declared as an output but never written in main().
out vec3 normal;
out vec2 tex;
// Camera UBO bound at binding point 2 (std140 layout).
layout(binding = 2, std140) uniform Camera
{
mat4 VP;
vec4 cpos;
};
uniform mat4 node;
uniform mat4 bones_inverse_bind_mesh_parent[50];
void main()
{
tex = Tex;
// Blend up to four bone matrices; Weights[0] == 0 marks an unskinned
// vertex, which falls back to the plain node transform.
mat4 W = mat4(0.0f);
if (Weights[0] != 0.0f)
{
for (uint i = 0; i < 4; i++)
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
W = node * W;
}
else
W = node;
gl_Position = VP * W * vec4(Pos, 1.0);
}
Since BoneIds[i] is always zero, if I replace
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
with
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[0]);
the result should be unchanged. My matrix transforms are currently a bit off (something to fix later), but now the cube renders fine. So there is something wrong with BoneIds. After bashing my head against the wall on this for a while, I instead replaced
layout(location = 5) in uvec4 BoneIds;
with
layout(location = 5) varying uvec4 BoneIds;
after seeing some old GLSL online, and now everything works. What I don't understand is why. I've seen plenty of GLSL code on the internet work with integer attributes using the in keyword.
UPDATE :
If I replace glVertexArrayAttribIFormat in Mesh::genFormat() with
glVertexArrayAttribFormat(vao, 5, 4, GL_UNSIGNED_INT, false, offsetof(MeshVertex, ids));
in C++ and
layout(location = 5) in vec4 BoneIds;
in GLSL and cast bone ids from float to int in the glsl code, the code also works.
Okay I solved the issue, even though I don't quite understand how this fixes the problem. My preferred graphics processor was on auto but when I forced it to use the NVIDIA processor over my integrated graphics, everything works out fine. image of solution
Update :
I think it is as simple as my Intel processor graphics supporting OpenGL 4.4 and glVertexArrayAttribIFormat came about in OpenGL 4.5.

glGetUniformLocation returning -1 during transform feedback when it is used

The function returns -1, even though Uniform is defined and used within the vertex shader, I suspect the cause may be that the out attributes might not be properly bound with the target buffer (not sure if that is the case). Without this one Uniform, most of my values will stay the same.
Drawing the Transform Feedback
/*code included in update*/
glUseProgram(feedbackShader->GetProgram());
// Rasterizer discard: this pass only captures vertices via transform
// feedback, nothing is rasterized.
glEnable(GL_RASTERIZER_DISCARD);
/*end of code included in update*/
// Ping-pong: read particle data from [isEvenBuffer], capture the updated
// particles into [!isEvenBuffer].
glBindBuffer(GL_ARRAY_BUFFER, particleBuffer[isEvenBuffer]);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, transformBuffer[!isEvenBuffer]);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glEnableVertexAttribArray(5);
// Interleaved Particle layout; the attribute indices here must match the
// locations the linker assigned to the shader's inputs.
glVertexAttribPointer(5, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), 0); //Location
glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)12); //Velocity
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)24); //InitLocation
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)36); //InitVelocity
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)48); //Lifetime
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)52); //InitLifetime
// NOTE(review): this returns -1 when "time" is not an active uniform of the
// *linked* program — see the answer below: the feedback varyings must be
// registered before glLinkProgram for the link to succeed as intended.
GLint uniformLocation = glGetUniformLocation(feedbackShader->GetProgram(), "time");
glUniform1f(uniformLocation, msec);
glBeginTransformFeedback(GL_POINTS);
// Draws as many points as were previously captured into this feedback object.
glDrawTransformFeedback(GL_POINTS, transformBuffer[isEvenBuffer]);
glEndTransformFeedback();
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
glDisableVertexAttribArray(5);
The Vertex Shader
#version 410
// Particle update pass: integrates position/velocity and respawns expired
// particles. All outputs are captured by transform feedback.
in vec3 inLocation;
in vec3 inVelocity;
in vec3 inInitLocation;
in vec3 inInitVelocity;
in float inLifeTime;
in float inInitlifeTime;
// Captured varyings; names and order must match glTransformFeedbackVaryings.
out vec3 outLocation;
out vec3 outVelocity;
out vec3 outInitLocation;
out vec3 outInitVelocity;
out float outLifeTime;
out float outInitlifeTime;
// Time step for this update (set from 'msec' on the CPU side).
uniform float time;
vec3 Gravity = vec3(0.0f,-0.98f,0.0f);
float dampeningFactor = 0.5;
void main()
{
outLifeTime = inLifeTime - time;
if(outLifeTime > 0.0f){
// Alive: integrate velocity (with gravity and damping) and position.
outVelocity = (inVelocity + Gravity * time) * dampeningFactor;
outLocation = inLocation + inVelocity * time;
}else{
// Expired: respawn the particle from its initial state.
outVelocity = inInitVelocity;
outLocation = inInitLocation;
outLifeTime = inInitlifeTime;
}
outInitVelocity = inInitVelocity;
outInitLocation = inInitLocation;
outInitlifeTime = inInitlifeTime;
}
UPDATE
There were a few extra bits of information you all asked for.
The Vertex Shader
#version 410
// (Repost of the same particle-update shader from the question above.)
in vec3 inLocation;
in vec3 inVelocity;
in vec3 inInitLocation;
in vec3 inInitVelocity;
in float inLifeTime;
in float inInitlifeTime;
// Captured varyings; names and order must match glTransformFeedbackVaryings.
out vec3 outLocation;
out vec3 outVelocity;
out vec3 outInitLocation;
out vec3 outInitVelocity;
out float outLifeTime;
out float outInitlifeTime;
// Time step for this update (set from 'msec' on the CPU side).
uniform float time;
vec3 Gravity = vec3(0.0f,-0.98f,0.0f);
float dampeningFactor = 0.5;
void main()
{
outLifeTime = inLifeTime - time;
if(outLifeTime > 0.0f){
// Alive: integrate velocity (with gravity and damping) and position.
outVelocity = (inVelocity + Gravity * time) * dampeningFactor;
outLocation = inLocation + inVelocity * time;
}else{
// Expired: respawn the particle from its initial state.
outVelocity = inInitVelocity;
outLocation = inInitLocation;
outLifeTime = inInitlifeTime;
}
outInitVelocity = inInitVelocity;
outInitLocation = inInitLocation;
outInitlifeTime = inInitlifeTime;
}
The Feedback Varyings (I was stupid and didn't have this at first, though the same issue remains in my code)
const GLchar* feedbackVaryings[] = {
"outLocation",
"outVelocity",
"outInitLocation",
"outInitVelocity",
"outLifeTime",
"outInitlifeTime"
};
glTransformFeedbackVaryings(feedbackShader->LinkProgram(), 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
I also added the glUseProgram call in the "Drawing the Transform Feedback" section.
I assume that the instruction
feedbackShader->LinkProgram()
links the program. But the transform feedback varyings have to be specified before linking the program.
See OpenGL 4.6 API Core Profile Specification; 7.3.1.1 Naming Active Resources; page 104
The order of the active resource list is implementation-dependent for all
interfaces except for TRANSFORM_FEEDBACK_VARYING. If variables in the
TRANSFORM_FEEDBACK_VARYING interface were specified using the TransformFeedbackVaryings command, the active resource list will be arranged in the variable order specified in the most recent call to TransformFeedbackVaryings before the last call to LinkProgram.
This means, first you have to attach the compiled vertex shader object to the program object (glAttachShader). Then you have to specify the transform feedback varying (glTransformFeedbackVaryings). Finally you have to link the program (glLinkProgram):
GLuint shader_obj;
GLuint program_obj;
.....
// 1. Attach the compiled shader object(s) to the program.
glAttachShader(program_obj, shader_obj);
// 2. Register the varyings to capture; the list only takes effect at the
//    next glLinkProgram call, so this MUST come before linking.
const GLchar* feedbackVaryings[] = {
"outLocation",
"outVelocity",
"outInitLocation",
"outInitVelocity",
"outLifeTime",
"outInitlifeTime"
};
glTransformFeedbackVaryings(program_obj, 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
// 3. Link last, so the feedback varyings become part of the program.
glLinkProgram(program_obj);
Further, I recommend using Layout Qualifiers to define the attribute indices of the vertex shader input variables.
e.g.
layout (location = 0) in vec3 inLocation;
layout (location = 1) in vec3 inVelocity;
layout (location = 2) in vec3 inInitLocation;
layout (location = 3) in vec3 inInitVelocity;
layout (location = 4) in float inLifeTime;
layout (location = 5) in float inInitlifeTime;
As an alternative the attribute index can be determined by glGetAttribLocation after linking the program or set by glBindAttribLocation before linking the program.

Can't apply texture to grid of vertices in Qt OpenGL

Right now I am working at creating a heightmap-based terrain grid, similar to the Lighthouse 3D Terrain Tutorial, except that I am using VBOs and EBOs. All has been going well until I have tried to texture my grid. Currently I am applying one texture that spans the entire grid. Using Windows 7's sample Jellyfish picture, I end up with this:
For those familiar with the picture, you can see that it is being repeated several times throughout the terrain grid. This led me to believe that my UV coordinates were being corrupted. However, if I use a function that always returns 0 to determine the height at each grid vertex, I end up with this:
Now I am thoroughly confused, and I can't seem to find any other resources to help me.
My code is as follows:
generate_terrain() function:
// generate_terrain(): builds an interleaved vertex stream
// (position 3 + normal 3 + uv 2 = 8 floats per vertex) from a heightmap
// image, plus a triangle index list (two triangles per grid cell).
QImage terrainImage;
terrainImage.load(imagePath.data());
int width = terrainImage.width();
int height = terrainImage.height();
float uStep = 1.0f / width;
float vStep = 1.0f / height;
grid = new std::vector<float>;
indices = new std::vector<unsigned short>;
for (int i = 0; i <= height-1; ++i) {
for (int j = 0; j <= width-1; ++j) {
// BUG FIX: the original sampled pixel(j, i+1) and pixel(j+1, i+1),
// which reads past the image on the last row/column. Clamp instead;
// this only affects the (previously garbage) border normals.
int iNext = (i + 1 < height) ? i + 1 : i;
int jNext = (j + 1 < width) ? j + 1 : j;
// Explicit float conversions: brace-initializing QVector3D from int
// is a narrowing conversion.
QVector3D vertex1(float(j), heightFunction(terrainImage.pixel(j, i)), float(i));
QVector3D vertex2(float(j), heightFunction(terrainImage.pixel(j, iNext)), float(iNext));
QVector3D vertex3(float(jNext), heightFunction(terrainImage.pixel(jNext, iNext)), float(iNext));
// Per-vertex normal approximated from the cell's two edge vectors.
QVector3D edge1 = vertex2 - vertex1;
QVector3D edge2 = vertex3 - vertex1;
QVector3D normal = QVector3D::crossProduct(edge1, edge2);
normal.normalize();
grid->push_back(vertex1.x());
grid->push_back(vertex1.y());
grid->push_back(vertex1.z());
grid->push_back(normal.x());
grid->push_back(normal.y());
grid->push_back(normal.z());
grid->push_back(j * uStep);
grid->push_back(i * vStep);
}
}
// Two CCW triangles per grid cell.
// NOTE(review): indices are unsigned short, so the grid must stay within
// 65536 vertices or the index type needs widening.
for (int i = 0; i < height-1; ++i) {
for (int j = 0; j < width-1; ++j) {
indices->push_back(i * width + j);
indices->push_back((i+1) * width + j);
indices->push_back((i+1) * width + (j+1));
indices->push_back((i+1) * width + (j+1));
indices->push_back(i * width + (j+1));
indices->push_back(i * width + j);
}
}
vertices = grid->size()/8;
indexCount = indices->size();
Texture Loading:
f->glGenTextures(1, &textureId);
f->glBindTexture(GL_TEXTURE_2D, textureId);
QImage texture;
texture.load(texturePath.data());
// Convert to the RGBA byte layout (and bottom-up row order) OpenGL expects.
QImage glTexture = QGLWidget::convertToGLFormat(texture);
f->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, glTexture.width(), glTexture.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, glTexture.bits());
// No mipmaps are generated, so MIN_FILTER must be a non-mipmap mode
// (GL_LINEAR); a mipmap filter would leave the texture incomplete.
f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
Drawing:
f->glActiveTexture(GL_TEXTURE0);
f->glBindTexture(GL_TEXTURE_2D, textureId);
program->setUniformValue(textureUniform.data(), 0);
f->glBindBuffer(GL_ARRAY_BUFFER, vbo.bufferId());
// Interleaved layout: 8 floats per vertex = position(3) + normal(3) + uv(2).
// NOTE(review): locations 0/1/2 are assumed here; the shader uses plain
// 'attribute' declarations without bound locations, so the GL may assign
// different indices (this is the issue the answer below addresses).
f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), 0);
f->glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), (void *) (sizeof(float) * 3));
f->glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8*sizeof(float), (void *) (sizeof(float) * 6));
f->glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo.bufferId());
f->glEnableVertexAttribArray(0);
f->glEnableVertexAttribArray(1);
f->glEnableVertexAttribArray(2);
f->glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_SHORT, 0);
f->glDisableVertexAttribArray(2);
f->glDisableVertexAttribArray(1);
f->glDisableVertexAttribArray(0);
Shaders:
Vertex:
// Legacy-style GLSL: 'attribute'/'varying' qualifiers and no explicit
// locations, so attribute indices are implementation-assigned unless
// glBindAttribLocation/glGetAttribLocation is used on the C++ side.
attribute vec3 vertex_modelspace;
attribute vec3 normal_in;
attribute vec2 uv_in;
// NOTE(review): 'mvp' is declared but unused; the MVP product is rebuilt
// from the three separate matrices below.
uniform mat4 mvp;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform vec3 lightPosition;
varying vec2 uv;
varying vec3 normal;
varying vec3 fragPos;
void main(void)
{
gl_Position = projection * view * model * vec4(vertex_modelspace, 1);
uv = uv_in;
// NOTE(review): the normal is forwarded in object space; a non-uniform
// scale in 'model' would require a normal-matrix transform here.
normal = normal_in;
// World-space position for lighting in the fragment shader.
fragPos = vec3(model * vec4(vertex_modelspace, 1));
}
Fragment:
varying vec2 uv;
varying vec3 normal;
varying vec3 fragPos;
uniform sampler2D texture;
uniform vec3 lightPosition;
void main(void)
{
// Simple ambient + diffuse point-light shading with a fixed grey light.
vec3 lightColor = vec3(0.6, 0.6, 0.6);
float ambientStrength = 0.2;
vec3 ambient = ambientStrength * lightColor;
vec3 norm = normalize(normal);
vec3 lightDirection = normalize(lightPosition - fragPos);
// Lambertian diffuse term, clamped at zero for back-facing light.
float diff = max(dot(norm, lightDirection), 0.0);
vec3 diffuse = diff * lightColor;
vec3 color = texture2D(texture, uv).rgb;
vec3 result = (ambient + diffuse) * color;
gl_FragColor = vec4(result, 1.0);
}
I am completely stuck, so any suggestions are welcome :)
P.S. I am also working at trying to get my lighting to look better, so any tips on that would be welcome as well.
Your code is assuming values for the attribute locations, which are the values used as the first argument to glVertexAttribPointer() and glEnableVertexAttribArray(). For example here:
f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), 0);
f->glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), (void *) (sizeof(float) * 3));
f->glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8*sizeof(float), (void *) (sizeof(float) * 6));
you're assuming that the positions have location 0, the normals location 1, and the texture coordinates location 2.
This is not guaranteed by anything you have currently in your code. The order of the attribute declarations in the GLSL code does not define the location assignment. For example from the OpenGL 3.2 spec:
When a program is linked, any active attributes without a binding specified through BindAttribLocation will automatically be bound to vertex attributes by the GL.
Note that this does not specify how the automatic assignment of the locations is done. This means that it's implementation dependent.
To fix this, there are two approaches:
You can call glBindAttribLocation() for all your attributes before the shader program is linked.
You can query the automatically assigned locations by calling glGetAttribLocation() after the program is linked.
In newer OpenGL versions (GLSL 3.30 and later, which is the version matching OpenGL 3.3), you also have the option to specify the location directly in the GLSL code, using qualifiers of the form layout(location=...).
None of these options has any major advantages over the others. Just use the one that works best based on your preferences and software architecture.

Make many lit triangles look smooth

I am trying to create a program that shows a wave-like animation using Perlin Noise by creating many triangles.
This is the important part of my program:
class OGLT9_NOISE
{
//class for Perlin Noise (noise3d()) and Fractional Brownian Motion (fbm()) generation
};
// Unit-length face normal of triangle (a, b, c); the winding is fixed by
// the cross(c-a, b-a) order, matching the terrain generator's expectations.
glm::vec3 OGLT9_GRAPHICS::getNormal(glm::vec3 a, glm::vec3 b, glm::vec3 c)
{
glm::vec3 const edgeAC = c - a;
glm::vec3 const edgeAB = b - a;
return glm::normalize(glm::cross(edgeAC, edgeAB));
}
// Rebuilds the whole (1<<terrainRes)^2 heightfield from FBM noise and
// uploads it into brand-new VBOs. Called once per frame from render(),
// which also deletes the buffers afterwards.
void generateTerrain(OGLT9_SHADER *oglt9Shader)
{
static OGLT9_NOISE noise;
static float yValue = 0;   // advances each call to animate the noise field
int terrainRes = 7; //terrain's resolution
float terrainSpacing = 10.0f;
vector<glm::vec3> vertexData;
vector<glm::vec3> normalData;
multi_array<float, 2> terrain;
terrain.resize(extents[1<<terrainRes][1<<terrainRes]);
// Sample a height for every grid point, remapped from [-1,1] to [0,100].
for(long z=-(1<<(terrainRes-1)); z<(1<<(terrainRes-1)); z++)
for(long x=-(1<<(terrainRes-1)); x<(1<<(terrainRes-1)); x++)
terrain[z+(1<<(terrainRes-1))][x+(1<<(terrainRes-1))] = (noise.fbm((double)x/16.0, yValue, (double)z/16.0, 2, 0.4, 1.2, 2.9, 1.1)/2.0+0.5)*100.0;
// Emit two triangles (6 vertices) per cell. Each triangle's single face
// normal is duplicated onto all three of its vertices — this is what
// produces the faceted, flat-shaded look discussed in the answer below.
for(long z=0; z<(1<<terrainRes)-1; z++)
{
for(long x=0; x<(1<<terrainRes)-1; x++)
{
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z][x+1], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z+1][x], ((float)z+1.0f)*terrainSpacing));
vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
// First triangle's normal, copied to its three vertices.
normalData.push_back(getNormal(vertexData[vertexData.size()-6], vertexData[vertexData.size()-5], vertexData[vertexData.size()-4]));
normalData.push_back(normalData[normalData.size()-1]);
normalData.push_back(normalData[normalData.size()-2]);
// Second triangle's normal, likewise duplicated.
normalData.push_back(getNormal(vertexData[vertexData.size()-3], vertexData[vertexData.size()-2], vertexData[vertexData.size()-1]));
normalData.push_back(normalData[normalData.size()-1]);
normalData.push_back(normalData[normalData.size()-2]);
}
}
// Fresh buffers are generated on every call (deleted again in render()).
glUseProgram(oglt9Shader->program);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, vertexData.size()*3*sizeof(float), vertexData.data(), GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
glGenBuffers(1, &nbo);
glBindBuffer(GL_ARRAY_BUFFER, nbo);
glBufferData(GL_ARRAY_BUFFER, normalData.size()*3*sizeof(float), normalData.data(), GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
numVertices = vertexData.size()*3;
yValue += 0.01f;
}
void render()
{
//Clear screen and enable depth buffer
//Create and transmit matrices and light direction to shaders
// Rebuilds the entire terrain mesh (and new VBOs) every frame, draws it,
// then immediately deletes the buffers again.
generateTerrain(oglt9Shader);
glDrawArrays(GL_TRIANGLES, 0, numVertices);
glDeleteBuffers(1, &vbo);
glDeleteBuffers(1, &nbo);
//Swap buffers to window
}
And my vertex shader...
#version 430 core
layout (location = 0) in vec3 vPosition;
layout (location = 1) in vec3 vNormal;
uniform mat4 mMatrix;   // model
uniform mat4 vMatrix;   // view
uniform mat4 pMatrix;   // projection
out vec3 fPosition;
out vec3 fNormal;
void main(void)
{
gl_Position = pMatrix * vMatrix * mMatrix * vec4(vPosition, 1.0);
// Position forwarded in object space.
fPosition = vPosition;
// Normal transformed by the normal matrix (inverse-transpose of the
// model matrix), so non-uniform scaling is handled correctly.
fNormal = normalize(transpose(inverse(mat3(mMatrix))) * vNormal);
}
#version 430 core
// Fragment shader interface (interpolated from the vertex shader above).
in vec3 fPosition;
in vec3 fNormal;
out vec4 outColor;
uniform vec3 lightDirection;
...and fragment shader.
void main(void)
{
vec3 rawColor = vec3(1.0);
vec3 ambientColor = vec3(1.0, 1.0, 1.0);
// Lambertian term; assumes lightDirection points from the surface toward
// the light — TODO confirm against the CPU-side uniform.
float diffuseIntensity = max(0.0, dot(fNormal, lightDirection));
vec3 diffuseColor = diffuseIntensity * vec3(0.9, 0.9, 0.9);
// NOTE(review): ambient and diffuse are MULTIPLIED here, so the ambient
// term cannot brighten shadowed areas (output is black wherever the
// diffuse term is zero). The conventional form is
// rawColor * (ambient + diffuse) — confirm intent.
outColor = vec4(rawColor*ambientColor*diffuseColor, 1.0);
}
This is the final image:
So, what can I do to make the triangles smooth so you can't see these hard edges anymore?
You're using the same normal for all 3 vertices of each triangle. This will essentially result in flat shading, meaning that the color of each triangle is constant.
What you need is normals that better approximate the actual normals of the surface, instead of calculating the normal of each triangle separately. To get a smooth looking surface, you need to have one normal per vertex, and then use that normal when specifying the vertex for all the triangles that share the vertex.
The most efficient way of doing this is that you really store each vertex/normal of your grid in the VBO only once. You can then use an index buffer to reference the vertices when defining the triangles. This means that you have an additional buffer of type GL_ELEMENT_ARRAY_BUFFER containing indices, and then draw with glDrawElements(). You should be able to find reference information and tutorials on how to do that.
To actually obtain the normals, one common approach is that you average the triangle normals of all adjacent triangles to calculate the normal at a vertex.

OPENGL: drawing multiple point lights

I'm having an issue drawing multiple point lights in my scene. I am working on a simple maze-style game in OpenGL, where the maze is randomly generated. Each "room" in the maze is represented by a Room struct, like so:
// One maze cell: grid coordinates, generation/wall flags (n/s/e/w), the GL
// handles for its geometry and texture, the CPU-side geometry, and the
// position of the room's point light (uploaded as the "lightpos" uniform).
struct Room
{
int x, z;
bool already_visited, n, s, e, w;
GLuint vertex_buffer, texture, uv_buffer, normal_buffer;
std::vector<glm::vec3>vertices, normals;
std::vector<glm::vec2>uvs;
glm::vec3 light_pos; //Stores the position of a light in the room
};
Each room has a light in it, the position of this light is stored in light_pos. This light is used in a simple per-vertex shader, like so:
// Per-vertex diffuse lighting for a single room light.
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 uv_coords;
layout(location = 2) in vec3 normal;
uniform mat4 mvpMatrix;
// NOTE(review): mvMatrix is set by the C++ code but unused in this shader.
uniform mat4 mvMatrix;
uniform vec3 lightpos;
out vec2 vs_uv;
out vec3 vs_normal;
out vec3 color;
void main()
{
gl_Position = mvpMatrix * vec4(pos,1.0);
vs_normal = normal;
vs_uv = uv_coords;
// NOTE(review): 'pos' is in object/model space while 'lightpos' is
// presumably world-space — mixing spaces here skews the lighting
// (this is the issue the first answer raises). Verify against callers.
vec3 lightVector = normalize(lightpos - pos);
float diffuse = clamp(dot(normal,lightVector),0.0,1.0);
color = vec3(diffuse,diffuse,diffuse);
}
My fragment shader looks like this (ignore the "vs_normal", it is unused for now):
// Modulates the interpolated per-vertex lighting term with the room texture.
in vec2 vs_uv;
in vec3 vs_normal;   // unused for now (noted in the question text)
in vec3 color;
uniform sampler2D tex;
out vec4 frag_color;
void main()
{
frag_color = vec4(color,1.0) * texture(tex,vs_uv).rgba;
}
And my drawing code looks like this:
// Draws every room with shared VAO/program state bound once, updating the
// per-room buffers, texture and light-position uniform inside the loop.
mat4 mvMatrix = view_matrix*model_matrix;
mat4 mvpMatrix = projection_matrix * mvMatrix;
glBindVertexArray(vertexBufferObjectID);
glUseProgram(shaderProgram);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
for (int x = 0; x < NUM_ROOMS_X; x++)
{
for (int z = 0; z < NUM_ROOMS_Z; z++)
{
// Take a reference: Room owns std::vectors, so copying it on every
// iteration (as the original did) is needlessly expensive.
const Room& rm = room_array[x][z];
glBindBuffer(GL_ARRAY_BUFFER, rm.vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.uv_buffer);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.normal_buffer);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glUniformMatrix4fv(mvpMatrixID, 1, GL_FALSE, &mvpMatrix[0][0]);
glUniformMatrix4fv(mvMatrixID, 1, GL_FALSE, &mvMatrix[0][0]);
// BUG FIX: count must be 1 — "lightpos" is a single vec3, not an
// array of three vec3s. Passing 3 made the call invalid for a
// non-array uniform, so the light position was never updated
// (this is the fix stated at the end of the post).
glUniform3fv(light_ID, 1, &rm.light_pos[0]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, rm.texture);
glUniform1i(texture_ID, 0);
glDrawArrays(GL_QUADS, 0, rm.vertices.size());
}
}
glUseProgram(0);
glBindVertexArray(0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindTexture(GL_TEXTURE_2D, 0);
However, here is what the result looks like (I've modified my drawing code to draw a box where each light is located, and I've circled the room at position (0,0)):
http://imgur.com/w4uPMD6
As you can see, it looks like only the light at position (0,0) affects any of the rooms on the map, the other lights are simply ignored. I know that the lights are positioned correctly, because the boxes I use to show the positions are correct. I think even though I'm setting the new light_pos, it isn't going through for some reason. Any ideas?
One thing that your are doing, which is not very common, is to pass the light position as a vertex attribute. Optimally, you should pass it to the shader as a uniform variable, just as you do with the matrices. But I doubt that is the problem here.
I believe your problem is that you are doing the light calculations in different spaces. The vertexes of the surfaces that you draw are in object/model space, while I'm guessing, your light is located at a point defined in world space. Try multiplying your light position by the inverse of the model matrix you are applying to the vertexes. I'm not familiar with GLM, but I figure there must be an inverse() function in it:
vec3 light_pos_object_space = inverse(model_matrix) * rm.light_pos;
glVertexAttrib3fv(light_ID, &light_pos_object_space[0]);
Figured out my problem. I was calling this function:
glUniform3fv(light_ID, 3, &rm.light_pos[0]);
When I should have been calling this:
glUniform3fv(light_ID, 1, &rm.light_pos[0]);