Vertex pos/normal/coord not rendering correctly - C++

I'm having trouble sending both normals and a u,v pair to my shaders. If I remove the normal, things work as expected.
EDIT
It appears that v_normal is receiving the values intended for v_coord. I still have no idea why, though.
/EDIT
This is my vertex struct:
struct Vertex {
    Vertex(vec3 const & v) : pos(v) {}
    vec3 pos;
    vec3 normal;
    real u, v;
};
This is the initialization code:
const int VERTEX_POS_INDX = 0;
const int VERTEX_NORMAL_INDX = 1;
const int VERTEX_TEXCOORD_INDX = 2;
const int VERTEX_POS_SIZE = 3;
const int VERTEX_NORMAL_SIZE = 3;
const int VERTEX_TEXCOORD_SIZE = 2;
GLuint vbo, ibo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sphere->vertices.size()*sizeof(Vertex), &sphere->vertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sphere->indices.size()*sizeof(unsigned short), &sphere->indices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray ( VERTEX_POS_INDX );
glEnableVertexAttribArray ( VERTEX_NORMAL_INDX );
glEnableVertexAttribArray ( VERTEX_TEXCOORD_INDX );
int offset = 0;
glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_POS_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_NORMAL_INDX, VERTEX_NORMAL_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_NORMAL_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_TEXCOORD_INDX, VERTEX_TEXCOORD_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
glBindAttribLocation ( programObject, VERTEX_POS_INDX, "a_position" );
glBindAttribLocation ( programObject, VERTEX_NORMAL_INDX, "a_normal" );
glBindAttribLocation ( programObject, VERTEX_TEXCOORD_INDX, "a_coord" );
The vertex shader:
precision highp float;
uniform mat4 u_mv;
uniform mat4 u_mvp;
uniform vec3 u_light;
uniform vec3 u_up;
attribute vec3 a_position;
attribute vec2 a_coord;
attribute vec3 a_normal;
varying vec2 v_coord;
varying vec3 v_normal;
void main() {
    v_coord = a_coord;
    v_normal = a_normal;
    gl_Position = u_mvp * vec4(a_position, 1);
}
The fragment shader:
precision highp float;
uniform vec3 u_up;
varying vec3 v_normal;
varying vec2 v_coord;
void main()
{
    vec2 coord = v_coord;
    vec3 normal = v_normal;
    coord.x = mod(v_coord.x * 5.0, 1.0);
    coord.y = mod(v_coord.y * 5.0, 1.0);
    gl_FragColor = vec4(
        mod(coord.x*1.0, 1.0),
        mod(coord.y*1.0, 1.0),
        mod(normal.z*5.0, 1.0)*0.0,
        1.0);
}

I just had to use glGetAttribLocation to query the indices instead of assuming that glBindAttribLocation would do the trick.
EDIT
Calling glBindAttribLocation before linking the program did the trick and preserved my intended attribute layout.
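For reference, a minimal sketch of the two working orderings (assuming the shaders are already attached to programObject):
// Option A: bind the locations BEFORE linking; glBindAttribLocation only takes effect at link time.
glBindAttribLocation(programObject, VERTEX_POS_INDX, "a_position");
glBindAttribLocation(programObject, VERTEX_NORMAL_INDX, "a_normal");
glBindAttribLocation(programObject, VERTEX_TEXCOORD_INDX, "a_coord");
glLinkProgram(programObject);
// Option B: link first, then query the locations the linker chose and use those
// indices in glEnableVertexAttribArray / glVertexAttribPointer.
GLint posLoc    = glGetAttribLocation(programObject, "a_position");
GLint normalLoc = glGetAttribLocation(programObject, "a_normal");
GLint coordLoc  = glGetAttribLocation(programObject, "a_coord");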

Related

GLSL Geometry Shader causes "No vertex shader bound at draw" in RenderDoc

I am trying to get a basic geometry shader to work, but I am completely failing. After checking numerous resources, I still cannot find a solution to my problem.
Here is my code for my vertex, geometry, and fragment shaders.
Vertex Shader:
#version 330 core
// Vertex Shader Inputs
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec3 Norm;
layout (location = 2) in vec3 Color;
// Vertex to Fragment Shader Outputs
out DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_out;
// Main.cpp Imports
uniform mat4 camMatrix; // viewProjection Matrix
uniform mat4 model;
void main()
{
    vec3 vsPos = vec3(model * vec4(Pos, 1.0f));
    vec3 vsNorm = mat3(transpose(inverse(model))) * Norm; // Normal vector correction
    vec4 vsColor = vec4(Color, 1.0f);
    gl_Position = camMatrix * vec4(vsPos, 1.0f);
}
Geometry Shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
out vec3 gsPos;
out vec3 gsNorm;
out vec3 gsColor;
in DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_in[];
uniform mat4 camMatrix;
void main()
{
    for (int i = 0; i < 3; i++)
    {
        gsPos = data_in[i].vsPos;
        gsNorm = data_in[i].vsNorm;
        gsColor = data_in[i].vsColor;
        gl_Position = camMatrix * vec4(data_in[i].vsPos, 1.0f);
        EmitVertex();
    }
    EndPrimitive();
}
Fragment Shader:
#version 330 core
out vec4 FragColor;
// Fragment Shader Inputs
in vec3 gsPos;
in vec3 gsNorm;
in vec4 gsColor;
// Fragment Shader Uniforms
uniform sampler2D diffuse0;
uniform sampler2D specular0;
uniform vec4 lightColor;
uniform vec3 lightPos;
uniform vec3 camPos;
vec4 pointLight()
{
    vec3 lightVec = (lightPos - vsPos);
    // intensity of light with respect to distance
    float dist = length(lightVec);
    float a = 0.7;
    float b = 0.4;
    float c = 1.0;
    float inten = 1.0f / (a * dist * dist + b * dist + c);
    // ambient lighting
    float ambient = 0.75f;
    // diffuse lighting
    vec3 fsNorm = normalize(gsNorm);
    vec3 lightDirection = normalize(lightVec);
    float diffuse = max(dot(fsNorm, lightDirection), 0.0f);
    // specular lighting
    float specular = 0.0f;
    if (diffuse != 0.0f)
    {
        float specularLight = 0.50f;
        vec3 viewDirection = normalize(gsNorm - gsPos);
        vec3 halfwayVec = normalize(viewDirection + lightDirection);
        float specAmount = pow(max(dot(fsNorm, halfwayVec), 0.0f), 32);
        specular = specAmount * specularLight;
    };
    return inten * (gsColor * (diffuse + ambient) + gsColor * specular) * lightColor;
}
void main()
{
    // outputs final color
    FragColor = pointLight();
}
My mesh generation function:
void genMesh()
{
    VAO.Bind();
    VBO VBO(vtx);
    EBO EBO(idx);
    VAO.LinkAttrib(VBO, 0, 3, GL_FLOAT, sizeof(Vertex), (void*)0);
    VAO.LinkAttrib(VBO, 1, 3, GL_FLOAT, sizeof(Vertex), (void*)(3 * sizeof(float)));
    VAO.LinkAttrib(VBO, 2, 4, GL_FLOAT, sizeof(Vertex), (void*)(6 * sizeof(float)));
    VAO.Unbind();
    VBO.Unbind();
    EBO.Unbind();
};
My mesh draw function:
void Mesh::Draw(Shader& shader, Camera& camera)
{
    shader.Activate();
    VAO.Bind();
    // Take care of the camera Matrix
    glUniform3f(glGetUniformLocation(shader.ID, "camPos"),
                camera.Position.x,
                camera.Position.y,
                camera.Position.z);
    camera.Matrix(shader, "camMatrix");
    // Draw the actual mesh
    glDrawElements(GL_TRIANGLES, idx.size() * sizeof(GLuint), GL_UNSIGNED_INT, 0);
};
I call my mesh generation function outside of the main while loop, then I draw the mesh in my main while loop.
Debugging my program through RenderDoc gives me the error, "No vertex shader bound at draw!" Without the geometry shader (keeping everything else roughly the same), I do not get any errors in RenderDoc. I tried updating my graphics drivers, but I am just getting the same error. Please help me, I feel like I am losing my mind.
In the fragment shader, gsColor is declared as a vec4, but in the geometry shader it is declared as a vec3. With mismatched in/out declarations the program fails to link, which would explain why RenderDoc reports that no vertex shader is bound at the draw call.
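A minimal sketch of the matching declarations (everything else in the posted shaders unchanged):
// Geometry shader
out vec4 gsColor;                 // was vec3; must match the fragment shader input
// inside the loop: gsColor = data_in[i].vsColor;  (vsColor is already a vec4)
// Fragment shader
in vec4 gsColor;                  // types now agree, so the program can link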

OpenGL - vertex color in shader gets swapped

I'm trying to send colors to the shader, but the colors get swapped: I send 0xFF00FFFF (magenta) but I get 0xFFFF00FF (yellow) in the shader.
From experimenting, I think something like this is happening:
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
    vec4 position;
    vec3 normal;
    vec4 color;
} vs_out;
void main()
{
    gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
    vs_out.position = position;
    vs_out.color = color;
    vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
    vec3 position;
    vec3 normal;
    vec4 color;
} fs_in;
void main()
{
    out_color = fs_in.color;
    //out_color = vec4(fs_in.color.y, 0, 0, 1);
    //out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
I can't figure out what's wrong. Thanks in advance.
Your code is reinterpreting an int as 4 consecutive bytes in memory. The internal encoding of an int (and every other type) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments.
You could use an array like GLubyte color[4] to get an explicitly defined memory layout.
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10, and might be less efficient than the standard approach.
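A minimal sketch of the byte-array variant, keeping the rest of the posted setup unchanged (the member name color is reused here for illustration):
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // R, G, B, A as individual bytes, independent of endianness
};
Vertex_Color v;
v.color[0] = 0xFF; // R
v.color[1] = 0x00; // G
v.color[2] = 0xFF; // B
v.color[3] = 0xFF; // A  -> magenta on any machine
// The attribute pointer stays exactly as before:
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color),
                      (const GLvoid*)offsetof(Vertex_Color, color));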

Can I somehow render more stuff? (OpenGL)

I'm trying to render lots of stuff with OpenGL 3.3. Am I missing any tricks to make this faster?
Does it matter whether I use glBufferData or glBufferSubData?
I have been coding OpenGL for 5 days now, so I know there are lots of unknown unknowns to me, and those are what I'm looking for. Can you point me to any ways of making this even faster?
I think I'm using what's called "instanced rendering": everything is rendered via a single glDrawElementsInstancedBaseVertex call.
Did I miss any relevant code? There's so much of it that I can't really paste it all here.
I've gotten as far as 20,000 objects with 24 vertices each using the following code:
Called once per mesh at start, not during frames.
void Mesh::initMesh(IndexedModel const & p_model)
{
    d->drawCount = p_model.indices.size();
    glGenVertexArrays(1, &(d->vertexArrayObject));
    glBindVertexArray(d->vertexArrayObject);
    glGenBuffers(eNumBuffers, d->vertexArrayBuffers);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[ePosition_Vb]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.positions[0]) * p_model.positions.size(), p_model.positions.data(), GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eTexCoord_Vb]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.texCoords[0]) * p_model.texCoords.size(), p_model.texCoords.data(), GL_STATIC_DRAW);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eNormal_Vb]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.normals[0]) * p_model.normals.size(), p_model.normals.data(), GL_STATIC_DRAW);
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, d->vertexArrayBuffers[eIndex_Vb]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * p_model.indices.size(), p_model.indices.data(), GL_STATIC_DRAW);
    GLint mat4_pos0 = 3;
    GLint shinyPos = 7;
    GLint materialPos = 8;
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
    for (unsigned int i = 0; i < 4; i++)
    {
        glEnableVertexAttribArray(mat4_pos0 + i);
        glVertexAttribPointer(mat4_pos0 + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
                              (const GLvoid*)(sizeof(GLfloat) * i * 4));
        glVertexAttribDivisor(mat4_pos0 + i, 1);
    }
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
    glEnableVertexAttribArray(shinyPos);
    glVertexAttribPointer(shinyPos, 1, GL_FLOAT, GL_FALSE, 0, 0);
    glVertexAttribDivisor(shinyPos, 1);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
    glEnableVertexAttribArray(materialPos);
    glVertexAttribPointer(materialPos, 1, GL_FLOAT, GL_FALSE, 0, 0);
    glVertexAttribDivisor(materialPos, 1);
}
Called once per frame.
void Mesh::draw(std::vector<Object*> const & p_objects, GLuint p_program)
{
    std::vector<glm::mat4> models;
    std::vector<glm::float32> shinies;
    std::vector<glm::vec3> specularColors;
    models.reserve(p_objects.size());
    shinies.reserve(p_objects.size());
    specularColors.reserve(p_objects.size());
    for (int index = 0; index < p_objects.size(); index++)
    {
        models.push_back(p_objects[index]->getTransform());
        shinies.push_back(p_objects[index]->getShininess());
        specularColors.push_back(p_objects[index]->getSpecularColor());
    }
    unsigned int bytesOfModels = models.size() * sizeof(models[0]);
    unsigned int bytesOfShinies = shinies.size() * sizeof(shinies[0]);
    unsigned int bytesOfSpecularColors = specularColors.size() * sizeof(specularColors[0]);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
    glBufferData(GL_ARRAY_BUFFER, bytesOfModels, models.data(), GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
    glBufferData(GL_ARRAY_BUFFER, bytesOfShinies, shinies.data(), GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
    glBufferData(GL_ARRAY_BUFFER, bytesOfSpecularColors, specularColors.data(), GL_DYNAMIC_DRAW);
    // glDrawElementsInstanced(GL_TRIANGLES, d->drawCount, GL_UNSIGNED_SHORT, 0, models.size());
    // glDrawArraysInstanced(GL_TRIANGLE_FAN, 0, d->drawCount, models.size());
    glDrawElementsInstancedBaseVertex(GL_TRIANGLES,
                                      d->drawCount,
                                      GL_UNSIGNED_INT,
                                      0,
                                      p_objects.size(),
                                      0);
}
Called once per frame
void GenericRenderer::renderObjects(std::vector<Object*> p_objects)
{
    if (p_objects.empty())
    {
        return;
    }
    m_texture->bind(0);
    m_shader->bind();
    m_shader->updateCamera(m_camera);
    m_shader->updateLightSource(*m_light);
    m_shader->updateObjects(p_objects);
    m_mesh->bind();
    for (size_t index = 0; index < p_objects.size(); index++)
    {
        p_objects[index]->setOrigin(m_camera);
        p_objects[index]->updateTransform();
    }
    m_mesh->draw(p_objects, m_shader->getProgram());
    m_mesh->unbind();
}
Vertex Shader
#version 330
uniform mat4 camera;
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
layout (location = 3) in mat4 model;
layout (location = 7) in float materialShininess;
layout (location = 8) in vec3 materialSpecularColor;
out vec3 fragVert;
out vec2 fragTexCoord;
out vec3 fragNormal;
out mat4 fragModel;
out float fragMaterialShininess;
out vec3 fragMaterialSpecularColor;
void main()
{
    fragModel = model;
    fragTexCoord = texCoord;
    fragNormal = normal;
    fragVert = position;
    fragMaterialShininess = materialShininess;
    fragMaterialSpecularColor = materialSpecularColor;
    gl_Position = camera * model * vec4(position, 1);
}
Fragment Shader
#version 150
uniform vec3 cameraPosition;
uniform float exposure;
uniform float lightDistanceModifier;
uniform sampler2D tex;
uniform struct Light {
    vec3 position;
    vec3 intensities; //a.k.a the color of the light
    float attenuation;
    float ambientCoefficient;
} light;
in vec2 fragTexCoord;
in vec3 fragNormal;
in vec3 fragVert;
in mat4 fragModel;
in float fragMaterialShininess;
in vec3 fragMaterialSpecularColor;
out vec4 finalColor;
void main() {
    vec3 normal = normalize(transpose(inverse(mat3(fragModel))) * fragNormal);
    vec3 surfacePos = vec3(fragModel * vec4(fragVert, 1));
    vec4 surfaceColor = texture(tex, fragTexCoord);
    vec3 surfaceToLight = normalize(light.position - surfacePos);
    vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
    //ambient
    vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
    //diffuse
    float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
    vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;
    //specular
    float specularCoefficient = 0.0;
    if(diffuseCoefficient > 0.0)
        specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), fragMaterialShininess);
    vec3 specular = specularCoefficient * fragMaterialSpecularColor * light.intensities;
    //attenuation
    float distanceToLight = length(light.position - surfacePos);
    distanceToLight *= lightDistanceModifier;
    float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));
    //linear color (color before gamma correction)
    vec3 linearColor = ambient + attenuation*(diffuse + specular);
    //final color (after gamma correction)
    vec3 gamma = vec3(1.0/2.2);
    vec3 mapped = vec3(1.0) - exp(-linearColor * exposure);
    mapped = pow(mapped, vec3(1.0 / gamma));
    finalColor = vec4(mapped, surfaceColor.a);
}
OpenGL state changes are very expensive. If you are rendering 20000 objects individually per frame then you're most likely CPU bound. Your goal should be to render as many vertices as possible with as few state changes as possible.
If your 20000 objects all use the same model, your situation is a prime candidate for instanced rendering. Instanced rendering lets you render the same model thousands of times in one draw call. If you couple this with a separate vertex buffer that contains a WVP matrix for each instance, you can render each of those model instances at a unique location within the world.
Be warned though, instanced rendering isn't some sort of panacea for all your draw-call woes. It has its own unique overhead in constructing a buffer of MVP matrices on the CPU each frame. If the number of instances you're rendering isn't at least in the hundreds, you'll likely see worse performance than your current rendering method.
EDIT: You're already using instanced rendering, my apologies.
After reading your code more thoroughly, you are likely right in your assumption that you're GPU bound. However, it's not clear why you are rebuilding the specular and shininess buffers every frame when these attributes tend to remain constant for a material.
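As a minimal sketch of that last point (building on the posted buffer names, not a definitive implementation): fill the shininess and specular-color buffers once at init time, and each frame refresh only the per-instance model matrices, for example by orphaning the buffer and then using glBufferSubData:
// Per frame: update only the per-instance model matrices.
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
// Orphan the old storage so the driver can avoid a sync stall...
glBufferData(GL_ARRAY_BUFFER, bytesOfModels, NULL, GL_DYNAMIC_DRAW);
// ...then fill it with this frame's matrices.
glBufferSubData(GL_ARRAY_BUFFER, 0, bytesOfModels, models.data());
// The shininess and specular-color buffers keep the data uploaded at init
// and are simply left bound to their attribute locations.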

Convert deprecated OpenGL functions to modern OpenGL (3.3)

I need help converting the render function of an IQM model (void renderiqm()) to modern OpenGL (without deprecated functions like glEnableClientState(), etc.).
The code of the original renderiqm() function is here:
https://github.com/lsalzman/iqm/blob/master/demo/gpu-demo.cpp
and these are the shaders:
Vertex Shader:
"#version 120\n"
"#ifdef GL_ARB_uniform_buffer_object\n"
" #extension GL_ARB_uniform_buffer_object : enable\n"
" layout(std140) uniform animdata\n"
" {\n"
" uniform mat3x4 bonemats[80];\n"
" };\n"
"#else\n"
" uniform mat3x4 bonemats[80];\n"
"#endif\n"
"attribute vec4 vweights;\n"
"attribute vec4 vbones;\n"
"attribute vec4 vtangent;\n"
"void main(void)\n"
"{\n"
" mat3x4 m = bonemats[int(vbones.x)] * vweights.x;\n"
" m += bonemats[int(vbones.y)] * vweights.y;\n"
" m += bonemats[int(vbones.z)] * vweights.z;\n"
" m += bonemats[int(vbones.w)] * vweights.w;\n"
" vec4 mpos = vec4(gl_Vertex * m, gl_Vertex.w);\n"
" gl_Position = gl_ModelViewProjectionMatrix * mpos;\n"
" gl_TexCoord[0] = gl_MultiTexCoord0;\n"
" mat3 madjtrans = mat3(cross(m[1].xyz, m[2].xyz), cross(m[2].xyz, m[0].xyz), cross(m[0].xyz, m[1].xyz));\n"
" vec3 mnormal = gl_Normal * madjtrans;\n"
" vec3 mtangent = vtangent.xyz * madjtrans; // tangent not used, just here as an example\n"
" vec3 mbitangent = cross(mnormal, mtangent) * vtangent.w; // bitangent not used, just here as an example\n"
" gl_FrontColor = gl_Color * (clamp(dot(normalize(gl_NormalMatrix * mnormal), gl_LightSource[0].position.xyz), 0.0, 1.0) * gl_LightSource[0].diffuse + gl_LightSource[0].ambient);\n"
"}\n",
Fragment Shader:
"uniform sampler2D tex;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = gl_Color * texture2D(tex, gl_TexCoord[0].xy);\n"
"}\n",
I tried to convert this to modern OpenGL (VBOs, VAOs, shader version 330, etc.), but I don't know where it fails. Can you show me the correct way to convert it to modern OpenGL?
PS: I'm using OpenGL version 3.3.
Important additional information:
OK, here is my modified renderiqm() function:
void renderiqm(GLSLProgram& mainShader)
{
    m_matrix->loadIdentity();
    matrixTransformations();
    mainShader.sendUniform("modelYAxisDiff", getYAxisDiff());
    mainShader.sendUniform4x4("modelMatrix", m_matrix->getModelMatrix());
    // send the normal matrix
    // mainShader.sendUniform4x4("normalMatrix", m_matrix->getNormalMatrix());
    mainShader.sendUniform3x3("normalMatrix", glm::value_ptr(m_matrix->getNormalMatrix()));
    // send the transformation (modelview) matrix
    mainShader.sendUniform4x4("modelViewMatrix", glm::value_ptr(m_matrix->getModelViewMatrix()));
    mainShader.sendUniform("objColor", m_color);
    m_matrix->pushMatrix(MODEL_MATRIX);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo);
    glBufferData(GL_UNIFORM_BUFFER, ubosize, NULL, GL_STREAM_DRAW);
    glBufferSubData(GL_UNIFORM_BUFFER, bonematsoffset, numjoints*sizeof(Matrix3x4), outframe[0].a.v);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
    glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    vertex *vert = NULL;
    GLsizei stride = sizeof(vert->position)+sizeof(vert->normal)+sizeof(vert->tangent)+sizeof(vert->texcoord);
    if(numframes > 0)
    {
        stride += sizeof(vert->blendindex)+sizeof(vert->blendweight);
    }
    // vertices
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
    // normals
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (void*)sizeof(vert->position));
    // tangent
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, stride, (void*)sizeof(vert->position)+sizeof(vert->normal));
    // texture coordinates
    glEnableVertexAttribArray(3);
    glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, stride, (void*)sizeof(vert->position)+sizeof(vert->normal)+sizeof(vert->tangent));
    if(numframes > 0)
    {
        // blend weights
        glEnableVertexAttribArray(4);
        glVertexAttribPointer(4, 4, GL_UNSIGNED_BYTE, GL_TRUE, stride, (void*)sizeof(vert->position)+sizeof(vert->normal)+sizeof(vert->tangent)+sizeof(vert->texcoord));
        // blend indices
        glEnableVertexAttribArray(5);
        glVertexAttribPointer(5, 4, GL_UNSIGNED_BYTE, GL_FALSE, stride, (void*)sizeof(vert->position)+sizeof(vert->normal)+sizeof(vert->tangent)+sizeof(vert->texcoord)+sizeof(vert->blendindex));
    }
    iqmtriangle *tris = NULL;
    // DEBUG
    int i = 0;
    for(i = 0; i < m_textureIndices.size(); i++)
    {
        iqmmesh &m = meshes[i];
        glBindTexture(GL_TEXTURE_2D, textures[i] ? textures[i] : notexture);
        tTextures[i].bindTexture(0);
        glDrawElements(GL_TRIANGLES, 3*m.num_triangles, GL_UNSIGNED_INT, &tris[m.first_triangle]);
    }
    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glDisableVertexAttribArray(3);
    if(numframes > 0)
    {
        glDisableVertexAttribArray(4);
        glDisableVertexAttribArray(5);
    }
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    //glPopMatrix();
    m_matrix->popMatrix(MODEL_MATRIX);
}
Clarification:
mainShader is a tested shader interface that works fine (no errors there), and m_matrix is a GLM-based matrix interface that also works fine.
The vertex shader is this one:
string vAnimationShader = "\n\
#version 330 \n\
\n\
uniform mat4 projectionMatrix; \n\
uniform mat4 modelViewMatrix; \n\
uniform mat4 normalMatrix; \n\
uniform mat4 modelMatrix; \n\
uniform vec3 objColor; \n\
\n\
uniform mat4 DepthBiasMVP; \n\
smooth out vec4 ShadowCoord;\n\
\n\
smooth out vec2 texCoord; \n\
smooth out vec3 vNormal; \n\
smooth out vec3 vEyeSpacePos; \n\
smooth out vec3 vWorldPos; \n\
smooth out vec3 vObjColor; \n\
\n\
layout (location = 0) in vec3 inPosition; \n\
layout (location = 3) in vec2 inCoord; \n\
layout (location = 1) in vec3 inNormal; \n\
\n\
uniform mat3x4 bonemats[80];\n\
\n\
layout (location = 4) in vec4 vweights;\n\
layout (location = 5) in vec4 vbones;\n\
layout (location = 2) in vec4 vtangent;\n\
void main(void)\n\
{\n\
vObjColor = objColor; \n\
mat3x4 m = bonemats[int(vbones.x)] * vweights.x;\n\
m += bonemats[int(vbones.y)] * vweights.y;\n\
m += bonemats[int(vbones.z)] * vweights.z;\n\
m += bonemats[int(vbones.w)] * vweights.w;\n\
vec4 vEyeSpacePosVertex = modelViewMatrix*vec4(inPosition, 1.0); \n\
gl_Position = projectionMatrix*vEyeSpacePosVertex; \n\
mat3 madjtrans = mat3(cross(m[1].xyz, m[2].xyz), cross(m[2].xyz, m[0].xyz), cross(m[0].xyz, m[1].xyz));\n\
vec3 mnormal = inNormal * madjtrans;\n\
vec3 mtangent = vtangent.xyz * madjtrans; // tangent not used, just here as an example\n\
vec3 mbitangent = cross(mnormal, mtangent) * vtangent.w; // bitangent not used, just here as an example\n\
texCoord = inCoord; \n\
vec4 vRes = normalMatrix*vec4(inNormal, 0.0); \n\
vNormal = vRes.xyz; \n\
vEyeSpacePos = vEyeSpacePosVertex.xyz; \n\
vec4 vWorldPosVertex = modelMatrix*vec4(inPosition, 1.0); \n\
vWorldPos = vWorldPosVertex.xyz; \n\
ShadowCoord = DepthBiasMVP * vWorldPosVertex; \n\
}\n\
";
I can't find the error... on screen I don't see the IQM model. Everything else draws perfectly, but the IQM model doesn't appear.
IMPORTANT DISCOVERY:
I have recently discovered that the problem occurs when the OpenGL context is 3.2+. If I switch to a 3.1 OpenGL context, it works perfectly with my modified function.
I still haven't found what the problem is... I assume it is related to a deprecated function, but I can't find it. Please, can you help me? Thanks a lot.
The problem was initializing the VBO without a VAO. I fixed it and now the model renders correctly.
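For anyone hitting the same thing: a 3.2+ core profile context has no default VAO, so the vertex attribute setup has to be recorded into one you create yourself. A minimal sketch of the fix (run once during initialization, before the attribute setup in renderiqm()):
GLuint vao = 0;
glGenVertexArrays(1, &vao);   // core profile requires an explicit VAO
glBindVertexArray(vao);       // subsequent attribute/buffer setup is recorded into it
// ... bind the VBO/EBO and set up glVertexAttribPointer as in renderiqm() ...
// At draw time, glBindVertexArray(vao) restores the whole attribute state.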

Make many lit triangles look smooth

I am trying to create a program that shows a wave-like animation using Perlin Noise by creating many triangles.
This is the important part of my program:
class OGLT9_NOISE
{
    //class for Perlin Noise (noise3d()) and Fractional Brownian Motion (fbm()) generation
};

glm::vec3 OGLT9_GRAPHICS::getNormal(glm::vec3 a, glm::vec3 b, glm::vec3 c)
{
    return glm::normalize(glm::cross(c-a, b-a));
}
void generateTerrain(OGLT9_SHADER *oglt9Shader)
{
    static OGLT9_NOISE noise;
    static float yValue = 0;
    int terrainRes = 7; //terrain's resolution
    float terrainSpacing = 10.0f;
    vector<glm::vec3> vertexData;
    vector<glm::vec3> normalData;
    multi_array<float, 2> terrain;
    terrain.resize(extents[1<<terrainRes][1<<terrainRes]);
    for(long z=-(1<<(terrainRes-1)); z<(1<<(terrainRes-1)); z++)
        for(long x=-(1<<(terrainRes-1)); x<(1<<(terrainRes-1)); x++)
            terrain[z+(1<<(terrainRes-1))][x+(1<<(terrainRes-1))] = (noise.fbm((double)x/16.0, yValue, (double)z/16.0, 2, 0.4, 1.2, 2.9, 1.1)/2.0+0.5)*100.0;
    for(long z=0; z<(1<<terrainRes)-1; z++)
    {
        for(long x=0; x<(1<<terrainRes)-1; x++)
        {
            vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
            vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
            vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z][x+1], (float)z*terrainSpacing));
            vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z][x], (float)z*terrainSpacing));
            vertexData.push_back(glm::vec3((float)x*terrainSpacing, terrain[z+1][x], ((float)z+1.0f)*terrainSpacing));
            vertexData.push_back(glm::vec3(((float)x+1.0f)*terrainSpacing, terrain[z+1][x+1], ((float)z+1.0f)*terrainSpacing));
            normalData.push_back(getNormal(vertexData[vertexData.size()-6], vertexData[vertexData.size()-5], vertexData[vertexData.size()-4]));
            normalData.push_back(normalData[normalData.size()-1]);
            normalData.push_back(normalData[normalData.size()-2]);
            normalData.push_back(getNormal(vertexData[vertexData.size()-3], vertexData[vertexData.size()-2], vertexData[vertexData.size()-1]));
            normalData.push_back(normalData[normalData.size()-1]);
            normalData.push_back(normalData[normalData.size()-2]);
        }
    }
    glUseProgram(oglt9Shader->program);
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, vertexData.size()*3*sizeof(float), vertexData.data(), GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(0);
    glGenBuffers(1, &nbo);
    glBindBuffer(GL_ARRAY_BUFFER, nbo);
    glBufferData(GL_ARRAY_BUFFER, normalData.size()*3*sizeof(float), normalData.data(), GL_STATIC_DRAW);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    numVertices = vertexData.size()*3;
    yValue += 0.01f;
}
void render()
{
    //Clear screen and enable depth buffer
    //Create and transmit matrices and light direction to shaders
    generateTerrain(oglt9Shader);
    glDrawArrays(GL_TRIANGLES, 0, numVertices);
    glDeleteBuffers(1, &vbo);
    glDeleteBuffers(1, &nbo);
    //Swap buffers to window
}
And my vertex shader...
#version 430 core
layout (location = 0) in vec3 vPosition;
layout (location = 1) in vec3 vNormal;
uniform mat4 mMatrix;
uniform mat4 vMatrix;
uniform mat4 pMatrix;
out vec3 fPosition;
out vec3 fNormal;
void main(void)
{
    gl_Position = pMatrix * vMatrix * mMatrix * vec4(vPosition, 1.0);
    fPosition = vPosition;
    fNormal = normalize(transpose(inverse(mat3(mMatrix))) * vNormal);
}
...and fragment shader:
#version 430 core
in vec3 fPosition;
in vec3 fNormal;
out vec4 outColor;
uniform vec3 lightDirection;
void main(void)
{
    vec3 rawColor = vec3(1.0);
    vec3 ambientColor = vec3(1.0, 1.0, 1.0);
    float diffuseIntensity = max(0.0, dot(fNormal, lightDirection));
    vec3 diffuseColor = diffuseIntensity * vec3(0.9, 0.9, 0.9);
    outColor = vec4(rawColor*ambientColor*diffuseColor, 1.0);
}
This is the final image:
So, what can I do to make the triangles smooth so you can't see these hard edges anymore?
You're using the same normal for all 3 vertices of each triangle. This will essentially result in flat shading, meaning that the color of each triangle is constant.
What you need is normals that better approximate the actual normals of the surface, instead of calculating the normal of each triangle separately. To get a smooth looking surface, you need to have one normal per vertex, and then use that normal when specifying the vertex for all the triangles that share the vertex.
The most efficient way of doing this is to store each vertex/normal of your grid in the VBO only once. You can then use an index buffer to reference the vertices when defining the triangles. This means that you have an additional buffer of type GL_ELEMENT_ARRAY_BUFFER containing indices, and then draw with glDrawElements(). You should be able to find reference information and tutorials on how to do that.
To actually obtain the normals, one common approach is that you average the triangle normals of all adjacent triangles to calculate the normal at a vertex.
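A minimal sketch of the averaging idea, assuming the grid vertices are stored once and triangles are defined by an index list (the names vertices/indices are illustrative, not the poster's actual variables; winding matches the posted getNormal()):
// vertices: one entry per grid point; indices: 3 per triangle.
std::vector<glm::vec3> normals(vertices.size(), glm::vec3(0.0f));
for (size_t i = 0; i + 2 < indices.size(); i += 3)
{
    unsigned int ia = indices[i], ib = indices[i+1], ic = indices[i+2];
    glm::vec3 faceNormal = glm::cross(vertices[ic] - vertices[ia],
                                      vertices[ib] - vertices[ia]);
    // Accumulate the (unnormalized) face normal on each of its three vertices.
    normals[ia] += faceNormal;
    normals[ib] += faceNormal;
    normals[ic] += faceNormal;
}
for (auto & n : normals)
    n = glm::normalize(n); // average direction of all adjacent faces
// Upload 'normals' as the per-vertex normal buffer and draw with glDrawElements().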