I'm working on bone animation. I have a vertex struct that basically looks like
struct MeshVertex
{
glm::vec3 pos;
glm::vec3 normal;
glm::vec2 tex;
glm::vec3 tangent;
glm::vec3 bitangent;
uint32_t ids[4] = {};
float weights[4] = {};
void print() const;
};
The mesh is a basic cube with one bone. Therefore ids = {0,0,0,0} and weights = {1.0f,0.0f,0.0f,0.0f} for every single vertex. In my mesh class I have a static function Mesh::genFormat() that handles attributes. vao is a static int in the mesh class and for_i is just a convenient macro I use to do for loops. Note that I correctly use glVertexArrayAttribIFormat.
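For reference, a minimal sketch of a for_i macro along these lines (the exact definition may differ):
// Assumed definition of the for_i convenience macro (illustrative only).
#define for_i(n) for (int i = 0; i < (n); ++i)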
Mesh::Mesh(const std::vector<MeshVertex>& vertices, const std::vector<uint>& indices, const std::vector<Texture>& textures)
{
m_textures = textures;
m_num_indices = indices.size();
// create vertex and index buffers
glCreateBuffers(1, &m_vbo);
glCreateBuffers(1, &m_ibo);
glNamedBufferData(m_vbo, sizeof(MeshVertex) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
glNamedBufferData(m_ibo, sizeof(uint) * indices.size(), &indices[0], GL_STATIC_DRAW);
}
void Mesh::genFormat()
{
glCreateVertexArrays(1, &vao);
for_i(7) { glEnableVertexArrayAttrib(vao, i); }
glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, false, offsetof(MeshVertex, pos));
glVertexArrayAttribFormat(vao, 1, 3, GL_FLOAT, false, offsetof(MeshVertex, normal));
glVertexArrayAttribFormat(vao, 2, 2, GL_FLOAT, false, offsetof(MeshVertex, tex));
glVertexArrayAttribFormat(vao, 3, 3, GL_FLOAT, false, offsetof(MeshVertex, tangent));
glVertexArrayAttribFormat(vao, 4, 3, GL_FLOAT, false, offsetof(MeshVertex, bitangent));
glVertexArrayAttribIFormat(vao, 5, 4, GL_UNSIGNED_INT, offsetof(MeshVertex, ids));
glVertexArrayAttribFormat(vao, 6, 4, GL_FLOAT, false, offsetof(MeshVertex, weights));
for_i(7) { glVertexArrayAttribBinding(vao, i, 0); }
glBindVertexArray(0);
}
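Not shown here is where the per-mesh buffers get attached to the shared VAO; a minimal sketch of how that might look with DSA (assuming it happens before each mesh is drawn):
// Sketch: attach this mesh's buffers to the shared VAO (binding index 0) before drawing.
glVertexArrayVertexBuffer(vao, 0, m_vbo, 0, sizeof(MeshVertex));
glVertexArrayElementBuffer(vao, m_ibo);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, m_num_indices, GL_UNSIGNED_INT, nullptr);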
The following GLSL won't render anything.
#version 460 core
layout(location = 0) in vec3 Pos;
layout(location = 1) in vec3 Normal;
layout(location = 2) in vec2 Tex;
layout(location = 3) in vec3 Tan;
layout(location = 4) in vec3 BiTan;
layout(location = 5) in uvec4 BoneIds;
layout(location = 6) in vec4 Weights;
out vec3 normal;
out vec2 tex;
layout(binding = 2, std140) uniform Camera
{
mat4 VP;
vec4 cpos;
};
uniform mat4 node;
uniform mat4 bones_inverse_bind_mesh_parent[50];
void main()
{
tex = Tex;
mat4 W = mat4(0.0f);
if (Weights[0] != 0.0f)
{
for (uint i = 0; i < 4; i++)
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
W = node * W;
}
else
W = node;
gl_Position = VP * W * vec4(Pos, 1.0);
}
Since BoneIds[i] is always zero, if I replace
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
with
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[0]);
the result should be unchanged. My matrix transforms are currently a bit off (something to fix later), but now the cube renders fine. So there is something wrong with BoneIds. After bashing my head against the wall on this for a while, I instead replaced
layout(location = 5) in uvec4 BoneIds;
with
layout(location = 5) varying uvec4 BoneIds;
after seeing some old GLSL online, and now everything works. What I don't understand is why. I've seen plenty of GLSL code on the internet work with integer attributes using the in keyword.
UPDATE :
If I replace glVertexArrayAttribIFormat in Mesh::genFormat() with
glVertexArrayAttribFormat(vao, 5, 4, GL_UNSIGNED_INT, false, offsetof(MeshVertex, ids));
in C++ and
layout(location = 5) in vec4 BoneIds;
in GLSL, and cast the bone ids from float to int in the shader, the code also works.
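In other words, with that workaround the relevant GLSL lines become roughly (a sketch of the change described above):
layout(location = 5) in vec4 BoneIds; // floats now, matching glVertexArrayAttribFormat
// ... and the lookup in main() becomes:
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[int(BoneIds[i])]);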
Okay, I solved the issue, even though I don't quite understand how this fixes the problem. My preferred graphics processor was set to auto; when I forced it to use the NVIDIA GPU over my integrated graphics, everything works fine.
Update :
I think it is as simple as my Intel integrated graphics only supporting OpenGL 4.4, while glVertexArrayAttribIFormat was introduced in OpenGL 4.5.
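One way to confirm that at runtime is to log what the active driver reports right after context creation (a sketch, not part of the original code):
// Query and print the version actually provided by the driver in use.
GLint major = 0, minor = 0;
glGetIntegerv(GL_MAJOR_VERSION, &major);
glGetIntegerv(GL_MINOR_VERSION, &minor);
printf("OpenGL %d.%d, renderer: %s\n", major, minor,
       reinterpret_cast<const char*>(glGetString(GL_RENDERER)));
// glVertexArrayAttribIFormat needs 4.5 (or ARB_direct_state_access).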
glGetUniformLocation returns -1, even though the uniform is defined and used within the vertex shader. I suspect the cause may be that the out attributes are not properly bound to the target buffer (not sure if that is the case). Without this one uniform, most of my values stay the same.
Drawing the Transform Feedback
/*code included in update*/
glUseProgram(feedbackShader->GetProgram());
glEnable(GL_RASTERIZER_DISCARD);
/*end of code included in update*/
glBindBuffer(GL_ARRAY_BUFFER, particleBuffer[isEvenBuffer]);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, transformBuffer[!isEvenBuffer]);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glEnableVertexAttribArray(5);
glVertexAttribPointer(5, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), 0); //Location
glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)12); //Velocity
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)24); //InitLocation
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)36); //InitVelocity
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)48); //Lifetime
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)52); //InitLifetime
GLint uniformLocation = glGetUniformLocation(feedbackShader->GetProgram(), "time");
glUniform1f(uniformLocation, msec);
glBeginTransformFeedback(GL_POINTS);
glDrawTransformFeedback(GL_POINTS, transformBuffer[isEvenBuffer]);
glEndTransformFeedback();
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
glDisableVertexAttribArray(5);
The Vertex Shader
#version 410
in vec3 inLocation;
in vec3 inVelocity;
in vec3 inInitLocation;
in vec3 inInitVelocity;
in float inLifeTime;
in float inInitlifeTime;
out vec3 outLocation;
out vec3 outVelocity;
out vec3 outInitLocation;
out vec3 outInitVelocity;
out float outLifeTime;
out float outInitlifeTime;
uniform float time;
vec3 Gravity = vec3(0.0f,-0.98f,0.0f);
float dampeningFactor = 0.5;
void main()
{
outLifeTime = inLifeTime - time;
if(outLifeTime > 0.0f){
outVelocity = (inVelocity + Gravity * time) * dampeningFactor;
outLocation = inLocation + inVelocity * time;
}else{
outVelocity = inInitVelocity;
outLocation = inInitLocation;
outLifeTime = inInitlifeTime;
}
outInitVelocity = inInitVelocity;
outInitLocation = inInitLocation;
outInitlifeTime = inInitlifeTime;
}
UPDATE
There were a few extra bits of information you all asked for.
The Feedback Varyings (I was stupid and didn't have this at first, though the same issue remains in my code)
const GLchar* feedbackVaryings[] = {
"outLocation",
"outVelocity",
"outInitLocation",
"outInitVelocity",
"outLifeTime",
"outInitlifeTime"
};
glTransformFeedbackVaryings(feedbackShader->LinkProgram(), 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
I also added the glUseProgram call in the "Drawing the Transform Feedback" section.
I assume that the instruction
feedbackShader->LinkProgram()
links the program. But the transform feedback varyings have to be specified before the program is linked.
See OpenGL 4.6 API Core Profile Specification; 7.3.1.1 Naming Active Resources; page 104
The order of the active resource list is implementation-dependent for all
interfaces except for TRANSFORM_FEEDBACK_VARYING. If variables in the
TRANSFORM_FEEDBACK_VARYING interface were specified using the TransformFeedbackVaryings command, the active resource list will be arranged in the variable order specified in the most recent call to TransformFeedbackVaryings before the last call to LinkProgram.
This means that first you have to attach the compiled vertex shader object to the program object (glAttachShader). Then you have to specify the transform feedback varyings (glTransformFeedbackVaryings). Finally, you have to link the program (glLinkProgram):
GLuint shader_obj;
GLuint program_obj;
.....
glAttachShader(program_obj, shader_obj);
const GLchar* feedbackVaryings[] = {
"outLocation",
"outVelocity",
"outInitLocation",
"outInitVelocity",
"outLifeTime",
"outInitlifeTime"
};
glTransformFeedbackVaryings(program_obj, 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
glLinkProgram(program_obj);
Further, I recommend using layout qualifiers to define the attribute indices of the vertex shader input variables.
e.g.
layout (location = 0) in vec3 inLocation;
layout (location = 1) in vec3 inVelocity;
layout (location = 2) in vec3 inInitLocation;
layout (location = 3) in vec3 inInitVelocity;
layout (location = 4) in float inLifeTime;
layout (location = 5) in float inInitlifeTime;
As an alternative, the attribute index can be determined with glGetAttribLocation after linking the program, or set with glBindAttribLocation before linking it.
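A minimal sketch of those two alternatives, reusing program_obj and the input names from the shader above:
// Option 1: query after linking and use the returned indices in the attribute setup.
GLint locLocation = glGetAttribLocation(program_obj, "inLocation");
GLint locVelocity = glGetAttribLocation(program_obj, "inVelocity");
// ... repeat for the remaining inputs, then pass these indices to
// glEnableVertexAttribArray / glVertexAttribPointer instead of fixed numbers.

// Option 2: bind before linking so the indices are known up front.
glBindAttribLocation(program_obj, 0, "inLocation");
glBindAttribLocation(program_obj, 1, "inVelocity");
glLinkProgram(program_obj);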
I'm trying to send colors to the shader, but the colors get swapped: I send 0xFF00FFFF (magenta) but get 0xFFFF00FF (yellow) in the shader. From experimenting, it looks like the bytes are being read in reverse order.
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
vec4 position;
vec3 normal;
vec4 color;
} vs_out;
void main()
{
gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
vs_out.position = position;
vs_out.color = color;
vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
vec4 position;
vec3 normal;
vec4 color;
} fs_in;
void main()
{
out_color = fs_in.color;
//out_color = vec4(fs_in.color.y, 0, 0, 1);
//out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
Vec3 vertex;
Vec3 normal;
GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
I can't figure out what's wrong. Thanks in advance.
Your code is reinterpreting an int as 4 consecutive bytes in memory. The internal encoding for int (and all other types) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments.
You could use an array like GLubyte color[4] to explicitly get a defined memory layout.
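A sketch of that variant based on the struct from the question (only the color member and the push_back change; the existing glVertexAttribPointer call for attribute 2 stays the same):
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // r, g, b, a as individual bytes: the memory layout is now explicit
};
// ...
verts.push_back({ vert, normal, { 0xFF, 0x00, 0xFF, 0xFF } }); // magenta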
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10, and might be less efficient than the standard approach.
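And a sketch of the integer-attribute variant (assuming a context and GLSL version that provide glVertexAttribIPointer and unpackUnorm4x8):
// C++: keep the packed 32-bit value but expose it to the shader as an integer.
glVertexAttribIPointer(2, 1, GL_UNSIGNED_INT, sizeof(Vertex_Color),
                       (const GLvoid*)(offsetof(Vertex_Color, color)));
// GLSL side (vertex shader):
//   layout(location = 2) in uint packedColor;
//   vec4 color = unpackUnorm4x8(packedColor);
// Note: unpackUnorm4x8 puts the least significant byte in .x, so a swizzle
// may still be needed depending on how the hex literal orders its bytes.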
I'm trying to render lots of stuff with OpenGL 3.3. Am I missing some tricks to make this faster?
Does it matter if I use glBufferData or glBufferSubData?
I have coded OpenGL for 5 days now, so I know there are lots of unknown unknowns to me. Those are what I'm looking for; can you point me to any ways of making this even faster?
I think I'm using what's called "instanced rendering". All my stuff is rendered via a single glDrawElementsInstancedBaseVertex call.
Did I miss any relevant code? There's so much of it that I can't really paste it all here.
I've gotten as far as 20,000 objects with 24 vertices each using the following code:
Called once per mesh at start, not during frames.
void Mesh::initMesh(IndexedModel const & p_model)
{
d->drawCount = p_model.indices.size();
glGenVertexArrays(1, &(d->vertexArrayObject));
glBindVertexArray(d->vertexArrayObject);
glGenBuffers(eNumBuffers, d->vertexArrayBuffers);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[ePosition_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.positions[0]) * p_model.positions.size(), p_model.positions.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eTexCoord_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.texCoords[0]) * p_model.texCoords.size(), p_model.texCoords.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eNormal_Vb]);
glBufferData(GL_ARRAY_BUFFER, sizeof(p_model.normals[0]) * p_model.normals.size(), p_model.normals.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, d->vertexArrayBuffers[eIndex_Vb]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * p_model.indices.size(), p_model.indices.data(), GL_STATIC_DRAW);
GLint mat4_pos0 = 3;
GLint shinyPos = 7;
GLint materialPos = 8;
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
for (unsigned int i = 0; i < 4; i++)
{
glEnableVertexAttribArray(mat4_pos0 + i);
glVertexAttribPointer(mat4_pos0 + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
(const GLvoid*)(sizeof(GLfloat) * i * 4));
glVertexAttribDivisor(mat4_pos0 + i, 1);
}
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
glEnableVertexAttribArray(shinyPos);
glVertexAttribPointer(shinyPos, 1, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribDivisor(shinyPos, 1);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
glEnableVertexAttribArray(materialPos);
glVertexAttribPointer(materialPos, 1, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribDivisor(materialPos, 1);
}
Called once per frame.
void Mesh::draw(std::vector<Object*> const & p_objects, GLuint p_program)
{
std::vector<glm::mat4> models;
std::vector<glm::float32> shinies;
std::vector<glm::vec3> specularColors;
models.reserve(p_objects.size());
shinies.reserve(p_objects.size());
specularColors.reserve(p_objects.size());
for (int index = 0;
index < p_objects.size();
index++)
{
models.push_back(p_objects[index]->getTransform());
shinies.push_back(p_objects[index]->getShininess());
specularColors.push_back(p_objects[index]->getSpecularColor());
}
unsigned int bytesOfModels = models.size() * sizeof(models[0]);
unsigned int bytesOfShinies = shinies.size() * sizeof(shinies[0]);
unsigned int bytesOfSpecularColors = specularColors.size() * sizeof(specularColors[0]);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfModels, models.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfShinies, shinies.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfSpecularColors, specularColors.data(), GL_DYNAMIC_DRAW);
// glDrawElementsInstanced(GL_TRIANGLES, d->drawCount, GL_UNSIGNED_SHORT, 0, models.size());
// glDrawArraysInstanced(GL_TRIANGLE_FAN, 0, d->drawCount, models.size());
glDrawElementsInstancedBaseVertex(GL_TRIANGLES,
d->drawCount,
GL_UNSIGNED_INT,
0,
p_objects.size(),
0);
}
Called once per frame
void GenericRenderer::renderObjects(std::vector<Object*> p_objects)
{
if (p_objects.empty())
{
return;
}
m_texture->bind(0);
m_shader->bind();
m_shader->updateCamera(m_camera);
m_shader->updateLightSource(*m_light);
m_shader->updateObjects(p_objects);
m_mesh->bind();
for (size_t index = 0;
index < p_objects.size();
index++)
{
p_objects[index]->setOrigin(m_camera);
p_objects[index]->updateTransform();
}
m_mesh->draw(p_objects, m_shader->getProgram());
m_mesh->unbind();
}
Vertex Shader
#version 330
uniform mat4 camera;
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
layout (location = 3) in mat4 model;
layout (location = 7) in float materialShininess;
layout (location = 8) in vec3 materialSpecularColor;
out vec3 fragVert;
out vec2 fragTexCoord;
out vec3 fragNormal;
out mat4 fragModel;
out float fragMaterialShininess;
out vec3 fragMaterialSpecularColor;
void main()
{
fragModel = model;
fragTexCoord = texCoord;
fragNormal = normal;
fragVert = position;
fragMaterialShininess = materialShininess;
fragMaterialSpecularColor = materialSpecularColor;
gl_Position = camera * model * vec4(position, 1);
}
Fragment Shader
#version 150
uniform vec3 cameraPosition;
uniform float exposure;
uniform float lightDistanceModifier;
uniform sampler2D tex;
uniform struct Light {
vec3 position;
vec3 intensities; //a.k.a the color of the light
float attenuation;
float ambientCoefficient;
} light;
in vec2 fragTexCoord;
in vec3 fragNormal;
in vec3 fragVert;
in mat4 fragModel;
in float fragMaterialShininess;
in vec3 fragMaterialSpecularColor;
out vec4 finalColor;
void main() {
vec3 normal = normalize(transpose(inverse(mat3(fragModel))) * fragNormal);
vec3 surfacePos = vec3(fragModel * vec4(fragVert, 1));
vec4 surfaceColor = texture(tex, fragTexCoord);
vec3 surfaceToLight = normalize(light.position - surfacePos);
vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
//ambient
vec3 ambient = light.ambientCoefficient * surfaceColor.rgb * light.intensities;
//diffuse
float diffuseCoefficient = max(0.0, dot(normal, surfaceToLight));
vec3 diffuse = diffuseCoefficient * surfaceColor.rgb * light.intensities;
//specular
float specularCoefficient = 0.0;
if(diffuseCoefficient > 0.0)
specularCoefficient = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), fragMaterialShininess);
vec3 specular = specularCoefficient * fragMaterialSpecularColor * light.intensities;
//attenuation
float distanceToLight = length(light.position - surfacePos);
distanceToLight *= lightDistanceModifier;
float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));
//linear color (color before gamma correction)
vec3 linearColor = ambient + attenuation*(diffuse + specular);
//final color (after gamma correction)
vec3 gamma = vec3(1.0/2.2);
vec3 mapped = vec3(1.0) - exp(-linearColor * exposure);
mapped = pow(mapped, vec3(1.0 / gamma));
finalColor = vec4(mapped, surfaceColor.a);
}
OpenGL state changes are very expensive. If you are rendering 20000 objects individually per frame then you're most likely CPU bound. Your goal should be to render as many vertices as possible with as few state changes as possible.
If your 20000 objects are all using the same model then your situation is a prime candidate for instanced rendering. Instanced rendering lets you render the same model thousands of times in one draw call. If you couple this with a separate vertex buffer that contains WVP matrices for each model then you can render each of those model instances at a unique location within the world.
Be warned though, instanced rendering isn't some sort of panacea for all your draw call woes. It has its own unique overhead with constructing a buffer of MVP matrices on the CPU each frame. If the number of instances you're rendering isn't at least in the hundreds, you'll likely see worse performance than your current rendering method.
EDIT: You're already using instanced rendering; my apologies.
After reading your code more thoroughly you are likely right in your assumption that you're GPU bound. However, it's not currently clear why you are constructing specular and shininess buffers once per frame when these attributes tend to remain constant for a material.
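If the shininess and specular color really are per-material constants, one option is to fill those buffers once at init time and only refresh the model matrices each frame; a sketch using the buffer names from the question:
// At init time (once): material data as static buffers.
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eShiny_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfShinies, shinies.data(), GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eSpecular_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfSpecularColors, specularColors.data(), GL_STATIC_DRAW);

// Per frame: only the transforms change. Orphan the buffer, then fill it
// (or allocate it once at a fixed maximum size and use glBufferSubData alone).
glBindBuffer(GL_ARRAY_BUFFER, d->vertexArrayBuffers[eModel_Vb]);
glBufferData(GL_ARRAY_BUFFER, bytesOfModels, nullptr, GL_DYNAMIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, bytesOfModels, models.data());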
I currently load some attributes for my vertex shader in the following way:
glBindBuffer(GL_ARRAY_BUFFER, MeshVBs[eyeNum]->GLBuffer);
UINT stride = sizeof(ovrDistortionVertex);
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inV", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (size_t i = 0; i < 5; i++)
{
VertexAttribDesc vad = VertexDesc[i];
glEnableVertexAttribArray((GLuint)i);
glVertexAttribPointer((GLuint)i, vad.Size, vad.Type, vad.Normalized, stride, reinterpret_cast<char*>(vad.Offset));
}
That is the used vertex shader
#version 330 core
uniform vec2 EyeToSourceUVScale;
uniform vec2 EyeToSourceUVOffset;
attribute vec2 Position;
attribute float inT;
attribute vec2 inTexCoord0;
attribute vec2 inTexCoord1;
attribute vec2 inTexCoord2;
varying vec4 oPosition;
varying vec2 oTexCoord0;
varying vec2 oTexCoord1;
varying vec2 oTexCoord2;
varying float oVignette;
vec2 TexCoord0 = vec2((inTexCoord0.x), (-inTexCoord0.y));
vec2 TexCoord1 = vec2((inTexCoord1.x), (-inTexCoord1.y));
vec2 TexCoord2 = vec2((inTexCoord2.x), (-inTexCoord2.y));
float Vignette = inT;
vec2 normalizeTexCoord( in vec2 TexCoord )
{
return ( EyeToSourceUVScale*TexCoord) + EyeToSourceUVOffset;
}
void main(){
oTexCoord0 = normalizeTexCoord( TexCoord0);
oTexCoord1 = normalizeTexCoord( TexCoord1);
oTexCoord2 = normalizeTexCoord( TexCoord2);
oVignette = Vignette;
gl_Position.xyzw = vec4( Position.xy , 0.500000, 1.00000);
}
That works! But if I rename "inT" in the vertex shader to "inV", nothing works anymore. Any ideas why?
Let me know if you need more information about the context to give an answer.
----SOLUTION----
Before the main loop:
UINT stride = sizeof(ovrDistortionVertex);
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inVignette", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (int i = 0;i<5;i++)
{
VertexAttribDesc vad = VertexDesc[i];
glBindAttribLocation (shader_programm, i, vad.Name);
}
glLinkProgram(shader_programm);
What remained in the main loop:
glBindBuffer(GL_ARRAY_BUFFER, MeshVBs[eyeNum]->GLBuffer);
for (size_t i = 0; i < 5; i++)
{
VertexAttribDesc vad = VertexDesc[i];
glEnableVertexAttribArray((GLuint)i);
glVertexAttribPointer((GLuint)i, vad.Size, vad.Type, vad.Normalized, stride, reinterpret_cast<char*>(vad.Offset));
}
and the new vertex shader (the fragment shader also changed varying -> in):
#version 330 core
uniform vec2 EyeToSourceUVScale;
uniform vec2 EyeToSourceUVOffset;
in vec2 Position;
in float inVignette;
in vec2 inTexCoord0;
in vec2 inTexCoord1;
in vec2 inTexCoord2;
out vec4 oPosition;
out vec2 oTexCoord0;
out vec2 oTexCoord1;
out vec2 oTexCoord2;
out float oVignette;
vec2 TexCoord0 = vec2((inTexCoord0.x), (-inTexCoord0.y));
vec2 TexCoord1 = vec2((inTexCoord1.x), (-inTexCoord1.y));
vec2 TexCoord2 = vec2((inTexCoord2.x), (-inTexCoord2.y));
float Vignette = inVignette;
vec2 normalizeTexCoord( in vec2 TexCoord )
{
return ( EyeToSourceUVScale*TexCoord) + EyeToSourceUVOffset;
}
void main(){
oTexCoord0 = normalizeTexCoord( TexCoord0);
oTexCoord1 = normalizeTexCoord( TexCoord1);
oTexCoord2 = normalizeTexCoord( TexCoord2);
oVignette = Vignette;
gl_Position.xyzw = vec4( Position.xy , 0.500000, 1.00000);
}
That syntax is not valid for a 330 core shader. You need to use the new in and out syntax that GLSL 130 introduced. That means in the vertex shader, inputs (attribute) all become in and outputs (varying) all become out. In the matching fragment shader, the outputs (varying) from the vertex shader are inputs and thus are declared using in.
With all that said, I see nowhere that you actually query the location of your vertex attributes. Many drivers will automatically assign attribute locations in alphabetical order, so changing inT to inV is going to change its location to come after inTexCoord2.
Since this is a GLSL 3.30 shader, you have the option of binding the attribute locations explicitly in the shader, binding them from the API before linking, or querying them after the fact.
To make this work as simply as possible, I would do something like this:
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inV", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (size_t i = 0; i < 5; i++)
{
VertexAttribDesc vad = VertexDesc[i];
glBindAttribLocation (prog, i, vad.Name);
}
Keep in mind, that has to be done before you link your GLSL program. Attribute locations are assigned during the link operation. You can change them later, but you have to re-link the program.
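For completeness, the in-shader alternative mentioned above pins the locations without any glBindAttribLocation calls; a sketch matching the order of VertexDesc:
layout (location = 0) in vec2 Position;
layout (location = 1) in float inV;
layout (location = 2) in vec2 inTexCoord0;
layout (location = 3) in vec2 inTexCoord1;
layout (location = 4) in vec2 inTexCoord2;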
I'm having an issue drawing multiple point lights in my scene. I am working on a simple maze-style game in OpenGL, where the maze is randomly generated. Each "room" in the maze is represented by a Room struct, like so:
struct Room
{
int x, z;
bool already_visited, n, s, e, w;
GLuint vertex_buffer, texture, uv_buffer, normal_buffer;
std::vector<glm::vec3>vertices, normals;
std::vector<glm::vec2>uvs;
glm::vec3 light_pos; //Stores the position of a light in the room
};
Each room has a light in it, the position of this light is stored in light_pos. This light is used in a simple per-vertex shader, like so:
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 uv_coords;
layout(location = 2) in vec3 normal;
uniform mat4 mvpMatrix;
uniform mat4 mvMatrix;
uniform vec3 lightpos;
out vec2 vs_uv;
out vec3 vs_normal;
out vec3 color;
void main()
{
gl_Position = mvpMatrix * vec4(pos,1.0);
vs_normal = normal;
vs_uv = uv_coords;
vec3 lightVector = normalize(lightpos - pos);
float diffuse = clamp(dot(normal,lightVector),0.0,1.0);
color = vec3(diffuse,diffuse,diffuse);
}
My fragment shader looks like this (ignore the "vs_normal", it is unused for now):
in vec2 vs_uv;
in vec3 vs_normal;
in vec3 color;
uniform sampler2D tex;
out vec4 frag_color;
void main()
{
frag_color = vec4(color,1.0) * texture(tex,vs_uv).rgba;
}
And my drawing code looks like this:
mat4 mvMatrix = view_matrix*model_matrix;
mat4 mvpMatrix = projection_matrix * mvMatrix;
glBindVertexArray(vertexBufferObjectID);
glUseProgram(shaderProgram);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
for (int x = 0; x < NUM_ROOMS_X; x++)
{
for (int z = 0; z < NUM_ROOMS_Z; z++)
{
//int x = int(std::round(position.x / ROOM_SIZE_X_MAX));
//int z = int(std::round(position.z / ROOM_SIZE_Z_MAX));
Room rm = room_array[x][z];
glBindBuffer(GL_ARRAY_BUFFER, rm.vertex_buffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.uv_buffer);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glBindBuffer(GL_ARRAY_BUFFER, rm.normal_buffer);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glUniformMatrix4fv(mvpMatrixID, 1, GL_FALSE, &mvpMatrix[0][0]);
glUniformMatrix4fv(mvMatrixID, 1, GL_FALSE, &mvMatrix[0][0]);
glUniform3fv(light_ID, 3, &rm.light_pos[0]); //Here is where I'm setting the new light position. It looks like this is ignored
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, rm.texture);
glUniform1i(texture_ID, 0);
glDrawArrays(GL_QUADS, 0, rm.vertices.size());
}
}
glUseProgram(0);
glBindVertexArray(0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindTexture(GL_TEXTURE_2D, 0);
However, here is what the result looks like (I've modified my drawing code to draw a box where each light is located, and I've circled the room at position (0,0)):
http://imgur.com/w4uPMD6
As you can see, it looks like only the light at position (0,0) affects any of the rooms on the map; the other lights are simply ignored. I know that the lights are positioned correctly, because the boxes I use to show the positions are correct. I think that even though I'm setting the new light_pos, it isn't going through for some reason. Any ideas?
One thing that you are doing, which is not very common, is passing the light position as a vertex attribute. Optimally, you should pass it to the shader as a uniform variable, just as you do with the matrices. But I doubt that is the problem here.
I believe your problem is that you are doing the light calculations in different spaces. The vertices of the surfaces that you draw are in object/model space, while, I'm guessing, your light is located at a point defined in world space. Try multiplying your light position by the inverse of the model matrix you are applying to the vertices. I'm not familiar with GLM, but I figure there must be an inverse() function in it:
glm::vec3 light_pos_object_space = glm::vec3(glm::inverse(model_matrix) * glm::vec4(rm.light_pos, 1.0f));
glVertexAttrib3fv(light_ID, &light_pos_object_space[0]);
Figured out my problem. I was calling this function:
glUniform3fv(light_ID, 3, &rm.light_pos[0]);
When I should have been calling this:
glUniform3fv(light_ID, 1, &rm.light_pos[0]);
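For reference, the second argument of glUniform3fv is the number of vec3 elements to upload, not the component count, so a count of 3 is only appropriate when the uniform is an array; a sketch of both cases (the array variant is hypothetical):
// uniform vec3 lightpos;      -> one vec3, count = 1
glUniform3fv(light_ID, 1, &rm.light_pos[0]);
// uniform vec3 lightpos[3];   -> three vec3s, count = 3 (hypothetical array case)
// glUniform3fv(light_ID, 3, &light_positions[0][0]);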