How to update an array of matrices in a GLSL shader - C++

I'm currently working with skeletal animation and I'm really close to getting it working. Currently, I have a struct with an array of 100 matrices (so that I can have at most 100 joints), like so:
struct skelShader {
glm::mat4 currentJointTrans[100];
};
This struct should be bound in the shader; I've done it like this:
glGenBuffers(1, &sksBuff);
glBindBuffer(GL_UNIFORM_BUFFER, sksBuff);
// bind buffer to work further with it...
// allocate memory for the buffer in the GPU
glBufferData(GL_UNIFORM_BUFFER, sizeof(skelShader), NULL, GL_STATIC_DRAW);
// because we hard-coded "binding=4" in the shader code we can do this:
// bind Uniform Buffer to binding point 4 (without caring about index of UBO)
glBindBufferBase(GL_UNIFORM_BUFFER, 4, sksBuff);
// good practice, unbind buffer
glBindBuffer(GL_UNIFORM_BUFFER, 0);
sksBuff is just a GLuint.
I fill this array with new values every frame; these values are the new transformations for the joints. I do it like this:
for (int i = 0; i < skeleton.size(); i++) {
globalSkelInfo.currentJointTrans[i] = skeleton[i]->transformMat[currentFrame - 1] * skeleton[i]->globalBindPosMat;
}
This is working correctly for the root joint, but the rest of the joints/mesh remain in bind pose. The problem is probably in how I update the array. Currently I do it like this in the render function, after I've done the multiplication for each joint:
for (int i = 0; i < skeleton.size(); i++) {
glUniformMatrix4fv(glGetUniformLocation(aShaderProgram, ("currentJointTrans[" + std::to_string(i) + "]").c_str()),
1, GL_FALSE, glm::value_ptr(globalSkelInfo.currentJointTrans[i]));
}
After this I draw. The root joint's values seem to be moving correctly, but the rest of the mesh stays in bind pose and doesn't move. In the vertex shader I use the matrices like this:
#version 440
const int maxJoints = 100;
const int maxWeights = 4;
layout(location = 0) in vec3 vertex_position;
layout(location = 1) in vec2 vertex_UV;
layout(location = 2) in vec3 vertex_normal;
layout(location = 3) in vec4 vertex_weight;
layout(location = 4) in ivec4 vertex_controllers;
out vec2 outUVs;
out vec3 outNorm;
layout(binding = 3 , std140) uniform uniformBlock
{
vec3 camPos;
mat4 world;
mat4 LookAt;
mat4 projection;
mat4 MVP;
};
layout(binding = 4 , std140) uniform animationStruct
{
mat4 currentJointTrans[maxJoints];
};
void main() {
vec4 finalModelPos = vec4(0.0);
vec4 finalNormal = vec4(0.0);
for (int i = 0; i < 4; i++) {
mat4 jointTrans = currentJointTrans[vertex_controllers[i]];
vec4 posePos = jointTrans * vec4(vertex_position, 1.0);
finalModelPos += posePos * vertex_weight[i];
vec4 worldNormal = jointTrans * vec4(vertex_normal, 0.0);
finalNormal += worldNormal * vertex_weight[i];
}
gl_Position = MVP * finalModelPos;
outNorm = finalNormal.xyz;
outUVs = vertex_UV;
}
My theory is that the updating of the struct skelShader with my currentJointTrans array is incorrect. Any tips on how I should do this instead?

glUniform* calls cannot set data in uniform buffers. Indeed, the whole point of uniform buffers is that the uniform data comes from a buffer object. That's why you had to create one.
So if you want to set the uniform data for a uniform block, you set that data into the buffer object.
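For example, after filling globalSkelInfo.currentJointTrans each frame, the whole struct can be copied into the buffer object with glBufferSubData. A minimal sketch, reusing sksBuff, skelShader and globalSkelInfo from the question (an array of mat4 under std140 has the same 64-byte stride as a tightly packed glm::mat4 array, so the struct can be uploaded as-is):
// bind the UBO and overwrite its contents with this frame's joint matrices
glBindBuffer(GL_UNIFORM_BUFFER, sksBuff);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(skelShader), &globalSkelInfo);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
Since the buffer is rewritten every frame, GL_DYNAMIC_DRAW is a more appropriate usage hint than GL_STATIC_DRAW when the buffer is first allocated.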


How to provide 14 float values per vertex to the shader?

I have downloaded a model and exported it as .fbx. The model contains several animations (6 to be precise), and I want to have one of them displayed. Following Anton Gerdelan's book on OpenGL I wrote an importer, which uses Assimp to parse the model, buffers the relevant vertex data, and retrieves the offset matrices needed for the animation.
Not having much experience with skeletal animation, I think I've been able to make the necessary changes to Anton's importer so that it can work on the more complex model that I need. However, the importer assumes that each vertex is only influenced by 1 bone, which unfortunately is not the case.
After some tinkering, I figured out that each vertex of the model can be influenced by at most 14 bones at a time. Since I am not sure how to pass 14 bone IDs and 14 weights per vertex to the shader, I tried changing the code to accommodate up to 4 bones at a time. This is the code that parses the bone IDs and weights and buffers them:
*bone_count = (int)mesh->mNumBones;
char bone_names[256][64];
struct vertexdata {
int IDs[4];
float Weights[4];
int ptr;
};
vector<vertexdata> vdata;
vdata.resize(*point_count);
for (int i = 0; i < *point_count; i++) {
vdata[i].ptr = 0;
}
for (int b_i = 0; b_i < *bone_count; b_i++) {
const aiBone* bone = mesh->mBones[b_i];
strcpy(bone_names[b_i], bone->mName.data);
printf("bone_names[%i]=%s\n", b_i, bone_names[b_i]);
bone_offset_mats[b_i] = convert_assimp_matrix(bone->mOffsetMatrix);
//getting weights for each bone
int num_weights = (int)bone->mNumWeights;
for (int w_i = 0; w_i < num_weights; w_i++) {
aiVertexWeight weight = bone->mWeights[w_i];
int vid = weight.mVertexId;
float vweight = weight.mWeight;
if (vdata[vid].ptr < 4) {
vdata[vid].IDs[vdata[vid].ptr] = b_i;
vdata[vid].Weights[vdata[vid].ptr] = vweight;
vdata[vid].ptr++;
}
int vertex_id = (int)weight.mVertexId;
}
}
//buffering bone id data
GLuint vbo1;
glGenBuffers(1, &vbo1);
glBindBuffer(GL_ARRAY_BUFFER, vbo1);
glBufferData(GL_ARRAY_BUFFER, sizeof(vdata[0]) * vdata.size(), &vdata[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(3);
glVertexAttribIPointer(3, 4, GL_INT, sizeof(vertexdata), (const GLvoid*)0);
glEnableVertexAttribArray(4);
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, sizeof(vertexdata), (const GLvoid*)16);
and in the shaders:
vertex shader
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 norm;
layout (location = 2) in vec2 UV;
layout (location = 3) in ivec4 boneIDs;
layout (location = 4) in vec4 Weights;
uniform mat4 view, projection, model;
uniform mat4 bone_matrices[40];
out vec2 tCoords;
void main()
{
mat4 boneTransform = bone_matrices[boneIDs[0]] * Weights[0];
boneTransform += bone_matrices[boneIDs[1]] * Weights[1];
boneTransform += bone_matrices[boneIDs[2]] * Weights[2];
boneTransform += bone_matrices[boneIDs[3]] * Weights[3];
tCoords = UV;
gl_Position = projection * view * boneTransform * model * vec4(pos, 1.0);
}
fragment shader
#version 330 core
in vec2 tCoords;
out vec4 fragColour;
uniform sampler2D tex;
void main()
{
fragColour = texture(tex, tCoords);
}
The model is rendered properly, but I am not observing any movement. Again, not knowing much about skeletal animation, I can only assume that it's because I haven't included every bone that influences each vertex, along with the corresponding weight. However, vertex attributes only go up to vec4, i.e. 4 values per attribute. How can I pass 14 IDs and 14 weights? Could this be the cause of the animation not working?
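(Not from the original post.) A common way to handle more influences than the shader accepts is to keep only the four largest weights per vertex and renormalize them so they still sum to 1. A minimal sketch building on the vertexdata struct above; it assumes IDs and Weights are zero-initialized before the bone loop runs, and the helper names are hypothetical:
// hypothetical helper: keep only the 4 most influential bones for a vertex
void addWeight(vertexdata& v, int boneID, float w) {
    // find the smallest weight currently stored
    int smallest = 0;
    for (int j = 1; j < 4; j++)
        if (v.Weights[j] < v.Weights[smallest]) smallest = j;
    // replace it if the new influence is stronger
    if (w > v.Weights[smallest]) {
        v.IDs[smallest] = boneID;
        v.Weights[smallest] = w;
    }
}
// hypothetical helper: renormalize after all bones have been processed
void normalizeWeights(vertexdata& v) {
    float sum = v.Weights[0] + v.Weights[1] + v.Weights[2] + v.Weights[3];
    if (sum > 0.0f)
        for (int j = 0; j < 4; j++) v.Weights[j] /= sum;
}
With that, the inner weight loop would call addWeight(vdata[vid], b_i, vweight) instead of appending the first four weights it happens to see, and normalizeWeights would run once per vertex after the bone loop.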

Passing array of mat4 to GLSL Shader uniform

I ran into some trouble setting up the animation shader for my OpenGL application. Basically it takes in an array of 50 glm::mat4 matrices and should set them as a uniform in my GLSL shader. Yet only the first value is actually sent to the shader; all other array entries in the shader are set to 0.
I think the problem occurs when passing from C++ to GLSL:
class model{
...
glm::mat4 finalBoneTransforms[50];
...
}
model::draw(){
//Set Joints
int jointLoc = glGetUniformLocation(shaderID, "jointTransforms");
glUniformMatrix4fv(jointLoc, 50 , GL_FALSE, glm::value_ptr(finalBoneTransforms[0]));
...
}
So how come only the first value is passed? Shouldn't OpenGL take in the 50 elements stored in contiguous memory starting at the first element, which is referenced via value_ptr?
I would highly prefer to use arrays instead of vectors to make sure not to suffer any pointer invalidation due to reallocation. Aren't elements in an array stored in contiguous memory? Any other obvious mistakes causing that weird behaviour?
Edit: Here's the shader code:
#version 330 core
const int MAX_JOINTS = 50;
const int MAX_WEIGHTS = 4;
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormals;
layout (location = 2) in vec2 aTexCoord;
layout (location = 3) in vec4 aBoneWeight;
layout (location = 4) in ivec4 aBoneIndex;
out vec2 texCoords;
uniform mat4 jointTransforms[MAX_JOINTS];
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main(void) {
vec4 totalLocalPos = vec4(0.0);
vec4 totalNormal = vec4(0.0);
for (int i = 0; i<MAX_WEIGHTS; i++) {
mat4 jointTransform = jointTransforms[aBoneIndex[i]];
vec4 posePosition = jointTransform * vec4(aPos, 1.0);
totalLocalPos += posePosition * aBoneWeight[i];
}
gl_Position = projection*view * totalLocalPos;
texCoords = aTexCoord;
};
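(Not part of the original post.) For reference, glUniformMatrix4fv with a count of 50 does upload 50 consecutive matrices starting at the given location, and a plain C++ array is contiguous, so the upload pattern itself should be valid. A minimal sanity-check sketch, reusing shaderID and finalBoneTransforms from the post; it queries the array as "jointTransforms[0]", which is the name the GL reports for the first element of a uniform array, and checks that the location was actually found:
model::draw() {
    // glUniform* calls only affect the currently bound program
    glUseProgram(shaderID);
    // returns -1 if the uniform was optimized out or the name is wrong
    int jointLoc = glGetUniformLocation(shaderID, "jointTransforms[0]");
    if (jointLoc != -1) {
        // one call uploads all 50 contiguous matrices
        glUniformMatrix4fv(jointLoc, 50, GL_FALSE, glm::value_ptr(finalBoneTransforms[0]));
    }
    ...
}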

memoryBarrier() behaving unexpectedly in Geometry Shader

I am trying to understand how memoryBarrier() works in OpenGL 4.4.
I tried the following once with a texture image and once with a Shader Storage Buffer Object (SSBO).
The basic idea is to create an array of flags, one for each object that needs to be rendered in my scene, and then perform a simple test in the geometry shader. For each primitive in the GS, if at least one vertex passes the test, it sets the corresponding flag in the array at the location specified by this primitive's object ID (object IDs are passed to the GS as vertex attributes).
I then perform a memoryBarrier() to make sure all threads have written their values.
Next, I have all primitives read from the flags array and only emit a vertex if the flag is set.
Here is some code from my shaders to explain:
// Vertex Shader:
#version 440
uniform mat4 model_view;
uniform mat4 projection;
layout(location = 0) in vec3 in_pos;
layout(location = 1) in vec3 in_color;
layout(location = 2) in int lineID;
out VS_GS_INTERFACE
{
vec4 position;
vec4 color;
int lineID;
} vs_out;
void main(void) {
vec4 pos = vec4(in_pos, 1.0);
vs_out.position = pos;
vs_out.color = vec4(in_color, 1.0);
vs_out.lineID = lineID;
gl_Position = projection * model_view * pos;
}
and here is a simple geometry shader in which I use only a simple test based on lineID (I realize this test doesn't need a shared data structure, but this is just to test program behavior):
#version 440
layout (lines) in;
layout (line_strip, max_vertices = 2) out;
layout (std430, binding = 0) buffer BO {
int IDs[];
};
in VS_GS_INTERFACE
{
vec4 position;
vec4 color;
int lineID;
} gs_in[];
out vec4 f_color;
void main()
{
if(gs_in[0].lineID < 500)
{
IDs[gs_in[0].lineID] = 1;
}
else
{
IDs[gs_in[0].lineID] = -1;
}
memoryBarrier();
// read back the flag value
int flag = IDs[gs_in[0].lineID];
if ( flag > 0)
{
int n;
for (n = 0; n < gl_in.length(); n++)
{
f_color = gs_in[n].color;
gl_Position = gl_in[n].gl_Position;
EmitVertex();
}
}
}
No matter what value I put instead of 500, this code always renders only 2 objects. If I change the condition for rendering in the GS to if (flag >= 0), it seems to me that all objects are rendered, which means the -1 is never written by the time these IDs are read back by the shader.
Can someone please explain why the writes are not coherently visible to all shader invocations despite the memoryBarrier() and what would be the most efficient work around to get this to work?
Thanks.
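(Not from the original thread, but for context.) memoryBarrier() only orders the memory operations of the invocation that calls it so that they become visible to invocations that run afterwards; it does not wait for other invocations to execute, and geometry shader invocations within a draw call run in no guaranteed order. A commonly used workaround is to split the work into two passes and synchronize between them on the CPU with glMemoryBarrier. A minimal sketch, where markProgram (writes the flags), drawProgram (reads the flags and emits geometry) and vertexCount are placeholders:
// pass 1: only write the IDs[] flags, no fragments needed
glUseProgram(markProgram);
glEnable(GL_RASTERIZER_DISCARD);
glDrawArrays(GL_LINES, 0, vertexCount);
glDisable(GL_RASTERIZER_DISCARD);
// make the SSBO writes from pass 1 visible to shader reads in pass 2
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
// pass 2: draw for real, reading flags that are now guaranteed to be visible
glUseProgram(drawProgram);
glDrawArrays(GL_LINES, 0, vertexCount);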

Skeletal Animation using ASSIMP and GLSL: bone uniform array size

I'm working on an ASSIMP skeletal animation loader and renderer, and right now all the data is correctly loaded and interpolated at the current time frame. However, there is still one part that isn't working as it should, and that's the vertex shader stage.
Via a VBO I pass in two vec4s that contain the bone IDs and the weights for each vertex (up to a maximum of 4 bones/weights per vertex) and the vertex shader has a matrix array of 100 bone transformations (pre-calculated per frame) that are indexed via the bone IDs.
However, it seems that the bones uniform doesn't contain the proper transformations. For debugging purposes I colored the model with the weight values and with the bone ID values, and both produce a color (and thus valid values). However, when I transform my vertex via the bone transformation and color the model with the result, the entire model is colored black, meaning the transformation matrices are all 0.0. So they're not initialized properly.
I think the problem is with passing the matrices to the uniform array, or perhaps with the maximum number of uniforms allowed (I also tried setting the number of uniform matrices to 32, the number of bones in the current model, but without effect).
Before passing the information to the shader, the transformation matrices are indeed valid matrices (not identity/empty matrices) so the fault should probably be in the GLSL shader or the passing of the uniforms.
The following code is from the vertex shader:
#version 330
layout (location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec3 tangent;
layout(location = 3) in vec3 color;
layout(location = 4) in vec2 texCoord;
layout(location = 5) in ivec4 boneIDs;
layout(location = 6) in vec4 weights;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform mat4 bones[100];
out vec2 TexCoord;
out vec4 colorz;
void main()
{
vec4 newPos = vec4(position, 1.0);
colorz = vec4(0.0, 1.0, 0.0, 1.0);
if (weights != vec4(0.0, 0.0, 0.0, 0.0))
{
mat4 boneTransform = bones[boneIDs[0]] * weights[0];
boneTransform += bones[boneIDs[1]] * weights[1];
boneTransform += bones[boneIDs[2]] * weights[2];
boneTransform += bones[boneIDs[3]] * weights[3];
// newPos = boneTransform * vec4(position, 1.0);
vec4 test = vec4(1.0);
colorz = boneTransform * test;
// newPos = boneTransform * newPos;
}
TexCoord = texCoord;
gl_Position = projection * view * model * newPos;
}
The following snippet of code passes the matrix data to the GLSL shader:
// Sets bone transformation matrices
void Shader::SetBoneMatrix(GLint index, aiMatrix4x4 matrix)
{
glm::mat4 mat = glm::transpose(glm::make_mat4(&matrix.a1));
glUniformMatrix3fv(boneLocations[index], 1, GL_FALSE, glm::value_ptr(mat));
}
Also the code that gets all the uniform locations of the bones array:
for(unsigned int i = 0; i < 100; i++)
{
string name = "bones[";
string number;
stringstream ss;
ss << i;
ss >> number;
name += number;
name += ']';
boneLocations[i] = glGetUniformLocation(this->program, name.c_str());
}
Okay, via glslDevil I came across a continuous GL_INVALID_OPERATION error when setting the bone matrices in the shader via glUniformMatrix. The origin of the problem was indeed at the stage where the program passes the information along to the shader.
It is quite a stupid mistake actually, since I was using glUniformMatrix3fv instead of glUniformMatrix4fv. Changing this did indeed solve the problem and the animations are working perfectly right now.
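For clarity, a minimal sketch of the fix described above, keeping the same SetBoneMatrix signature from the question:
// Sets bone transformation matrices (corrected: a 4x4 matrix needs glUniformMatrix4fv)
void Shader::SetBoneMatrix(GLint index, aiMatrix4x4 matrix)
{
    // Assimp matrices are row-major; make_mat4 + transpose yields the
    // column-major layout OpenGL expects
    glm::mat4 mat = glm::transpose(glm::make_mat4(&matrix.a1));
    glUniformMatrix4fv(boneLocations[index], 1, GL_FALSE, glm::value_ptr(mat));
}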

GLSL Instancing - Max number of inputs for vertex data?

I am trying to implement instancing in my OpenGL program. I got it to work, and then decided to make my GLSL code more efficient by sending the Model-View-Projection matrix as input to the GLSL program, so that the CPU computes it for each instance, as opposed to the GPU. Here is my vertex shader code (most of it is irrelevant to my question):
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexColor;
layout(location = 3) in vec3 vertexNormal_modelspace;
layout(location = 6) in mat4 models;
layout(location = 10) in mat4 modelsV;
layout(location = 14) in mat4 modelsVP;
// Output data ; will be interpolated for each fragment.
out vec3 newColor;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 MV;
uniform mat4 P;
uniform mat4 V;
uniform mat4 M;
uniform int num_lights;
uniform vec3 Lights[256];
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = P * modelsV * vec4(vertexPosition_modelspace,1);
// Position of the vertex, in worldspace : M * position
Position_worldspace = (models * vec4(vertexPosition_modelspace,1)).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( modelsV * vec4(vertexPosition_modelspace,1)).xyz;
EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;
// Normal of the the vertex, in camera space
Normal_cameraspace = ( modelsV * vec4(vertexNormal_modelspace,0)).xyz;
// UV of the vertex. No special space for this one.
newColor = vertexColor;
}
The above code works, but only because I'm not using the last input, modelsVP, to calculate gl_Position. If I do use it (instead of computing P * modelsV), the instances won't be drawn, and I get this error:
Linking program
Compiling shader : GLSL/meshColor.vertexshader
Compiling shader : GLSL/meshColor.fragmentshader
Linking program
Vertex info
0(10) : error C5102: input semantic attribute "ATTR" has too big of a numeric index (16)
0(10) : error C5102: input semantic attribute "ATTR" has too big of a numeric index (16)
0(10) : error C5041: cannot locate suitable resource to bind variable "modelsVP". Possibly large array.
I'm sure I'm linking it correctly in my OpenGL code, because if I swap the input location of modelsVP with modelsV so that it is 10 instead of 14, I am able to use it, but not modelsV. Is there a maximum number of inputs you can have for your vertex shader? I really can't think of any other reason why I would get this error...
I'll include more of my OpenGL code that is relevant here, but I'm pretty sure that it's correct (it's not all in the same class or method):
// Buffer data for VBO. The numbers must match the layout in the GLSL code.
#define position 0
#define uv 1
#define color 2
#define normal 3
#define tangent 4
#define bitangent 5
#define model 6 // 4x4 matrices take 4 positions
#define modelV 10
#define modelVP 14
#define num_buffers 18
GLuint VBO[num_buffers];
glGenBuffers(num_buffers, VBO);
for( int i=0; i<ModelMatrices.size(); i++ )
{
mvp.push_back( projection * view * ModelMatrices.at(i) );
mv.push_back( view * ModelMatrices.at(i) );
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[model]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * ModelMatrices.size(), &ModelMatrices[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4 ; i++) {
glEnableVertexAttribArray(model + i);
glVertexAttribPointer(model + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
(const GLvoid*)(sizeof(GLfloat) * i * 4));
glVertexAttribDivisor(model + i, 1);
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[modelV]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * mv.size(), &mv[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4 ; i++) {
glEnableVertexAttribArray(modelV + i);
glVertexAttribPointer(modelV + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
(const GLvoid*)(sizeof(GLfloat) * i * 4));
glVertexAttribDivisor(modelV + i, 1);
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[modelVP]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * mvp.size(), &mvp[0], GL_DYNAMIC_DRAW);
for (unsigned int i = 0; i < 4 ; i++) {
glEnableVertexAttribArray(modelVP + i);
glVertexAttribPointer(modelVP + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4), (const GLvoid*)(sizeof(GLfloat) * i * 4));
glVertexAttribDivisor(modelVP + i, 1);
}
OpenGL only mandates that implementations offer 16 4-component vertex attributes (see GL_MAX_VERTEX_ATTRIBS), so only locations 0 through 15 are guaranteed to exist. A mat4 vertex attribute consumes 4 consecutive 4-component locations, so modelsVP at location 14 occupies locations 14 through 17; locations 16 and 17 are out of range on implementations that only support 16 vertex attributes, which is exactly what the error message is complaining about.
You are using too many vertex attributes. Here's how to reduce the number of attributes without changing anything much about your code (and any functional changes are improvements). The following assumes that models is the "model-to-world" matrix, modelsV is the "model-to-camera" matrix, and that modelsVP is the "model-to-projection" matrix:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexColor;
layout(location = 3) in vec3 vertexNormal_modelspace;
layout(location = 6) in mat4 modelsV;
// Output data ; will be interpolated for each fragment.
out vec3 newColor;
//The fragment shader should work in *camera* space, not world space.
out vec4 Position_cameraspace;
out vec3 Normal_cameraspace;
//out vec3 EyeDirection_cameraspace; Can be computed from Position_cameraspace in the FS.
// Values that stay constant for the whole mesh.
uniform mat4 P;
void main()
{
Position_cameraspace = modelsV * vec4(vertexPosition_modelspace, 1.0);
gl_Position = P * Position_cameraspace;
Normal_cameraspace = ( modelsV * vec4(vertexNormal_modelspace,0)).xyz;
newColor = vertexColor;
}
See? Isn't that much simpler? Fewer uniforms in the vertex shader, fewer outputs to the fragment shader, fewer math computations, and fewer vertex attributes.
All you need to do is change your fragment shader to use the camera-space position, rather than the world-space position. Which should be a reasonably easy change.
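As an illustration (a minimal sketch, not the original fragment shader), the eye direction can be rebuilt from the interpolated camera-space position, since in camera space the eye sits at the origin:
#version 330 core
in vec3 newColor;
in vec4 Position_cameraspace;
in vec3 Normal_cameraspace;
out vec4 fragColour;
void main()
{
    // direction from the fragment towards the eye (the origin in camera space)
    vec3 EyeDirection_cameraspace = normalize(-Position_cameraspace.xyz);
    vec3 N = normalize(Normal_cameraspace);
    // placeholder shading: simple diffuse term using the eye direction as the light
    float diff = max(dot(N, EyeDirection_cameraspace), 0.0);
    fragColour = vec4(newColor * diff, 1.0);
}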