I'm having difficulty understanding how glBindBufferRange offset/alignment works in the NVIDIA example project gl_commandlist_basic. I've read that the offset needs to be a multiple of GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (256 on my hardware), and that offset and alignment are very important with glBindBufferRange. I have an example UBO that works with mat4/vec4 and a non-working example with mat4/mat3/vec4. The UBO doesn't add up to a multiple of 256 in either case. I'm trying to send vec4(0.f, 1.f, 0.f, 1.f).
If mat4 = 64 bytes, mat3 = 36 bytes, and vec4 = 16 bytes, then the working example has 64 + 16 = 80 bytes, which isn't a multiple of 256. The non-working example has 64 + 36 + 16 = 116 bytes.
NV uses an inline function called uboAligned, which is defined as
inline size_t uboAligned(size_t size) { return ((size + 255) / 256) * 256; }
Removing this from the working/non-working examples made no difference either way.
I assume I need to add some "padding" to the UBO in the form of a float/vec2/vec3/vec4, etc. How do I determine the correct amount of padding I need if I want to use the mat4/mat3/vec4 UBO?
/* APPLICATION */
typedef struct
{
    glm::mat4 MM;
    // glm::mat3 NM;
    glm::vec4 Cs;
} myData0;

GLuint objectUBO;
glCreateBuffers(1, &objectUBO);
glNamedBufferData(objectUBO, uboAligned(sizeof(myData0) * 2), 0, GL_STATIC_DRAW);

for (unsigned int i = 0; i < allObj.size(); ++i)
{
    myData0 myDataTemp;
    myDataTemp.Cs = glm::vec4(0.f, 1.f, 0.f, 1.f);
    glNamedBufferSubData(objectUBO, sizeof(myData0) * i, sizeof(myData0), &myDataTemp);
}

// hot loop
for (unsigned int i = 0; i < allObj.size(); ++i)
{
    glBindBufferRange(GL_UNIFORM_BUFFER, 1, objectUBO, uboAligned(sizeof(myData0)) * i, sizeof(myData0));
    // draw
}
/* HW */
out vec4 Ci;
struct ObjectData
{
mat4 MM;
// mat3 NM;
vec4 Cs;
};
layout (std140, binding = 1) uniform objectBuffer { ObjectData object; };
void main()
{
Ci = object.Cs;
}
Simple typo with glNamedBufferData. Changing from
glNamedBufferData(objectUBO, uboAligned(sizeof(myData0) * 2), 0, GL_STATIC_DRAW);
to
glNamedBufferData(objectUBO, uboAligned(sizeof(myData0)) * 2, 0, GL_STATIC_DRAW);
fixes the offset/alignment problems.
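To spell out the arithmetic (with sizeof(myData0) = 64 + 16 = 80 bytes): the broken call allocates uboAligned(80 * 2) = uboAligned(160) = 256 bytes, yet the hot loop binds object i at offset uboAligned(80) * i = 256 * i, so for i = 1 the requested range [256, 336) falls entirely outside the buffer. The corrected call allocates uboAligned(80) * 2 = 512 bytes, one aligned 256-byte slot per object, which matches the bind offsets.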
OpenGL uses a particular alignment. Assuming you are using the std140 layout, for example, this is a structure defined in C++:
struct PointLight
{
    glm::vec3 position;
    int padding;     // this is needed for alignment,
    glm::vec3 color; // because a vec3 has to be aligned
    float intensity; // no padding needed: a float can be read directly without alignment
};
which you can pass to a uniform block declared like this in the shader:
uniform light
{
vec3 Position;
vec3 Color;
float Intensity;
} PointLight;
I would test something like:
struct ObjectData
{
mat4 MM;
mat3 NM;
vec3 padding; //I think you have to add 3 floats of padding
vec4 Cs;
};
But I couldn't find more info on it, and I don't remember where I found it in the first place.
There are different types of memory layout, though. std140 defines its memory layout in the spec, and I advise you to use it; if you don't, the shared layout will be used and you have to query the layout. You can use OpenGL to query the layout to find out what padding you should add (see BlockLayoutQuery).
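For the shared layout, a minimal sketch of such a query (assuming a linked program handle `program`; note that for an instanced block like `light` above, members are queried with the block name, not the instance name, as the prefix):
const GLchar* names[3] = { "light.Position", "light.Color", "light.Intensity" };
GLuint indices[3];
GLint offsets[3];
glGetUniformIndices(program, 3, names, indices);
glGetActiveUniformsiv(program, 3, indices, GL_UNIFORM_OFFSET, offsets);
// offsets[i] now tells you where each member lives inside the block's buffer.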
Concerning glBindBufferRange, I've never heard about the 256-byte alignment. Here is an example of how I use it:
const int pointLightCount = SomeNumber;
int pointLightBufferSize = sizeof(PointLight) * pointLightCount + sizeof(int) * 4;
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo[0]);
glBufferData(GL_SHADER_STORAGE_BUFFER, pointLightBufferSize, 0, GL_DYNAMIC_COPY);
void * lightBuffer = glMapBuffer(GL_SHADER_STORAGE_BUFFER, GL_WRITE_ONLY);
((int*) lightBuffer)[0] = pointLightCount;
for (int i = 0; i < pointLightCount; ++i) {
PointLight p = { something };
((PointLight*) ((int*) lightBuffer + 4))[i] = p;
}
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 0, ssbo[0], 0, pointLightBufferSize); //Bind all the buffer
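(For completeness: the alignment mentioned in the question is real. Offsets passed to glBindBufferRange must be multiples of an implementation-dependent minimum, which you can query; the example above sidesteps it because its offset is 0.)
GLint uboAlign = 0, ssboAlign = 0;
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uboAlign);          // often 256 on NVIDIA
glGetIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &ssboAlign);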
I decided to post this as I now believe the problem isn't simply stemming from the shader program, but most probably from the OBJ import and mesh initialization process. I wanted to write a quick Lambert shader to finally get stuff appearing on the screen. The final result is riddled with interesting artifacts and visibility issues.
It appears as though the vertex positions are encoded correctly, but either the normals or the indices are completely messed up.
Vertex Shader
#version 330
// MeshVertex
layout(location = 0) in vec3 a_Position;
layout(location = 1) in vec3 a_Normal;
layout(location = 2) in vec2 a_UV;
layout(location = 3) in vec3 a_Tangent;
layout(location = 4) in vec3 a_BiTangent;
uniform mat4 View;
uniform mat4 Projection;
uniform mat4 Model;
out VS_out
{
vec3 fragNormal;
vec3 fragPos;
} vs_out;
void main()
{
mat3 normalMatrix = mat3(transpose(inverse(Model)));
vec4 position = vec4(a_Position, 1.f);
vs_out.fragPos = (Model * position).xyz;
vs_out.fragNormal = normalMatrix * a_Normal;
gl_Position = Projection * View * Model * position;
}
I initially thought I was passing the vertex normals incorrectly to the fragment shader. I have seen some samples multiply the vertex position by the ModelView matrix. That seems non-intuitive to me: my lights are positioned in world space, so I need the world-space coordinates of my vertices, hence the multiplication by the Model matrix only. If there are no red flags in this thought process, here is the fragment shader:
#version 330
struct LightSource
{
vec3 position;
vec3 intensity;
};
uniform LightSource light;
in VS_out
{
vec3 fragNormal;
vec3 fragPos;
} fs_in;
struct Material
{
vec4 color;
vec3 ambient;
};
uniform Material material;
void main()
{
// just playing around with some values for now; don't worry, removing this still does not fix the issue
vec3 ambient = normalize(vec3(69, 111, 124));
vec3 norm = normalize(fs_in.fragNormal);
vec3 pos = fs_in.fragPos;
vec3 lightDir = normalize(light.position - pos);
float lambert = max(dot(norm, lightDir), 0.0);
vec3 illumination = (lambert * light.intensity) + ambient;
gl_FragColor = vec4(illumination * material.color.xyz, 1.f);
}
Now the main suspicion is how the OBJ is interpreted. I use the tinyobj importer for this. I mostly copied the sample code from their GitHub page and initialized my native vertex type using that data.
OBJ Import Code
bool Model::Load(const void* rawBinary, size_t bytes)
{
tinyobj::ObjReader reader;
if(reader.ParseFromString((const char*)rawBinary, ""))
{
// Fetch meshes
std::vector<MeshVertex> vertices;
std::vector<Triangle> triangles;
const tinyobj::attrib_t& attrib = reader.GetAttrib();
const std::vector<tinyobj::shape_t>& shapes = reader.GetShapes();
m_Meshes.resize(shapes.size());
m_Materials.resize(shapes.size());
// Loop over shapes; in our case, each shape corresponds to a mesh object
for(size_t s = 0; s < shapes.size(); s++)
{
// Loop over faces(polygon)
size_t index_offset = 0;
for(size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++)
{
// Num of face vertices for face f
int fv = shapes[s].mesh.num_face_vertices[f];
ASSERT(fv == 3, "Only supporting triangles for now");
Triangle tri;
// Loop over vertices in the face.
for(size_t v = 0; v < fv; v++) {
// access to vertex
tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v];
tinyobj::real_t vx = 0.f;
tinyobj::real_t vy = 0.f;
tinyobj::real_t vz = 0.f;
tinyobj::real_t nx = 0.f;
tinyobj::real_t ny = 0.f;
tinyobj::real_t nz = 0.f;
tinyobj::real_t tx = 0.f;
tinyobj::real_t ty = 0.f;
vx = attrib.vertices[3 * idx.vertex_index + 0];
vy = attrib.vertices[3 * idx.vertex_index + 1];
vz = attrib.vertices[3 * idx.vertex_index + 2];
if(attrib.normals.size())
{
nx = attrib.normals[3 * idx.normal_index + 0];
ny = attrib.normals[3 * idx.normal_index + 1];
nz = attrib.normals[3 * idx.normal_index + 2];
}
if(attrib.texcoords.size())
{
tx = attrib.texcoords[2 * idx.texcoord_index + 0];
ty = attrib.texcoords[2 * idx.texcoord_index + 1];
}
// Populate our native vertex type
MeshVertex meshVertex;
meshVertex.Position = glm::vec3(vx, vy, vz);
meshVertex.Normal = glm::vec3(nx, ny, nz);
meshVertex.UV = glm::vec2(tx, ty);
meshVertex.BiTangent = glm::vec3(0.f);
meshVertex.Tangent = glm::vec3(0.f);
vertices.push_back(meshVertex);
tri.Idx[v] = index_offset + v;
}
triangles.emplace_back(tri);
index_offset += fv;
// per-face material
//shapes[s].mesh.material_ids[f];
}
// Adding meshes should occur here!
m_Meshes[s] = std::make_unique<StaticMesh>(vertices, triangles);
// m_Materials[s] = ....
}
}
return true;
}
The way I understand OBJ, the notion of OpenGL indices does not equate to the face elements found in the OBJ. This is because each face element has different indices into the position, normal, and texcoord arrays. So instead, I just copy the vertex attributes indexed by the face element into my native MeshVertex structure -- this represents one vertex of my mesh; the corresponding face element ID is then simply the corresponding index for my index buffer object. In my case, I use a Triangle structure instead, but it's effectively the same thing.
The Triangle struct, if interested:
struct Triangle
{
uint32_t Idx[3];
Triangle(uint32_t v1, uint32_t v2, uint32_t v3)
{
Idx[0] = v1;
Idx[1] = v2;
Idx[2] = v3;
}
Triangle(const Triangle& Other)
{
Idx[0] = Other.Idx[0];
Idx[1] = Other.Idx[1];
Idx[2] = Other.Idx[2];
}
Triangle()
{
}
};
Other than that, I have no idea what could cause this problem; I am open to hearing new thoughts. Perhaps someone experienced understands what these artifacts signify. If you want to take a deeper dive, I can post the mesh initialization code as well.
EDIT:
So I tried importing an FBX format, and I encountered a very similar issue. I am now considering silly errors in my OpenGL code to initialize the mesh.
This initializes OpenGL buffers from arbitrary vertex data and a set of triangles to index it by:
void Mesh::InitBuffers(const void* vertexData, size_t size, const std::vector<Triangle>& triangles)
{
glGenVertexArrays(1, &m_vao);
glBindVertexArray(m_vao);
// Interleaved Vertex Buffer
glGenBuffers(1, &m_vbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, size, vertexData, GL_STATIC_DRAW);
// Index Buffer
glGenBuffers(1, &m_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * triangles.size(), triangles.data(), GL_STATIC_DRAW);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
Then I set up the layout of the vertex buffer using a BufferLayout structure that specifies the attributes we want.
void Mesh::SetBufferLayout(const BufferLayout& layout)
{
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
uint32_t stride = layout.GetStride();
int i = 0;
for(const BufferElement& element : layout)
{
glEnableVertexAttribArray(i);
glVertexAttribPointer(i++, element.GetElementCount(), GLType(element.Type), element.Normalized, stride, (void*)(element.Offset));
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
So in our case, the BufferLayout corresponds to the MeshVertex I populated, containing a Position (float3), Normal (float3), UV (float2), Tangent (float3), and BiTangent (float3). I can confirm via debugging that the strides, offsets, and other values coming from the BufferElement are exactly what I expect, so I am concerned with the nature of the OpenGL calls I am making.
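For cross-checking, this is what the layout should resolve to if written out by hand with offsetof (a debugging sketch against the MeshVertex fields populated in the import code, not my actual BufferLayout implementation):
// Hand-written equivalent of SetBufferLayout for MeshVertex.
const GLsizei stride = sizeof(MeshVertex);
glEnableVertexAttribArray(0); // a_Position
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, (void*)offsetof(MeshVertex, Position));
glEnableVertexAttribArray(1); // a_Normal
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (void*)offsetof(MeshVertex, Normal));
glEnableVertexAttribArray(2); // a_UV
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, stride, (void*)offsetof(MeshVertex, UV));
glEnableVertexAttribArray(3); // a_Tangent
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, stride, (void*)offsetof(MeshVertex, Tangent));
glEnableVertexAttribArray(4); // a_BiTangent
glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, stride, (void*)offsetof(MeshVertex, BiTangent));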
Alright, let us all just forget this ever happened. This is very embarrassing; everything was working fine after all. I simply "forgot" to call the following before rendering:
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
So understandably, all kinds of shapes were being rendered and culled in a completely random fashion. (Why is it not enabled by default?)
I have downloaded a model and have exported it as .fbx. The model contains several animations (6, to be precise), and I want to have one of them displayed. Following Anton Gerdelan's book on OpenGL, I wrote an importer which uses Assimp to parse the model, buffers the relevant vertex data, and retrieves the offset matrices needed for the animation.
Not having much experience with skeletal animation, I think I've been able to make the necessary changes to Anton's importer so that it can work on the more complex model that I need. However, the importer assumes that each vertex is only influenced by one bone, which unfortunately is not the case.
After some tinkering, I figured out that each vertex of the model can be influenced by at most 14 bones at a time. Since I am not sure how I could pass 14 pairs of bone ID and weight to the shader, I tried changing the code to accommodate up to 4 bones at a time. This is the code that parses the bone IDs and weights and buffers them:
*bone_count = (int)mesh->mNumBones;
char bone_names[256][64];
struct vertexdata {
int IDs[4];
float Weights[4];
int ptr;
};
vector<vertexdata> vdata;
vdata.resize(*point_count);
for (int i = 0; i < *point_count; i++) {
    vdata[i].ptr = 0;
    // zero all slots so vertices with fewer than 4 influences don't feed garbage to the shader
    for (int k = 0; k < 4; k++) { vdata[i].IDs[k] = 0; vdata[i].Weights[k] = 0.0f; }
}
for (int b_i = 0; b_i < *bone_count; b_i++) {
const aiBone* bone = mesh->mBones[b_i];
strcpy(bone_names[b_i], bone->mName.data);
printf("bone_names[%i]=%s\n", b_i, bone_names[b_i]);
bone_offset_mats[b_i] = convert_assimp_matrix(bone->mOffsetMatrix);
//getting weights for each bone
int num_weights = (int)bone->mNumWeights;
for (int w_i = 0; w_i < num_weights; w_i++) {
aiVertexWeight weight = bone->mWeights[w_i];
int vid = weight.mVertexId;
float vweight = weight.mWeight;
if (vdata[vid].ptr < 4) {
vdata[vid].IDs[vdata[vid].ptr] = b_i;
vdata[vid].Weights[vdata[vid].ptr] = vweight;
vdata[vid].ptr++;
}
}
}
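(A note on the truncation: after keeping at most 4 of the up-to-14 influences, the kept weights no longer sum to 1, so a renormalization pass like the sketch below is commonly added. This is hypothetical, not something my current code does.)
// Renormalize each vertex's kept weights so they sum to 1 again.
for (int i = 0; i < *point_count; i++) {
    float sum = 0.0f;
    for (int k = 0; k < vdata[i].ptr; k++) sum += vdata[i].Weights[k];
    if (sum > 0.0f) {
        for (int k = 0; k < vdata[i].ptr; k++) vdata[i].Weights[k] /= sum;
    }
}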
//buffering bone id data
GLuint vbo1;
glGenBuffers(1, &vbo1);
glBindBuffer(GL_ARRAY_BUFFER, vbo1);
glBufferData(GL_ARRAY_BUFFER, sizeof(vdata[0]) * vdata.size(), &vdata[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(3);
glVertexAttribIPointer(3, 4, GL_INT, sizeof(vertexdata), (const GLvoid*)0);
glEnableVertexAttribArray(4);
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, sizeof(vertexdata), (const GLvoid*)16);
and in the shaders:
vertex shader
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 norm;
layout (location = 2) in vec2 UV;
layout (location = 3) in ivec4 boneIDs;
layout (location = 4) in vec4 Weights;
uniform mat4 view, projection, model;
uniform mat4 bone_matrices[40];
out vec2 tCoords;
void main()
{
mat4 boneTransform = bone_matrices[boneIDs[0]] * Weights[0];
boneTransform += bone_matrices[boneIDs[1]] * Weights[1];
boneTransform += bone_matrices[boneIDs[2]] * Weights[2];
boneTransform += bone_matrices[boneIDs[3]] * Weights[3];
tCoords = UV;
gl_Position = projection * view * boneTransform * model * vec4(pos, 1.0);
}
fragment shader
#version 330 core
in vec2 tCoords;
out vec4 fragColour;
uniform sampler2D tex;
void main()
{
fragColour = texture(tex, tCoords);
}
The model is rendered properly, but I am not observing any movement. Again, not knowing much about skeletal animation, I can only assume that it's because I haven't included every bone that influences each vertex, along with the corresponding weight. However, when buffering the data, the shaders only accept up to a vec4, i.e. 4 values per vertex. How can I pass 14 IDs and 14 weights? Could this be the cause of the animation not working?
I'm currently working with skeletal animation and I'm really close to getting it working. Currently, I have a struct holding an array of 100 matrices (so that I can have at most 100 joints), like so:
struct skelShader {
glm::mat4 currentJointTrans[100];
};
The struct is bound in the shader; I've done it like this:
glGenBuffers(1, &sksBuff);
glBindBuffer(GL_UNIFORM_BUFFER, sksBuff);
// bind buffer to work further with it...
// allocate memory for the buffer in the GPU
glBufferData(GL_UNIFORM_BUFFER, sizeof(skelShader), NULL, GL_STATIC_DRAW);
// because we hard-coded "binding = 4" in the shader code we can do this:
// bind the uniform buffer to binding point 4 (without caring about the index of the UBO)
glBindBufferBase(GL_UNIFORM_BUFFER, 4, sksBuff);
// good practice, unbind buffer
glBindBuffer(GL_UNIFORM_BUFFER, 0);
sksBuff is just a GLuint.
I fill this array with new values every render/frame; these values are the new transformations for the joints. I do it like this:
for (int i = 0; i < skeleton.size(); i++) {
globalSkelInfo.currentJointTrans[i] = skeleton[i]->transformMat[currentFrame - 1] * skeleton[i]->globalBindPosMat;
}
This is working correctly for the root joint, but the rest of the joints/mesh remain in bind pose. The problem should be located where I update the array. Currently I do it like this in the render function, after I've done the multiplication for each joint:
for (int i = 0; i < skeleton.size(); i++) {
glUniformMatrix4fv(glGetUniformLocation(aShaderProgram, ("currentJointTrans[" + std::to_string(i) + "]").c_str()),
1, GL_FALSE, glm::value_ptr(globalSkelInfo.currentJointTrans[i]));
}
After this I draw. The root joint's values seem to be moving correctly, but the rest of the mesh stays in bind pose and doesn't move. In the vertex shader I apply the matrices like this:
#version 440
const int maxJoints = 100;
const int maxWeights = 4;
layout(location = 0) in vec3 vertex_position;
layout(location = 1) in vec2 vertex_UV;
layout(location = 2) in vec3 vertex_normal;
layout(location = 3) in vec4 vertex_weight;
layout(location = 4) in ivec4 vertex_controllers;
out vec2 outUVs;
out vec3 outNorm;
layout(binding = 3 , std140) uniform uniformBlock
{
vec3 camPos;
mat4 world;
mat4 LookAt;
mat4 projection;
mat4 MVP;
};
layout(binding = 4 , std140) uniform animationStruct
{
mat4 currentJointTrans[maxJoints];
};
void main() {
vec4 finalModelPos = vec4(0.0);
vec4 finalNormal = vec4(0.0);
for (int i = 0; i < 4; i++) {
mat4 jointTrans = currentJointTrans[vertex_controllers[i]];
vec4 posePos = jointTrans * vec4(vertex_position, 1.0);
finalModelPos += posePos * vertex_weight[i];
vec4 worldNormal = jointTrans * vec4(vertex_normal, 0.0);
finalNormal += worldNormal * vertex_weight[i];
}
gl_Position = MVP * finalModelPos;
outNorm = finalNormal.xyz;
outUVs = vertex_UV;
}
My theory is that the updating of the struct skelShader with my currentJointTrans array is incorrect. Any tips on how I should do this instead?
glUniform* calls cannot set data in uniform buffers. Indeed, the whole point of uniform buffers is that the uniform data comes from a buffer object. That's why you had to create one.
So if you want to set the uniform data for a uniform block, you set that data into the buffer object.
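For example, a minimal sketch of replacing the glUniformMatrix4fv loop with a buffer upload (reusing sksBuff, skeleton, and globalSkelInfo from the question):
// Write the recomputed joint matrices into the buffer backing binding point 4.
glBindBuffer(GL_UNIFORM_BUFFER, sksBuff);
glBufferSubData(GL_UNIFORM_BUFFER, 0,
                skeleton.size() * sizeof(glm::mat4),
                glm::value_ptr(globalSkelInfo.currentJointTrans[0]));
glBindBuffer(GL_UNIFORM_BUFFER, 0);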
In my OpenGL/GLSL application, I have implemented a uniform block managing the mesh material data (ambient, diffuse, and specular lighting, plus shininess).
For my first try, I have implemented the following uniform block syntax:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
Here's the client code:
scene::MaterialPtr pMaterial = this->FindMaterialByName(name);
GLuint bindingPoint = 0, bufferIndex = 0;
GLint blockSize = 0;
GLuint indices[4];
GLint offset[4];
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
GLuint blockIndex = glGetUniformBlockIndex(this->m_Handle, "MaterialBlock");
glGetActiveUniformBlockiv(this->m_Handle, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE, &blockSize);
glGetUniformIndices(this->m_Handle, 4, names, indices);
glGetActiveUniformsiv(this->m_Handle, 4, indices, GL_UNIFORM_OFFSET, offset);
char *pBuffer = new char[blockSize];
memset(pBuffer, '\0', blockSize);
glm::vec3 ambient = pMaterial->GetAmbient();
glm::vec3 diffuse = pMaterial->GetDiffuse();
glm::vec3 specular = pMaterial->GetSpecular();
float shininess = pMaterial->GetShininess();
std::copy(reinterpret_cast<char*>(&ambient[0]),
    reinterpret_cast<char*>(&ambient[0]) + sizeof(glm::vec3), pBuffer + offset[0]);
std::copy(reinterpret_cast<char*>(&diffuse[0]),
    reinterpret_cast<char*>(&diffuse[0]) + sizeof(glm::vec3), pBuffer + offset[1]);
std::copy(reinterpret_cast<char*>(&specular[0]),
    reinterpret_cast<char*>(&specular[0]) + sizeof(glm::vec3), pBuffer + offset[2]);
std::copy(reinterpret_cast<char*>(&shininess),
    reinterpret_cast<char*>(&shininess) + sizeof(float), pBuffer + offset[3]);
glUniformBlockBinding(this->m_Handle, blockIndex, bindingPoint);
{
glGenBuffers(1, &bufferIndex);
glBindBuffer(GL_UNIFORM_BUFFER, bufferIndex);
{
glBufferData(GL_UNIFORM_BUFFER, blockSize, NULL, GL_DYNAMIC_DRAW);
glBufferSubData(GL_UNIFORM_BUFFER, 0, blockSize, (const GLvoid *)pBuffer);
}
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
delete[] pBuffer; // the staging copy is no longer needed after the upload
glBindBufferBase(GL_UNIFORM_BUFFER, bindingPoint, bufferIndex);
//TEXTURE.
{
this->SetUniform("colorSampler", 0); //THE CHANNEL HAS TO BE CALCULATED! //int
glActiveTexture(GL_TEXTURE0); //DYNAMICS.
pMaterial->GetTexture()->Lock();
}
Variables content:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {1, 2, 3, 78} //OK
Offset: {0, 16, 32, 44} //OK
The fragment shader code:
#version 440
#define MAX_LIGHT_COUNT 10
/*
** Output color value.
*/
layout (location = 0) out vec4 FragColor;
/*
** Inputs.
*/
in vec3 Position;
in vec2 TexCoords;
in vec3 Normal;
/*
** Material uniform block.
*/
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
uniform sampler2D ColorSampler;
struct Light
{
vec4 Position;
vec3 La, Ld, Ls;
float Kc, Kl, Kq;
};
uniform struct Light LightInfos[MAX_LIGHT_COUNT];
uniform unsigned int LightCount;
/*
** Light attenuation factor.
*/
float getLightAttenuationFactor(vec3 lightDir, Light light)
{
float lightAtt = 0.0f;
float dist = 0.0f;
dist = length(lightDir);
lightAtt = 1.0f / (light.Kc + (light.Kl * dist) + (light.Kq * pow(dist, 2)));
return (lightAtt);
}
/*
** Basic phong shading.
*/
vec3 Basic_Phong_Shading(vec3 normalDir, vec3 lightDir, vec3 viewDir, int idx)
{
vec3 Specular = vec3(0.0f);
float lambertTerm = max(dot(lightDir, normalDir), 0.0f);
vec3 Ambient = LightInfos[idx].La * Ka;
vec3 Diffuse = LightInfos[idx].Ld * Kd * lambertTerm;
if (lambertTerm > 0.0f)
{
vec3 reflectDir = reflect(-lightDir, normalDir);
Specular = LightInfos[idx].Ls * Ks * pow(max(dot(reflectDir, viewDir), 0.0f), Shininess);
}
return (Ambient + Diffuse + Specular);
}
/*
** Fragment shader entry point.
*/
void main(void)
{
vec3 LightIntensity = vec3(0.0f);
vec4 texDiffuseColor = texture(ColorSampler, TexCoords);
vec3 normalDir = (gl_FrontFacing ? -Normal : Normal);
for (int idx = 0; idx < LightCount; idx++)
{
vec3 lightDir = vec3(LightInfos[idx].Position) - Position.xyz;
vec3 viewDir = -Position.xyz;
float lightAttenuationFactor = getLightAttenuationFactor(lightDir, LightInfos[idx]);
LightIntensity += Basic_Phong_Shading(
-normalize(normalDir), normalize(lightDir), normalize(viewDir), idx
) * lightAttenuationFactor;
}
FragColor = vec4(LightIntensity, 1.0f) * texDiffuseColor;
}
This code works perfectly.
But I know it's possible to use an instance name (like structures in C/C++) with uniform blocks, as follows:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
} MaterialInfos;
Of course, all the variables used in the shader, like 'Ka', 'Kd', 'Ks' and 'Shininess', become 'MaterialInfos.Ka', 'MaterialInfos.Kd', 'MaterialInfos.Ks' and 'MaterialInfos.Shininess'.
But unfortunately the execution of the program fails, because in the client code above the variables 'indices' and 'offset' are no longer filled correctly.
Here's the log:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {4294967295, 4294967295, 4294967295, 4294967295} //NOT OK
Offset: {-858993460, -858993460, -858993460, -858993460} //NOT OK
So only the block index and the block size are correct. To fix the problem I tried to change the line:
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
by the following one:
const GLchar *names[4] = {"MaterialInfos.Ka", "MaterialInfos.Kd", "MaterialInfos.Ks", "MaterialInfos.Shininess" };
But I still get the same log for the variables 'indices' and 'offset', and consequently my application still fails. I think it's a syntax problem in the client code (not in the GLSL code, because I get no GLSL error) but I can't find the solution.
Do you have an idea where my problem comes from?
When using instanced uniform blocks, the OpenGL application uses the block name (in this case MaterialBlock) before the dot, not the instance name as you have in your current code. The instance name is only ever seen by the GLSL shader.
Therefore your names variable should be defined and initialized as such:
const GLchar *names[4] = {"MaterialBlock.Ka", "MaterialBlock.Kd", "MaterialBlock.Ks", "MaterialBlock.Shininess" };
Try declaring your struct type separately from the uniform (which is then declared with that struct type):
struct MaterialData
{
vec3 kAmbient;
vec3 kDiffuse;
vec3 kSpecular;
float shininess;
};
uniform MaterialData material;
(If you follow this example, both your MaterialBlock and Light declarations are erroneous, for slightly different reasons.)
Then you can set uniforms by referring to them as (e.g.) "material.kAmbient" on the CPU side and read them as material.kAmbient on the GPU side.
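A minimal sketch of that CPU side (assuming a linked program handle `program` and GLM values for the material; plain struct uniforms are set member by member with glUniform*):
glUniform3fv(glGetUniformLocation(program, "material.kAmbient"), 1, glm::value_ptr(ambient));
glUniform3fv(glGetUniformLocation(program, "material.kDiffuse"), 1, glm::value_ptr(diffuse));
glUniform3fv(glGetUniformLocation(program, "material.kSpecular"), 1, glm::value_ptr(specular));
glUniform1f(glGetUniformLocation(program, "material.shininess"), shininess);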
I am having a weird problem with compute shaders since I changed the structure size of a buffer I was passing in to the shader.
struct Particle
{
    vec3 position;
    vec2 uv;
    vec3 accumulated_normal;
    int id;
    int flattened_id;
    int movable;       // can the particle move or not? used to pin parts of the cloth
    float mass;        // the mass of the particle (is always 1 in this example)
    vec3 old_pos;      // the position of the particle in the previous time step, used as part of the Verlet numerical integration scheme
    vec3 acceleration; // a vector representing the current acceleration of the particle
};
It is defined like this. I was having problems trying to use the flattened ID, so I wanted to write the IDs back to the buffer I was passing in. The shader looks something like this:
layout (local_size_x = 16, local_size_y = 1, local_size_z = 1) in;

// (Assumed SSBO declaration; the original post omits it, with Particle as defined above.)
layout (std430, binding = 0) buffer ParticleBuffer { Particle particles[]; } particleBuffer;

void main()
{
    uint flattened_id = gl_LocalInvocationIndex;
    particleBuffer.particles[0].id = 16;
    particleBuffer.particles[1].id = 17;
    particleBuffer.particles[2].id = 18;
    particleBuffer.particles[3].id = 19;
    //particleBuffer.particles[4].id = 20;
}
So up to this point it is fine, but the moment I uncomment the last line, the one writing particleBuffer.particles[4], the mesh vanishes from the screen. I have managed to change the position data of this mesh before, so this seems strange. I verified that the buffer I am passing in does indeed have 16 elements, so it should not go out of bounds either (i.e. cloth1.particles.size() == 16). Any ideas?
The entire code is here:
https://github.com/ssarangi/OpenGL_Cloth/tree/master/cloth_4_3_compute
glUseProgram(computeShader);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, cloth1.vertex_vbo_storage);
glBufferData(GL_SHADER_STORAGE_BUFFER, cloth1.particles.size() * sizeof(Particle), &(cloth1.particles[0]), GL_DYNAMIC_COPY);
glDispatchCompute(1, 1, 1);
{
GLenum err = gl3wGetError();
}
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, cloth1.vertex_vbo_storage);
Particle * ptr = reinterpret_cast<Particle *>(glMapBufferRange(GL_ARRAY_BUFFER, 0, cloth1.particles.size() * sizeof(Particle), GL_MAP_READ_BIT));
memcpy(&cloth1.particles[0], ptr, cloth1.particles.size()*sizeof(Particle));
glUnmapBuffer(GL_ARRAY_BUFFER);
glBindBuffer(GL_ARRAY_BUFFER, 0);
EDITED with Andon's comments:
This is the new layout from the C++ side.
struct Particle
{
    vec4 position;
    vec2 uv;
    vec4 accumulated_normal;
    vec4 old_pos;      // the position of the particle in the previous time step, used as part of the Verlet numerical integration scheme
    vec4 acceleration; // a vector representing the current acceleration of the particle
    int id;
    int flattened_id;
    int movable;       // can the particle move or not? used to pin parts of the cloth
    float mass;        // the mass of the particle (is always 1 in this example)
};
The GLSL-side definition is below. What I am unsure about is whether the padding elements need to be included in the GLSL structure or not. It still doesn't update the IDs.
struct Particle
{
vec4 position;
vec2 uv;
vec4 accumulated_normal;
vec4 old_pos; // the position of the particle in the previous time step, used as part of the verlet numerical integration scheme
vec4 acceleration; // a vector representing the current acceleration of the particle
int id;
int flattened_id;
int movable; // can the particle move or not ? used to pin parts of the cloth
float mass; // the mass of the particle (is always 1 in this example)
};
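(On the padding question: in std140/std430, a vec4 member must start on a 16-byte boundary, so the vec2 uv above is followed by 8 bytes of implicit padding before accumulated_normal. The GLSL compiler inserts that padding automatically; the C++ struct needs explicit filler to match. A hypothetical C++ mirror, assuming a std430 buffer block and GLM types:)
struct Particle
{
    glm::vec4 position;           // offset  0
    glm::vec2 uv;                 // offset 16
    glm::vec2 pad0;               // offset 24: filler, since the next vec4
                                  // must start at a multiple of 16
    glm::vec4 accumulated_normal; // offset 32
    glm::vec4 old_pos;            // offset 48
    glm::vec4 acceleration;       // offset 64
    int   id;                     // offset 80
    int   flattened_id;           // offset 84
    int   movable;                // offset 88
    float mass;                   // offset 92
};                                // stride 96 (rounded up to 16-byte alignment)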