OpenGL: Lambert shading imported OBJs results in artifacts and strange culling - c++

I decided to post this because I now believe the problem isn't simply in the shader program, but most probably in the OBJ import and mesh initialization process. I wanted to write a quick Lambert shader to finally get something appearing on the screen. The final result is riddled with interesting artifacts and visibility issues.
It appears as though the vertex positions are encoded correctly, but either the normals or the indices are completely messed up.
Vertex Shader
#version 330
// MeshVertex
in layout(location=0) vec3 a_Position;
in layout(location=1) vec3 a_Normal;
in layout(location=2) vec2 a_UV;
in layout(location=3) vec3 a_Tangent;
in layout(location=4) vec3 a_BiTangent;
uniform mat4 View;
uniform mat4 Projection;
uniform mat4 Model;
out VS_out
{
vec3 fragNormal;
vec3 fragPos;
} vs_out;
void main()
{
mat3 normalMatrix = mat3(transpose(inverse(Model)));
vec4 position = vec4(a_Position, 1.f);
vs_out.fragPos = (Model * position).xyz;
vs_out.fragNormal = normalMatrix * a_Normal;
gl_Position = Projection * View * Model * position;
}
I initially thought I was passing the vertex normals incorrectly to the fragment shader. I have seen some samples multiply the vertex position by the ModelView matrix. That seems unintuitive to me: my lights are positioned in world space, so I would need the world-space coordinates of my vertices, hence the multiplication by the Model matrix only. If there are no red flags in this thought process, here is the fragment shader:
#version 330
struct LightSource
{
vec3 position;
vec3 intensity;
};
uniform LightSource light;
in VS_out
{
vec3 fragNormal;
vec3 fragPos;
} fs_in;
struct Material
{
vec4 color;
vec3 ambient;
};
uniform Material material;
void main()
{
// just playing around with some values for now; don't worry, removing this still does not fix the issue
vec3 ambient = normalize(vec3(69, 111, 124));
vec3 norm = normalize(fs_in.fragNormal);
vec3 pos = fs_in.fragPos;
vec3 lightDir = normalize(light.position - pos);
float lambert = max(dot(norm, lightDir), 0.0);
vec3 illumination = (lambert * light.intensity) + ambient;
gl_FragColor = vec4(illumination * material.color.xyz, 1.f);
}
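For reference, setting these struct uniforms from the C++ side would look roughly like the following (a minimal sketch with placeholder values and an assumed program handle, not the actual application code):
// Uniform locations for struct members are queried by their full GLSL names.
GLuint prog = shaderProgram; // assumed handle of the linked Lambert program
glUseProgram(prog);
glUniform3f(glGetUniformLocation(prog, "light.position"), 5.f, 10.f, 5.f);
glUniform3f(glGetUniformLocation(prog, "light.intensity"), 1.f, 1.f, 1.f);
glUniform4f(glGetUniformLocation(prog, "material.color"), 1.f, 1.f, 1.f, 1.f);
glUniform3f(glGetUniformLocation(prog, "material.ambient"), 0.1f, 0.1f, 0.1f);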
Now the main suspicion is how the OBJ is interpreted. I use the tinyOBJ importer for this. I mostly copied the sample code they had on their GitHub page, and initialized my native vertex type using that data.
OBJ Import Code
bool Model::Load(const void* rawBinary, size_t bytes)
{
tinyobj::ObjReader reader;
if(reader.ParseFromString((const char*)rawBinary, ""))
{
// Fetch meshes
std::vector<MeshVertex> vertices;
std::vector<Triangle> triangles;
const tinyobj::attrib_t& attrib = reader.GetAttrib();
const std::vector<tinyobj::shape_t>& shapes = reader.GetShapes();
m_Meshes.resize(shapes.size());
m_Materials.resize(shapes.size());
// Loop over shapes; in our case, each shape corresponds to a mesh object
for(size_t s = 0; s < shapes.size(); s++)
{
// Loop over faces(polygon)
size_t index_offset = 0;
for(size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++)
{
// Num of face vertices for face f
int fv = shapes[s].mesh.num_face_vertices[f];
ASSERT(fv == 3, "Only supporting triangles for now");
Triangle tri;
// Loop over vertices in the face.
for(size_t v = 0; v < fv; v++) {
// access to vertex
tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v];
tinyobj::real_t vx = 0.f;
tinyobj::real_t vy = 0.f;
tinyobj::real_t vz = 0.f;
tinyobj::real_t nx = 0.f;
tinyobj::real_t ny = 0.f;
tinyobj::real_t nz = 0.f;
tinyobj::real_t tx = 0.f;
tinyobj::real_t ty = 0.f;
vx = attrib.vertices[3 * idx.vertex_index + 0];
vy = attrib.vertices[3 * idx.vertex_index + 1];
vz = attrib.vertices[3 * idx.vertex_index + 2];
if(attrib.normals.size())
{
nx = attrib.normals[3 * idx.normal_index + 0];
ny = attrib.normals[3 * idx.normal_index + 1];
nz = attrib.normals[3 * idx.normal_index + 2];
}
if(attrib.texcoords.size())
{
tx = attrib.texcoords[2 * idx.texcoord_index + 0];
ty = attrib.texcoords[2 * idx.texcoord_index + 1];
}
// Populate our native vertex type
MeshVertex meshVertex;
meshVertex.Position = glm::vec3(vx, vy, vz);
meshVertex.Normal = glm::vec3(nx, ny, nz);
meshVertex.UV = glm::vec2(tx, ty);
meshVertex.BiTangent = glm::vec3(0.f);
meshVertex.Tangent = glm::vec3(0.f);
vertices.push_back(meshVertex);
tri.Idx[v] = index_offset + v;
}
triangles.emplace_back(tri);
index_offset += fv;
// per-face material
//shapes[s].mesh.material_ids[f];
}
// Adding meshes should occur here!
m_Meshes[s] = std::make_unique<StaticMesh>(vertices, triangles);
// m_Materials[s] = ....
}
}
return true;
}
The way I understand OBJ, the notion of OpenGL indices does not equate to the face elements found in the OBJ, because each face element has different indices into the position, normal, and texcoord arrays. So instead, I just copy the vertex attributes indexed by the face element into my native MeshVertex structure -- this represents one vertex of my mesh; the corresponding face element ID is then simply the corresponding index for my index buffer object. In my case I use a Triangle structure instead, but it's effectively the same thing.
The Triangle struct, if you're interested:
struct Triangle
{
uint32_t Idx[3];
Triangle(uint32_t v1, uint32_t v2, uint32_t v3)
{
Idx[0] = v1;
Idx[1] = v2;
Idx[2] = v3;
}
Triangle(const Triangle& Other)
{
Idx[0] = Other.Idx[0];
Idx[1] = Other.Idx[1];
Idx[2] = Other.Idx[2];
}
Triangle()
{
}
};
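Since the suspicion is that the indices are messed up, a quick sanity check over the de-indexed data (a rough sketch using the vertices/triangles vectors built by the loader above) at least verifies that the counts line up and every index is in range:
// Sanity-check sketch: one unique vertex per face corner, all indices valid.
void ValidateMeshData(const std::vector<MeshVertex>& vertices,
                      const std::vector<Triangle>& triangles)
{
    ASSERT(vertices.size() == triangles.size() * 3, "Expected one vertex per face corner");
    for(const Triangle& tri : triangles)
        for(uint32_t idx : tri.Idx)
            ASSERT(idx < vertices.size(), "Triangle index out of range");
}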
Other than that, I have no idea what could cause this problem; I am open to hearing new thoughts, and perhaps someone experienced understands what these artifacts signify. If you want to take a deeper dive, I can post the mesh initialization code as well.
EDIT:
So I tried importing the model in FBX format and encountered a very similar issue. I am now suspecting silly errors in the OpenGL code that initializes the mesh.
This initializes the OpenGL buffers from arbitrary vertex data, plus the triangles to index it by:
void Mesh::InitBuffers(const void* vertexData, size_t size, const std::vector<Triangle>& triangles)
{
glGenVertexArrays(1, &m_vao);
glBindVertexArray(m_vao);
// Interleaved Vertex Buffer
glGenBuffers(1, &m_vbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, size, vertexData, GL_STATIC_DRAW);
// Index Buffer
glGenBuffers(1, &m_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * triangles.size(), triangles.data(), GL_STATIC_DRAW);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
Then I set up the layout of the vertex buffer using a BufferLayout structure that specifies the attributes we want:
void Mesh::SetBufferLayout(const BufferLayout& layout)
{
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
uint32_t stride = layout.GetStride();
int i = 0;
for(const BufferElement& element : layout)
{
glEnableVertexAttribArray(i);
glVertexAttribPointer(i++, element.GetElementCount(), GLType(element.Type), element.Normalized, stride, (void*)(element.Offset));
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
So in our case, the BufferLayout corresponds to the MeshVertex I populated, containing a Position (float3), Normal (float3), UV (float2), Tangent (float3), and BiTangent (float3). I can confirm via debugging that the strides, offsets, and other values coming from the BufferElement are exactly what I expect, so I am now suspicious of the OpenGL calls I am making.
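For that layout the expected stride and offsets are easy to pin down; a small compile-time check along these lines (a sketch, assuming MeshVertex declares the members in that order with tightly packed glm types and no padding) is handy to compare against what the BufferElements report:
// Expected interleaved MeshVertex layout (requires <cstddef> for offsetof):
// Position -> 0, Normal -> 12, UV -> 24, Tangent -> 32, BiTangent -> 44, stride 56.
static_assert(offsetof(MeshVertex, Position)  == 0,  "Position offset");
static_assert(offsetof(MeshVertex, Normal)    == 12, "Normal offset");
static_assert(offsetof(MeshVertex, UV)        == 24, "UV offset");
static_assert(offsetof(MeshVertex, Tangent)   == 32, "Tangent offset");
static_assert(offsetof(MeshVertex, BiTangent) == 44, "BiTangent offset");
static_assert(sizeof(MeshVertex)              == 56, "Stride");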

Alright, let us all just forget this happened. This is very embarrassing: everything was working fine after all. I simply "forgot" to call the following before rendering:
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
So, understandably, all kinds of shapes were being rendered and culled in a completely random fashion. (Why is it not enabled by default?)
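One extra note for anyone who lands here: besides enabling the test, the depth buffer also has to exist (request depth bits when creating the context or framebuffer) and has to be cleared every frame, for example:
// Clear depth along with color at the start of every frame.
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);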

Related

How to use samplerCube as array

I am using a samplerCube for a point light shadow map. For multiple lights, I declared the samplerCube as an array, as follows:
uniform samplerCube pointShadowMapTexture[MAX_LIGHT];
But somehow I can't index this samplerCube array. The shader compiles and there is no problem. The same approach works for sampler2D arrays.
I tried indexing it with [0], [1], etc. in the shader, but I always get the same image. I am sending a different cube texture for each light, but somehow the shader doesn't index into the array, or doesn't accept it.
I do the same thing for directional lights with a sampler2D array, but when it comes to samplerCubes it doesn't work.
The code sending the sampler cubes to the shader:
void ShaderProgram::bindTexture(GLenum target , const char * name , int id){
GLuint TextureID = glGetUniformLocation(programID, name);
glActiveTexture(GL_TEXTURE0 + textureOrder);
glBindTexture(target , id);
glUniform1i(TextureID, textureOrder);
textureOrder++;
App::checkErrors(__FILE__,__LINE__,name);
}
//depthMapTexture is member of Light class
std::string PointShadowMapTexture = "pointShadowMapTexture[" + std::to_string(LightNumber) + "]";
ShaderProgram::shaders["deferred"]->bindTexture(GL_TEXTURE_CUBE_MAP, PointShadowMapTexture.data(), depthMapTexture );
The shadow sampling function in the fragment shader:
float SoftPointShadowCalculation(int index , vec3 fragPos ,vec3 lightPos){
vec3 fragToLight = fragPos - lightPos;
float currentDepth = length(fragToLight);
float shadow = 0.0;
float bias = 0.0;
int samples = 20;
float viewDistance = length(viewPos - fragPos);
float diskRadius = (1.0 + (viewDistance / farPlane)) / 25.0;
for(int i = 0; i < samples; ++i){
float closestDepth = texture(pointShadowMapTexture[index], fragToLight + gridSamplingDisk[i] * diskRadius).r;
closestDepth *= farPlane;//farplane
if(currentDepth - bias > closestDepth){
shadow += 0.5;
}
}
shadow /= float(samples);
return shadow;
}
Is this valid for the samplerCube type? If not, what should I do to have an array of samplerCubes?
I realized that all lights were showing what the last light sees: when rendering the model, I was using the last light's projection*view matrix for every light. :) It took hours to realize.
Now each light is rendered with its own matrix.
This might help someone who encounters the same problem:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
for(Light * light : Light::shadowCasterLights){
glBindFramebuffer(GL_FRAMEBUFFER, light->depthMapFrameBuffer);App::checkFrameBufferError(__FILE__,__LINE__);
glViewport(0,0,light->depthMapTextureSize,light->depthMapTextureSize);
ShaderProgram::shaders[light->depthMapShader]->use("ShadowCaster Model");
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("ModelMatrix", &scene->ModelMatrix[0][0]);
ShaderProgram::shaders[light->depthMapShader]->attributeBuffer("ModelSpaceVertexPosition", mesh->vertexBufferID, 3);
switch (light->lightType) {
case LightType::DIRECTIONAL:
ShaderProgram::shaders[light->depthMapShader]->use("Light");
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix",&light->lightSpaceMatrix[0][0]);
break;
case LightType::POINT:
ShaderProgram::shaders[light->depthMapShader]->use("Light");
ShaderProgram::shaders[light->depthMapShader]->uniform3f("LightPosition" , &positionVector[0]);
ShaderProgram::shaders[light->depthMapShader]->uniform1f("FarPlane" , light->farPlane);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[0]",&light->lightSpaceMatrixCube[0][0][0]);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[1]",&light->lightSpaceMatrixCube[1][0][0]);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[2]",&light->lightSpaceMatrixCube[2][0][0]);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[3]",&light->lightSpaceMatrixCube[3][0][0]);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[4]",&light->lightSpaceMatrixCube[4][0][0]);
ShaderProgram::shaders[light->depthMapShader]->uniformMatrix4("LightSpaceMatrix[5]",&light->lightSpaceMatrixCube[5][0][0]);
break;
}
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->elementBufferID);
glDrawElements(
GL_TRIANGLES, // mode
mesh->indices.size(), // count
GL_UNSIGNED_SHORT, // type
(void *) 0 // element array buffer offset
);
ShaderProgram::shaders[light->depthMapShader]->reset();
}
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0,0,1920,1080);
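For context, the per-light cube-face matrices fed into LightSpaceMatrix[0..5] above can be built with glm roughly like this (a sketch; lightSpaceMatrixCube and farPlane mirror the code above, while the position member name is assumed):
// Sketch: six view-projection matrices for a point light's shadow cube map.
glm::mat4 proj = glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, light->farPlane);
glm::vec3 pos  = light->position; // assumed member holding the light's world position
light->lightSpaceMatrixCube[0] = proj * glm::lookAt(pos, pos + glm::vec3( 1, 0, 0), glm::vec3(0, -1,  0)); // +X
light->lightSpaceMatrixCube[1] = proj * glm::lookAt(pos, pos + glm::vec3(-1, 0, 0), glm::vec3(0, -1,  0)); // -X
light->lightSpaceMatrixCube[2] = proj * glm::lookAt(pos, pos + glm::vec3( 0, 1, 0), glm::vec3(0,  0,  1)); // +Y
light->lightSpaceMatrixCube[3] = proj * glm::lookAt(pos, pos + glm::vec3( 0,-1, 0), glm::vec3(0,  0, -1)); // -Y
light->lightSpaceMatrixCube[4] = proj * glm::lookAt(pos, pos + glm::vec3( 0, 0, 1), glm::vec3(0, -1,  0)); // +Z
light->lightSpaceMatrixCube[5] = proj * glm::lookAt(pos, pos + glm::vec3( 0, 0,-1), glm::vec3(0, -1,  0)); // -Z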
Upon the power of shadows

How to provide 14 float values per vertex to the shader?

I have downloaded a model and exported it as .fbx. The model contains several animations (6 to be precise), and I want to have one of them displayed. Following Anton Gerdelan's book on OpenGL, I wrote an importer which uses assimp to parse the model, buffers the relevant vertex data, and retrieves the offset matrices needed for the animation.
Not having much experience with skeletal animation, I think I've been able to make the necessary changes to Anton's importer so that it can work on the more complex model that I need. However, the importer assumes that each vertex is only influenced by 1 bone, which unfortunately is not the case.
After some tinkering, I figured out that each vertex of the model can be influenced by at most 14 bones at a time. Since I am not sure how I could pass 14 boneId/weight pairs to the shader, I changed the code to accommodate up to 4 bones at a time. This is the code that parses the bone IDs and weights and buffers them:
*bone_count = (int)mesh->mNumBones;
char bone_names[256][64];
struct vertexdata {
int IDs[4];
float Weights[4];
int ptr;
};
vector<vertexdata> vdata;
vdata.resize(*point_count);
for (int i = 0; i < *point_count; i++) {
vdata[i].ptr = 0;
}
for (int b_i = 0; b_i < *bone_count; b_i++) {
const aiBone* bone = mesh->mBones[b_i];
strcpy(bone_names[b_i], bone->mName.data);
printf("bone_names[%i]=%s\n", b_i, bone_names[b_i]);
bone_offset_mats[b_i] = convert_assimp_matrix(bone->mOffsetMatrix);
//getting weights for each bone
int num_weights = (int)bone->mNumWeights;
for (int w_i = 0; w_i < num_weights; w_i++) {
aiVertexWeight weight = bone->mWeights[w_i];
int vid = weight.mVertexId;
float vweight = weight.mWeight;
if (vdata[vid].ptr < 4) {
vdata[vid].IDs[vdata[vid].ptr] = b_i;
vdata[vid].Weights[vdata[vid].ptr] = vweight;
vdata[vid].ptr++;
}
int vertex_id = (int)weight.mVertexId;
}
}
//buffering bone id data
GLuint vbo1;
glGenBuffers(1, &vbo1);
glBindBuffer(GL_ARRAY_BUFFER, vbo1);
glBufferData(GL_ARRAY_BUFFER, sizeof(vdata[0]) * vdata.size(), &vdata[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(3);
glVertexAttribIPointer(3, 4, GL_INT, sizeof(vertexdata), (const GLvoid*)0);
glEnableVertexAttribArray(4);
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, sizeof(vertexdata), (const GLvoid*)16);
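As a side note, the stride and offsets passed above depend on the exact memory layout of vertexdata; a couple of compile-time checks (a sketch, assuming 4-byte int and float) make that assumption explicit:
// The attribute pointers above assume this layout of vertexdata.
static_assert(offsetof(vertexdata, IDs)     == 0,  "bone IDs at offset 0");
static_assert(offsetof(vertexdata, Weights) == 16, "weights at offset 16");
static_assert(sizeof(vertexdata)            == 36, "stride passed to the attrib pointers");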
and in the shaders:
vertex shader
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 norm;
layout (location = 2) in vec2 UV;
layout (location = 3) in ivec4 boneIDs;
layout (location = 4) in vec4 Weights;
uniform mat4 view, projection, model;
uniform mat4 bone_matrices[40];
out vec2 tCoords;
void main()
{
mat4 boneTransform = bone_matrices[boneIDs[0]] * Weights[0];
boneTransform += bone_matrices[boneIDs[1]] * Weights[1];
boneTransform += bone_matrices[boneIDs[2]] * Weights[2];
boneTransform += bone_matrices[boneIDs[3]] * Weights[3];
tCoords = UV;
gl_Position = projection * view * boneTransform * model * vec4(pos, 1.0);
}
fragment shader
#version 330 core
in vec2 tCoords;
out vec4 fragColour;
uniform sampler2D tex;
void main()
{
fragColour = texture(tex, tCoords);
}
The model is rendered properly, but I am not observing any movement. Again, not knowing much about skeletal animation, I can only assume that it's because I haven't included every bone that influences each vertex, along with the corresponding weight. However, when buffering the data, the shader attributes only accept up to a vec4, i.e. 4 values per vertex. How can I pass 14 IDs and 14 weights? Could this be the cause of the animation not working?
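One small follow-up to the 4-influence approach described above: whatever weights are kept per vertex should be renormalized so they still sum to 1, otherwise vertices originally influenced by more than 4 bones end up under-weighted. A sketch, using the vertexdata vector from the code above:
// Renormalize the kept bone weights so the (up to) 4 influences sum to 1.
for (vertexdata& vd : vdata) {
    float sum = vd.Weights[0] + vd.Weights[1] + vd.Weights[2] + vd.Weights[3];
    if (sum > 0.0f) {
        for (int k = 0; k < 4; k++)
            vd.Weights[k] /= sum;
    }
}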

OpenGL ES - texturing sphere

I have a sphere and I can map a texture onto it. But right now the texture is on the outside of the sphere, and I need it on the inside. The user sits inside the sphere and views it from within (rotating and zooming), so it is essentially a sky dome, but a sphere. Maybe I need to fix the UV texture coordinates or enable something?
Here is the code for generating the sphere:
class Sphere : public ParametricSurface {
public:
Sphere(float radius) : m_radius(radius)
{
ParametricInterval interval = { ivec2(20, 20), vec2(Pi, TwoPi), vec2(8, 14) };
SetInterval(interval);
}
vec3 Evaluate(const vec2& domain) const
{
float u = domain.x, v = domain.y;
float x = m_radius * sin(u) * cos(v);
float y = m_radius * cos(u);
float z = m_radius * -sin(u) * sin(v);
return vec3(x, y, z);
}
private:
float m_radius;
};
vec2 ParametricSurface::ComputeDomain(float x, float y) const
{
return vec2(x * m_upperBound.x / m_slices.x, y * m_upperBound.y / m_slices.y);
}
void ParametricSurface::GenerateVertices(float * vertices) const
{
float* attribute = vertices;
for (int j = 0; j < m_divisions.y; j++) {
for (int i = 0; i < m_divisions.x; i++) {
// Compute Position
vec2 domain = ComputeDomain(i, j);
vec3 range = Evaluate(domain);
attribute = range.Write(attribute);
// Compute Normal
if (m_vertexFlags & VertexFlagsNormals) {
float s = i, t = j;
// Nudge the point if the normal is indeterminate.
if (i == 0) s += 0.01f;
if (i == m_divisions.x - 1) s -= 0.01f;
if (j == 0) t += 0.01f;
if (j == m_divisions.y - 1) t -= 0.01f;
// Compute the tangents and their cross product.
vec3 p = Evaluate(ComputeDomain(s, t));
vec3 u = Evaluate(ComputeDomain(s + 0.01f, t)) - p;
vec3 v = Evaluate(ComputeDomain(s, t + 0.01f)) - p;
vec3 normal = u.Cross(v).Normalized();
if (InvertNormal(domain))
normal = -normal;
attribute = normal.Write(attribute);
}
// Compute Texture Coordinates
if (m_vertexFlags & VertexFlagsTexCoords) {
float s = m_textureCount.x * i / m_slices.x;
float t = m_textureCount.y * j / m_slices.y;
attribute = vec2(s, t).Write(attribute);
}
}
}
}
void ParametricSurface::GenerateLineIndices(unsigned short * indices) const
{
unsigned short * index = indices;
for (int j = 0, vertex = 0; j < m_slices.y; j++) {
for (int i = 0; i < m_slices.x; i++) {
int next = (i + 1) % m_divisions.x;
*index++ = vertex + i;
*index++ = vertex + next;
*index++ = vertex + i;
*index++ = vertex + i + m_divisions.x;
}
vertex += m_divisions.x;
}
}
void ParametricSurface::GenerateTriangleIndices(unsigned short * indices) const
{
unsigned short * index = indices;
for (int j = 0, vertex = 0; j < m_slices.y; j++) {
for (int i = 0; i < m_slices.x; i++) {
int next = (i + 1) % m_divisions.x;
*index++ = vertex + i;
*index++ = vertex + next;
*index++ = vertex + i + m_divisions.x;
*index++ = vertex + next;
*index++ = vertex + next + m_divisions.x;
*index++ = vertex + i + m_divisions.x;
}
vertex += m_divisions.x;
}
}
And here is VBO creation:
+ (DrawableVBO *)createVBO:(SurfaceType)surfaceType
{
ISurface * surface = [self createSurface:surfaceType]; // just Sphere type
surface->SetVertexFlags(VertexFlagsNormals | VertexFlagsTexCoords); // which vertex attributes I need
// Get vertices from the surface.
//
int vertexSize = surface->GetVertexSize();
int vBufSize = surface->GetVertexCount() * vertexSize;
GLfloat * vbuf = new GLfloat[vBufSize];
surface->GenerateVertices(vbuf);
// Get triangle indices from the surface
//
int triangleIndexCount = surface->GetTriangleIndexCount();
unsigned short * triangleBuf = new unsigned short[triangleIndexCount];
surface->GenerateTriangleIndices(triangleBuf);
// Create the VBO for the vertices.
//
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, vBufSize * sizeof(GLfloat), vbuf, GL_STATIC_DRAW);
// Create the VBO for the triangle indices
//
GLuint triangleIndexBuffer;
glGenBuffers(1, &triangleIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangleIndexCount * sizeof(GLushort), triangleBuf, GL_STATIC_DRAW);
delete [] vbuf;
delete [] triangleBuf;
delete surface;
DrawableVBO * vbo = [[DrawableVBO alloc] init];
vbo.vertexBuffer = vertexBuffer;
vbo.triangleIndexBuffer = triangleIndexBuffer;
vbo.vertexSize = vertexSize;
vbo.triangleIndexCount = triangleIndexCount;
return vbo;
}
Here is my light setup:
- (void)setupLights
{
// Set up some default material parameters.
//
glUniform3f(_ambientSlot, 0.04f, 0.04f, 0.04f);
glUniform3f(_specularSlot, 0.5, 0.5, 0.5);
glUniform1f(_shininessSlot, 50);
// Initialize various state.
//
glEnableVertexAttribArray(_positionSlot);
glEnableVertexAttribArray(_normalSlot);
glUniform3f(_lightPositionSlot, 1.0, 1.0, 5.0);
glVertexAttrib3f(_diffuseSlot, 0.8, 0.8, 0.8);
}
And finally shaders:
fragment:
precision mediump float;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
uniform sampler2D Sampler;
void main()
{
gl_FragColor = texture2D(Sampler, vTextureCoordOut) * vDestinationColor;
}
vertex:
uniform mat4 projection;
uniform mat4 modelView;
attribute vec4 vPosition;
attribute vec2 vTextureCoord;
uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
uniform vec3 vAmbientMaterial;
uniform vec3 vSpecularMaterial;
uniform float shininess;
attribute vec3 vNormal;
attribute vec3 vDiffuseMaterial;
varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;
void main(void)
{
gl_Position = projection * modelView * vPosition;
vec3 N = normalMatrix * vNormal;
vec3 L = normalize(vLightPosition);
vec3 E = vec3(0, 0, 1);
vec3 H = normalize(L + E);
float df = max(0.0, dot(N, L));
float sf = max(0.0, dot(N, H));
sf = pow(sf, shininess);
vec3 color = vAmbientMaterial + df * vDiffuseMaterial + sf * vSpecularMaterial;
vDestinationColor = vec4(color, 1);
vTextureCoordOut = vTextureCoord;
}
It's some monkey code, but I fixed it. First we enable culling and cull the front faces, so only the inside of the sphere gets rendered:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
Then I changed the position of the light source:
glUniform3f(_lightPositionSlot, 1.0, 1.0, -2.5);
(I don't even need the light, so the next step is to disable it entirely.) But finally I have a sphere, the user is inside it, and they can rotate it, zoom in and out, and see the texture!
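An alternative to culling the front faces, just as a sketch, is to reverse the triangle winding when generating the indices so that the faces seen from inside the sphere become the front-facing ones (compare with GenerateTriangleIndices above):
// Same two triangles per quad as above, but with reversed winding.
*index++ = vertex + i;
*index++ = vertex + i + m_divisions.x;
*index++ = vertex + next;
*index++ = vertex + next;
*index++ = vertex + i + m_divisions.x;
*index++ = vertex + next + m_divisions.x;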

Instance name with Uniform blocks (UBO) does not work using OpenGL/GLSL

In my OpenGL/GLSL application I have implemented a uniform block managing the mesh material data (ambient, diffuse, and specular lighting, plus shininess).
For my first try, I implemented the following uniform block syntax:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
Here's the client code:
scene::MaterialPtr pMaterial = this->FindMaterialByName(name);
GLuint bindingPoint = 0, bufferIndex = 0;
GLint blockSize = 0;
GLuint indices[4];
GLint offset[4];
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
GLuint blockIndex = glGetUniformBlockIndex(this->m_Handle, "MaterialBlock");
glGetActiveUniformBlockiv(this->m_Handle, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE, &blockSize);
glGetUniformIndices(this->m_Handle, 4, names, indices);
glGetActiveUniformsiv(this->m_Handle, 4, indices, GL_UNIFORM_OFFSET, offset);
char *pBuffer = new char[blockSize];
memset(pBuffer, '\0', blockSize);
glm::vec3 ambient = pMaterial->GetAmbient();
glm::vec3 diffuse = pMaterial->GetDiffuse();
glm::vec3 specular = pMaterial->GetSpecular();
float shininess = pMaterial->GetShininess();
std::copy(reinterpret_cast<char*>(&ambient[0]),
reinterpret_cast<char*>(&ambient[0]) + sizeof(glm::vec4), pBuffer + offset[0]);
std::copy(reinterpret_cast<char*>(&diffuse[0]), reinterpret_cast<char*>(
&diffuse[0]) + sizeof(glm::vec4), pBuffer + offset[1]);
std::copy(reinterpret_cast<char*>(&specular[0]),
reinterpret_cast<char*>(&specular[0]) + sizeof(glm::vec3), pBuffer + offset[2]);
std::copy(reinterpret_cast<char*>(&shininess), reinterpret_cast<char*>(
&shininess) + sizeof(float), pBuffer + offset[3]);
glUniformBlockBinding(this->m_Handle, blockIndex, bindingPoint);
{
glGenBuffers(1, &bufferIndex);
glBindBuffer(GL_UNIFORM_BUFFER, bufferIndex);
{
glBufferData(GL_UNIFORM_BUFFER, blockSize, NULL, GL_DYNAMIC_DRAW);
glBufferSubData(GL_UNIFORM_BUFFER, 0, blockSize, (const GLvoid *)pBuffer);
}
glBindBuffer(GL_UNIFORM_BUFFER, 0);
}
glBindBufferBase(GL_UNIFORM_BUFFER, bindingPoint, bufferIndex);
//TEXTURE.
{
this->SetUniform("colorSampler", 0); //THE CHANNEL HAS TO BE CALCULATED! //int
glActiveTexture(GL_TEXTURE0); //DYNAMICS.
pMaterial->GetTexture()->Lock();
}
The resulting variable contents:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {1, 2, 3, 78} //OK
Offset: {0, 16, 32, 44} //OK
The fragment shader code:
#version 440
#define MAX_LIGHT_COUNT 10
/*
** Output color value.
*/
layout (location = 0) out vec4 FragColor;
/*
** Inputs.
*/
in vec3 Position;
in vec2 TexCoords;
in vec3 Normal;
/*
** Material uniform block.
*/
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
};
uniform sampler2D ColorSampler;
struct Light
{
vec4 Position;
vec3 La, Ld, Ls;
float Kc, Kl, Kq;
};
uniform struct Light LightInfos[MAX_LIGHT_COUNT];
uniform unsigned int LightCount;
/*
** Light attenuation factor.
*/
float getLightAttenuationFactor(vec3 lightDir, Light light)
{
float lightAtt = 0.0f;
float dist = 0.0f;
dist = length(lightDir);
lightAtt = 1.0f / (light.Kc + (light.Kl * dist) + (light.Kq * pow(dist, 2)));
return (lightAtt);
}
/*
** Basic phong shading.
*/
vec3 Basic_Phong_Shading(vec3 normalDir, vec3 lightDir, vec3 viewDir, int idx)
{
vec3 Specular = vec3(0.0f);
float lambertTerm = max(dot(lightDir, normalDir), 0.0f);
vec3 Ambient = LightInfos[idx].La * Ka;
vec3 Diffuse = LightInfos[idx].Ld * Kd * lambertTerm;
if (lambertTerm > 0.0f)
{
vec3 reflectDir = reflect(-lightDir, normalDir);
Specular = LightInfos[idx].Ls * Ks * pow(max(dot(reflectDir, viewDir), 0.0f), Shininess);
}
return (Ambient + Diffuse + Specular);
}
/*
** Fragment shader entry point.
*/
void main(void)
{
vec3 LightIntensity = vec3(0.0f);
vec4 texDiffuseColor = texture2D(ColorSampler, TexCoords);
vec3 normalDir = (gl_FrontFacing ? -Normal : Normal);
for (int idx = 0; idx < LightCount; idx++)
{
vec3 lightDir = vec3(LightInfos[idx].Position) - Position.xyz;
vec3 viewDir = -Position.xyz;
float lightAttenuationFactor = getLightAttenuationFactor(lightDir, LightInfos[idx]);
LightIntensity += Basic_Phong_Shading(
-normalize(normalDir), normalize(lightDir), normalize(viewDir), idx
) * lightAttenuationFactor;
}
FragColor = vec4(LightIntensity, 1.0f) * texDiffuseColor;
}
This code works perfectly and produces the expected output.
But I know it's possible to use an instance name (like structures in C/C++) with uniform blocks, as follows:
uniform MaterialBlock
{
vec3 Ka, Kd, Ks;
float Shininess;
} MaterialInfos;
Of course, all the variables used in the shader, like 'Ka', 'Kd', 'Ks' and 'Shininess', become 'MaterialInfos.Ka', 'MaterialInfos.Kd', 'MaterialInfos.Ks' and 'MaterialInfos.Shininess'.
But unfortunately the program now fails, because in the client code above the variables 'indices' and 'offset' are no longer filled correctly.
Here's the log:
blockIndex: 0 //OK
blockSize: 48 //OK
Indices: {4294967295, 4294967295, 4294967295, 4294967295} //NOT OK
Offset: {-858993460, -858993460, -858993460, -858993460} //NOT OK
So only the block index and the block size are correct. To fix the problem, I tried to change the line:
const GLchar *names[4] = {"Ka", "Kd", "Ks", "Shininess" };
by the following one:
const GLchar *names[4] = {"MaterialInfos.Ka", "MaterialInfos.Kd", "MaterialInfos.Ks", "MaterialInfos.Shininess" };
But I still get the same log for the variables 'indices' and 'offset', so my application still fails. I think it's a problem of syntax in the client code (not in the GLSL code, because I get no GLSL errors), but I can't find the solution.
Do you have an idea where my problem comes from?
When using instanced uniform blocks, the OpenGL application uses the block name (in this case MaterialBlock) before the dot, not the instance name as you have in your current code. The instance name is only ever seen by the GLSL shader.
Therefore your names variable should be defined and initialized as such:
const GLchar *names[4] = {"MaterialBlock.Ka", "MaterialBlock.Kd", "MaterialBlock.Ks", "MaterialBlock.Shininess" };
Try declaring your struct type separately from the uniform (which is then declared with that struct type):
struct MaterialData
{
vec3 kAmbient;
vec3 kDiffuse;
vec3 kSpecular;
float shininess;
};
uniform MaterialData material;
(If you follow this example, both your MaterialBlock and Light declarations are erroneous, for slightly different reasons.)
Then you can set uniforms by referring to them as (e.g.) "material.kAmbient" on the CPU side and read them as material.kAmbient on the GPU side.
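A related simplification, in case it helps: if the block is declared with layout(std140), its memory layout is fixed by the specification and happens to match the offsets logged above (0, 16, 32, 44, size 48), so the index/offset queries can be skipped and the data uploaded from a mirrored C++ struct. A sketch, assuming glm types:
// C++ mirror of the std140 layout of MaterialBlock:
// vec3 members are padded to 16 bytes; Shininess packs into the padding after Ks.
struct MaterialBlockData
{
    glm::vec3 Ka; float pad0;      // offset 0
    glm::vec3 Kd; float pad1;      // offset 16
    glm::vec3 Ks; float Shininess; // offsets 32 and 44
};
static_assert(sizeof(MaterialBlockData) == 48, "must match GL_UNIFORM_BLOCK_DATA_SIZE");
MaterialBlockData data = { ambient, 0.f, diffuse, 0.f, specular, shininess };
glBufferData(GL_UNIFORM_BUFFER, sizeof(data), &data, GL_DYNAMIC_DRAW);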

Particle System error

I'm working on a particle system class based on this tutorial: Particles - Anton's OpenGL 4 Wiki - Dr Anton Gerdelan
Code:
// Vertex shader
// shader to update a particle system based on a simple kinematics function
#version 150
in vec3 v; // initial velocity
in float tZero; // start time
uniform mat4 projViewModelMatrix;
uniform vec3 emitterPos_wor; // emitter position in world coordinates
uniform float T; // system time T in seconds
out float opacity;
void main() {
// work out how many seconds into our particle's life-time we are (after its starting time)
float t = T - tZero;
vec3 p;
// gradually make particle more transparent over its life-time
opacity = 1 - (t / 3) - 0.2;
// particle stays put until it has reached its birth second
if (t > 0) {
// gravity
vec3 a = vec3(0,-10,0);
// this is a standard kinematics equation of motion with velocity (from VBO) and acceleration (gravity)
p = emitterPos_wor + v * t + 0.5 * a * t * t;
} else {
p = emitterPos_wor;
}
gl_Position = projViewModelMatrix * vec4(p, 1);
}
// Fragment shader
// shader to render simple particle system's points
#version 150
uniform sampler2D textureMap; // I used a texture for my particles
out vec4 fragColour;
uniform vec4 Color;
in float opacity;
void main() {
vec4 texcol = texture2D(textureMap, gl_PointCoord); // using point uv coordinates which are pre-defined over the point
fragColour = vec4(1-opacity,1-opacity,1-opacity,1-opacity) * texcol * Color; // bright blue!
}
/////// CPU
bool ParticleSystem::init(vec3 Position){
std::vector<vec3> Velocidad;
std::vector<float> Life;
for ( int i = 0; i < MAX_PARTICLES; i++ ) {
Velocidad.push_back(vec3(0,-1,0));
}
for ( int i = 0; i < MAX_PARTICLES; i++ ) {
Life.push_back(0.001f * (float)(i));
}
glGenVertexArrays( 1, &m_VAO );
glBindVertexArray( m_VAO );
glGenBuffers(ARRAY_SIZE_IN_ELEMENTS(m_Buffers), m_Buffers);
glBindBuffer(GL_ARRAY_BUFFER, m_Buffers[VEL_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Velocidad[0]) * Velocidad.size(), &Velocidad[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, m_Buffers[LIF_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Life[0]) * Life.size(), &Life[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
return true;
}
/////////////////// FINAL RENDER
bool ParticleSystem::render(){
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,ModeloOrtho.getTextureFromID(24));
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE);
glEnable(GL_POINT_SPRITE);
glPointSize(150.0f);
shaderParticle.setUniform("projViewModelMatrix",vsml->get(VSMathLib::PROJ_VIEW_MODEL));
shaderParticle.setUniform("emitterPos_wor",ParticleInit);
shaderParticle.setUniform("T",ParticleTime);
shaderParticle.setUniform("Color",ColorParticle);
glUseProgram(shaderParticle.getProgramIndex());
glBindVertexArray(m_VAO);
glDrawElements(GL_POINTS, 0, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,0);
return true;
}
The problem is that nothing happens.
And if I change that line to:
glDrawElements (GL_POINTS, MAX_PARTICLES, GL_UNSIGNED_INT, 0);
it crashes the system.
What am I doing wrong?
vertex shader
/* shader to update a particle system based on a simple kinematics function */
#version 410 core
layout (location = 0) in vec3 v_i; // initial velocity
layout (location = 1) in float start_time;
uniform mat4 V, P;
uniform vec3 emitter_pos_wor; // emitter position in world coordinates
uniform float elapsed_system_time; // system time in seconds
// the fragment shader can use this for it's output colour's alpha component
out float opacity;
void main() {
// work out the elapsed time for _this particle_ after its start time
float t = elapsed_system_time - start_time;
// allow time to loop around so particle emitter keeps going
t = mod (t, 3.0);
opacity = 0.0;
vec3 p = emitter_pos_wor;
// gravity
vec3 a = vec3 (0.0, -1.0, 0.0);
// this is a standard kinematics equation of motion with velocity and
// acceleration (gravity)
p += v_i * t + 0.5 * a * t * t;
// gradually make particle fade to invisible over 3 seconds
opacity = 1.0 - (t / 3.0);
gl_Position = P * V * vec4 (p, 1.0);
gl_PointSize = 15.0; // size in pixels
}
fragment shader
/* shader to render simple particle system points */
#version 410 core
in float opacity;
uniform sampler2D tex; // optional. enable point-sprite coords to use
out vec4 frag_colour;
const vec4 particle_colour = vec4 (0.4, 0.4, 0.8, 0.8);
void main () {
// using point texture coordinates which are pre-defined over the point
vec4 texel = texture (tex, gl_PointCoord);
frag_colour.a = opacity * texel.a;
frag_colour.rgb = particle_colour.rgb * texel.rgb;
}
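On the C++ side, since no element array buffer is created for the particles, the matching draw call would be glDrawArrays rather than glDrawElements, and gl_PointSize in the vertex shader needs program point size enabled; a sketch reusing MAX_PARTICLES, m_VAO and shaderParticle from the code above:
// Draw the particle points without an index buffer.
glEnable(GL_PROGRAM_POINT_SIZE);   // let the vertex shader control gl_PointSize
glUseProgram(shaderParticle.getProgramIndex());
glBindVertexArray(m_VAO);
glDrawArrays(GL_POINTS, 0, MAX_PARTICLES);
glBindVertexArray(0);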