See the EDIT below; the first part of the problem is already solved.
I am trying to replicate the shadow mapping demo from http://learnopengl.com/#!Advanced-Lighting/Shadows/Shadow-Mapping with my own framework, but I do not get any shadows. The first significant problem is that my depth map does not work correctly. I have debugged and double-checked every line without success. Maybe another pair of eyes will have more success.
See the screenshot (top left, 5th row; the image is completely white):
I will skip the second render pass for now, since the first one does not seem to be working. By the way, the objects are centered at (0, 0, 0). The following code is used for the first render pass:
/// 1. render target is the depth map
glViewport(0, 0, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32);
m_frameBufferObject.bind(); // set the depth map as render target
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/// place the camera where the light is positioned and render the scene
math::Matrix4D l_lightViewMatrix = math::Matrix4D::lookAt(m_light_p->getPosition(), math::Vector3D(0, 0, 0), math::Vector3D(0, 1, 0));
const math::Matrix4D& l_orthographicLightMatrix_r = m_light_p->getShadowInformation().getProjectionMatrix();
math::Matrix4D lightSpaceMatrix = l_orthographicLightMatrix_r * l_lightViewMatrix;
m_depthMapShader_p->bind();
m_depthMapShader_p->setUniformMat4("lightSpaceMatrix", lightSpaceMatrix);
renderNodes();
m_depthMapShader_p->printShaderInfoLog();
m_depthMapShader_p->unbind();
m_frameBufferObject.unbind();
I have tested that my view matrix and projection matrix generation delivers exactly the same results as GLM (the OpenGL Mathematics library). My orthographic matrix is defined by:
left = -10.0f
right = 10.0f
bottom = -10.0f
top = 10.0f
near = -1.0f
far = 7.5f
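For reference, plugging these values into the standard OpenGL orthographic projection (the same formula glm::ortho uses) gives:

$$P_{\text{ortho}} = \begin{pmatrix} \frac{2}{r-l} & 0 & 0 & -\frac{r+l}{r-l} \\ 0 & \frac{2}{t-b} & 0 & -\frac{t+b}{t-b} \\ 0 & 0 & \frac{-2}{f-n} & -\frac{f+n}{f-n} \\ 0 & 0 & 0 & 1 \end{pmatrix} = \begin{pmatrix} 0.1 & 0 & 0 & 0 \\ 0 & 0.1 & 0 & 0 \\ 0 & 0 & -0.235 & -0.765 \\ 0 & 0 & 0 & 1 \end{pmatrix}$$

so x and y are scaled by 0.1 and the depth range is mapped linearly into NDC.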
The initialization of the framebuffer object and the texture is as follows:
// - Create depth texture
glGenTextures(1, &m_shadowTextureBuffer_u32);
glBindTexture(GL_TEXTURE_2D, m_shadowTextureBuffer_u32);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
m_frameBufferObject.bind();
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_shadowTextureBuffer_u32, 0);
glDrawBuffer(GL_NONE); // depth-only framebuffer: no color buffer is drawn
glReadBuffer(GL_NONE); // ...or read
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
fprintf(stderr, "Error on building shadow framebuffer\n");
exit(EXIT_FAILURE);
}
m_frameBufferObject.unbind();
The fragment and vertex shaders look like this:
#version 430
// Fragment shader for rendering the depth values to a texture.
out vec4 fragColor; // gl_* names are reserved in core profile; use a custom output
void main()
{
fragColor = vec4(gl_FragCoord.z);
}
#version 430
// Vertex shader for rendering the depth values to a texture.
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix;
uniform mat4 ml_matrix;
uniform mat4 lightSpaceMatrix;
void main()
{
gl_Position = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);
}
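As an aside, since no color attachment is bound in this pass (glDrawBuffer(GL_NONE)), the fragment shader could even be left empty; a minimal sketch:
#version 430
// Depth-only pass: the depth value is written by the fixed-function
// pipeline automatically, so no fragment shader output is required.
void main()
{
}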
EDIT:
After some sleep, I found a small error in my renderer, and the shader now draws a "nice" depth map.
However, it looks like the texture mapping (depth comparison) is not happening in the correct coordinate system, because the second rendering step is still not correct:
The vertex and fragment shaders for the second render pass look like this:
#version 430
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix = mat4(1.0);
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
uniform mat4 lightSpaceMatrix = mat4(1.0);
out VS_OUT
{
vec4 color;
vec2 texture_coordinates;
vec3 normal;
vec3 tangent;
vec3 binormal;
vec3 worldPos;
vec4 shadowProj;
flat int materialIdOut;
} vs_out;
void main()
{
vs_out.color = color;
vs_out.texture_coordinates = uv;
mat3 normalMatrix = transpose ( inverse ( mat3 ( ml_matrix )));
vs_out.normal = normalize ( normalMatrix * normalize ( normal ));
vs_out.tangent = normalize ( normalMatrix * normalize ( tangent ));
vs_out.binormal = normalize ( normalMatrix * normalize ( cross (normal , tangent )));
vs_out.worldPos = ( ml_matrix * vec4 ( position, 1)).xyz;
vs_out.materialIdOut = materialId;
vs_out.shadowProj = ( lightSpaceMatrix * ml_matrix * vec4 (position, 1.0) );
gl_Position = ( pr_matrix * vw_matrix * ml_matrix ) * vec4 (position, 1.0);
}
and
#version 430
#define MAX_NUM_TEXTURES 5
#define MAX_NUM_MATERIALS 12
struct SMaterial
{
vec3 m_ambient_v3;
vec3 m_diffuse_v3;
vec3 m_specular_v3;
float m_shininess_f32;
int m_textureIds[MAX_NUM_TEXTURES];
};
in VS_OUT
{
vec4 color;
vec2 texture_coordinates;
vec3 normal;
vec3 tangent;
vec3 binormal;
vec3 worldPos;
vec4 shadowProj;
flat int materialIdOut;
} fs_in;
uniform vec3 cameraPos;
uniform mat4 ml_matrix;
uniform mat4 vw_matrix;
uniform sampler2D texSlots[32];
uniform SMaterial material[MAX_NUM_MATERIALS];
uniform SLight light; // NOTE: the SLight struct definition is omitted in this excerpt
out vec4 fragColor; // gl_* names are reserved in core profile; use a custom output
float shadowCalculation(vec4 fragPosLightSpace)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// Transform to [0,1] range
projCoords = projCoords * vec3(0.5) + vec3(0.5);
// Get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(texSlots[31], projCoords.xy).r;
// Get depth of current fragment from light's perspective
float currentDepth = projCoords.z;
// Check whether current frag pos is in shadow
float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
return shadow;
}
void main()
{
if ( (fs_in.materialIdOut >= 0) && (fs_in.materialIdOut < MAX_NUM_MATERIALS) )
{
int ambientTextureId = material[fs_in.materialIdOut].m_textureIds[0];
int diffuseTextureId = material[fs_in.materialIdOut].m_textureIds[1];
int specularTextureId = material[fs_in.materialIdOut].m_textureIds[2];
int alphaTextureId = material[fs_in.materialIdOut].m_textureIds[3];
int bumpTextureId = material[fs_in.materialIdOut].m_textureIds[4];
vec3 diffTexColor = vec3(0.6,0.6,0.6);
if ((diffuseTextureId >= 0) && (32 > diffuseTextureId))
{
diffTexColor = texture (texSlots[diffuseTextureId], fs_in.texture_coordinates).rgb;
}
// Calculate shadow
float shadow = 1.0 - shadowCalculation(fs_in.shadowProj);
fragColor = vec4(diffTexColor, 1.0) * vec4(shadow, shadow, shadow, 1.0);
}
else
{
fragColor = vec4(fs_in.normal, 1.0);
}
}
In my experience a directly visualized depth map is pretty much always almost completely white: anything more than a small distance away from the light already stores a depth value that displays as (nearly) white. If your whole scene is beyond that distance, the whole map is white.
To render the map like they show in the tutorial, you either need your scene to be really small or you need to remap the depth values before displaying them. I always like to check my maps by dividing their depth values by the camera's zFar distance. Try to find the value at which you can see the best contrast.
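For illustration, a minimal visualization shader along those lines could look like this (rendered on a full-screen quad; uDepthMap, uNear and uFar are assumed names, not something from the code above):
#version 330 core
in vec2 uv;
out vec4 fragColor;
uniform sampler2D uDepthMap; // the depth texture from the first pass
uniform float uNear;         // near plane used when the map was rendered
uniform float uFar;          // far plane used when the map was rendered
// For a perspective projection the stored depth is non-linear,
// so map it back to a linear eye-space distance first.
float linearizeDepth(float d)
{
    float z = d * 2.0 - 1.0; // back to NDC [-1, 1]
    return (2.0 * uNear * uFar) / (uFar + uNear - z * (uFar - uNear));
}
void main()
{
    float d = texture(uDepthMap, uv).r;
    // Dividing by the far distance maps [near, far] roughly into [0, 1],
    // which makes the contrast visible. With an orthographic light
    // projection the stored depth is already linear, so you can display
    // the raw value instead.
    fragColor = vec4(vec3(linearizeDepth(d) / uFar), 1.0);
}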
Here is my code for generating the texture (minimal reproducible example):
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
if(readAlpha)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
else
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
Here is how Tex Coords are generated:
for (int y = 0; y < resolution; y++)
{
for (int x = 0; x < resolution; x++)
{
int i = x + y * resolution;
glm::vec2 percent = glm::vec2(x, y) / ((float)resolution - 1);
glm::vec3 pointOnPlane = (percent.x - .5f) * 2 * right + (percent.y - .5f) * 2 * front;
pointOnPlane *= scale;
vertices[i] = Vert();
vertices[i].position = glm::vec3(0.0f);
vertices[i].position.x = (float)pointOnPlane.x;
vertices[i].position.y = (float)pointOnPlane.y;
vertices[i].position.z = (float)pointOnPlane.z;
vertices[i].texCoord = glm::vec2(percent.x, percent.y)*textureScale;
vertices[i].normal = glm::vec3(0.0f);
if (x != resolution - 1 && y != resolution - 1)
{
inds[triIndex] = i;
inds[triIndex + 1] = i + resolution + 1;
inds[triIndex + 2] = i + resolution;
inds[triIndex + 3] = i;
inds[triIndex + 4] = i + 1;
inds[triIndex + 5] = i + resolution + 1;
triIndex += 6;
}
}
}
Here is the shader:
VERT:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNorm;
layout (location = 2) in vec2 aTexCoord;
uniform mat4 _PV;
uniform mat4 _Model;
out DATA
{
vec3 FragPos;
vec3 Normal;
vec2 TexCoord;
mat4 PV;
} data_out;
void main()
{
gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);
data_out.FragPos = aPos;
data_out.Normal = aNorm;
data_out.TexCoord = aTexCoord;
data_out.PV = _PV;
}
GEOM:
#version 330 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 FragPos;
out vec3 Normal;
out vec2 TexCoord;
in DATA
{
vec3 FragPos;
vec3 Normal;
vec2 TexCoord;
mat4 PV;
} data_in[];
void main()
{
gl_Position = data_in[0].PV * gl_in[0].gl_Position;
Normal = data_in[0].Normal;
TexCoord = data_in[0].TexCoord;
FragPos = data_in[0].FragPos;
EmitVertex();
gl_Position = data_in[0].PV * gl_in[1].gl_Position;
Normal = data_in[1].Normal;
TexCoord = data_in[0].TexCoord;
FragPos = data_in[1].FragPos;
EmitVertex();
gl_Position = data_in[0].PV * gl_in[2].gl_Position;
Normal = data_in[2].Normal;
TexCoord = data_in[0].TexCoord;
FragPos = data_in[2].FragPos;
EmitVertex();
EndPrimitive();
}
FRAG:
#version 330 core
out vec4 FragColor;
uniform vec3 _LightPosition;
uniform vec3 _LightColor;
uniform sampler2D _Diffuse;
//unifrom float _UseTexutres;
in vec3 FragPos;
in vec3 Normal;
in vec2 TexCoord;
void main()
{
//vec3 objectColor = vec3(0.34f, 0.49f, 0.27f);
vec3 objectColor = vec3(1, 1, 1);
objectColor = texture(_Diffuse, TexCoord).xyz;
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(_LightPosition - FragPos);
float diff = max(dot(norm, lightDir), 0.0f);
vec3 diffuse = diff * _LightColor;
vec3 result = (vec3(0.2, 0.2, 0.2) + diffuse) * objectColor;
FragColor = vec4(result, 1.0);
}
I am getting a pixelated texture even though I am using an 8K texture.
If you want to see the entire source: https://github.com/Jaysmito101/TerraGen3D
Here is the result:
Your geometry shader does not make sense:
First of all, you use the same data_in[0].TexCoord for all 3 vertices of the output triangle, which means that all fragments generated for this triangle sample the exact same location of the texture and therefore get the exact same output color; this is where the "pixelated" structure of the image comes from. Like you already do for Normal and FragPos, you should forward each vertex's own data, as sketched below. This alone should solve your issue.
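A corrected version, written against your existing interface block (untested):
#version 330 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 FragPos;
out vec3 Normal;
out vec2 TexCoord;
in DATA
{
    vec3 FragPos;
    vec3 Normal;
    vec2 TexCoord;
    mat4 PV;
} data_in[];
void main()
{
    // Forward each vertex's own attributes instead of always using index 0.
    for (int i = 0; i < 3; i++)
    {
        gl_Position = data_in[i].PV * gl_in[i].gl_Position;
        Normal   = data_in[i].Normal;
        TexCoord = data_in[i].TexCoord; // was data_in[0].TexCoord for all three
        FragPos  = data_in[i].FragPos;
        EmitVertex();
    }
    EndPrimitive();
}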
However, there are more issues with your approach. You forward mat4 PV as per-vertex data from the VS to the GS, but what you forward is a uniform, so this is a waste of resources. Every shader stage has access to all of the uniforms, so there is no need to forward this data per vertex.
But the real elephant in the room is what this geometry shader is supposed to be doing at all. The actual transformation with the uniform matrices can, and absolutely should, be carried out directly in the vertex shader, and the rest of your geometry shader is basically an attempt at a pass-through implementation (just a faulty one). So what do you need this shader for? You can do the transformation in the VS and completely remove the geometry shader. Performance-wise, this will also be a win, as geometry shaders are rather inefficient and should be avoided if not absolutely needed.
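For illustration, a vertex shader like the following makes the geometry shader unnecessary (a sketch; like your current GS it only applies _PV, so bring _Model back in if your pipeline needs it):
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNorm;
layout (location = 2) in vec2 aTexCoord;
uniform mat4 _PV;
out vec3 FragPos;
out vec3 Normal;
out vec2 TexCoord;
void main()
{
    FragPos  = aPos;
    Normal   = aNorm;
    TexCoord = aTexCoord;
    gl_Position = _PV * vec4(aPos, 1.0); // the transform the GS used to apply
}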
I'm working on my own game engine and learning OpenGL in the process, and I have been stuck on shadow mapping for longer than I would like to admit.
I have been following this guide: https://learnopengl.com/Advanced-Lighting/Shadows/Shadow-Mapping but I don't get any shadow drawn on my plane.
This is how the code looks in my project:
Framebuffer for creating the depth texture:
glCreateFramebuffers(1, &m_framebufferID);
glBindFramebuffer(GL_FRAMEBUFFER, m_framebufferID);
ui32 depthMapID;
glGenTextures(1, &depthMapID);
glBindTexture(GL_TEXTURE_2D, depthMapID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindFramebuffer(GL_FRAMEBUFFER, m_framebufferID);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMapID, 0);
ASSERT(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE, "Framebuffer is incomplete!");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Depth texture shader (ignore the unused vertex layout):
#type vertex
#version 330 core
layout (location = 0) in vec3 vertex_position;
layout (location = 1) in vec3 vertex_color;
layout (location = 2) in vec3 vertex_normal;
layout (location = 3) in vec3 vertex_tangent;
layout (location = 4) in vec2 vertex_texcoord;
uniform mat4 u_lightViewProjectionMatrix;
uniform mat4 u_worldTransformMatrix;
void main()
{
gl_Position = u_lightViewProjectionMatrix * (u_worldTransformMatrix * vec4(vertex_position, 1.0));
}
#type fragment
#version 330 core
// Output data
//layout(location = 0) out float fragmentdepth;
void main(){
// Not really needed, OpenGL does it anyway
//fragmentdepth = gl_FragCoord.z;
}
This is how I create the shadow map:
if (s_castingShadowMeshes.Size() == 0)
return;
float shadowDistance = 100.0f;
glm::mat4 lightView = glm::lookAt(LightManager::GetDirectionalLight().direction * -shadowDistance, glm::vec3(0.0f), MathUtils::Vector3UnitY);
glm::mat4 lightProjection = glm::ortho(-shadowDistance, shadowDistance, -shadowDistance, shadowDistance, 0.0f, shadowDistance * 2.0f);
s_lightViewProjection = lightProjection * lightView;
glBindFramebuffer(GL_FRAMEBUFFER, m_framebufferID);
glViewport(0, 0, 1024, 1024); //Shadow height and width is 1024
glDepthMask(GL_TRUE);
glClearDepth(1.0f);
glClearColor(color.r, color.g, color.b, color.a);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (ui32 i = 0; i < s_castingShadowMeshes.Size(); ++i)
{
s_depthMapShader->Bind();
s_depthMapShader->SetUniform(UNIFORM_LIGHT_SPACE_TRANSFORM, ShaderDataType::Mat4, &(s_lightViewProjection));
s_depthMapShader->SetUniform(UNIFORM_MODEL_SPACE_TRANSFORM, ShaderDataType::Mat4, &(s_castingShadowMeshes[i]->GetWorldTransform()[0][0]));
s_rendererPlatformInterface->DrawVertexArray(s_castingShadowMeshes[i]->GetVertexArray());
s_renderStats.drawCalls++;
s_depthMapShader->Unbind();
}
s_shadowFramebuffer->Unbind();
Up to this point, RenderDoc shows me that a depth texture is actually being generated:
Now, this is what rendering the plane looks like:
shader->Bind();
ui32 useShadowMapTex = 0;
if (receiveShadows)
{
useShadowMapTex = 1;
ui32 shadowMapSlot = (ui32)Material::TextureSlots::ShadowMap;
shader->SetUniform(UNIFORM_SHADOWMAP_TEX, ShaderDataType::Int, &shadowMapSlot);
s_shadowMapTex->Bind(shadowMapSlot);
}
shader->SetUniform(UNIFORM_USE_SHADOWMAP_TEX, ShaderDataType::Int, &useShadowMapTex);
shader->SetUniform(UNIFORM_MODEL_SPACE_TRANSFORM, ShaderDataType::Mat4, &(transform[0][0]));
shader->SetUniform(UNIFORM_VIEW_PROJECTION, ShaderDataType::Mat4, &(s_sceneData.viewProjectionMatrix));
shader->SetUniform(UNIFORM_CAMERA_POS, ShaderDataType::Float3, &(s_sceneData.cameraPosition));
shader->SetUniform(UNIFORM_DIR_LIGHT_DIRECTION, ShaderDataType::Float3, &(LightManager::GetDirectionalLight().direction));
shader->SetUniform(UNIFORM_DIR_LIGHT_AMBIENT,ShaderDataType::Float3, &(LightManager::GetDirectionalLight().ambientColor));
shader->SetUniform(UNIFORM_DIR_LIGHT_DIFUSSE, ShaderDataType::Float3, &(LightManager::GetDirectionalLight().diffuseColor));
shader->SetUniform(UNIFORM_DIR_LIGHT_SPECULAR, ShaderDataType::Float3, &(LightManager::GetDirectionalLight().specularColor));
s_rendererPlatformInterface->DrawVertexArray(vertexArray);
s_renderStats.drawCalls++;
s_renderStats.vertices += vertexArray->GetIndexBuffer()->GetCount();
The shader for the plane:
#type vertex
#version 330 core
layout (location = 0) in vec3 vertex_position;
layout (location = 1) in vec3 vertex_color;
layout (location = 2) in vec3 vertex_normal;
layout (location = 3) in vec3 vertex_tangent;
layout (location = 4) in vec2 vertex_texcoord;
out VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec4 FragPosLightSpace;
} vs_out;
uniform mat4 u_viewProjectionMatrix;
uniform mat4 u_worldTransformMatrix;
uniform mat4 u_lightViewProjectionMatrix;
void main()
{
vs_out.FragPos = vec3(u_worldTransformMatrix * vec4(vertex_position, 1.0));
vs_out.Normal = transpose(inverse(mat3(u_worldTransformMatrix))) * vertex_normal;
vs_out.TexCoords = vertex_texcoord;
vs_out.FragPosLightSpace = u_lightViewProjectionMatrix * vec4(vs_out.FragPos, 1.0);
gl_Position = u_viewProjectionMatrix* vec4(vs_out.FragPos, 1.0);
}
#type fragment
#version 330 core
out vec4 FragColor;
in VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec4 FragPosLightSpace;
} fs_in;
struct DirectionalLight
{
vec3 direction;
vec3 ambientColor;
vec3 diffuseColor;
vec3 specularColor;
};
uniform DirectionalLight u_directionalLight;
uniform sampler2D u_shadowMapTex;
uniform vec3 u_cameraPos;
float ShadowCalculation(vec4 fragPosLightSpace)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(u_shadowMapTex, projCoords.xy).r;
// get depth of current fragment from light's perspective
float currentDepth = projCoords.z;
// check whether current frag pos is in shadow
float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
return shadow;
}
void main()
{
vec3 normal = normalize(fs_in.Normal);
vec3 lightColor = vec3(1.0);
// ambient
vec3 ambient = u_directionalLight.ambientColor;
// diffuse
vec3 lightDir = normalize(u_directionalLight.direction);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(u_cameraPos - fs_in.FragPos);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = ShadowCalculation(fs_in.FragPosLightSpace);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular));
FragColor = vec4(lighting, 1.0);
}
This is what I get:
According to RenderDoc, the shadow map texture is actually being passed to the shader, but no shadow ever gets drawn on the red plane:
I hope somebody can help me. Many thanks in advance.
P.S.: this is my first post ever on Stack Overflow; excuse me if I am violating any rule.
I am currently trying to make a little game in OpenGL as an attempt to learn how to use the API. I've come to a point where I can move a camera around a simple scene, and I can render models and shade them with a simple phong model shader.
I'm currently working on texturing the models in the scene, so I got a copy of Maya and made (with quite some struggle) a square with a texture, with the UV mapping done within Maya.
When I render the scene, the texture is applied, but far from correctly. I read the models from .obj files with a parser I wrote myself, and the textures are read using a function I found online a while back.
I'm not sure how to describe the problem in sufficient detail, nor what to look for in the code, but here are some code fragments that I suspect contain the problem.
Reading the texture
GLuint loadTexture(Image* image){
GLuint textureId;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB,
image->width, image->height,
0,
GL_RGB,
GL_UNSIGNED_BYTE,
image->pixels);
return textureId;
}
Setting the texture prior to rendering the mesh
// set texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, this->body_texture);
current_shader->setUniformint(0, "Difuse_texture");
Vertex shader
#version 410
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec3 VertexNormal;
layout(location = 1) in vec2 TextureCoord;
out vec3 Position;
out vec3 Normal;
out vec2 TexCoord;
uniform mat4 ModelMatrix;
uniform mat4 VeiwMatrix;
uniform mat4 ProjectionMatrix;
uniform mat3 NormalMatrix;
void main(){
mat4 ModelVeiwMatrix = VeiwMatrix * ModelMatrix;
mat4 MVP = ProjectionMatrix * ModelVeiwMatrix;
TexCoord = TextureCoord;
Normal = normalize( NormalMatrix * VertexNormal );
Position = vec3(ModelVeiwMatrix * vec4(VertexPosition, 1.0));
gl_Position = MVP * vec4(VertexPosition, 1.0);
}
Fragment shader
#version 410
in vec3 Position;
in vec3 Normal;
in vec2 TexCoord;
uniform vec4 LightPosition;
uniform vec3 LightIntensity;
uniform vec3 Kd;
uniform vec3 Ka;
uniform vec3 Ks;
uniform float Shininess;
uniform sampler2D Difuse_texture;
layout(location = 0) out vec4 FragColor;
vec4 ads(){
vec3 n = normalize( Normal );
vec3 s = normalize( vec3(LightPosition) - Position );
vec3 v = normalize( vec3(-Position) );
vec3 r = reflect( -s, n );
vec3 specular_light = Ks * pow(max(dot(r, v), 0.0), Shininess);
vec3 ad_light = Ka + Kd * max(dot(s, n), 0.0);
vec4 TexColor = texture2D(Difuse_texture, TexCoord);
return TexColor; // (vec4(LightIntensity, 1.0) * (vec4(ad_light, 1.0) * TexColor + vec4(specular_light, 1.0)));
}
void main() {
FragColor = ads();
}
I know some things are written strangely, but at this point I'm starting to just try anything to get it working.
Does anyone have a suggestion on how to solve this strange UV mapping?
EDIT:
OBJ LOADING
I have made the obj loader print all vertex attributes and compared these with the indexing in the .obj file. It looks like the vertices, normals, and UVs are listed in the correct order.
Screenshot
The scene looks like this using just a simple red-to-green gradient as the texture image.
(By my understanding, the square should show the gradient from the texture, not just a single color?)
Alignment sounds like a possible flaw; how can I correct this?
Screenshot: http://imageshack.com/a/img674/9927/y0bJ51.png
SOLUTION
I made a very simple and easy-to-overlook mistake. At the top of the vertex shader I wrote
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec3 VertexNormal;
layout(location = 1) in vec2 TextureCoord;
So when I sent the normal data to location 1, I overwrote the texture coordinates with the normal data, and the UV coordinates never reached the fragment shader.
Changing it to the following resolved the problem without any further change:
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec3 VertexNormal;
layout(location = 2) in vec2 TextureCoord;
I've successfully rendered my scene from my light's point of view onto a depth cubemap, but I don't quite understand how I can actually project it onto my scene.
Here's a short clip of the current situation: http://youtu.be/54WXDWxqmXw
I found an implementation example on how to do it over here:
http://www.opengl.org/discussion_boards/showthread.php/174093-GLSL-cube-shadows-projecting?p=1219162&viewfull=1#post1219162
It seemed fairly easy to understand, so I figured this would be a great way to start off with, but I'm having some difficulties with the matrices (As shown in the video above).
My Vertex Shader:
#version 330 core
layout(std140) uniform ViewProjection
{
mat4 V;
mat4 P;
};
layout(location = 0) in vec3 vertexPosition;
layout(location = 1) in vec2 vertexUV;
out vec2 UV;
out vec4 posCs;
uniform mat4 M;
uniform mat4 lightView;
void main()
{
mat4 MVP = P *V *M;
gl_Position = MVP *vec4(vertexPosition,1);
UV = vertexUV;
posCs = V *M *vec4(vertexPosition,1);
}
Fragment Shader:
#version 330 core
in vec2 UV;
in vec4 posCs;
out vec4 color;
// Diffuse texture
uniform sampler2D renderTexture;
uniform samplerCubeShadow shadowCubeMap;
uniform mat4 lightView;
uniform mat4 lightProjection;
uniform mat4 camViewInv;
void main()
{
color = texture2D(renderTexture,UV).rgba;
mat4 lView = mat4(1); // The light is currently at the world origin, so we'll skip the transformation for now (The less potential error sources the better)
vec4 posLs = lView *camViewInv *posCs;
vec4 posAbs = abs(posLs);
float fs_z = -max(posAbs.x,max(posAbs.y,posAbs.z));
vec4 clip = lightProjection *vec4(0.0,0.0,fs_z,1.0);
float depth = (clip.z /clip.w) *0.5 +0.5;
vec4 r = shadowCube(shadowCubeMap,vec4(posLs.xyz,depth));
color *= r;
}
(I've only posted the relevant parts)
lightProjection is the same projection matrix that I've used to render the scene into the cubemap.
I'm not entirely sure about 'camViewInv', from the example I've linked above I came up with this:
glm::mat4 camViewInv(
camView[0][0],camView[1][0],camView[2][0],0.0f,
camView[0][1],camView[1][1],camView[2][1],0.0f,
camView[0][2],camView[1][2],camView[2][2],0.0f,
camPos[0],camPos[1],camPos[2],1.0f
);
camView being the camera's view matrix, and camPos the camera's worldspace position.
Everything else should be self-explanatory I believe.
I can't see anything wrong with the shaders, but I'm fairly certain the scene is rendered correctly to the cubemap (As shown in the video above). Maybe someone more versed than me can spot the issue.
// Update:
Some additional information about the creation / usage of the shadow cubemap:
Creating the cubemap texture:
unsigned int frameBuffer;
glGenFramebuffers(1,&frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER,frameBuffer);
unsigned int texture;
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_CUBE_MAP,texture);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_FUNC,GL_LEQUAL);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_R,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_S,GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_T,GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
for(int i=0;i<6;i++)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,0,GL_DEPTH_COMPONENT,size,size,0,GL_DEPTH_COMPONENT,GL_FLOAT,0);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,texture,0);
glDrawBuffer(GL_NONE);
}
The light's matrices:
glm::perspective<float>(90.f,1.f,2.f,m_distance); // Projection Matrix
// View Matrices
glm::vec3 pos = GetPosition(); // Light worldspace position
glm::lookAt(pos,pos +glm::vec3(1,0,0),glm::vec3(0,1,0));
glm::lookAt(pos,pos +glm::vec3(-1,0,0),glm::vec3(0,1,0));
glm::lookAt(pos,pos +glm::vec3(0,1,0),glm::vec3(0,0,-1));
glm::lookAt(pos,pos +glm::vec3(0,-1,0),glm::vec3(0,0,1));
glm::lookAt(pos,pos +glm::vec3(0,0,1),glm::vec3(0,1,0));
glm::lookAt(pos,pos +glm::vec3(0,0,-1),glm::vec3(0,1,0));
Vertex Shader:
#version 330 core
layout(location = 0) in vec4 vertexPosition;
uniform mat4 shadowMVP;
void main()
{
gl_Position = shadowMVP *vertexPosition;
}
Fragment Shader:
#version 330 core
layout(location = 0) out float fragmentDepth;
void main()
{
fragmentDepth = gl_FragCoord.z; // must match the output declared above
}
I would suggest doing this in world space; light positions are typically defined in world space, and it will reduce your workload if you keep it that way. I removed a bunch of uniforms that you do not need if you do this in world space.
Compute the lighting direction and depth in the vertex shader:
#version 330 core
layout(std140) uniform ViewProjection
{
mat4 V;
mat4 P;
};
layout(location = 0) in vec4 vertexPosition; // W is automatically assigned 1, if missing.
layout(location = 1) in vec2 vertexUV;
out vec2 UV;
out vec4 lightDirDepth; // Direction = xyz, Depth = w
uniform mat4 M;
uniform vec3 lightPos; // World Space Light Pos
uniform vec2 shadowZRange; // Near / Far clip plane distances for shadow's camera
float vecToDepth (vec3 Vec)
{
vec3 AbsVec = abs (Vec);
float LocalZcomp = max (AbsVec.x, max (AbsVec.y, AbsVec.z));
float n = shadowZRange [0]; // Near plane when the shadow map was built
float f = shadowZRange [1]; // Far plane when the shadow map was built
// (no const here: GLSL requires const variables to have constant initializers)
float NormZComp = (f+n) / (f-n) - (2.0*f*n)/(f-n)/LocalZcomp;
return (NormZComp + 1.0) * 0.5;
}
void main()
{
mat4 MVP = P *V *M;
gl_Position = MVP *vertexPosition;
UV = vertexUV;
vec3 lightDir = lightPos - (M *vertexPosition).xyz;
float lightDepth = vecToDepth (lightDir);
lightDirDepth = vec4 (lightDir, lightDepth);
}
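For reference, vecToDepth reproduces what the fixed-function pipeline computed when the cubemap was rendered: the dominant-axis distance is pushed through the shadow camera's perspective projection and remapped from NDC to the [0, 1] window range,

$$z_{\text{ndc}} = \frac{f+n}{f-n} - \frac{2fn}{(f-n)\,z_{\text{local}}}, \qquad \text{depth} = \frac{z_{\text{ndc}} + 1}{2}$$

where $z_{\text{local}} = \max(|x|, |y|, |z|)$ of the light-space direction and n, f are the shadow camera's clip planes.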
Modified Fragment Shader (sample cubemap using light dir, and test against depth):
#version 330 core
in vec2 UV;
in vec4 lightDirDepth; // Direction = xyz, Depth = w
out vec4 color;
// Diffuse texture
uniform sampler2D renderTexture;
uniform samplerCubeShadow shadowCubeMap;
void main()
{
const float bias = 0.0001; // Prevent shadow acne
color = texture (renderTexture,UV).rgba;
float r = texture (shadowCubeMap, vec4 (lightDirDepth.xyz, lightDirDepth.w + bias));
color *= r;
}
I added two new uniforms:
lightPos -- World space position of your light
shadowZRange -- The values of your near and far plane when you built your shadow cube map, packed into a vec2
Let me know if you need me to explain anything or if this does not produce meaningful results.
I have a vertex shader
#version 330 core
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec2 VertexUV;
layout(location = 2) in vec3 VertexNormal;
out VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform mat4 proj_matrix;
uniform mat4 model_matrix;
void main(void)
{
gl_Normal = VertexNormal;
gl_Position = proj_matrix * vec4(VertexPosition, 1.0);
vertex_out.UV = VertexUV; //VertexPosition.xy;
vertex_out.vs_worldpos = gl_Position.xyz;
vertex_out.vs_normal = mat3(model_matrix) * gl_Normal;
}
and fragment shader
#version 330 core
in GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in;
// Values that stay constant for the whole mesh.
uniform sampler2D sampler0;
uniform sampler2D sampler1;
uniform sampler2D sampler2;
uniform sampler2D sampler3;
//uniform sampler2D alphamap0;
uniform sampler2D alphamap1;
uniform sampler2D alphamap2;
uniform sampler2D alphamap3;
uniform int tex_count;
uniform vec4 color_ambient = vec4(0.75, 0.75, 0.75, 1.0);
uniform vec4 color_diffuse = vec4(0.25, 0.25, 0.25, 1.0);
//uniform vec4 color_specular = vec4(1.0, 1.0, 1.0, 1.0);
uniform vec4 color_specular = vec4(0.1, 0.1, 0.1, 0.25);
uniform float shininess = 5.0f;
uniform vec3 light_position = vec3(12.0f, 32.0f, 560.0f);
void main(){
vec3 light_direction = normalize(light_position - vertex_in.vs_worldpos);
vec3 normal = normalize(vertex_in.vs_normal);
vec3 half_vector = normalize(light_direction + normalize(vertex_in.vs_worldpos));
float diffuse = max(0.0, dot(normal, light_direction));
float specular = pow(max(0.0, dot(vertex_in.vs_normal, half_vector)), shininess);
gl_FragColor = texture( sampler0, vertex_in.UV ) * color_ambient + diffuse * color_diffuse + specular * color_specular;
// http://www.opengl.org/wiki/Texture_Combiners
// GL_MODULATE = *
// GL_INTERPOLATE Blend tex0 and tex1 based on a blending factor = mix(texel0, texel1, BlendFactor)
// GL_INTERPOLATE Blend tex0 and tex1 based on alpha of tex0 = mix(texel0, texel1, texel0.a)
// GL_ADD = clamp(texel0 + texel1, 0.0, 1.0)
if (tex_count > 0){
vec4 temp = texture( sampler1, vertex_in.UV );
vec4 amap = texture( alphamap1, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 1){
vec4 temp = texture( sampler2, vertex_in.UV );
vec4 amap = texture( alphamap2, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 2){
vec4 temp = texture( sampler3, vertex_in.UV );
vec4 amap = texture( alphamap3, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
}
It takes an indexed GL_TRIANGLE_STRIP as input:
glBindBuffer(GL_ARRAY_BUFFER, tMt.vertex_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_POSITION, 3, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(0);
{ chunk tex position }
glBindBuffer(GL_ARRAY_BUFFER, chunkTexPositionBO);
glVertexAttribPointer(VERTEX_LAYOUT_TEX_UV, 2, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, tMt.normal_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_NORMAL, 3, GL_FLOAT, true, 0, pointer(0));
glEnableVertexAttribArray(2);
{ index buffer }
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, chunkIndexBO);
for i := 0 to tMt.texCount - 1 do begin
bt := tMt.texture_buf_id[cx, cy][i];
if bt = nil then
break;
glUniform1i(proj_tex_count_loc, i);
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, bt.id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if i > 0 then begin
// this time, use blending:
glActiveTexture(GL_TEXTURE4 + 1);
glBindTexture(GL_TEXTURE_2D, tMt.alphamaps[cx, cy][i - 1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
end;
end;
glDrawElements(GL_TRIANGLE_STRIP, length(chunkIndexArr), GL_UNSIGNED_SHORT, nil);
The code works as intended, except that I'm not sure my normals are arranged properly: they were stored as bytes (converted to GLfloat as b / 0xFF), the xyz coordinates were reordered, and some probably need negation.
Can someone show me a geometry shader that displays the normals as lines, as shown at http://blogs.agi.com/insight3d/index.php/2008/10/23/geometry-shader-for-debugging-normals/? (That shader does not work at all for me; it seems the out/in data gets lost between the vertex and fragment shaders.)
P.S. I'm not sure I did everything properly (I'm just starting with OpenGL and GLSL), so any suggestions are also appreciated.
Edit:
I made a simple geometry shader based on examples:
// This is a very simple pass-through geometry shader
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 145) out;
in VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in[];
out GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform float uNormalsLength = 0.5;
void main()
{
int i;
// Loop over the input vertices
for (i = 0; i < gl_in.length(); i++)
{
vertex_out.UV = vertex_in[i].UV;
vertex_out.vs_worldpos = vertex_in[i].vs_worldpos;
vertex_out.vs_normal = vertex_in[i].vs_normal;
// Copy the input position to the output
gl_Position = gl_PositionIn[i];
EmitVertex();
gl_Position = gl_ModelViewProjectionMatrix * (gl_PositionIn[i] + (vec4(vertex_in[i].vs_normal, 0) * uNormalsLength));
gl_FrontColor = vec4(0.0, 0.0, 0.0, 1.0); //gl_FrontColorIn[i];
EmitVertex();
}
// End the primitive. This is not strictly necessary
// and is only here for illustrative purposes.
EndPrimitive();
}
but I don't know where it gets gl_ModelViewProjectionMatrix from (it seems to be deprecated), and the result looks awful; it seems everything, including the normals, is stripped. The picture is in glPolygonMode(GL_FRONT, GL_LINE) mode, and the textures are also being mapped onto the normal lines.
From the looks of it, you're doing it all in a single pass and you actually emit 6 vertices per incoming triangle. This is not what you want.
Either do it in two passes, i.e. one pass for the mesh, the other for the normals, or try to emit the original triangle and a degenerate triangle for the normal. For simplicity I'd go for the two-pass version:
Inside your render loop:
render terrain
if and only if debug geometry is to be rendered
enable your debug normals shader
render the terrain mesh a second time, passing POINTS to the vertex shader
To make this work, you'll need a second program object that is set up like in the blog post you previously linked to, consisting of a simple pass-through vertex shader, the following geometry shader, and a fragment shader for coloring the lines representing the normals.
The vertex and fragment shaders should be no problem. Assuming you have a smoothed mesh, i.e. you have actual, averaged vertex normals, you can simply pass in points and emit lines.
#version 330 core
// assuming you have vertex normals, you need to render a vertex
// only a single time. with any other prim type, you may render
// the same normal multiple times
layout (points) in;
// Geometry shaders can only output points, line strips or triangle
// strips by definition. you output a single line per vertex. therefore,
// the maximum number of vertices per line_strip is 2. This is effectively
// the same as rendering distinct line segments.
layout (line_strip, max_vertices = 2) out;
in vec3 vs_normal[];
uniform float normal_scale = 0.5; // don't forget: this is the default value!
/* if you're never going to change the normal_scale, consider simply putting a
constant there instead:
const float normal_scale = 0.5;
*/
void main()
{
// we simply transform and emit the incoming vertex - this is v0 of our
// line segment
vec4 v0 = gl_in[0].gl_Position;
gl_Position = gl_ModelViewProjectionMatrix * v0;
EmitVertex();
// we calculate v1 of our line segment
vec4 v1 = v0 + vec4(vs_normal[0] * normal_scale, 0);
gl_Position = gl_ModelViewProjectionMatrix * v1;
EmitVertex();
EndPrimitive();
}
Warning: Untested code!
This is probably as simple as it gets. Add a uniform to your fragment shader so you can color your normals as you like or simply export a constant color.
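For completeness, the companion stages could look like this untested sketch. The vertex shader is a pure pass-through (the attribute locations are assumptions; match them to your vertex layout):
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 2) in vec3 normal;
out vec3 vs_normal;
void main()
{
    // No transform here: the geometry shader applies the MVP matrix.
    vs_normal = normal;
    gl_Position = vec4(position, 1.0);
}
and the fragment shader just exports a configurable constant color:
#version 330 core
out vec4 fragColor;
uniform vec4 line_color = vec4(0.0, 0.0, 0.0, 1.0);
void main()
{
    fragColor = line_color;
}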
Note: The geometry shader above still uses gl_ModelViewProjectionMatrix. If you're writing GL core code, please consider replacing legacy GL constructs, like the matrix stack, with your own stuff!
Note 2: Your geometry shader is not what is usually referred to as a pass through shader. First, you do processing on the incoming data that is more than just assigning incoming values to outgoing values. Second, how can it be a pass-through shader, if you generate geometry? Pass-through means, you don't do anything else than pass incoming values to the next shader stage.