LWJGL // reconstruct position in fragment shader - OpenGL

I have the correct inverse projection matrix, but nothing seems to be right!
The reconstructed position is totally deformed, and the z-value is much too small. Does anyone have a suggestion?
vec3 calculatePosition(vec2 coord, float depth)
{
    vec4 clipSpaceLocation;
    clipSpaceLocation.x = coord.x * 2.0 - 1.0;
    clipSpaceLocation.y = coord.y * 2.0 - 1.0;
    clipSpaceLocation.z = depth * 2.0 - 1.0;
    clipSpaceLocation.w = 1.0;
    vec4 homogenousPosition = uProjectionInverse * clipSpaceLocation;
    return homogenousPosition.xyz / homogenousPosition.w;
}
The z-value is roughly -0.001. Why is that?
coord and depth are:
vec2 coord = vec2(gl_FragCoord.x / width, gl_FragCoord.y / height);
float currentDepth = texture(depthBuffer, coord).r;
I have to implement SSAO for university, and I use Eclipse and Java with the LWJGL plugin.
Please, I need help. I have less than one week left.
EDIT:
I have tried this now... but it is still not linearized:
float camera_space_z_from_depth(sampler2D depthbuffer, vec2 uv) {
    float depth = texture(depthbuffer, uv).x;
    return (2 * uNearPlane) / (uFarPlane + uNearPlane - depth * (uFarPlane - uNearPlane));
}
vec3 calculatePosition(vec2 coord, float depth)
{
    vec4 clipSpaceLocation;
    clipSpaceLocation.x = coord.x * 2.0 - 1.0;
    clipSpaceLocation.y = coord.y * 2.0 - 1.0;
    clipSpaceLocation.z = depth * 2.0 - 1.0;
    clipSpaceLocation.w = 1.0;
    vec4 homogenousPosition = uProjectionInverse * clipSpaceLocation;
    return homogenousPosition.xyz / homogenousPosition.w;
}
vec3 getPosition(vec2 coord){
    float currentDepth = camera_space_z_from_depth(depthBuffer, coord);
    vec3 position = calculatePosition(coord, currentDepth);
    return position;
}

I suppose you need to linearize your depth value, as explained here:
https://www.opengl.org/wiki/Compute_eye_space_from_window_space
Just try this:
float camera_space_z_from_depth(sampler2D depthbuffer, vec2 uv) {
    // 'const' removed: GLSL before 4.20 requires const initializers to be constant expressions
    float depth = texture(depthbuffer, uv).x;
    return (camera_near / (camera_far - depth * (camera_far - camera_near))) * camera_far;
}
EDIT:
Try this. The variable meanings should be self-explanatory (no need for linearization). :)
vec3 GetPosition(vec2 fragmentCoordinates, float depth)
{
    vec3 normalizedDeviceCoordinatesPosition;
    normalizedDeviceCoordinatesPosition.xy = (2.0 * fragmentCoordinates) / uScreenSize - 1;
    normalizedDeviceCoordinatesPosition.z = 2.0 * depth - 1.0;
    vec4 clipSpacePosition;
    clipSpacePosition.w = uProjection[3][2] / (normalizedDeviceCoordinatesPosition.z - (uProjection[2][2] / uProjection[2][3]));
    clipSpacePosition.xyz = normalizedDeviceCoordinatesPosition * clipSpacePosition.w;
    vec4 eyePosition = uInverseProjection * clipSpacePosition;
    return eyePosition.xyz / eyePosition.w;
}
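One detail worth spelling out (my addition, not from the original answer): GLSL indexes matrices as m[column][row], so for a standard GL perspective matrix uProjection[3][2] is the (2 * far * near) / (near - far) term and uProjection[2][3] is the -1 in the bottom row. The w reconstruction then falls out of the projection algebra:
// For a standard perspective projection P (GLSL indexing: P[column][row]):
//   clip.z = P[2][2] * z_eye + P[3][2]
//   clip.w = P[2][3] * z_eye            // = -z_eye
//   ndc.z  = clip.z / clip.w
// Solving for clip.w:
//   z_eye  = P[3][2] / (ndc.z * P[2][3] - P[2][2])
//   clip.w = P[2][3] * z_eye = P[3][2] / (ndc.z - P[2][2] / P[2][3])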
Note: So far I'm assuming your depth texture is a GL_DEPTH_COMPONENT and is attached to the framebuffer as a GL_DEPTH_ATTACHMENT.
I.e.:
glBindTexture(GL_TEXTURE_2D, mDepthTextureId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, mrScreenWidth, mrScreenHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, mDepthTextureId, 0);

Related

Why does VSM Depth Map Blurring produce strange results?

I am trying to implement Variance Shadow Mapping for directional shadows in my rendering engine with OpenGL.
I have read multiple articles such as - https://developer.nvidia.com/gpugems/gpugems3/part-ii-light-and-shadows/chapter-8-summed-area-variance-shadow-maps, https://graphics.stanford.edu/~mdfisher/Shadows.html to develop this.
The basic flow of the algorithm is as follows:
Store the depth and depth^2 (the first and second moments) in the depth texture.
Apply a two-pass Gaussian blur with a 5 x 5 kernel and 10 passes.
Sample a depth value, calculate the fragment's distance from the light, and
put them into the Chebyshev inequality (written out below) to determine the maximum probability of the fragment being in shadow.
Use the result to darken the fragment.
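For reference, this is the bound the shaders below implement: with moments M1 = E[d] and M2 = E[d^2] read from the blurred map, the variance is sigma^2 = M2 - M1^2, and for a receiver depth t > M1 the one-sided Chebyshev inequality gives the maximum probability of being lit as p_max(t) = sigma^2 / (sigma^2 + (t - M1)^2); for t <= M1 the fragment is treated as fully lit. This is exactly what chebyshevUpperBound computes.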
Here's my depth shader for the directional light with an orthographic projection matrix:
#version 440 core
uniform float farPlane;
uniform vec3 lightPos;
uniform mat4 directional_light_space_matrix;
in vec4 FragPos;
out vec2 depth;
void main()
{
    vec4 FragPosLightSpace = directional_light_space_matrix * FragPos;
    float d = FragPosLightSpace.z / FragPosLightSpace.w;
    d = d * 0.5 + 0.5;
    float m1 = d;
    float m2 = d * d;
    // Derivatives must be taken of the computed depth d (the out variable has
    // not been written yet), and the y term needs dFdy, not dFdx.
    float dx = dFdx(d);
    float dy = dFdy(d);
    m2 += 0.25 * (dx * dx + dy * dy);
    depth.r = m1;
    depth.g = m2;
}
Here's the snippet of the fragment shader that checks how much a fragment is lit.
float linstep(float mi, float ma, float v)
{
    return clamp((v - mi) / (ma - mi), 0, 1);
}
float ReduceLightBleeding(float p_max, float Amount)
{
    return linstep(Amount, 1, p_max);
}
float chebyshevUpperBound(float dist, vec2 moments)
{
    float p_max;
    if (dist <= moments.x)
    {
        return 1.0;
    }
    float variance = moments.y - (moments.x * moments.x);
    variance = max(variance, 0.1);
    float d = moments.x - dist;
    p_max = variance / (variance + d * d);
    return ReduceLightBleeding(p_max, 1.0);
}
float CheckDirectionalShadow(float bias, vec3 lightpos, vec3 FragPos)
{
    vec3 projCoords = FragPosLightSpace.xyz / FragPosLightSpace.w;
    projCoords = projCoords * 0.5 + 0.5;
    vec2 closest_depth = texture(shadow_depth_map_directional, projCoords.xy).rg;
    return chebyshevUpperBound(projCoords.z, closest_depth);
}
Here's the Two Pass Gaussian Blur shader.
#version 440 core
layout (location = 0) out vec2 out_1;
in vec2 TexCoords;
uniform sampler2D inputTexture_1;
uniform bool horizontal;
float weights[5] = float[](0.227027, 0.1945946, 0.1216216, 0.054054, 0.016216);
void main()
{
    vec2 tex_offset = 1.0 / textureSize(inputTexture_1, 0);
    vec2 o1 = texture(inputTexture_1, TexCoords).rg * weights[0];
    if (horizontal)
    {
        for (int i = 1; i < 5; i++) // use all five weights (the original loop stopped at 4, leaving weights[4] unused)
        {
            o1 += texture(inputTexture_1, TexCoords + vec2(tex_offset.x * i, 0.0)).rg * weights[i];
            o1 += texture(inputTexture_1, TexCoords - vec2(tex_offset.x * i, 0.0)).rg * weights[i];
        }
    }
    else
    {
        for (int i = 1; i < 5; i++)
        {
            o1 += texture(inputTexture_1, TexCoords + vec2(0.0, tex_offset.y * i)).rg * weights[i];
            o1 += texture(inputTexture_1, TexCoords - vec2(0.0, tex_offset.y * i)).rg * weights[i];
        }
    }
    out_1 = o1;
}
I am putting my framebuffer generation code for information about how I store the moments.
// directional ----------------------------------------------------------------------------------------------------------------------------------------------
glGenFramebuffers(1, &directional_shadow_framebuffer);
glGenTextures(1, &directional_shadow_framebuffer_depth_texture);
glBindTexture(GL_TEXTURE_2D, directional_shadow_framebuffer_depth_texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32F, shadow_map_width, shadow_map_height, 0, GL_RG, GL_FLOAT, NULL);
float border_color[] = { 0.0f,0.0f,0.0f,1.0f };
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border_color);
glBindFramebuffer(GL_FRAMEBUFFER, directional_shadow_framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, directional_shadow_framebuffer_depth_texture, 0);
glGenRenderbuffers(1, &directional_shadow_framebuffer_renderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, directional_shadow_framebuffer_renderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, shadow_map_width, shadow_map_height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, directional_shadow_framebuffer_renderbuffer);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    LOGGER->log(ERROR, "Renderer : createShadowMapBuffer", "Directional Shadow Framebuffer is incomplete!");
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// ----------------------------------------------------------------------------------------------------------------------------------------------
The results of the above operations are far from my expectations. Instead of soft penumbra shadows, I get blob-like sharp shadows.
Here's what the first moment (depth) looks like; the second moment is pretty much the same but darker.
I have tried experimenting with the minimum variance, shadow kernel size, Gaussian samples, and blur passes, but I haven't come any closer to the solution.
I have a feeling I may be doing something wrong with how I have set the texture filtering parameters in the framebuffer generation code given above.
My final questions are:
Is my implementation of VSMs incorrect?
Why do I not see soft penumbras?
I don't have a good feeling about how my texture is filtered; is there anything wrong in the framebuffer generation code?
So, I solved the problem.
The implementation is perfectly fine, but the minimum variance and the Amount parameter of ReduceLightBleeding required tuning.
I discovered that reducing the minimum variance parameter would soften the shadows more, but would greatly increase Light Bleeding.
To counter this side effect we can tune the p_max value to become 0 when below a certain threshold, otherwise rescale between 0 and 1. This is exactly what the ReduceLightBleeding function does, which is also described in the same site linked above.
But, increasing the amount parameter in ReduceLightBleeding would make the shadows look blob-like, which can be seen in the screenshots that I posted above.
I managed to tweak the min variance and the light bleeding reduction amounts to find an optimal spot. However, I could never completely get rid of this artifact.
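To make the two knobs concrete, the tuned core of chebyshevUpperBound takes this shape (the numbers here are illustrative placeholders, not recommended values):
// Lower min variance => softer shadows but more light bleeding;
// higher bleed-reduction Amount => less bleeding but harder, blobbier shadows.
float variance = moments.y - (moments.x * moments.x);
variance = max(variance, 0.0002);  // minimum variance: tune downward for softness
float d = moments.x - dist;
float p_max = variance / (variance + d * d);
p_max = linstep(0.2, 1.0, p_max);  // ReduceLightBleeding with Amount = 0.2: tune upward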
A better alternative to Variance Shadow Mapping is its extension, Exponential Variance Shadow Maps (EVSM).
I do not understand the math properly, but I still managed to implement it quite easily.
Check this question on gamedev.stackexchange for hints - EVSM.
EVSM did a great job of reducing bleeding, to the point where it either cannot be noticed or can simply be ignored.
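For context, the core of EVSM is just a warp of the depth before the moments are taken; a minimal single-warp sketch (my illustration with an assumed constant c, not code from the post):
// EVSM, positive warp only: store the moments of exp(c * d) instead of d.
// d is the light-space depth in [0, 1]; larger c reduces bleeding but costs precision.
const float c = 40.0;
float warped = exp(c * d);
depth.r = warped;           // first moment of the warped depth
depth.g = warped * warped;  // second moment
// At shading time, warp the receiver depth the same way before the Chebyshev
// test: chebyshevUpperBound(exp(c * receiver_depth), moments).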

Shadow Mapping OpenGL shadow not always drawing, and drawing where the position of the light is

I have been trying to do basic shadow mapping in my custom engine, using LearnOpenGL as the source. The exact tutorial can be found here.
I have been debugging this for around two weeks, researching the internet, and trying to wrap my head around it, but all I can say is that the shadow almost never appears, and when it does, it is drawn where the light position is in terms of x and z. I tried to do everything exactly like in the tutorial around 10 times, and I also checked this website for similar questions, but for every one I found, it was not my case.
Findings
In this image (1) you can see that the shadow is not visible when the light is on top of it, but it is visible in this image (2) when the lightPos.x variable is around -4.5 or 4.5; the same applies to the lightPos.z variable. When the shadow appears, it is drawn where the lightPos is, circled in red in the pictures.
I use multiple shaders: one for the light and shadow calculations (ShadowMapping) and one for basic depth mapping (ShadowMapGen).
Here is my ShadowMapping shader:
ShadowMapping Vertex
#version 460
in vec3 vertexIn;
in vec3 normalIn;
in vec2 textureIn;
out vec3 FragPos;
out vec3 normalOut;
out vec2 textureOut;
out vec4 FragPosLightSpace;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 lightSpaceMatrix;
void main()
{
    textureOut = textureIn;
    FragPos = vec3(model * vec4(vertexIn, 1.0));
    normalOut = mat3(transpose(inverse(model))) * normalIn;
    FragPosLightSpace = lightSpaceMatrix * vec4(FragPos, 1.0);
    gl_Position = projection * view * model * vec4(vertexIn, 1.0);
}
ShadowMapping Frag
out vec4 FragColor;
in vec3 FragPos;
in vec3 normalOut;
in vec2 textureOut;
in vec4 FragPosLightSpace;
uniform sampler2D diffuseTexture;
uniform sampler2D shadowMap;
uniform vec3 lightPos;
uniform vec3 viewPos;
float ShadowCalculation(vec4 fragPosLightSpace, vec3 lightdir)
{
    // perform perspective divide
    vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
    // transform to [0,1] range
    projCoords = projCoords * 0.5 + 0.5;
    // get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
    float closestDepth = texture(shadowMap, projCoords.xy).r;
    // get depth of current fragment from light's perspective
    float currentDepth = projCoords.z;
    // check whether current frag pos is in shadow
    float bias = max(0.05 * (1.0 - dot(normalOut, lightdir)), 0.005);
    // float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
    // PCF
    float shadow = 0.0;
    vec2 texelSize = 1.0 / textureSize(shadowMap, 0);
    for (int x = -1; x <= 1; ++x)
    {
        for (int y = -1; y <= 1; ++y)
        {
            float pcfDepth = texture(shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
            shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
        }
    }
    shadow /= 9.0;
    // keep the shadow at 0.0 when outside the far_plane region of the light's frustum.
    if (projCoords.z > 1.0)
        shadow = 0.0;
    return shadow;
}
void main()
{
    vec3 color = texture(diffuseTexture, textureOut).rgb;
    vec3 normal = normalize(normalOut);
    vec3 lightColor = vec3(1.0f);
    // ambient
    vec3 ambient = 0.30 * color;
    // diffuse
    vec3 lightDir = normalize(lightPos - FragPos);
    float diff = max(dot(lightDir, normal), 0.0);
    vec3 diffuse = diff * lightColor;
    // specular
    vec3 viewDir = normalize(viewPos - FragPos);
    vec3 reflectDir = reflect(-lightDir, normal);
    float spec = 0.0;
    vec3 halfwayDir = normalize(lightDir + viewDir);
    spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
    vec3 specular = spec * lightColor;
    // calculate shadow
    float shadow = ShadowCalculation(FragPosLightSpace, lightDir);
    vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
    FragColor = vec4(lighting, 1.0);
}
ShadowMapGen Vertex
Fragment Shader is empty for this shader
#version 460
in vec3 vertexIn;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;
void main()
{
    gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
}
Variable initialisation
lightPos = glm::vec3(-2.0f, 4.0f, -1.0f);
near_plane = 1.0f;
far_plane = 7.5f;
//SAMPLE 2D Uniform binding
TheShader::Instance()->SendUniformData("ShadowMapping_diffuseTexture", 0);
TheShader::Instance()->SendUniformData("ShadowMapping_shadowMap", 1);
Depth Map Framebuffer Generation
This is how I generate my depth map/ shadow map texture in the constructor of my scene:
glGenFramebuffers(1, &depthMapFBO);
//Create depth texture
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); // Height and Width = 1024
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
float borderColor[] = { 1.0, 1.0, 1.0, 1.0 };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
//Attach depth texture as FBO's depth buffer
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Then, in an Update() function that runs in the engine's main while loop, I first do:
Render Objects from light's perspective
//Light Projection and view Matrix
m_lightProjection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, near_plane, far_plane);
m_lightView = glm::lookAt(lightPos, glm::vec3(0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
//Calculate light matrix and send it.
m_lightSpaceMatrix = m_lightProjection * m_lightView;
TheShader::Instance()->SendUniformData("ShadowMapGen_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Render to Framebuffer depth Map
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
//Set current Shader to ShadowMapGen
m_floor.SetShader("ShadowMapGen");
m_moon.SetShader("ShadowMapGen");
//Send model Matrix to current Shader
m_floor.Draw();
m_moon.Draw();
//Set current Shader back to ShadowMapping
m_moon.SetShader("ShadowMapping");
m_floor.SetShader("ShadowMapping");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Render Objects from Camera's perspective
glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Update Camera and Send the view and projection matrices to the ShadowMapping shader
m_freeCamera->Update();
m_freeCamera->Draw();
//Send Light Pos
TheShader::Instance()->SendUniformData("ShadowMapping_lightPos", lightPos);
//Send LightSpaceMatrix
TheShader::Instance()->SendUniformData("ShadowMapping_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Activate Shadow Mapping texture
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthMap);
//Send model Matrix to ShadowMapping shaders
m_moon.Draw();
m_floor.Draw();
I hope someone will see this, thank you for your time.
I tried to do everything exactly like in the tutorial around 10 times
Well, you seem to have missed at least one obvious thing:
m_lightSpaceMatrix = m_lightProjection * m_lightView;
So far, so good, but in your "ShadowMapGen" vertex shader, you wrote:
gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
So you end up with model * projection * view multiplication order, which does not make sense no matter which conventions you adhere to. Since the tutorial uses default GL conventions, you always need projection * view * model * vertex multiplication order, which the tutorial also correctly uses.
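Concretely, the ShadowMapGen vertex shader's main should therefore be:
void main()
{
    // lightSpaceMatrix is projection * view, so it must come before model
    gl_Position = lightSpaceMatrix * model * vec4(vertexIn, 1.0);
}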

How to overcome differences between off-screen and on-screen framebuffer rendering?

I'm trying to use an off-screen framebuffer to replicate a scene that renders wonderfully to the default framebuffer. There seem to be differences in the rendering that I can't sort out.
For context, I am visualizing the Earth with an atmospheric shader. I am using a Qt QOpenGLWidget, but mostly raw GL calls because I'm not a fan of Qt's abstractions. I need to render this scene to an off-screen framebuffer because I would like to implement some post-processing effects in my visualization, for which I need to be able to sample the scene as a texture. I've gotten to the point where I am successfully creating a framebuffer and rendering its color texture to a quad on the screen.
My understanding is that alpha blending behaves differently when rendering to an off-screen framebuffer compared to the default. I haven't been able to find any resources online that indicate a way to produce identical results without a major refactor. The methodologies I've seen involve either manually rendering objects in order from back to front, or baking in the alpha values to the colors that are sent to the framebuffer. I've tried an often suggested alternative, which is using glBlendFuncSeparate to control things more manually:
glEnable(GL_BLEND);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
But that hasn't led to any noticeable improvement in my results (nor would I expect it to, since the math here wouldn't resolve the blending issues that I'm seeing).
So enough rambling, onto some actual code. My code-base is monstrous so I unfortunately can't share all of it, as there are a number of proprietary drawing routines, but I can start with how I generate my framebuffer:
// Create the framebuffer object
glGenFramebuffers(1, &m_fbo);
// Bind the framebuffer to the current context
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// generate texture to attach as a color attachment to the current frame buffer
m_texColorUnit = 4;
// Set to width and height of window, and leave data uninitialized
glGenTextures(1, &m_texColorBuffer);
glActiveTexture(GL_TEXTURE0 + m_texColorUnit);
glBindTexture(GL_TEXTURE_2D, m_texColorBuffer);
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGB8_OES,
             GL_UNSIGNED_BYTE,
             NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// attach texture to currently bound framebuffer object
glFramebufferTexture2D(GL_FRAMEBUFFER,
                       GL_COLOR_ATTACHMENT0,
                       GL_TEXTURE_2D,
                       m_texColorBuffer,
                       0);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glActiveTexture(GL_TEXTURE0); // Reset active texture to default
// Create renderBuffer object for depth and stencil checking
glGenRenderbuffers(1, &m_rbo);
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo); // bind rbo
glRenderbufferStorage(GL_RENDERBUFFER,
                      GL_DEPTH24_STENCIL8_OES,
                      m_navigation->renderContext()->getWidth(),
                      m_navigation->renderContext()->getHeight()
                      ); // allocate memory
// Attach rbo to the depth and stencil attachment of the fbo
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_OES,
                          GL_RENDERBUFFER,
                          m_rbo);
And the shaders for the atmosphere:
// vert
#ifndef GL_ES
precision mediump int;
precision highp float;
#endif
attribute vec3 posAttr;
uniform highp mat4 matrix;
uniform highp mat4 modelMatrix;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 5;
const float fSamples = 5.0;
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
float scale(float fCos)
{
    float x = 1.0 - fCos;
    return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main(void)
{
    // Get the ray from the camera to the vertex and its length (which is the far point of the ray passing through the atmosphere)
    vec3 v3Pos = posAttr;
    vec3 vertexWorld = posAttr;
    vec3 v3Ray = v3Pos - v3CameraPos;
    float fFar = length(v3Ray);
    v3Ray /= fFar;
    // Calculate the closest intersection of the ray with the outer atmosphere (which is the near point of the ray passing through the atmosphere)
    float B = 2.0 * dot(v3CameraPos, v3Ray);
    float C = fCameraHeight2 - fOuterRadius2;
    float fDet = max(0.0, B*B - 4.0 * C);
    float fNear = 0.5 * (-B - sqrt(fDet));
    // Calculate the ray's starting position, then calculate its scattering offset
    vec3 v3Start = v3CameraPos + v3Ray*fNear;
    fFar -= fNear;
    float fStartAngle = dot(v3Ray, v3Start) / fOuterRadius;
    float fStartDepth = exp(-1.0 / fScaleDepth);
    float fStartOffset = fStartDepth*scale(fStartAngle);
    // Initialize the scattering loop variables
    float fSampleLength = fFar / fSamples;
    float fScaledLength = fSampleLength * fScale;
    vec3 v3SampleRay = v3Ray * fSampleLength;
    vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
    // Now loop through the sample rays
    vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
    for(int i=0; i<nSamples; i++)
    {
        float fHeight = length(v3SamplePoint);
        float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
        float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
        float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
        float fScatter = (fStartOffset + fDepth*(scale(fLightAngle) - scale(fCameraAngle)));
        vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
        v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
        v3SamplePoint += v3SampleRay;
    }
    // Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
    colatten = v3FrontColor * fKmESun;
    col = v3FrontColor * (v3InvWavelength*fKrESun);
    v3Direction = v3CameraPos - v3Pos;
    gl_Position = matrix * modelMatrix * vec4(posAttr,1);
}
// frag
#ifdef GL_ES
precision highp float;
precision mediump int;
#endif
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
uniform float fExposure;
void main (void)
{
    //float fCos = dot(normalize(lPos), normalize(v3Direction));
    float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
    float fRayleighPhase = 0.75 * (1.0 + fCos*fCos);
    float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
    //vec3 result = clamp(col + fMiePhase * colatten, vec3(0,0,0), vec3(1,1,1));
    //gl_FragColor = vec4(result, result.b);
    gl_FragColor.rgb = 1.0 - exp(-fExposure * (fRayleighPhase * col + fMiePhase * colatten));
    //gl_FragColor.a = 1.0;
    gl_FragColor.a = gl_FragColor.b;
}
As I've said, my results are less than stellar. The first image is what I get when rendering to the off-screen framebuffer, and the second image is when I render directly to the screen. Any ideas on how to resolve these two?
The depth render buffer is not attached to the framebuffer. The 2nd parameter of glFramebufferRenderbuffer has to be the attachment point.
GL_DEPTH_STENCIL_OES is not a valid value for an attachment point. So
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_OES,
                          GL_RENDERBUFFER,
                          m_rbo);
will cause a GL_INVALID_ENUM error, which can be retrieved with glGetError.
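As a quick sanity check while debugging (a sketch of mine, not part of the original answer; assumes <cstdio> is included):
// Confirm the invalid attachment point right after the call.
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_OES, GL_RENDERBUFFER, m_rbo);
if (glGetError() == GL_INVALID_ENUM)
    std::fprintf(stderr, "glFramebufferRenderbuffer: invalid attachment point\n");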
The enumerator constant that specifies the depth and stencil buffer attachment is GL_DEPTH_STENCIL_ATTACHMENT:
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_ATTACHMENT,
                          GL_RENDERBUFFER,
                          m_rbo);
Note that when the attachment call fails, the depth/stencil buffer is simply not attached, but the framebuffer is still complete, just without a depth and stencil buffer.
Alternatively, you can use a depth-buffer-only attachment: create a depth render buffer (GL_DEPTH_COMPONENT) and use the attachment type GL_DEPTH_ATTACHMENT.
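A minimal sketch of that depth-only alternative, reusing the names from the question:
// Depth-only render buffer, no stencil.
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo);
glRenderbufferStorage(GL_RENDERBUFFER,
                      GL_DEPTH_COMPONENT16,
                      m_navigation->renderContext()->getWidth(),
                      m_navigation->renderContext()->getHeight());
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_ATTACHMENT,
                          GL_RENDERBUFFER,
                          m_rbo);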
The blending issue is caused by the texture that is attached to the color plane of the framebuffer having no alpha channel. The format GL_RGB8_OES provides the 3 color channels (RGB) but no alpha channel.
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGB8_OES,
             GL_UNSIGNED_BYTE,
             NULL);
You have to use the format and internal format GL_RGBA8_OES rather than GL_RGB8_OES, which is also included in OES_required_internalformat. See also gl2ext.h:
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGBA8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGBA8_OES,
             GL_UNSIGNED_BYTE,
             NULL);

OpenGL Cubemap FrameBuffer Depth Comparison

I am trying to implement shadow maps for point lights. Basically I'm creating a framebuffer and then render all shadow casters on each side of a cubemap texture (which is 6 times) and then read it in the regular rendering pass and determine which pixel is in shadow. I have several questions:
Why do I have to include a color attachment in addition to a depth component in order for my cubemap to get anything rendered to? I tried it without the color attachment and it did not work.
After adding the color attachment, I can see my shadow casters in the cubemap but it seems the shadow comparison is wrong. I am suspecting that one is in NDC while the other isn't.
Here's how I initialize my framebuffer containing the shadow cubemap:
// Create the depth buffer
glGenTextures(1, &mDepthTextureID);
glBindTexture(GL_TEXTURE_2D, mDepthTextureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
//Create the cubemap texture
glGenTextures(1, &mCubemapTextureID);
glBindTexture(GL_TEXTURE_CUBE_MAP, mCubemapTextureID);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (GLuint i = 0; i < 6; ++i)
{
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_R32F, width, height, 0, GL_RED, GL_FLOAT, 0);
}
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
//Create the framebuffer and attach the cubemap texture to it
glGenFramebuffers(1, &mFrameBufferObjectID);
glBindFramebuffer(GL_FRAMEBUFFER, mFrameBufferObjectID);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, mDepthTextureID, 0);
//Disable writes to the color buffer
glDrawBuffer(GL_NONE);
//Disable reads from the color buffer
glReadBuffer(GL_NONE);
GLenum Status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (Status != GL_FRAMEBUFFER_COMPLETE)
{
    switch (Status)
    {
        case GL_FRAMEBUFFER_UNSUPPORTED:
            printf("FrameBuffer unsupported error");
            return false;
            break;
        case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
            printf("FrameBuffer incomplete attachment");
            return false;
            break;
        default:
            printf("GLShadowCubemap error, status: 0x%x\n", Status);
            return false;
    }
}
//Unbind this
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Here's my shadow's vertex shader: (Only the Position attribute is used)
#version 330 core
layout (location = 0) in vec3 Position;
layout (location = 1) in vec3 Normal;
layout (location = 2) in vec2 TexCoord;
layout (location = 3) in vec3 Tangent;
uniform mat4 gModelMatrix;
uniform mat4 gModelViewProjectionMatrix;
out vec3 WorldPosition;
/*
 * The version below needs a geometry shader and layered rendering:
 * void main()
 * {
 *     gl_Position = gModelMatrix * vec4(Position, 1.0);
 * }
 */
void main()
{
    vec4 pos4 = vec4(Position, 1.0);
    gl_Position = gModelViewProjectionMatrix * pos4;
    WorldPosition = (gModelMatrix * pos4).xyz;
}
Here's my shadow fragment shader:
#version 330 core
in vec3 WorldPosition;
uniform vec3 gLightPosition;
out float Fragment;
void main()
{
    // get distance between fragment and light source
    float dist_to_light = length(WorldPosition - gLightPosition);
    //gl_FragDepth = dist_to_light;
    Fragment = dist_to_light;
}
An additional question here:
I have seen many people say that overriding gl_FragDepth is a bad idea. I roughly know why, but what's strange here is that if I override gl_FragDepth manually, nothing gets written to the cubemap. Why?
Here's how I render all the regular stuff (the variable i is an index into my lights array):
mShadowCubemapFBOs[i].ViewportChange();
mShadowMapTechnique.SetLightPosition(light.Position);
const float shadow_aspect = (static_cast<float>(mShadowWidth) / mShadowHeight);
const mat4 shadow_projection_matrix = glm::perspective(90.f, shadow_aspect, 1.f, mShadowFarPlane);
const vector<MeshComponent>& meshes = ComponentManager::Instance().GetMeshComponentPool().GetPool();
for (int layer = 0; layer < 6; ++layer)
{
    GLenum cubemap_face = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
    mShadowCubemapFBOs[i].Bind(cubemap_face);
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
    for (const MeshComponent& mesh : meshes)
    {
        //the transform_component is referenced ahead of time.
        const mat4 model_transform = transform_component->GetTransformMatrix();
        mShadowMapTechnique.SetModelViewProjectionMatrix(light.Position, cubemap_face, shadow_projection_matrix, model_transform);
        mShadowMapTechnique.SetModelMatrix(model_transform);
        mesh.Render();
    }
}
Finally here's the regular rendering shader:
#version 330 core
const int MAX_LIGHTS = 8;
const int LIGHT_TYPE_DIRECTIONAL = 0;
const int LIGHT_TYPE_POINT = 1;
const int LIGHT_TYPE_SPOT = 2;
in vec2 TexCoord0;
in vec3 WorldNormal0;
in vec3 WorldPos0;
in vec3 WorldTangent0;
out vec4 FragmentColor;
struct Material
{
    vec4 Emissive;
    vec4 Ambient;
    vec4 Diffuse;
    vec4 Specular;
    float SpecularPower;
    bool UseTexture;
};
struct Light
{
    vec3 Position;
    vec3 Direction;
    vec4 Color; //RGBA
    float SpotAngle;
    float ConstantAttenuation;
    float LinearAttenuation;
    float QuadraticAttenuation;
    int LightType;
    samplerCube ShadowMap; //Cubemap shadows
    bool Enabled;
};
struct LightingResult
{
    vec4 Diffuse;
    vec4 Specular;
};
uniform Material gMaterial;
uniform Light gLights[MAX_LIGHTS];
uniform sampler2D gTextureSampler0;
uniform sampler2D gNormalMap;
uniform bool gEnableNormalMap;
uniform vec3 gEyeWorldPos;
float CalculateShadowFactor(vec3 frag_pos, Light light)
{
    vec3 fragment_to_light = frag_pos - light.Position;
    float sample_distance = texture(light.ShadowMap, fragment_to_light).r;
    float distance = length(fragment_to_light);
    if (distance < sample_distance + 0.001)
    {
        return 1.0; // Inside the light
    }
    else
    {
        return 0.5; // Inside the shadow
    }
}
//L - Light direction vector from pixel to light source
//N - Normal at the pixel
vec4 CalculateDiffuse(Light light, vec3 L, vec3 N)
{
    float n_dot_l = max(0, dot(N, L));
    return light.Color * n_dot_l;
}
//V - View vector
//L - Light direction vector from pixel to light source
//N - Normal at the pixel
vec4 CalculateSpecular(Light light, vec3 V, vec3 L, vec3 N)
{
    //Phong lighting
    vec3 R = normalize(reflect(-L, N));
    float r_dot_v = max(0, dot(R, V));
    return light.Color * pow(r_dot_v, max(0.4, gMaterial.SpecularPower));
}
float CalculateAttenuation(Light light, float distance)
{
    return 1.0 / (light.ConstantAttenuation + light.LinearAttenuation * distance + light.QuadraticAttenuation * distance * distance);
}
//V - View vector
//P - Position of pixel
//N - Normal of pixel
LightingResult CalculatePointLight(Light light, vec3 V, vec3 P, vec3 N)
{
    LightingResult result;
    result.Diffuse = vec4(0.0, 0.0, 0.0, 1.0);
    result.Specular = vec4(0.0, 0.0, 0.0, 1.0);
    vec3 L = light.Position - P;
    float distance = length(L);
    L = normalize(L);
    float attenuation = CalculateAttenuation(light, distance);
    result.Diffuse = CalculateDiffuse(light, L, N) * attenuation;
    result.Specular = CalculateSpecular(light, V, L, N) * attenuation;
    return result;
}
//V - View vector
//P - Position of pixel
//N - Normal of pixel
LightingResult CalculateDirectionalLight(Light light, vec3 V, vec3 P, vec3 N)
{
    LightingResult result;
    result.Diffuse = vec4(0.0, 0.0, 0.0, 1.0);
    result.Specular = vec4(0.0, 0.0, 0.0, 1.0);
    vec3 L = -light.Direction;
    result.Diffuse = CalculateDiffuse(light, L, N);
    result.Specular = CalculateSpecular(light, V, L, N);
    return result;
}
//L - Light vector
//Smoothness increases as angle gets larger
float CalculateSpotCone(Light light, vec3 L)
{
    //cos are in radians
    float min_cos = cos(light.SpotAngle);
    float max_cos = (min_cos + 1.0f) / 2.0f;
    float cos_angle = dot(light.Direction, -L); //negated L such that as we move towards the edge, intensity decreases
    return smoothstep(min_cos, max_cos, cos_angle);
}
//V - View vector
//P - Position of pixel
//N - Normal of pixel
LightingResult CalculateSpotLight(Light light, vec3 V, vec3 P, vec3 N)
{
    LightingResult result;
    result.Diffuse = vec4(0.0, 0.0, 0.0, 1.0);
    result.Specular = vec4(0.0, 0.0, 0.0, 1.0);
    vec3 L = light.Position - P;
    float distance = length(L);
    L = normalize(L);
    float attenuation = CalculateAttenuation(light, distance);
    float spot_intensity = CalculateSpotCone(light, L);
    result.Diffuse = CalculateDiffuse(light, L, N) * attenuation * spot_intensity;
    result.Specular = CalculateSpecular(light, V, L, N) * attenuation * spot_intensity;
    return result;
}
//P - Position of pixel
//N - Normal of pixel
LightingResult CalculateLighting(vec3 P, vec3 N)
{
    vec3 V = normalize(gEyeWorldPos - P);
    LightingResult total_result;
    total_result.Diffuse = vec4(0, 0, 0, 1.0);
    total_result.Specular = vec4(0, 0, 0, 1.0);
    for (int i = 0; i < MAX_LIGHTS; ++i)
    {
        if (!gLights[i].Enabled)
        {
            continue;
        }
        LightingResult result;
        result.Diffuse = vec4(0, 0, 0, 1.0);
        result.Specular = vec4(0, 0, 0, 1.0);
        float shadow_factor = 1.0;
        switch (gLights[i].LightType)
        {
            case LIGHT_TYPE_DIRECTIONAL:
                result = CalculateDirectionalLight(gLights[i], V, P, N);
                break;
            case LIGHT_TYPE_POINT:
                result = CalculatePointLight(gLights[i], V, P, N);
                shadow_factor = CalculateShadowFactor(P, gLights[i]);
                break;
            case LIGHT_TYPE_SPOT:
                result = CalculateSpotLight(gLights[i], V, P, N);
                shadow_factor = CalculateShadowFactor(P, gLights[i]);
                break;
        }
        total_result.Diffuse += (result.Diffuse * shadow_factor);
        total_result.Specular += (result.Specular * shadow_factor);
    }
    total_result.Diffuse = clamp(total_result.Diffuse, 0, 1);
    total_result.Specular = clamp(total_result.Specular, 0, 1);
    return total_result;
}
vec3 CalculateNormalMapNormal()
{
    vec3 normal = normalize(WorldNormal0);
    vec3 tangent = normalize(WorldTangent0);
    tangent = normalize(tangent - dot(tangent, normal) * normal); //remove components from the normal vector. This is needed for non-uniform scaling
    vec3 bi_tangent = cross(tangent, normal);
    vec3 bump_map = texture(gNormalMap, TexCoord0).xyz;
    bump_map = 2.0 * bump_map - vec3(1.0, 1.0, 1.0); //Remaps the values
    mat3 TBN = mat3(tangent, bi_tangent, normal);
    vec3 actual_normal = TBN * bump_map;
    return normalize(actual_normal);
}
void main()
{
    vec3 pixel_normal = normalize(WorldNormal0);
    vec4 texture_color = vec4(0, 0, 0, 1);
    if (gMaterial.UseTexture)
    {
        texture_color = texture(gTextureSampler0, TexCoord0);
    }
    if (gEnableNormalMap)
    {
        pixel_normal = CalculateNormalMapNormal();
    }
    LightingResult light_result = CalculateLighting(WorldPos0, pixel_normal);
    vec4 diffuse_color = gMaterial.Diffuse * light_result.Diffuse;
    vec4 specular_color = gMaterial.Specular * light_result.Specular;
    FragmentColor = (gMaterial.Emissive + gMaterial.Ambient + diffuse_color + specular_color) * texture_color;
    //FragmentColor = texture_color;
    //temp test
    //vec3 fragment_to_light = WorldPos0 - gLights[1].Position;
    //FragmentColor = vec4(vec3(texture(gLights[1].ShadowMap, fragment_to_light).r / gFarPlane), 1.0);
}
What am I doing wrong? As far as I can see, I am storing the distance from fragment to light in world space, and it is written to a color buffer (not the depth buffer), so it shouldn't be in NDC. When I compare it, that is also done in world space. Why are the shadows off? It appears as if the shadows are way larger than they should be, so the entire scene is covered in shadow, and what should be the size of the shadow is actually covered in light.
Picture of the shadow cubemap:
Picture of the scene (only the helicopter will cast shadow):
Thanks!
After some debugging, I found my problems:
glm::perspective takes the fov in radians, not degrees, even though its documentation says it is only in radians if GLM_FORCE_RADIANS is defined (I did not define that).
The cubemap for the shadow requires the clear color to be (FLT_MAX, FLT_MAX, FLT_MAX, 1.0) so that everything is NOT in shadow by default.
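Put together, the two fixes look roughly like this (my sketch based on the points above, reusing the names from the question; FLT_MAX comes from <cfloat>):
// Fix 1: pass the field of view in radians.
const mat4 shadow_projection_matrix = glm::perspective(glm::radians(90.f), shadow_aspect, 1.f, mShadowFarPlane);
// Fix 2: before rendering each face, clear the GL_R32F cubemap face to FLT_MAX
// so that un-rendered texels compare as "infinitely far", i.e. not in shadow.
glClearColor(FLT_MAX, FLT_MAX, FLT_MAX, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);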

Parallax mapping glitch in OpenGL

And this is the result when I invert the tangent vector right after transferring it to the vertex shader:
The "shadow" is in the wrong place.
(And it works only when I rotate it through the Y axis, so the last image seems to present a correctly parallax-mapped cube.)
I'M SURE IT IS NOT A TANGENT VECTOR OR TEXTURE COORDINATE PROBLEM,
because:
I used exactly the same tangent calculation functions and exactly the same cube position, normal, and texture coordinate data as in the working demo.
After all, I exported the arrays with position/texcoord/normal/tangent data into a .txt file, and I saw exactly what I expected (the same pos/tex/norm data as in the working demo, including the calculated tangents, which I managed to export from the working demo).
The next argument is that I copied my shader code into the working demo and it still works.
Another is that I tried multiple ways to render this cube:
I tried a VBO with glVertexAttribPointer, a VBO with the tangent saved as another texture coordinate (as in the demo), and a display list with glVertexAttrib4f. The result is... EXACTLY THE SAME.
The height map is loading correctly; I tried setting it as the diffuse map and it looked OK.
glGetError() gives me no errors, and the shader compile logs say the same.
It is probably something to do with the camera or the init states.
Maybe posting the init code will help.
void CDepthBase::OpenGLSet() {
    glEnable(GL_TEXTURE_2D);
    glShadeModel(GL_SMOOTH);
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClearDepth(1.0f);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glDepthFunc(GL_LEQUAL);
    glEnable(GL_DEPTH_TEST);
    glBlendFunc(GL_ONE, GL_ONE);
    GLfloat ratio;
    glViewport(0, 0, ResolutionWidth, ResolutionHeight);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45.0f, ResolutionWidth / (float)ResolutionHeight, 0.1f, 900.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    if (GLEW_OK != glewInit()) {
        MBX("Failed to init GLEW.", "Error");
    }
    if (glewIsSupported("GL_ARB_vertex_buffer_object")) {
        VBO_supported = true;
    } else VBO_supported = false;
    glHint(GL_FOG_HINT, GL_DONT_CARE);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
    glShadeModel(GL_SMOOTH);
    glAlphaFunc(GL_ALWAYS, 0);
}
By the way, I'm using GL Extension Wrangler with extensions.
Shader code & log (this exported file contains code which was directly passed to glShaderSource):
Vertex shader was successfully compiled to run on hardware.
Fragment shader was successfully compiled to run on hardware.
Fragment shader(s) linked, vertex shader(s) linked.
------------------------------------------------------------------------------------------
varying vec3 lightDir;
varying vec3 viewDir;
attribute vec4 tangent;
void main()
{
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;
    vec3 vertexPos = vec3(gl_ModelViewMatrix * gl_Vertex);
    vec3 tn = tangent.xyz;
    vec3 n = normalize(gl_NormalMatrix * gl_Normal);
    vec3 t = normalize(gl_NormalMatrix * tangent.xyz);
    vec3 b = cross(t, n) * -tangent.w;
    mat3 tbnMatrix = mat3(t.x, b.x, n.x,
                          t.y, b.y, n.y,
                          t.z, b.z, n.z);
    lightDir = (gl_LightSource[0].position.xyz - vertexPos) / 100.0;
    lightDir = tbnMatrix * lightDir;
    viewDir = -vertexPos;
    viewDir = tbnMatrix * viewDir;
}
-----------------------------------------------------------------------------------------
varying vec3 lightDir;
varying vec3 viewDir;
uniform sampler2D diffuseMap;
uniform sampler2D normalMap;
uniform sampler2D heightMap;
uniform float scale;
uniform float bias;
void main()
{
    vec3 v = normalize(viewDir);
    vec2 TexCoord = gl_TexCoord[0].st;
    {
        float height = texture2D(heightMap, gl_TexCoord[0].st).r;
        height = height * scale + bias;
        TexCoord = gl_TexCoord[0].st + (height * v.xy);
    }
    vec3 l = lightDir;
    float atten = max(0.0, 1.0 - dot(l, l));
    l = normalize(l);
    vec3 n = normalize(texture2D(normalMap, TexCoord).rgb * 2.0 - 1.0);
    vec3 h = normalize(l + v);
    float nDotL = max(0.0, dot(n, l));
    float nDotH = max(0.0, dot(n, h));
    float power = (nDotL == 0.0) ? 0.0 : pow(nDotH, gl_FrontMaterial.shininess);
    vec4 ambient = gl_FrontLightProduct[0].ambient * atten;
    vec4 diffuse = gl_FrontLightProduct[0].diffuse * nDotL * atten;
    vec4 specular = gl_FrontLightProduct[0].specular * power * atten;
    vec4 color = gl_FrontLightModelProduct.sceneColor + ambient + diffuse + specular;
    color *= texture2D(diffuseMap, TexCoord);
    gl_FragColor = color;
}
The uniforms are working correctly, because the results are the same if I replace them with constant values.
Compiling the shader:
void __Shader::import(){
    if (imported) __Shader::~__Shader();
    v = glCreateShader(GL_VERTEX_SHADER);
    f = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(v, 1, (const GLchar **)&vsrc.cstr, NULL);
    glShaderSource(f, 1, (const GLchar **)&fsrc.cstr, NULL);
    glCompileShader(v);
    glCompileShader(f);
    p = glCreateProgram();
    glAttachShader(p, v);
    glAttachShader(p, f);
    if (_flags & NORMAL_MAPPING)
        glBindAttribLocation(p, ATTRIB_TANGENT, "tangent");
    glLinkProgram(p);
    if (_flags & DIFFUSE_MAPPING)
        diffuseUni.loc = glGetUniformLocation(p, "diffuseMap");
    if (_flags & NORMAL_MAPPING)
        normalUni.loc = glGetUniformLocation(p, "normalMap");
    if (_flags & PARALLAX_MAPPING)
        heightUni.loc = glGetUniformLocation(p, "heightMap");
    if (_flags & SPECULAR_MAPPING)
        specularUni.loc = glGetUniformLocation(p, "specularMap");
    imported = true;
}
Setting attribute in VBO:
if (tangents.size() > 0) {
    buffered |= 3;
    glGenBuffers(1, &VBO_tangent);
    glBindBuffer(GL_ARRAY_BUFFER, VBO_tangent);
    glBufferData(GL_ARRAY_BUFFER, tangents.size() * sizeof(tangent), tangents.get_ptr(), GL_STATIC_DRAW);
}
// and in draw:
if (buffered & 3) {
    glBindBuffer(GL_ARRAY_BUFFER, VBO_tangent);
    glVertexAttribPointer(__Shader::ATTRIB_TANGENT, 4, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(__Shader::ATTRIB_TANGENT);
}
and a small note
for (int i = 0; i < responders.size(); ++i)
    if (strstr(responders[i].idea, "tangent problem"))
        responders[i].please_dont_talk();
Just tell me your other ideas about what could be the reason for these bad results.
Wheew... already solved it. The problem was with loading the texture files, even though I did not see any disorders with diffuse mapping or even with diffuse+normal mapping. I was using IMG_Load from SDL; maybe I used it the wrong way, but it did not work for me. It was probably the normal map that got messed up.
bad texture import code:
if (imported || filenamez.length() < 1) return;
SDL_Surface* surface = 0;
surface = IMG_Load(filenamez.c_str());
if (surface) {
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    bool endianess = filenamez.substr(filenamez.length() - 4) == ".jpg";
    glTexImage2D(GL_TEXTURE_2D, 0, 3, surface->w, surface->h, 0,
                 (endianess ? GL_RGB : GL_BGR), GL_UNSIGNED_BYTE, surface->pixels);
}
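For comparison, a more robust SDL approach is to derive the upload format from the loaded surface instead of guessing from the file extension (a sketch of mine, not the HBITMAP-based loader mentioned below):
// Pick the pixel format from the SDL surface itself.
SDL_Surface* surface = IMG_Load(filenamez.c_str());
if (surface) {
    GLenum format;
    if (surface->format->BytesPerPixel == 4)
        format = (surface->format->Rmask == 0x000000ff) ? GL_RGBA : GL_BGRA;
    else
        format = (surface->format->Rmask == 0x000000ff) ? GL_RGB : GL_BGR;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // SDL rows are not always 4-byte aligned
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, surface->w, surface->h, 0,
                 format, GL_UNSIGNED_BYTE, surface->pixels);
    SDL_FreeSurface(surface);
}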
BEWARE!
I'm now using an HBITMAP-based texture loading routine taken from the dhpoware demo I was talking about, and it works fine.
Peace.
After 2-3 days of hard debugging, let me feel a little bit of euphoria.
Oh, I almost forgot, the final result: