OpenGL linear depth information

I want to implement screen-space fluid rendering, but I am having trouble getting the depth information.
The depth image I get is too white. I think this is because the depth is not linear. When I look at the image very closely, it behaves reasonably: I can see the depth change. Within a single circle, the colour in the centre is black and becomes whiter and whiter toward the edge.
So I linearized the depth. It looks better, but now the depth no longer changes within a single circle. Here is my vertex shader:
#version 400
uniform float pointScale;
layout(location=0) in vec3 position;
uniform mat4 viewMat, projMat, modelMat;
uniform mat3 normalMat;
out vec3 fs_PosEye;
out vec4 fs_Color;
void main()
{
    vec3 posEye = (viewMat * vec4(position.xyz, 1.0f)).xyz;
    float dist = length(posEye);
    gl_PointSize = 1.0f * (pointScale / dist);
    fs_PosEye = posEye;
    fs_Color = vec4(0.5, 0.5, 0.5, 1.0f);
    gl_Position = projMat * viewMat * vec4(position.xyz, 1.0);
}
Here is my fragment shader:
#version 400
uniform mat4 viewMat, projMat, modelMat;
uniform float pointScale; // scale to calculate size in pixels
in vec3 fs_PosEye;
in vec4 fs_Color;
out vec4 out_Color;
out vec4 out_Position;
float linearizeDepth(float exp_depth, float near, float far) {
    return (2 * near) / (far + near - exp_depth * (far - near));
}
void main()
{
    // calculate normal from texture coordinates
    vec3 N;
    N.xy = gl_PointCoord.xy * 2.0 - 1.0;
    float mag = dot(N.xy, N.xy);
    if (mag > 1.0) discard; // kill pixels outside circle
    N.z = sqrt(1.0 - mag);
    // calculate depth
    vec4 pixelPos = vec4(fs_PosEye + normalize(N) * 1.0f, 1.0f);
    vec4 clipSpacePos = projMat * pixelPos;
    //clipSpacePos.z = linearizeDepth(clipSpacePos, 1, 400);
    float depth = clipSpacePos.z / clipSpacePos.w;
    //depth = linearizeDepth(depth, 1, 100);
    gl_FragDepth = depth;
    float diffuse = max(0.0, dot(N, vec3(0.0, 0.0, 1.0)));
    out_Color = diffuse * vec4(0.0f, 0.0f, 1.0f, 1.0f);
    out_Color = vec4(N, 1.0);
    out_Color = vec4(depth, depth, depth, 1.0);
}
I want the depth to be interpolated based on the normal after linearization.
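As a reference point, here is a minimal sketch of how these pieces usually fit together (assuming near = 1.0 and far = 100.0 from the commented-out call): clipSpacePos.z / clipSpacePos.w is an NDC depth in [-1, 1], which is what the linearizeDepth formula above expects, while gl_FragDepth wants a window-space depth in [0, 1]; the linearized value is only used for visualization:
float ndcDepth = clipSpacePos.z / clipSpacePos.w;      // NDC depth in [-1, 1]
gl_FragDepth = ndcDepth * 0.5 + 0.5;                   // window-space depth in [0, 1]
float linDepth = linearizeDepth(ndcDepth, 1.0, 100.0); // approximately linear in eye space
out_Color = vec4(vec3(linDepth), 1.0);                 // visualize the linear depth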

Related

How can I fix this black hole I get when trying to do 2D lighting using normal maps?

I have the following shader to draw lighting onto 2D quads with normal maps:
in vec2 position;
in vec2 texturePosition;
out vec4 fragColor;
layout(std140) uniform ubo {
    mat4 uCameraView;
    vec3 uLightPosition;
};
uniform sampler2D uTexture0;
uniform sampler2D uTexture1;
// TODO: move into the ubo
const vec4 uLightColor = vec4(1.f, 1.f, 1.f, 1.f);
const vec4 uAmbientColor = vec4(0.2f, 0.2f, 0.2f, 0.2f);
const vec3 uFalloff = vec3(0.1f, 3.f, 20.f);
void main() {
    vec4 diffuseColor = texture(uTexture0, texturePosition);
    vec3 normalMap = texture(uTexture1, texturePosition).rgb;
    normalMap.y *= -1;
    vec3 lightDirection = vec3(uLightPosition.xy - position.xy, uLightPosition.z);
    float direction = length(lightDirection);
    vec3 light = normalize(lightDirection);
    vec3 normal = normalize(normalMap * 2.f - 1.f);
    vec3 diffuseLight = (uLightColor.rgb * uLightColor.a) * dot(normal, light);
    vec3 ambient = uAmbientColor.rgb * uAmbientColor.a;
    float attenuation = 1.f /
        (uFalloff.x + (uFalloff.y * direction) +
         (uFalloff.z * direction * direction));
    vec3 intensity = ambient + diffuseLight * attenuation;
    vec3 color = diffuseColor.rgb * intensity;
    fragColor = vec4(color, diffuseColor.a);
}
But the issue I get is this black hole behind the light.
I can remove the black hole with this line of code:
vec3 diffuseLight = (uLightColor.rgb * uLightColor.a) * max(dot(normal, light), 0.0f);
But then I get this misaligned light, with no illumination where the black hole was.
This is my vertex shader
void main() {
    gl_Position = uCameraView * vec4(vPosition, 0.f, 1.f);
}
uCameraView is a mat4 perspective projection that is built like this:
glm::vec3 camera;
glm::mat4 projection =
    glm::perspective(glm::radians(45.f), app.aspectRatio, 0.f, 100.f);
projection = glm::scale(projection, size.viewport.scale);
glm::mat4 view = glm::translate(projection, camera);
Is this possible to fix?
I tried following this tutorial https://github.com/mattdesl/lwjgl-basics/wiki/ShaderLesson6 and there the light looks good.
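As an aside (which may or may not be related to the artifact): glm::perspective expects a near plane greater than zero; with zNear = 0.f the resulting matrix maps every fragment to depth 1, so the depth component is degenerate. For pure 2D rendering an orthographic projection is the more common choice. A minimal sketch, where makeCamera2D and the pixel-coordinate convention are assumptions for illustration:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Build a 2D camera: orthographic projection from pixel coordinates,
// translated by the camera position (hypothetical helper).
glm::mat4 makeCamera2D(float width, float height, glm::vec3 camera) {
    glm::mat4 projection = glm::ortho(0.f, width, height, 0.f, -1.f, 1.f);
    glm::mat4 view = glm::translate(glm::mat4(1.f), -camera);
    return projection * view;
}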

Weird Layered Effect During Parallax Mapping

I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to be working fine, except my brick wall has distinct visible layers, whereas the photos in the guide don't show any layers. I was trying to use this code to parallax the topography of the world, but these weird layers show up there too, so I was hoping to find a fix for this.
[Layered wall photo]
[Photo of how it should look]
Here is my modified vertex shader:
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord; // aTexCoords
in vec4 vNormal; // aNormal
in vec4 vTangent; // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void main()
{
    // Normal variables
    vN = normalize(model_view * vNormal).xyz;
    vT = normalize(model_view * vTangent).xyz;
    vec4 veyepos = model_view * vPosition;
    position = veyepos;
    ftexCoord = texCoord;
    // Displacement variables
    vec3 bi = cross(vT, vN);
    FragPos = vec3(model_view * vPosition).xyz;
    vec3 T = normalize(mat3(model_view) * vTangent.xyz);
    vec3 B = normalize(mat3(model_view) * bi);
    vec3 N = normalize(mat3(model_view) * vNormal.xyz);
    mat3 TBN = transpose(mat3(T, B, N));
    TangentLightPos = TBN * light_position.xyz;
    TangentViewPos = TBN * vPosition.xyz;
    TangentFragPos = TBN * FragPos;
    gl_Position = projection * model_view * vPosition;
}
and here is my modified fragment shader:
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
    while (currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }
    return currentTexCoords;
}
void main()
{
    // DO NORMAL MAPPING
    if (mode == 0) {
        vec3 T = normalize(vT);
        vec3 N = normalize(vN);
        vec3 bi = cross(T, N);
        mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
        vec3 L = normalize(light_position - position).xyz;
        vec3 E = normalize(-position).xyz;
        vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
        vec4 eye = changeOfCoord * text;
        vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
        vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
        fColor = amb + diff;
    } else if (mode == 1) { // DO PARALLAX MAPPING
        // offset texture coordinates with Parallax Mapping
        vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
        vec2 texCoords = ftexCoord;
        texCoords = ParallaxMapping(ftexCoord, viewDir);
        // discard samples outside of the default texture coordinate space
        if (texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
            discard;
        // obtain normal from normal map
        vec3 normal = texture(normalMap, texCoords).rgb;
        // values stored in normal texture are in [0,1] range, we need [-1,1] range
        normal = normalize(normal * 2.0 - 1.0);
        // get diffuse color
        vec3 color = texture(colorMap, texCoords).rgb;
        // ambient
        vec3 ambient = 0.1 * color;
        // diffuse
        vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
        float diff = max(dot(lightDir, normal), 0.0);
        vec3 diffuse = diff * color;
        // specular
        vec3 reflectDir = reflect(lightDir, normal);
        vec3 halfwayDir = normalize(lightDir + viewDir);
        float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
        vec3 specular = vec3(0.2) * spec;
        fColor = vec4(ambient + diffuse + 0.0, 1.0);
    }
}
Layers at acute viewing angles are a common artifact of parallax mapping. To improve the result, you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// PARALLAX OCCLUSION MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
    while (currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }
    // get texture coordinates before collision (reverse operations)
    vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
    // get depth after and before collision for linear interpolation
    float afterDepth = currentDepthMapValue - currentLayerDepth;
    float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;
    // interpolation of texture coordinates
    float weight = afterDepth / (afterDepth - beforeDepth);
    vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);
    return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Commonly the bitangent is the cross product of the normal and the tangent in a right-handed system, but that depends on the displacement texture.
vec3 bi = cross(vT, vN); // current
vec3 bi = cross(vN, vT); // right-handed convention
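If the tangent attribute carries handedness in its w component (a common convention, though whether it applies here depends on how the mesh was exported), the bitangent direction can be derived without guessing. A sketch, assuming vTangent.w stores +1 or -1:
// Handedness-aware bitangent (assumes vTangent.w holds +1 or -1):
vec3 bi = cross(vN, vT) * vTangent.w;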
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo

Optimization of landscape rendering with shadow mapping

In my scene I render a landscape (approximately 522,000 triangles, generated from a height map; every point of the height map is used to create the triangle mesh), and shadow mapping with blur is active. I noticed a strong drop in FPS. At the moment I have one light source, a kind of "sun", so the light source's far_plane is extremely high: 512 (the maximum point of the landscape is 128, the minimum point is 0). I need to somehow optimize this to get better performance.
[An example of a generated landscape with a 512x512 height map]
My first idea is to reduce the number of triangles in the grid. I think 522 thousand triangles is too much for a 512x512 height map. Also, when scaling, the landscape is extremely smooth, even without averaging the normals, which is a reason to make the terrain lower-poly.
If I reduce the height of the vertices (scale them down) and correspondingly reduce the light source's far_plane, can this give an increase in performance?
My shaders:
Vertex shader:
#version 130
in vec4 a_Position; // Per-vertex position information we will pass in.
void main() {
    gl_Position = a_Position;
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=3) out;
uniform mat4 u_Model, u_View, u_Projection;
uniform float greenValue = 64;
uniform float greyValue = 96;
out vec3 norm;
out vec4 v_Position;
out mat4 model, view, projection;
out vec4 ambientColor;
out vec4 diffuseColor;
out vec4 specularColor;
void main() {
    vec4 v1Eye = u_View * u_Model * gl_in[0].gl_Position;
    vec4 v2Eye = u_View * u_Model * gl_in[1].gl_Position;
    vec4 v3Eye = u_View * u_Model * gl_in[2].gl_Position;
    vec4 v1v2 = v1Eye - v2Eye;
    vec4 v2v3 = v2Eye - v3Eye;
    vec3 normal = cross(vec3(v1v2), vec3(v2v3));
    normal = normalize(normal);
    if (normal.z < 0) normal = -normal;
    mat4 MVPMatrix = u_Projection * u_View * u_Model;
    for (int i = 0; i < gl_in.length(); i++) {
        v_Position = gl_in[i].gl_Position;
        gl_Position = MVPMatrix * gl_in[i].gl_Position;
        model = u_Model;
        view = u_View;
        projection = u_Projection;
        norm = normal;
        if (v_Position.y < greenValue) {
            ambientColor = vec4(0, 1, 0, 1);
            diffuseColor = ambientColor;
            specularColor = vec4(0, 0, 0, 1);
        } else if (v_Position.y < greyValue) {
            ambientColor = vec4(0.4, 0.4, 0.4, 1);
            diffuseColor = ambientColor;
            specularColor = vec4(0, 0, 0, 1);
        } else {
            ambientColor = vec4(1, 1, 1, 1);
            diffuseColor = ambientColor;
            specularColor = ambientColor;
        }
        EmitVertex();
    }
    EndPrimitive();
}
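One optimization that stands out in this geometry shader: u_Model, u_View and u_Projection are forwarded to the fragment shader as per-vertex outputs, which adds 48 interpolated floats per vertex for values that never change within a draw call. Since they are already uniforms, the fragment shader could declare them directly, for example:
// In the fragment shader, instead of `in mat4 model, view, projection;`:
uniform mat4 u_Model, u_View, u_Projection;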
Fragment shader:
#version 330 core
precision mediump float; // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
#define MAX_LAMPS_COUNT 8 // Max lamps count.
uniform vec3 u_ViewPos; // Camera position
uniform int u_LampsCount; // Lamps count
uniform float brightnessThreshold = 0.3; // brightness threshold variable
uniform float far_plane; // shadow matrix far plane
in mat4 model, view, projection;
in vec4 v_Position; // Position for this fragment in world space
in vec4 ambientColor;
in vec4 diffuseColor;
in vec4 specularColor;
in vec3 norm;
struct Lamp {
    float ambientStrength;
    float diffuseStrength;
    float specularStrength;
    float kc; // constant term
    float kl; // linear term
    float kq; // quadratic term
    int shininess;
    vec3 lampPos; // in world space
    vec3 lampColor;
};
uniform samplerCube shadowMaps[MAX_LAMPS_COUNT];
uniform Lamp u_Lamps[MAX_LAMPS_COUNT];
vec3 fragPos;
vec3 fragWorldPos;
vec3 lampEyePos; // Transformed lamp position into eye space
float shadow;
// for PCF
vec3 sampleOffsetDirections[20] = vec3[] (
    vec3(1, 1, 1), vec3(1, -1, 1), vec3(-1, -1, 1), vec3(-1, 1, 1),
    vec3(1, 1, -1), vec3(1, -1, -1), vec3(-1, -1, -1), vec3(-1, 1, -1),
    vec3(1, 1, 0), vec3(1, -1, 0), vec3(-1, -1, 0), vec3(-1, 1, 0),
    vec3(1, 0, 1), vec3(-1, 0, 1), vec3(1, 0, -1), vec3(-1, 0, -1),
    vec3(0, 1, 1), vec3(0, -1, 1), vec3(0, -1, -1), vec3(0, 1, -1)
);
// output colors
layout(location = 0) out vec4 fragColor;
layout(location = 1) out vec4 fragBrightColor;
float calculateShadow(vec3 lightDir, int index) {
    // get vector between fragment position and light position
    vec3 fragToLight = fragWorldPos - u_Lamps[index].lampPos;
    // now get current linear depth as the length between the fragment and light position
    float currentDepth = length(fragToLight);
    // now test for shadows
    //float bias = max(0.5 * (1.0 - dot(norm, lightDir)), 0.005);
    float bias = 1;
    // PCF
    float viewDistance = length(u_ViewPos - fragWorldPos);
    float diskRadius = (1.0 + (viewDistance / far_plane)) / 25.0;
    for (int i = 0; i < 20; ++i) {
        float closestDepth = texture(shadowMaps[index], fragToLight + sampleOffsetDirections[i] * diskRadius).r;
        closestDepth *= far_plane; // Undo mapping [0;1]
        if (currentDepth - bias > closestDepth) shadow += 1.0;
    }
    shadow /= 20;
    //fragColor = vec4(vec3(closestDepth / far_plane), 1.0); // visualizing
    return shadow;
}
float calculateAttenuation(Lamp lamp) {
    float distance = length(lampEyePos - fragPos);
    return 1.0 / (
        lamp.kc +
        lamp.kl * distance +
        lamp.kq * (distance * distance)
    );
}
vec4 toVec4(vec3 v) {
    return vec4(v, 1);
}
// The entry point for our fragment shader.
void main() {
    fragWorldPos = vec3(model * v_Position);
    // Transform the vertex into eye space
    mat4 mvMatrix = view * model;
    fragPos = vec3(mvMatrix * v_Position);
    vec3 viewDir = normalize(u_ViewPos - fragPos);
    vec3 ambientResult = vec3(0, 0, 0);  // result of ambient lighting for all lamps
    vec3 diffuseResult = vec3(0, 0, 0);  // result of diffuse lighting for all lamps
    vec3 specularResult = vec3(0, 0, 0); // result of specular lighting for all lamps
    for (int i = 0; i < u_LampsCount; i++) {
        lampEyePos = vec3(view * toVec4(u_Lamps[i].lampPos));
        // attenuation
        float attenuation = calculateAttenuation(u_Lamps[i]);
        // ambient
        vec3 ambient = u_Lamps[i].ambientStrength * u_Lamps[i].lampColor * attenuation;
        // diffuse
        vec3 lightDir = normalize(lampEyePos - fragPos);
        float diff = max(dot(norm, lightDir), 0.0);
        vec3 diffuse = u_Lamps[i].diffuseStrength * diff * u_Lamps[i].lampColor * attenuation;
        // specular
        vec3 reflectDir = reflect(-lightDir, norm);
        float spec = pow(max(dot(viewDir, reflectDir), 0.0), u_Lamps[i].shininess);
        vec3 specular = u_Lamps[i].specularStrength * spec * u_Lamps[i].lampColor * attenuation;
        // calculate shadow
        shadow = calculateShadow(lightDir, i);
        // result for this (i) lamp
        ambientResult += ambient;
        diffuseResult += diffuse * (1 - shadow);
        specularResult += specular * (1 - shadow);
    }
    fragColor =
        toVec4(ambientResult) * ambientColor +
        toVec4(diffuseResult) * diffuseColor +
        toVec4(specularResult) * specularColor;
    // brightness calculation
    float brightness = dot(fragColor.rgb, vec3(0.2126, 0.7152, 0.0722));
    if (brightness > brightnessThreshold) fragBrightColor = vec4(fragColor.rgb, 1.0);
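    // note: without an else branch, fragBrightColor is left undefined for
    // fragments below the threshold; the usual fix is:
    // else fragBrightColor = vec4(0.0, 0.0, 0.0, 1.0);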
}
And my shadow shaders:
Vertex shader:
#version 130
attribute vec3 a_Position;
uniform mat4 u_ModelMatrix;
void main() {
    gl_Position = u_ModelMatrix * vec4(a_Position, 1.0);
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=18) out;
uniform mat4 shadowMatrices[6];
out vec4 fragPos; // FragPos from GS (output per emitvertex)
void main() {
    for (int face = 0; face < 6; face++) {
        gl_Layer = face; // built-in variable that specifies to which face we render.
        // for each triangle's vertices
        for (int i = 0; i < 3; i++) {
            fragPos = gl_in[i].gl_Position;
            gl_Position = shadowMatrices[face] * fragPos;
            EmitVertex();
        }
        EndPrimitive();
    }
}
Fragment shader:
#version 330 core
in vec4 fragPos; // world space
uniform vec3 lightPos; // world space
uniform float far_plane; // shadow matrix far plane
void main()
{
    float lightDistance = length(fragPos.xyz - lightPos);
    // map to [0;1] range by dividing by far_plane
    lightDistance = lightDistance / far_plane;
    // write this as modified depth
    gl_FragDepth = lightDistance;
}
I hope for your help in optimizing this scene.

SSAO & Shadow mapping | Shadows do not work with the SSAO

The SSAO in our engine seems to be working; however, I cannot get the SSAO to work with shadow mapping. Here is a screenshot of the bug I currently get when shadows are applied:
[With shadows applied]
But also, depending on the camera view and camera position, random shadows sometimes appear:
[Random shadows depending on camera view and position]
Here is the g-buffer vertex shader:
#version 330 core
layout (location = 0) in vec3 positions;
layout (location = 1) in vec2 texCoords;
layout (location = 2) in vec3 normals;
out vec3 FragPos;
out vec3 ShadowFragPos;
out vec2 TexCoords;
out vec3 Normal;
uniform mat4 model;
uniform mat4 view;
uniform mat4 proj;
void main()
{
    vec4 viewPos = view * model * vec4(positions, 1.0);
    FragPos = viewPos.xyz;
    TexCoords = texCoords;
    mat3 normalMatrix = transpose(inverse(mat3(view * model)));
    Normal = normalMatrix * normals;
    gl_Position = proj * viewPos;
}
Here is the lighting shader:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D gAlbedoSpec;
uniform sampler2D gShadowmap;
uniform sampler2D gSsao;
uniform vec3 cameraPos;
uniform mat4 lightSpaceMatrix;
vec3 Normal;
vec3 FragPos;
uniform vec3 lightPos;
float calculate_shadows(vec4 light_space_pos)
{
    // perform perspective divide
    vec3 projCoords = light_space_pos.xyz / light_space_pos.w;
    // transform to [0,1] range
    projCoords = projCoords * 0.5 + 0.5;
    // get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
    float closestDepth = texture(gShadowmap, projCoords.xy).r;
    // get depth of current fragment from light's perspective
    float currentDepth = projCoords.z;
    // check whether current frag pos is in shadow
    vec3 lightDir = normalize(vec3(2.0f, 4.0f, 1.0f) - FragPos);
    float bias = max(0.05 * (1.0 - dot(Normal, lightDir)), 0.005);
    float shadow = 0.0;
    vec2 texelSize = 1.0 / textureSize(gShadowmap, 0);
    // 8x8 kernel PCF
    float x;
    float y;
    for (y = -3.5; y <= 3.5; y += 1.0)
    {
        for (x = -3.5; x <= 3.5; x += 1.0)
        {
            float pcfDepth = texture(gShadowmap, projCoords.xy + vec2(x, y) * texelSize).r;
            shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
        }
    }
    shadow /= 64.0;
    return shadow;
}
void main(void)
{
    FragPos = texture(gPosition, TexCoords).rgb;
    Normal = texture(gNormal, TexCoords).rgb;
    vec3 Diffuse = texture(gAlbedoSpec, TexCoords).rgb;
    float Specular = texture(gAlbedoSpec, TexCoords).a;
    float AmbientOcclusion = texture(gSsao, TexCoords).r;
    vec3 lighting = vec3(0.3 * Diffuse * AmbientOcclusion);
    vec3 viewDir = normalize(-FragPos);
    vec3 lightDir = normalize(lightPos - FragPos);
    vec3 diffuse = max(dot(Normal, lightDir), 0.0) * Diffuse * vec3(1.0f, 0.5f, 0.3f);
    vec3 halfwayDir = normalize(lightDir + viewDir);
    float spec = pow(max(dot(Normal, halfwayDir), 0.0), 8.0);
    vec3 specular = vec3(1.0f, 0.5f, 0.3f) * spec * Specular;
    float shadow = calculate_shadows(lightSpaceMatrix * vec4(FragPos, 1.0));
    lighting += ((1.0 - shadow) * (diffuse + specular));
    FragColor = vec4(lighting, 1.0f);
}
The textures are bound in the light pass as follows:
// bind the positions texture and store in the first texture slot/unit
glActiveTexture(GL_TEXTURE0); // texture unit 0
glBindTexture(GL_TEXTURE_2D, gbuffer.gPositions); // geometry positions
// bind the normals texture and store in the second texture slot/unit
glActiveTexture(GL_TEXTURE1); // texture unit 1
glBindTexture(GL_TEXTURE_2D, gbuffer.gNormals); // geometry normals
// bind the albedo & specular texture and store in the third texture slot/unit
glActiveTexture(GL_TEXTURE2); // texture unit 2
glBindTexture(GL_TEXTURE_2D, gbuffer.gAlbedoSpec); // geometry albedo/specular
// bind the shadow map texture and store in the fourth texture slot/unit
glActiveTexture(GL_TEXTURE3); // texture unit 3
glBindTexture(GL_TEXTURE_2D, gbuffer.gShadowmap); // shadow map
// bind the SSAO texture and store in the fifth texture slot/unit
glActiveTexture(GL_TEXTURE4); // texture unit 4
glBindTexture(GL_TEXTURE_2D, gbuffer.ssaoColorBuffer); // SSAO color buffer
Finally, here is the calculation of the lightSpaceMatrix:
light_projection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, 1.0f, 7.5f);
light_view = glm::lookAt(glm::vec3(0.0f, 4.0f, 5.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
light_space_matrix = light_projection * light_view;
Any ideas why this could be happening? How do I get shadows to work with SSAO?
Any help is much appreciated.
FragPos is a camera view space position.
light_space_pos, the input parameter to calculate_shadows, has to be a clip space coordinate as seen from the light source.
This means that when you do
float shadow = calculate_shadows(lightSpaceMatrix * vec4(FragPos, 1.0));
lightSpaceMatrix has to be the transformation from camera view space to the clip space of the light source.
To do so, you have to apply 3 transformations:
camera view space to world space, which can be done with the inverse view matrix;
world space to light view space, which is the transformation by light_view;
light view space to light clip space, which is the transformation by light_projection.
So setting light_space_matrix = light_projection * light_view; is not sufficient; it has to be
light_space_matrix = light_projection * light_view * glm::inverse(view);
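For illustration, a minimal host-side sketch of that corrected setup, where `view` (the camera view matrix) and `program` (the lighting-pass shader) are assumptions about the surrounding application code:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

// camera view space -> world space -> light view space -> light clip space
glm::mat4 light_space_matrix = light_projection * light_view * glm::inverse(view);
glUniformMatrix4fv(glGetUniformLocation(program, "lightSpaceMatrix"),
                   1, GL_FALSE, glm::value_ptr(light_space_matrix));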

Atmospheric scattering OpenGL 3.3

I'm currently trying to convert a shader by Sean O'Neil to version 330 so I can try it out in an application I'm writing. I'm having some issues with deprecated functions, so I replaced them, but I'm almost completely new to GLSL, so I probably made a mistake somewhere.
Original shaders can be found here:
http://www.gamedev.net/topic/592043-solved-trying-to-use-atmospheric-scattering-oneill-2004-but-get-black-sphere/
My horrible attempt at converting them:
Vertex shader:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexNormal_modelspace;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 2;
const float fSamples = 2.0;
invariant out vec3 v3Direction;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
out vec4 dgl_SecondaryColor;
out vec4 dgl_Color;
float scale(float fCos)
{
    float x = 1.0 - fCos;
    return fScaleDepth * exp(-0.00287 + x * (0.459 + x * (3.83 + x * (-6.80 + x * 5.25))));
}
void main(void)
{
    //gg_FrontColor = vec3(1.0, 0.0, 0.0);
    //gg_FrontSecondaryColor = vec3(0.0, 1.0, 0.0);
    // Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
    vec3 v3Pos = vertexPosition_modelspace;
    vec3 v3Ray = v3Pos - v3CameraPos;
    float fFar = length(v3Ray);
    v3Ray /= fFar;
    // Calculate the ray's starting position, then calculate its scattering offset
    vec3 v3Start = v3CameraPos;
    float fHeight = length(v3Start);
    float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fCameraHeight));
    float fStartAngle = dot(v3Ray, v3Start) / fHeight;
    float fStartOffset = fDepth * scale(fStartAngle);
    // Initialize the scattering loop variables
    gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
    gl_FrontSecondaryColor = vec4(0.0, 0.0, 0.0, 0.0);
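    // note: gl_FrontColor and gl_FrontSecondaryColor do not exist in
    // #version 330 core; these writes only compile under a compatibility
    // profile (see the answer below).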
    float fSampleLength = fFar / fSamples;
    float fScaledLength = fSampleLength * fScale;
    vec3 v3SampleRay = v3Ray * fSampleLength;
    vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
    // Now loop through the sample rays
    vec3 v3FrontColor = vec3(0.2, 0.1, 0.0);
    for (int i = 0; i < nSamples; i++)
    {
        float fHeight = length(v3SamplePoint);
        float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
        float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
        float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
        float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
        vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
        v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
        v3SamplePoint += v3SampleRay;
    }
    // Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
    gl_FrontSecondaryColor.rgb = v3FrontColor * fKmESun;
    gl_FrontColor.rgb = v3FrontColor * (v3InvWavelength * fKrESun);
    gl_Position = MVP * vec4(vertexPosition_modelspace, 1);
    v3Direction = v3CameraPos - v3Pos;
    dgl_SecondaryColor = gl_FrontSecondaryColor;
    dgl_Color = gl_FrontColor;
}
Fragment shader:
#version 330 core
out vec4 dgl_FragColor;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
invariant in vec3 v3Direction;
in vec4 dgl_SecondaryColor;
in vec4 dgl_Color;
uniform mat4 MV;
void main(void)
{
    float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
    float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos * fCos) / pow(1.0 + g2 - 2.0 * g * fCos, 1.5);
    dgl_FragColor = dgl_Color + fMiePhase * dgl_SecondaryColor;
    dgl_FragColor.a = dgl_FragColor.b;
}
I wrote a function to render a sphere, and I'm trying to render this shader onto an inverted version of it. The sphere itself works completely fine, with normals and all. My problem is that the sphere gets rendered all black, so the shader is not working.
Edit: Got the sun to draw, but the sky is still all black.
This is how I'm trying to render the atmosphere inside my main rendering loop:
glUseProgram(programAtmosphere);
glBindTexture(GL_TEXTURE_2D, 0);
//######################
glUniform3f(v3CameraPos, getPlayerPos().x, getPlayerPos().y, getPlayerPos().z);
glm::vec3 lightDirection = lightPos/length(lightPos);
glUniform3f(v3LightPos, lightDirection.x , lightDirection.y, lightDirection.z);
glUniform3f(v3InvWavelength, 1.0f / pow(0.650f, 4.0f), 1.0f / pow(0.570f, 4.0f), 1.0f / pow(0.475f, 4.0f));
glUniform1fARB(fCameraHeight, 10.0f+length(getPlayerPos()));
glUniform1fARB(fCameraHeight2, (10.0f+length(getPlayerPos()))*(10.0f+length(getPlayerPos())));
glUniform1fARB(fInnerRadius, 10.0f);
glUniform1fARB(fInnerRadius2, 100.0f);
glUniform1fARB(fOuterRadius, 10.25f);
glUniform1fARB(fOuterRadius2, 10.25f*10.25f);
glUniform1fARB(fKrESun, 0.0025f * 20.0f);
glUniform1fARB(fKmESun, 0.0015f * 20.0f);
glUniform1fARB(fKr4PI, 0.0025f * 4.0f * 3.141592653f);
glUniform1fARB(fKm4PI, 0.0015f * 4.0f * 3.141592653f);
glUniform1fARB(fScale, 1.0f / 0.25f);
glUniform1fARB(fScaleDepth, 0.25f);
glUniform1fARB(fScaleOverScaleDepth, 4.0f / 0.25f );
glUniform1fARB(g, -0.990f);
glUniform1f(g2, -0.990f * -0.990f);
Any ideas?
Edit: updated the code, and added a picture.
I think the problem there is that you write to 'FragColor', which may be a 'dead end' output variable in the fragment shader, since you must explicitly bind it to a color number before linking the program:
glBindFragDataLocation(programAtmosphere, 0, "FragColor");
or use this in the shader:
layout(location = 0) out vec4 FragColor;
You may try to use the builtin out vars instead: gl_FragColor, which is an alias for gl_FragData[0] and therefore the same as the above binding.
EDIT: Forgot to say: when using the deprecated builtins, you must have a compatibility declaration:
#version 330 compatibility
EDIT 2: To test the binding, I'd write a constant color to it, to rule out possible calculation errors, since these may not yield the expected result because of errors or zero input.
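For instance, a minimal sketch of that test in the fragment shader from the question:
// Temporary test: bypass the scattering math and write a constant color.
// If the sphere turns magenta, the output binding works and the problem
// lies in the computed values or the uniforms.
dgl_FragColor = vec4(1.0, 0.0, 1.0, 1.0);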