GLSL water shader normals morph as light passes over?

I decided to follow the classic guide for writing a basic GLSL water shader using the sum-of-sines method. I attempted to implement it inside of Processing 5, where I made a field of vertices in a PShape to build a mesh to experiment with. I then overwrote the default shaders with my own vertex and fragment shaders, and I dropped in a directional light so I could actually see the normals. I made sure the directional light was movable as well, so I could check whether the normals worked from all angles.
I got the waves to form height correctly and had some form of normals working, but the normals interact really strangely. When my light passes across the center axis of my water plane, the normals seem to morph between the different waves and change based on the light angle. The gif I captured was too large to post inline, so I'm sure seeing it will explain better than my words:
https://imgur.com/PCznL7U
You may need to view the link at full size to see the whole picture. Note how, as the light pans from left to right, the normals of the waves seem to morph between two sets. This is especially apparent as it crosses the center. It's as if the normals are inconsistent depending on the direction the object is being lit from.
The sphere in the middle is a normal sphere using the standard Processing shader. I left it there as a reference, to show the waves as well as to confirm where my lighting was and that it was working fine.
Any ideas what I did wrong? I know I did some math incorrectly somewhere.
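For reference, the sum-of-sines math I'm trying to implement (this is what the WaveHeight/getDx/getDy functions below are meant to compute; $L_i$ is wavelength, $s_i$ speed, $A_i$ amplitude, and $D_i$ the direction vector):

$$H(x, y, t) = \sum_i A_i \sin\big(\omega_i \, (D_i \cdot (x, y)) + t \, \varphi_i\big), \qquad \omega_i = \frac{2\pi}{L_i}, \qquad \varphi_i = s_i \, \omega_i$$

$$\frac{\partial H}{\partial x} = \sum_i \omega_i \, D_{i,x} \, A_i \cos\big(\omega_i \, (D_i \cdot (x, y)) + t \, \varphi_i\big)$$

with the (unnormalized) normal taken as $n = (-\partial H / \partial x,\; -\partial H / \partial y,\; 1)$.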
EDIT: It was recommended that I add the (lengthy) source code [which I should have done from the start].
Vertex Shader:
#define PROCESSING_LIGHT_SHADER
#define MAXWAVES 6
const float pi = 3.14159;
uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
uniform float time; //Time since shader started
attribute vec4 position; //Position the vertex from Processing
attribute vec4 color; //Color of the vertex from Processing
attribute vec3 normal; //Normal of the vertex from Processing
attribute vec4 ambient;
attribute vec4 specular;
attribute vec4 emissive;
attribute float shininess;
varying vec4 vertColor; //Color passed on to fragment shader
varying vec4 backVertColor; //Color passed on to fragment shader
uniform float waveLength[MAXWAVES]; //Length of wave
uniform float speed[MAXWAVES]; //Cycle speed of wave
uniform float amplitude[MAXWAVES]; //Wave cycle height
uniform float xDirection[MAXWAVES];
uniform float yDirection[MAXWAVES]; //Flow vector of wave
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightAmbient[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec3 lightFalloff[8];
uniform vec2 lightSpot[8];
varying vec3 Normal;
varying vec3 FragPos;
varying vec3 Vec;
varying vec3 lightDir;
//Some constants that the Processing shader used
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);

float falloffFactor(vec3 lightPos, vec3 vertPos, vec3 coeff) {
    vec3 lpv = lightPos - vertPos;
    vec3 dist = vec3(one_float);
    dist.z = dot(lpv, lpv);
    dist.y = sqrt(dist.z);
    return one_float / dot(dist, coeff);
}

float spotFactor(vec3 lightPos, vec3 vertPos, vec3 lightNorm, float minCos, float spotExp) {
    vec3 lpv = normalize(lightPos - vertPos);
    vec3 nln = -one_float * lightNorm;
    float spotCos = dot(nln, lpv);
    return spotCos <= minCos ? zero_float : pow(spotCos, spotExp);
}

float lambertFactor(vec3 lightDir, vec3 vecNormal) {
    return max(zero_float, dot(lightDir, vecNormal));
}

float blinnPhongFactor(vec3 lightDir, vec3 vertPos, vec3 vecNormal, float shine) {
    vec3 np = normalize(vertPos);
    vec3 ldp = normalize(lightDir - np);
    return pow(max(zero_float, dot(ldp, vecNormal)), shine);
}
//Returns the height of a vertex given a single wave param
float WaveHeight(int waveNumber, float x, float y) {
    vec2 direction = vec2(xDirection[waveNumber], yDirection[waveNumber]);
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    return amplitude[waveNumber] * sin(theta * frequency + time * phase);
}

//Returns the height of a vertex given all the active waves
//and its current x/y value
float WaveSum(float x, float y) {
    float height = 0.0;
    for (int i = 0; i < MAXWAVES; i++) {
        height += WaveHeight(i, x, y);
    }
    return height;
}

//Partial derivative of a single wave's height with respect to y
float getDy(int waveNumber, float x, float y) {
    vec2 direction = vec2(xDirection[waveNumber], yDirection[waveNumber]);
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.y * frequency;
    return A * cos(theta * frequency + time * phase);
}

//Partial derivative of a single wave's height with respect to x
float getDx(int waveNumber, float x, float y) {
    vec2 direction = vec2(xDirection[waveNumber], yDirection[waveNumber]);
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.x * frequency;
    return A * cos(theta * frequency + time * phase);
}

//Returns the normal vector for each vertex
vec3 getNormal(float x, float y) {
    float dx = 0.0;
    float dy = 0.0;
    //Sum the partial derivatives of each wave
    for (int i = 0; i < MAXWAVES; i++) {
        dx += getDx(i, x, y);
        dy += getDy(i, x, y);
    }
    vec3 n = vec3(-dx, -dy, 1.0);
    return normalize(n);
}
void main() {
    vec4 pos = position; //Copy the position from Processing because it's read-only
    pos.z = WaveSum(pos.x, pos.y);
    gl_Position = transform * pos; //Transform to clip space
    vec3 ecVertex = vec3(modelview * pos);

    // Normal vector in eye coordinates
    vec3 Normal = getNormal(pos.x, pos.y);
    vec3 ecNormal = normalize(normalMatrix * Normal);
    vec3 ecNormalInv = ecNormal * -one_float;

    // Light calculations
    vec3 totalAmbient = vec3(0, 0, 0);
    vec3 totalFrontDiffuse = vec3(0, 0, 0);
    vec3 totalFrontSpecular = vec3(0, 0, 0);
    vec3 totalBackDiffuse = vec3(0, 0, 0);
    vec3 totalBackSpecular = vec3(0, 0, 0);

    for (int i = 0; i < 8; i++) {
        if (lightCount == i) break;

        vec3 lightPos = lightPosition[i].xyz;
        bool isDir = lightPosition[i].w < one_float;
        float spotCos = lightSpot[i].x;
        float spotExp = lightSpot[i].y;

        vec3 lightDir;
        float falloff;
        float spotf;

        if (isDir) {
            falloff = one_float;
            lightDir = -one_float * lightNormal[i];
        } else {
            falloff = falloffFactor(lightPos, ecVertex, lightFalloff[i]);
            lightDir = normalize(lightPos - ecVertex);
        }

        spotf = spotExp > zero_float ? spotFactor(lightPos, ecVertex, lightNormal[i],
                                                  spotCos, spotExp)
                                     : one_float;

        if (any(greaterThan(lightAmbient[i], zero_vec3))) {
            totalAmbient += lightAmbient[i] * falloff;
        }
        if (any(greaterThan(lightDiffuse[i], zero_vec3))) {
            totalFrontDiffuse += lightDiffuse[i] * falloff * spotf *
                                 lambertFactor(lightDir, ecNormal);
            totalBackDiffuse += lightDiffuse[i] * falloff * spotf *
                                lambertFactor(lightDir, ecNormalInv);
        }
        if (any(greaterThan(lightSpecular[i], zero_vec3))) {
            totalFrontSpecular += lightSpecular[i] * falloff * spotf *
                                  blinnPhongFactor(lightDir, ecVertex, ecNormal, shininess);
            totalBackSpecular += lightSpecular[i] * falloff * spotf *
                                 blinnPhongFactor(lightDir, ecVertex, ecNormalInv, shininess);
        }
    }

    // Calculating final color as result of all lights (plus emissive term).
    // Transparency is determined exclusively by the diffuse component.
    vertColor = vec4(totalFrontDiffuse, 1) * color;
    backVertColor = vec4(totalBackDiffuse, 1) * color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor; //Color from vertshader
varying vec4 backVertColor; //Color from vertshader
void main() {
    gl_FragColor = gl_FrontFacing ? vertColor : backVertColor;
}
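One sanity check worth adding (a sketch on my part, not code from the running project): compare the analytic normals against numerically differentiated heights, since the two should agree. This reuses the WaveSum function from the vertex shader above; eps is an arbitrary step size.

//Hypothetical debug helper: approximate the gradient of WaveSum with
//central differences and build the normal exactly the way getNormal does.
//If this disagrees with getNormal, the analytic derivatives are wrong.
vec3 getNormalFD(float x, float y) {
    float eps = 0.01; //step size; tune to the mesh spacing
    float dx = (WaveSum(x + eps, y) - WaveSum(x - eps, y)) / (2.0 * eps);
    float dy = (WaveSum(x, y + eps) - WaveSum(x, y - eps)) / (2.0 * eps);
    return normalize(vec3(-dx, -dy, 1.0));
}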

Related

Problem with normals when drawing instanced mesh

I'm rendering a sphere with instanced drawing, while rotating the model-view-matrix around the Y axis.
It looks ok at the beginning:
But at another angle, things get worse:
It looks to me like a problem with normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
    v_position = transformedPosition;
    v_normal = u_normal_matrix * a_normal;
    v_color = a_color;
    v_coord = a_coord;
    gl_Position = u_projection_matrix * transformedPosition;
}
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec3 lightPosition = vec3(0.0); // XXX

    // set diffuse and specular colors
    vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
    vec3 cSpecular = vec3(0.3);

    // lighting calculations
    vec3 N = normalize(v_normal);
    vec3 L = normalize(lightPosition - v_position.xyz);
    vec3 E = normalize(-v_position.xyz);
    vec3 H = normalize(L + E);

    // Calculate coefficients.
    float phong = max(dot(N, L), 0.0);
    const float kMaterialShininess = 20.0;
    const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
    float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;

    // diffuse coefficient
    vec3 diffuse = phong * cDiffuse;

    // specular coefficient
    vec3 specular = blinn * cSpecular;

    gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL on the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
    float sectorStep = TWO_PI / sectorCount;
    float stackStep = PI / stackCount;

    for (int i = 0; i <= stackCount; ++i) {
        float stackAngle = HALF_PI - i * stackStep;
        float xy = radius * cosf(stackAngle);
        float z = radius * sinf(stackAngle);

        for (int j = 0; j <= sectorCount; ++j) {
            float sectorAngle = j * sectorStep;
            float x = xy * cosf(sectorAngle);
            float y = xy * sinf(sectorAngle);
            float nx = x / radius;
            float ny = y / radius;
            float nz = z / radius;
            float s = (float)j / sectorCount;
            float t = (float)i / stackCount;
            batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
        }
    }

    for (int i = 0; i < stackCount; ++i) {
        float k1 = i * (sectorCount + 1);
        float k2 = k1 + sectorCount + 1;

        for (int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
            if (i != 0) {
                if (frontFace == CCW) {
                    batch.addIndices(k1, k1 + 1, k2);
                } else {
                    batch.addIndices(k1, k2, k1 + 1);
                }
            }
            if (i != (stackCount - 1)) {
                if (frontFace == CCW) {
                    batch.addIndices(k1 + 1, k2 + 1, k2);
                } else {
                    batch.addIndices(k1 + 1, k2, k2 + 1);
                }
            }
        }
    }
}
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
    .setIdentity()
    .translate(0, -150, -600)
    .rotateY(clock()->getTime() * 0.5f);

State()
    .setShader(shader)
    .setShaderMatrix<MV>(camera.getMVMatrix())
    .setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
    .setShaderMatrix<NORMAL>(camera.getNormalMatrix())
    .apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework which provides among other things high level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source-code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it does the job of solving the "normal problem" when instancing is used, and non-uniform scaling is avoided.)
Here is a gist with the source-code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal-matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
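One caveat to note (my addition, not part of the original answer): the w = 0 trick matches the proper normal transform only while u_mv_matrix * a_matrix contains no non-uniform scale. If non-uniform scaling is ever introduced, the inverse-transpose is still required; a sketch, assuming a GLSL version that provides inverse() (desktop GLSL 1.40+ or WebGL2):

// Per-instance normal matrix for the non-uniform-scaling case.
// inverse() per vertex is expensive; in practice you would compute
// this on the CPU once per instance instead.
mat3 normalMat = transpose(inverse(mat3(u_mv_matrix * a_matrix)));
v_normal = normalMat * a_normal;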

Has anyone run into the same issue I'm having with SSAO in OpenGL?

Most of the shader code follows the LearnOpenGL tutorial. I've made sure that the g-buffer and noise data passed into the shader are correct. It looks like some kind of dislocation, but I really can't figure out why this happens.
misplaced SSAO
#version 450 core

out float OUT_FragColor;

in vec2 TexCoords;

uniform sampler2D g_position;
uniform sampler2D g_normal;
uniform sampler2D noise_texture;

struct CameraInfo
{
    vec4 position;
    mat4 view;
    mat4 projection;
};

layout(std140, binding = 0) uniform Camera
{
    CameraInfo camera;
};

//Kernel of sample offsets; the declaration was missing from the excerpt,
//so the type and size here are assumed (LearnOpenGL uses a 64-sample kernel)
uniform vec4 sample_array[64];

float radius = 0.5;
float bias = 0.025;

uniform int noise_tex_size;

void main()
{
    const vec2 noise_scale = vec2(1280.0 / noise_tex_size, 720.0 / noise_tex_size);
    vec3 frag_pos = texture(g_position, TexCoords).xyz;
    vec3 normal = normalize(texture(g_normal, TexCoords).xyz);
    vec3 random_vec = normalize(texture(noise_texture, TexCoords * noise_scale).xyz);

    vec3 tangent = normalize(random_vec - normal * dot(random_vec, normal));
    vec3 bitangent = cross(normal, tangent);
    mat3 TBN = mat3(tangent, bitangent, normal);

    float occlusion = 0.f;
    for (int i = 0; i < sample_array.length(); ++i)
    {
        vec3 sample_pos = TBN * sample_array[i].xyz;
        sample_pos = frag_pos + sample_pos * radius;

        vec4 offset = vec4(sample_pos, 1.0);
        offset = camera.projection * offset;  // from view to clip space
        offset.xyz /= offset.w;               // perspective divide ?????
        offset.xyz = offset.xyz * 0.5 + 0.5;  // transform to range 0.0 - 1.0

        float sample_depth = texture(g_position, offset.xy).z;
        float range_check = smoothstep(0.f, 1.f, radius / abs(frag_pos.z - sample_depth));
        occlusion += (sample_depth >= sample_pos.z + bias ? 1.0 : 0.0) * range_check; //ignore sample points too near the origin point
    }
    occlusion = 1.f - (occlusion / sample_array.length());
    OUT_FragColor = occlusion;
}
Transforming the g_position and g_normal into view space:
FragPos = (camera.view * vec4(WorldPos, 1.0)).xyz;
mat4 normal_matrix = camera.view * mat4(transpose(inverse(mat3(model))));
FragNormal = mat3(normal_matrix) * normal;
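For comparison, a minimal sketch of g-buffer writes that are consistently in view space, which is what the camera.projection step in the SSAO pass above assumes (following the LearnOpenGL approach; model, aPos, and aNormal are assumed names, the rest comes from the code above):

// Geometry pass, sketched: both outputs in view space so that
// camera.projection in the SSAO pass projects them correctly.
FragPos = (camera.view * model * vec4(aPos, 1.0)).xyz;
FragNormal = transpose(inverse(mat3(camera.view * model))) * aNormal;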

Weird Layered Effect During Parallax Mapping

I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to be working fine, except that my brick wall has distinct visible layers, whereas the photos in the guide don't show any layers. I was trying to use this code to parallax the topography of the world, but these weird layers show up there too, so I was hoping to find a fix for this.
Layered wall photo
Photo of how it should look
Here is my modified vertex shader:
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord; // aTexCoords
in vec4 vNormal; // aNormal
in vec4 vTangent; // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void main()
{
    // Normal variables
    vN = normalize(model_view * vNormal).xyz;
    vT = normalize(model_view * vTangent).xyz;

    vec4 veyepos = model_view * vPosition;
    position = veyepos;
    ftexCoord = texCoord;

    // Displacement variables
    vec3 bi = cross(vT, vN);
    FragPos = vec3(model_view * vPosition).xyz;
    vec3 T = normalize(mat3(model_view) * vTangent.xyz);
    vec3 B = normalize(mat3(model_view) * bi);
    vec3 N = normalize(mat3(model_view) * vNormal.xyz);
    mat3 TBN = transpose(mat3(T, B, N));
    TangentLightPos = TBN * light_position.xyz;
    TangentViewPos = TBN * vPosition.xyz;
    TangentFragPos = TBN * FragPos;

    gl_Position = projection * model_view * vPosition;
}
And here is my modified fragment shader:
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;

    while (currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }

    return currentTexCoords;
}

void main()
{
    // DO NORMAL MAPPING
    if (mode == 0) {
        vec3 T = normalize(vT);
        vec3 N = normalize(vN);
        vec3 bi = cross(T, N);
        mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
        vec3 L = normalize(light_position - position).xyz;
        vec3 E = normalize(-position).xyz;
        vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
        vec4 eye = changeOfCoord * text;
        vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
        vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
        fColor = amb + diff;
    } else if (mode == 1) { // DO PARALLAX MAPPING
        // offset texture coordinates with Parallax Mapping
        vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
        vec2 texCoords = ftexCoord;
        texCoords = ParallaxMapping(ftexCoord, viewDir);
        // discard samples outside of the default texture coordinate space
        if (texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
            discard;

        // obtain normal from normal map
        vec3 normal = texture(normalMap, texCoords).rgb;
        // values stored in the normal texture are in [0,1] range; we need [-1,1]
        normal = normalize(normal * 2.0 - 1.0);
        // get diffuse color
        vec3 color = texture(colorMap, texCoords).rgb;
        // ambient
        vec3 ambient = 0.1 * color;
        // diffuse
        vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
        float diff = max(dot(lightDir, normal), 0.0);
        vec3 diffuse = diff * color;
        // specular
        vec3 reflectDir = reflect(lightDir, normal);
        vec3 halfwayDir = normalize(lightDir + viewDir);
        float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
        vec3 specular = vec3(0.2) * spec;

        fColor = vec4(ambient + diffuse + 0.0, 1.0);
    }
}
Layers at acute viewing angles are a common artifact of steep parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;

    while (currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }

    // get texture coordinates before collision (reverse operations)
    vec2 prevTexCoords = currentTexCoords + deltaTexCoords;

    // get depth after and before collision for linear interpolation
    float afterDepth = currentDepthMapValue - currentLayerDepth;
    float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;

    // interpolation of texture coordinates
    float weight = afterDepth / (afterDepth - beforeDepth);
    vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);

    return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Conventionally, the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture:
vec3 bi = cross(vT, vN); // current
vec3 bi = cross(vN, vT); // suggested
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo

oversaturation in BRDF calculation

Edit:
In hindsight those images may be correct, since they're just showing the vector differences. So, assuming they're correct, the issue is actually somewhere in the code regarding the BRDF. I've added the full shader code, and I'm attaching a new screenshot showing the artifacts I'm seeing. It seems to be oversaturated at certain angles.
The issue is potentially in the distribution. I tried a Beckmann distribution model as well, and it showed the same type of issue.
See here as the light source moves down over the terrain. It's oversaturating on the right-hand side:
light at horizon
light just above horizon
I'm having some issues calculating directions in the vertex shader; the direction is skewed toward one corner (the origin).
I create the terrain using instancing; however, the same issue happens if I just use a static plane.
My vertex shader looks like this (using Ogre3D):
#version 330 compatibility

#define MAP_HEIGHT_FACTOR 50000
#define MAP_SCALE_FACTOR 100

// attributes
in vec4 blendIndices;
in vec4 uv0;
in vec4 uv1;
in vec4 uv2;
in vec4 position;
in vec2 vtx_texcoord0;

uniform mat4 viewProjMatrix;
uniform mat4 modelMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform mat4 worldMatrix;
uniform vec3 cameraPosition;
uniform vec3 sunPosition;

out vec4 vtxPosWorld;
out vec3 lightDirection;
out vec3 viewVector;

uniform sampler2D heightmap;
uniform mat4 worldViewProjMatrix;

void main()
{
    vec4 vtxPosWorld = vec4((gl_Vertex.x * MAP_SCALE_FACTOR) + uv0.w,
                            (gl_Vertex.y * MAP_SCALE_FACTOR) + uv1.w,
                            (gl_Vertex.z * MAP_SCALE_FACTOR) + uv2.w,
                            1.0) * worldMatrix;

    l_texcoord0 = vec2(vtxPosWorld.x / (8192 * MAP_SCALE_FACTOR), vtxPosWorld.z / (8192 * MAP_SCALE_FACTOR));

    vec4 hmt = texture(heightmap, l_texcoord0);
    height = hmt.x * MAP_HEIGHT_FACTOR;

    // take the height from the heightmap
    vtxPosWorld = vec4(vtxPosWorld.x, height, vtxPosWorld.z, vtxPosWorld.w);

    lightDirection = vec4(normalize(vec4(sunPosition, 1.0)) * viewMatrix).xyz;
    viewVector = normalize((vec4(cameraPosition, 1.0) * viewMatrix).xyz - (vtxPosWorld * viewMatrix).xyz);

    l_Position = worldViewProjMatrix * vtxPosWorld;
}
Fragment shader:
#version 330 compatibility

#define TERRAIN_SIZE 8192.0
#define HEIGHT_SCALE_FACTOR 50000
#define MAP_SCALE_FACTOR 100
#define M_PI 3.1415926535897932384626433832795

in vec2 l_texcoord0;
in vec4 vtxPosWorld;
in vec3 viewVector;

uniform vec3 sunPosition;
uniform vec3 cameraPosition;
uniform sampler2D heightmap;

float G1V(float dotP, float k)
{
    return 1.0f / (dotP * (1.0f - k) + k);
}

float calcBRDF(vec3 normal, float fresnel, float MFD, vec3 sunColor) {
    float F = fresnel;
    vec3 Nn = normalize(normal.xyz);
    vec3 Vn = viewVector;
    vec3 Ln = lightDirection;
    vec3 Hn = normalize(viewVector + lightDirection);

    float NdotV = max(dot(Nn, Vn), 0.0);
    float NdotL = max(dot(Nn, Ln), 0.0);
    float NdotH = max(dot(Nn, Hn), 0.1);
    float VdotH = max(dot(Vn, Hn), 0.0);
    float LdotH = max(dot(Ln, Hn), 0.0);

    // Microfacet Distribution
    float denom, alpha, beckmannD, GGXD;
    float NdotHSqr = NdotH * NdotH;
    float alphaSqr = MFD * MFD;

    // GGX distribution (better performance)
    denom = NdotHSqr * (alphaSqr - 1.0) + 1.0f;
    GGXD = alphaSqr / (M_PI * pow(denom, 2));

    float k = MFD / 2.0f;
    float GGX = G1V(NdotL, k) * G1V(NdotV, k);

    return GGXSpecular = F * GGXD * GGX;
}

float calcFresnel(float R) {
    vec3 Hn = normalize(viewVector + lightDirection);
    vec3 Vn = viewVector;
    vec3 Ln = lightDirection;
    float VdotH = dot(Vn, Hn);
    float NdotL = dot(Hn, Vn);
    float fresnel = R + (1 - R) * pow((1 - NdotL), 5);
    return fresnel;
}

vec3 calcNormal(sampler2D heightmap, vec2 texcoord) {
    const vec2 size = vec2(MAP_SCALE_FACTOR, 0.0);
    vec3 off = ivec3(1, 0, 1) / TERRAIN_SIZE;
    float hL = texture2D(heightmap, texcoord - off.xy).x * HEIGHT_SCALE_FACTOR;
    float hR = texture2D(heightmap, texcoord + off.xy).x * HEIGHT_SCALE_FACTOR;
    float hD = texture2D(heightmap, texcoord - off.yz).x * HEIGHT_SCALE_FACTOR;
    float hU = texture2D(heightmap, texcoord + off.yz).x * HEIGHT_SCALE_FACTOR;
    vec3 va = normalize(vec3(size.xy, (hL - hR)));
    vec3 vb = normalize(vec3(size.yx, (hD - hU)));
    return vec3(1.0, 1.0, 1.0);
    return normalize(cross(va, vb) / 2 + 0.5);
}

void main()
{
    vec3 normal = calcNormal(heightmap, l_texcoord0);
    float N = 1.69;
    float microFacetDistribution = 1.5;
    vec3 sunColor = vec3(1.0, 1.0, 1.0);

    float Rfactor = calcFresnelReflectance(N);
    float fresnel = calcFresnel(Rfactor);
    float brdf = calcBRDF(normal, fresnel, microFacetDistribution, sunColor);
    float conservedBrdf = clamp(brdf, 0.0, fresnel);

    gl_FragColor.rgb = vec4(0.5, 0.5, 0.5, 1.0) * conservedBrdf;
}
I've tried using view space, world space, etc. It seems like a simple/silly problem, but I can't figure it out :|
Any suggestions appreciated.
Of course the answer was something silly.
First of all, the normals were incorrect. That caused a skew in the light direction, which made the light appear to hit from one direction only.
Secondly, the light direction itself needed to be negated.
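For reference, a typical central-difference heightmap normal for a y-up terrain, sketched against the uniforms and #defines from the fragment shader above (note that the posted calcNormal returns vec3(1.0) before the cross product is ever reached, and the /2 + 0.5 remap is a texture-packing step that doesn't belong in lighting math):

vec3 calcNormal(sampler2D heightmap, vec2 texcoord)
{
    vec2 off = vec2(1.0 / TERRAIN_SIZE, 0.0);
    float hL = texture(heightmap, texcoord - off.xy).x * HEIGHT_SCALE_FACTOR;
    float hR = texture(heightmap, texcoord + off.xy).x * HEIGHT_SCALE_FACTOR;
    float hD = texture(heightmap, texcoord - off.yx).x * HEIGHT_SCALE_FACTOR;
    float hU = texture(heightmap, texcoord + off.yx).x * HEIGHT_SCALE_FACTOR;
    // y is up; 2.0 * MAP_SCALE_FACTOR is the world-space distance between
    // the two samples in each direction
    return normalize(vec3(hL - hR, 2.0 * MAP_SCALE_FACTOR, hD - hU));
}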

Spotlight with shadows becomes square-like

I've been following a two-part tutorial on shadow mapping from OGLDev (part 1, part 2) for a couple of days now, and I've nearly finished implementing it.
My problem is that when I enable shadow maps, the illuminated region in which shadows appear shows up as a "strip", and I'm unsure whether this is intended in the tutorials or not.
Perhaps images will help explain my problem.
However shadows are functional:
My vertex shader for spotlights:
#version 330
in vec2 textureCoordinate;
in vec4 vertex;
uniform mat4 mMatrix;
uniform mat4 pMatrix;
uniform mat4 vMatrix;
uniform mat4 lightV;
uniform mat4 lightP;
uniform vec3 normal;
uniform vec3 eye;
out vec2 TexCoord0;
out vec4 LightSpacePos;
out vec3 Normal0;
out vec3 WorldPos0;
out vec3 EyePos;
void main()
{
    EyePos = eye;
    TexCoord0 = textureCoordinate;
    WorldPos0 = (mMatrix * vertex).xyz;
    Normal0 = (mMatrix * vec4(normal, 0.0)).xyz;
    mat4 lightMVP = lightP * inverse(lightV) * mMatrix;
    LightSpacePos = lightMVP * vertex;
    mat4 worldMVP = pMatrix * vMatrix * mMatrix;
    gl_Position = worldMVP * vertex;
}
and the fragment shader:
#version 330
struct BaseLight
{
    vec4 color;
    float intensity;
};

struct Attenuation
{
    float constant;
    float linear;
    float exponent;
};

struct PointLight
{
    BaseLight base;
    Attenuation atten;
    vec3 position;
    float range;
};

struct SpotLight
{
    struct PointLight base;
    vec3 direction;
    float cutoff;
};

in vec2 TexCoord0;
in vec3 WorldPos0;
in vec3 EyePos;
in vec4 LightSpacePos;

out vec4 fragColor;

uniform sampler2D sampler;
uniform sampler2D shadowMap;

in vec3 Normal0;

uniform float specularIntensity;
uniform float specularPower;
uniform SpotLight spotLight;

float CalcShadowFactor(vec4 LightSpacePos)
{
    vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
    vec2 UVCoords;
    UVCoords.x = 0.5 * ProjCoords.x + 0.5;
    UVCoords.y = 0.5 * ProjCoords.y + 0.5;
    float z = 0.5 * ProjCoords.z + 0.5;
    float Depth = texture(shadowMap, UVCoords).x;
    if (Depth < z + 0.00001)
        return 0.5;
    else
        return 1.0;
}

vec4 CalcLightInternal(BaseLight Light, vec3 LightDirection, vec3 Normal, float ShadowFactor)
{
    vec4 AmbientColor = Light.color * Light.intensity;
    float DiffuseFactor = dot(Normal, -LightDirection);

    vec4 DiffuseColor = vec4(0, 0, 0, 0);
    vec4 SpecularColor = vec4(0, 0, 0, 0);

    if (DiffuseFactor > 0) {
        DiffuseColor = Light.color * Light.intensity * DiffuseFactor;
        vec3 VertexToEye = normalize(EyePos - WorldPos0);
        vec3 LightReflect = normalize(reflect(LightDirection, Normal));
        float SpecularFactor = dot(VertexToEye, LightReflect);
        SpecularFactor = pow(SpecularFactor, specularPower);
        if (SpecularFactor > 0) {
            SpecularColor = Light.color * specularIntensity * SpecularFactor;
        }
    }

    return (AmbientColor + ShadowFactor * (DiffuseColor + SpecularColor));
}

vec4 CalcPointLight(PointLight l, vec3 Normal, vec4 LightSpacePos)
{
    vec3 LightDirection = WorldPos0 - l.position;
    float Distance = length(LightDirection);
    LightDirection = normalize(LightDirection);
    float ShadowFactor = CalcShadowFactor(LightSpacePos);

    vec4 Color = CalcLightInternal(l.base, LightDirection, Normal, ShadowFactor);
    float Attenuation = l.atten.constant +
                        l.atten.linear * Distance +
                        l.atten.exponent * Distance * Distance;
    return Color / Attenuation;
}

vec4 CalcSpotLight(SpotLight l, vec3 Normal, vec4 LightSpacePos)
{
    vec3 LightToPixel = normalize(WorldPos0 - l.base.position);
    float SpotFactor = dot(LightToPixel, l.direction);

    if (SpotFactor > l.cutoff) {
        vec4 Color = CalcPointLight(l.base, Normal, LightSpacePos);
        return Color * (1.0 - (1.0 - SpotFactor) * 1.0 / (1.0 - l.cutoff));
    } else {
        return vec4(0, 0, 0, 0);
    }
}

void main(void)
{
    fragColor = texture2D(sampler, TexCoord0.xy) * CalcSpotLight(spotLight, normalize(Normal0), LightSpacePos);
}
This is intended. Since your shadow map covers a pyramid-like region in space, your spotlight's cone can be cut off by it. Anything you render that falls outside of the shadow camera's view is considered unlit, so the shadow camera's view pyramid becomes visible.
To fix this you have two options:
1: Make the shadow map camera's FOV bigger, so that the shadow camera's pyramid becomes wider than your spotlight's cone.
2: Change the way you calculate shadows in the out-of-bounds area. Currently, when you sample the shadow map outside of the shadow texture, you apply shadow there. If you change this so that anything outside the shadow texture is considered lit, this problem will disappear.
Edit:
I recommend the second option. A solution for that could be:
Insert the following line into FragmentShader::CalcShadowFactor(), just before the float Depth = ... part
if (UVCoords.x < 0.0 || UVCoords.x > 1.0 || UVCoords.y < 0.0 || UVCoords.y > 1.0) return 1.0;
Note:
There is no universal way to say which is better. In a terrain environment lit by sunlight, you may want to treat out-of-bounds regions as lit. However, when you use a flashlight that is in the user's hand, for example, you have to treat out-of-bounds regions as unlit.
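Putting it together, the question's CalcShadowFactor() with the recommended bounds check folded in (option 2, treating out-of-bounds texels as lit) would look roughly like this:

float CalcShadowFactor(vec4 LightSpacePos)
{
    vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
    vec2 UVCoords = 0.5 * ProjCoords.xy + 0.5;
    float z = 0.5 * ProjCoords.z + 0.5;
    // Outside the shadow map: consider the fragment lit (option 2 above).
    if (UVCoords.x < 0.0 || UVCoords.x > 1.0 ||
        UVCoords.y < 0.0 || UVCoords.y > 1.0)
        return 1.0;
    float Depth = texture(shadowMap, UVCoords).x;
    return (Depth < z + 0.00001) ? 0.5 : 1.0;
}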