How to fix improperly distributed lighting projected onto a model in OpenGL? (C++)

I've been trying to implement the Blinn-Phong lighting model to light an imported Wavefront OBJ model, loaded through Assimp (GitHub link).
The model seems to be loaded correctly; however, the lighting appears to be "cut off" near the middle of the model.
Image of the imported model with and without lighting enabled.
As you can see on the left of the image above, there is a region in the middle of the model where the light effectively gets "split up", which is not what is intended. The side facing the light source appears brighter than normal and the side facing away appears darker than normal, with no gradual easing between the two.
I believe there might be something wrong with how I've implemented the lighting model in the fragment shader, but I cannot say for sure why this is happening.
Vertex shader:
#version 330 core
layout (location = 0) in vec3 vertPos;
layout (location = 1) in vec3 vertNormal;
layout (location = 2) in vec2 vertTexCoords;
out vec3 fragPos;
out vec3 fragNormal;
out vec2 fragTexCoords;
uniform mat4 proj, view, model;
uniform mat3 normalMat;
void main() {
fragPos = vec3(model * vec4(vertPos, 1));
gl_Position = proj * view * vec4(fragPos, 1);
fragTexCoords = vertTexCoords;
fragNormal = normalMat * vertNormal;
}
Fragment shader:
#version 330 core
in vec3 fragPos;
in vec3 fragNormal;
in vec2 fragTexCoords;
out vec4 FragColor;
const int noOfDiffuseMaps = 1;
const int noOfSpecularMaps = 1;
struct Material {
sampler2D diffuseMaps[noOfDiffuseMaps], specularMaps[noOfSpecularMaps];
float shininess;
};
struct Light {
vec3 direction;
vec3 ambient, diffuse, specular;
};
uniform Material material;
uniform Light light;
uniform vec3 viewPos;
const float pi = 3.14159265;
uniform float gamma = 2.2;
float near = 0.1;
float far = 100;
float LinearizeDepth(float depth)
{
float z = depth * 2 - 1;
return (2 * near * far) / (far + near - z * (far - near));
}
void main() {
vec3 normal = normalize(fragNormal);
vec3 calculatedColor = vec3(0);
for (int i = 0; i < noOfDiffuseMaps; i++) {
vec3 diffuseTexel = texture(material.diffuseMaps[i], fragTexCoords).rgb;
// Ambient lighting
vec3 ambient = diffuseTexel * light.ambient;
// Diffuse lighting
float diff = max(dot(light.direction, normal), 0);
vec3 diffuse = diffuseTexel * light.diffuse * diff;
calculatedColor += ambient + diffuse;
}
for (int i = 0; i < noOfSpecularMaps; i++) {
vec3 specularTexel = texture(material.specularMaps[i], fragTexCoords).rgb;
vec3 viewDir = normalize(viewPos - fragPos);
vec3 halfWayDir = normalize(viewDir + light.direction);
float energyConservation = (8 + material.shininess) / (8 * pi);
// Specular lighting
float spec = pow(max(dot(halfWayDir, normal), 0), material.shininess);
vec3 specular = specularTexel * light.specular * spec * energyConservation;
calculatedColor += specular;
}
float depthColor = 1 - LinearizeDepth(gl_FragCoord.z) / far;
FragColor = vec4(pow(calculatedColor, vec3(1 / gamma)) * depthColor, 1);
}

Make sure your textures and colors are also in linear space (for sRGB inputs this is a simple pow 2.2) because you are gamma-encoding at the end.
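For example, assuming material.diffuseMaps[i] holds an sRGB-encoded texture, the texel fetch in the fragment shader above could decode it to linear space like this (a sketch, not the only way; using a GL_SRGB internal format achieves the same decode automatically):
// Decode the sRGB texel to linear space before lighting;
// the final pow(1 / gamma) at the end then re-encodes for display.
vec3 diffuseTexel = pow(texture(material.diffuseMaps[i], fragTexCoords).rgb, vec3(2.2));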
Also note that a harsh terminator is expected here.
http://filmicworlds.com/blog/linear-space-lighting-i-e-gamma/
Beyond that, if you expect a soft falloff, it has to come from an area light. For that you can implement wrap lighting or proper area lights, as in the sketch below.
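A minimal sketch of wrap lighting against the fragment shader above; the wrap factor is an assumed tunable parameter, not part of the original code:
// Wrap lighting: let the diffuse term bleed past the 90-degree
// terminator to approximate an area light.
// wrap = 0.0 reduces to the standard clamped Lambert term.
float wrap = 0.5; // assumed tunable, in [0, 1]
float diff = max((dot(light.direction, normal) + wrap) / (1.0 + wrap), 0.0);
vec3 diffuse = diffuseTexel * light.diffuse * diff;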

Related

OpenGL - How could I color objects in pixelated fashion through shaders?

I'm trying to figure out a way to light up my object in a pixelated fashion through the use of shaders.
To illustrate, my goal is to turn this:
Into this:
I've tried looking up ways to do this through the fragment shader; however, there is no way I can access the local position of a fragment to determine the "fake pixel" it would belong to. I also had the idea of using a geometry shader to create a vertex for each of those boxes, but I suspect there could be a better way to do this. Would it be possible?
EDIT: These are the shaders currently being used for the object illustrated by the first image:
vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aColor;
layout (location = 2) in vec2 aTex;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
out vec3 oColor; //Output of a color
out vec2 oTex; //Output of a Texture
out vec3 oPos; //Output of Position in space for light calculation
out vec3 oNormal; //Output of Normal vector for light calculation.
void main(){
gl_Position = projection * view * model * vec4(aPos, 1.0);
oColor = aColor;
oTex = aTex;
oPos = vec3(model * vec4(aPos, 1.0));
oNormal = vec3(0, 0, -1); //Not being calculated at the moment.
}
fragment shader:
#version 330 core
in vec3 oColor;
in vec2 oTex;
in vec3 oPos;
in vec3 oNormal;
out vec4 FragColor;
uniform sampler2D tex;
uniform vec3 lightColor; //Color of the light on the scene, there's only one
uniform vec3 lightPos; //Position of the light on the scene
void main(){
//Ambient Light Calculation
float ambientStrength = 0.1;
//vec3 ambient = ambientStrength * lightColor * vec3(texture(tex, oTex));
vec3 ambient = ambientStrength * lightColor;
//Diffuse Light Calculation
float diffuseStrength = 1.0;
vec3 norm = normalize(oNormal);
vec3 lightDir = normalize(lightPos - oPos);
float diff = max(dot(norm, lightDir), 0.0);
//vec3 diffuse = diff * lightColor* vec3(texture(tex, oTex)) * diffuseStrength;
vec3 diffuse = diff * lightColor;
//Specular Light Calculation
float specularStrength = 0.25;
float shininess = 8;
vec3 viewPos = vec3(0, 0, -10);
vec3 viewDir = normalize(viewPos - oPos);
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), shininess);
vec3 specular = specularStrength * spec * lightColor;
//Result Light
vec3 result = (ambient+diffuse+specular) * oColor;
FragColor = vec4(result, 1.0f);
}
The lighting depends on oPos. You need to quantize ("snap") the position, e.g.:
vec3 pos = vec3(round(oPos.xy * 10.0) / 10.0, oPos.z);
In the following, use pos instead of oPos (see the sketch below).
Note that this only works if oPos is a position in view space, or more generally if the XY plane of the oPos coordinate system is parallel to the XY plane of the view.
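For context, a minimal sketch of how the snapped position plugs into the diffuse calculation of the fragment shader above (the grid size 10.0 is arbitrary):
// Snap the interpolated position to a coarse grid so every fragment
// inside a "fake pixel" shares the same lighting sample.
vec3 pos = vec3(round(oPos.xy * 10.0) / 10.0, oPos.z);
vec3 lightDir = normalize(lightPos - pos); // pos instead of oPos
float diff = max(dot(norm, lightDir), 0.0);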
Alternatively, you can compute a position from gl_FragCoord.
Add a uniform variable with the resolution of the screen:
uniform vec2 resolution;
Compute pos depending on resolution and gl_FragCoord:
vec3 pos = vec3(round(20.0 * gl_FragCoord.xy/resolution.y) / 20.0, oPos.z);
If you want to align the inner squares with the object, you need to introduce texture coordinates, where the bottom-left coordinate of the object is (0, 0) and the top-right is (1, 1).

SSAO shading moves weird with camera (calculating gbuffer wrong)

I'm trying to implement this version of SSAO following this tutorial:
http://www.learnopengl.com/#!Advanced-Lighting/SSAO
Here is what I end up with for my render textures.
When I move the camera, the shadows seem to follow.
It seems like I am missing some kind of matrix multiplication involving the camera.
CODE
gBuffer Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec3 vertexNormal;
out vec3 position;
out vec3 normal;
uniform mat4 m;
uniform mat4 v;
uniform mat4 p;
uniform mat4 n;
void main()
{
vec4 viewPos = v * m * vec4(vertexPosition, 1.0f);
position = viewPos.xyz;
gl_Position = p * viewPos;
normal = vec3(n * vec4(vertexNormal, 0.0f));
}
gBuffer Fragment
#version 330 core
layout (location = 0) out vec4 gPosition;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gColor;
in vec3 position;
in vec3 normal;
const float NEAR = 0.1f;
const float FAR = 50.0f;
float LinearizeDepth(float depth)
{
float z = depth * 2.0f - 1.0f;
return (2.0 * NEAR * FAR) / (FAR + NEAR - z * (FAR - NEAR));
}
void main()
{
gPosition.xyz = position;
gPosition.a = LinearizeDepth(gl_FragCoord.z);
gNormal = normalize(normal);
gColor.rgb = vec3(1.0f);
}
SSAO Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec2 texCoords;
out vec2 UV;
void main(){
gl_Position = vec4(vertexPosition, 1.0f);
UV = texCoords;
}
SSAO Fragment
#version 330 core
out float FragColor;
in vec2 UV;
uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[32];
uniform mat4 projection;
// parameters (you'd probably want to use them as uniforms to more easily tweak the effect)
int kernelSize = 32;
float radius = 1.0;
// tile noise texture over screen based on screen dimensions divided by noise size
const vec2 noiseScale = vec2(1024.0f/4.0f, 1024.0f/4.0f);
void main()
{
// Get input for SSAO algorithm
vec3 fragPos = texture(gPositionDepth, UV).xyz;
vec3 normal = texture(gNormal, UV).rgb;
vec3 randomVec = texture(texNoise, UV * noiseScale).xyz;
// Create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// Iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for(int i = 0; i < kernelSize; ++i)
{
// get sample position
vec3 sample = TBN * samples[i]; // From tangent to view-space
sample = fragPos + sample * radius;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(sample, 1.0);
offset = projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = -texture(gPositionDepth, offset.xy).w; // Get depth value of kernel sample
// range check & accumulate
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth ));
occlusion += (sampleDepth >= sample.z ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
FragColor = occlusion;
}
I've read around and saw that someone with a similar issue passed the view matrix into the SSAO shader and multiplied the sampleDepth by it:
float sampleDepth = (viewMatrix * -texture(gPositionDepth, offset.xy)).w;
But seems like it just makes things worse.
Here's another view from up top where you can see the shadows move with the camera.
If I position my camera in certain ways, things line up.
Although I can only assume the value of your normal matrix n in the gBuffer vertex shader, it seems like you don't store your normals in view space but in world space. Since the SSAO calculations are done in view space, this could (at least partially) explain the unexpected behavior. In that case, you either need to multiply your normals by the view matrix v before storing them in the gBuffer (potentially more efficient, but it may interfere with your other shading calculations) or after retrieving them. A minimal sketch of the first option follows.
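Assuming the v and m uniforms from the gBuffer vertex shader above, the n-based normal line could be replaced with something like this:
// Derive the normal matrix from the model-view matrix so the stored
// normal ends up in view space, consistent with 'position'.
// (inverse() per vertex is costly; computing this on the CPU is preferable.)
mat3 normalMatrix = transpose(inverse(mat3(v * m)));
normal = normalize(normalMatrix * vertexNormal);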

glDisableVertexAttribArray works in one Visual Studio solution, but not another

I am having a very strange occurrence where glDisableVertexAttribArray works in one Visual Studio solution, but when I get the same solution from my Perforce repository, it doesn't run and throws an assert.
I checked out this forum question but, unfortunately, it didn't solve my problem. This is for shadow mapping I have been working on: when I try to render things to the depth buffer and then disable the vertex attributes, it throws an error.
Here's how my code is laid out:
glUseProgram(shaderProgram);
glUniform1i(u_diffuseTextureLocation, 0);
glUniform1i(u_shadowMapLocation, 1);
[...]
glUseProgram(shaderProgram);
[Render some stuff to depth buffer]
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation); // This gives the GL_INVALID_OPERATION enum
And here's the vertex shader in that program:
#version 430 core
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_model;
uniform mat4 u_lightSpaceMat;
in vec3 a_position;
in vec3 a_normal;
in vec2 a_texture;
out VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} vs_out;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
vs_out.v_fragPos = (u_model * vec4(a_position, 1.0)).xyz;
vs_out.v_normal = transpose(inverse(mat3(u_model))) * a_normal;
vs_out.v_texCoords = a_texture;
vs_out.v_fragPosLightSpace = u_lightSpaceMat * vec4(vs_out.v_fragPos, 1.0);
}
And the fragment shader in the program:
#version 430 core
uniform sampler2D u_shadowMap;
uniform sampler2D u_diffuseTexture;
uniform vec3 u_lightPos;
uniform vec3 u_viewPos;
in VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} fs_in;
out vec4 fragColor;
float shadowCalculation(vec4 fragPosLightSpace, vec3 normal, vec3 lightDir)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// Get closest depth value from light's perspective (using [0,1] range
// fragPosLight as coords)
float closestDepth = texture(u_shadowMap, projCoords.xy).r;
// Get depth of current fragment from lights perspective
float currentDepth = projCoords.z;
float bias = max(0.05 * (1.0 - dot(normal, lightDir)), 0.005);
// Percentage closer filtering
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(u_shadowMap, 0);
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(u_shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
return shadow;
}
void main()
{
vec3 color = texture(u_diffuseTexture, fs_in.v_texCoords).rgb;
vec3 normal = normalize(fs_in.v_normal);
vec3 lightColor = vec3(1.0);
// ambient
vec3 ambient = 0.15 * color;
// diffuse
vec3 lightDir = normalize(u_lightPos - fs_in.v_fragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(u_viewPos - fs_in.v_fragPos);
float spec = 0.0;
vec3 halfWayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfWayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = shadowCalculation(fs_in.v_fragPosLightSpace, normal, lightDir);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
fragColor = vec4(lighting, 1.0);
}
What I'm really confused about is that the program runs when I'm using my local files. But when I pull the files from the Perforce repository and try to run it, it throws the exception. I checked, and all the necessary files are uploaded to Perforce. It would seem that something is going wrong with which attributes are actually active? I'm not sure. Just scratching my head here...
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation);
glDisableVertexAttribArray modifies the current VAO. You just removed the current VAO, setting it to 0, which in a core profile means no VAO at all. In the compatibility profile there is a VAO 0, which is probably why it works elsewhere: you're getting a compatibility profile on the other machine.
However, if you're using VAOs, it's not clear why you want to disable an attribute array at all. The whole point of VAOs is that you don't have to call the attribute array functions every frame. You just bind the VAO and go.

GLSL shadow multiplication doesn't work

I'm facing a very strange problem which seems to originate from a simple multiplication in the fragment shader.
I'm trying to calculate shadows using a framebuffer that renders only the depths from the light's perspective, which is a common technique that is easier for beginners to implement.
Fragment Shader:
#version 330 core
uniform sampler2D parquet;
uniform samplerCube depthMaps[15];
in vec2 TexCoords;
out vec4 color;
in vec3 Normal;
in vec3 FragPos;
uniform vec3 lightPos[15];
uniform vec3 lightColor[15];
uniform float intensity[15];
uniform float far_plane;
uniform vec3 viewPos;
float ShadowCalculation(vec3 fragPos, vec3 lightPost, samplerCube depthMaps)
{
vec3 fragToLight = fragPos - lightPost;
float closestDepth = texture(depthMaps, fragToLight).r;
// original depth value
closestDepth *= far_plane;
float currentDepth = length(fragToLight);
float bias = 0.05;
float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
return shadow;
}
void main()
{
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(lightPos[0] - FragPos);
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = diff * lightColor[0];
float _distance = length(vec3(FragPos - lightPos[0]));
float attenuation = 1.0 / pow(_distance +1, 2);
if(attenuation > 1.0) attenuation = 1.0;
float intens = intensity[0];
if(intensity[0] > 150) intens = 150.0f;
vec3 resulta = (diffuse * attenuation) * intens;
//texture color
vec3 tCol = vec3(texture(parquet, TexCoords));
//gamma correction
tCol.rgb = pow(tCol.rgb, vec3(0.45));
vec3 colors = resulta * tCol * (1.0f - ShadowCalculation(FragPos, lightPos[0], depthMaps[0]));
color = vec4(colors, 1.0f);
}
The last multiplication inside main() behaves strangely. Multiplying the result of the diffuse light by the texture color renders nicely (so we have no shadows, just diffuse lighting):
//works
vec3 colors = resulta * tCol;
Multiplying the diffuse light by the shadow result also renders nicely (now we have no textures):
//works
vec3 colors = resulta * (1.0f - ShadowCalculation(FragPos, lightPos[0], depthMaps[0]));
Doing it all together renders just a black screen. I've tried all sorts of things in the fragment shader, but none of them worked.
Lastly, here is the fragment shader used to render the cubemap:
#version 330 core
in vec4 FragPos;
uniform vec3 lightPos;
uniform float far_plane;
void main()
{
float lightDistance = length(FragPos.xyz - lightPos);
// map to [0;1] range by dividing by far_plane
lightDistance = lightDistance / far_plane;
gl_FragDepth = lightDistance;
}
Can you spot any logical error? I'm using uniform arrays since I'll later need multiple lights at once.
After a while spent visually debugging the shader's output, I finally found the error: I was binding the depth map's cubemap texture incorrectly, and this caused the strange behaviour I was seeing in the last multiplication.
Lesson learned: it's not always the fragment shader's fault.

Spotlight with shadows becomes square-like

I've been following a two-part tutorial on shadow mapping from OGLDev (part 1, part 2) for a couple of days now, and I've nearly finished implementing it.
My problem is that when I enable shadow maps, the illuminated region in which shadows appear turns into a "strip", and I'm unsure whether this is intended in the tutorials or not.
Perhaps images will help explain my problem.
However, shadows are functional:
My vertex shader for spotlights:
#version 330
in vec2 textureCoordinate;
in vec4 vertex;
uniform mat4 mMatrix;
uniform mat4 pMatrix;
uniform mat4 vMatrix;
uniform mat4 lightV;
uniform mat4 lightP;
uniform vec3 normal;
uniform vec3 eye;
out vec2 TexCoord0;
out vec4 LightSpacePos;
out vec3 Normal0;
out vec3 WorldPos0;
out vec3 EyePos;
void main()
{
EyePos = eye;
TexCoord0 = textureCoordinate;
WorldPos0 = (mMatrix * vertex).xyz;
Normal0 = (mMatrix * vec4(normal, 0.0)).xyz;
mat4 lightMVP = lightP * inverse(lightV) * mMatrix;
LightSpacePos = lightMVP * vertex;
mat4 worldMVP = pMatrix * vMatrix * mMatrix;
gl_Position = worldMVP * vertex;
}
and the fragment shader:
#version 330
struct BaseLight
{
vec4 color;
float intensity;
};
struct Attenuation
{
float constant;
float linear;
float exponent;
};
struct PointLight
{
BaseLight base;
Attenuation atten;
vec3 position;
float range;
};
struct SpotLight
{
struct PointLight base;
vec3 direction;
float cutoff;
};
in vec2 TexCoord0;
in vec3 WorldPos0;
in vec3 EyePos;
in vec4 LightSpacePos;
out vec4 fragColor;
uniform sampler2D sampler;
uniform sampler2D shadowMap;
in vec3 Normal0;
uniform float specularIntensity;
uniform float specularPower;
uniform SpotLight spotLight;
float CalcShadowFactor(vec4 LightSpacePos)
{
vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
vec2 UVCoords;
UVCoords.x = 0.5 * ProjCoords.x + 0.5;
UVCoords.y = 0.5 * ProjCoords.y + 0.5;
float z = 0.5 * ProjCoords.z + 0.5;
float Depth = texture(shadowMap, UVCoords).x;
if (Depth < z + 0.00001)
return 0.5;
else
return 1.0;
}
vec4 CalcLightInternal(BaseLight Light, vec3 LightDirection, vec3 Normal, float ShadowFactor)
{
vec4 AmbientColor = Light.color * Light.intensity;
float DiffuseFactor = dot(Normal, -LightDirection);
vec4 DiffuseColor = vec4(0, 0, 0, 0);
vec4 SpecularColor = vec4(0, 0, 0, 0);
if (DiffuseFactor > 0) {
DiffuseColor = Light.color * Light.intensity * DiffuseFactor;
vec3 VertexToEye = normalize(EyePos - WorldPos0);
vec3 LightReflect = normalize(reflect(LightDirection, Normal));
float SpecularFactor = dot(VertexToEye, LightReflect);
SpecularFactor = pow(SpecularFactor, specularPower);
if (SpecularFactor > 0) {
SpecularColor = Light.color * specularIntensity * SpecularFactor;
}
}
return (AmbientColor + ShadowFactor * (DiffuseColor + SpecularColor));
}
vec4 CalcPointLight(PointLight l, vec3 Normal, vec4 LightSpacePos)
{
vec3 LightDirection = WorldPos0 - l.position;
float Distance = length(LightDirection);
LightDirection = normalize(LightDirection);
float ShadowFactor = CalcShadowFactor(LightSpacePos);
vec4 Color = CalcLightInternal(l.base, LightDirection, Normal, ShadowFactor);
float Attenuation = l.atten.constant +
l.atten.linear * Distance +
l.atten.exponent * Distance * Distance;
return Color / Attenuation;
}
vec4 CalcSpotLight(SpotLight l, vec3 Normal, vec4 LightSpacePos)
{
vec3 LightToPixel = normalize(WorldPos0 - l.base.position);
float SpotFactor = dot(LightToPixel, l.direction);
if (SpotFactor > l.cutoff) {
vec4 Color = CalcPointLight(l.base, Normal, LightSpacePos);
return Color * (1.0 - (1.0 - SpotFactor) * 1.0/(1.0 - l.cutoff));
}
else {
return vec4(0,0,0,0);
}
}
void main(void)
{
fragColor = texture(sampler, TexCoord0.xy) * CalcSpotLight(spotLight, normalize(Normal0), LightSpacePos);
}
This is intended. Your shadow map covers a pyramid-like region in space, and anything you render outside the shadow camera's view is considered unlit. The shadow camera's view pyramid therefore becomes visible, clipping your spotlight's cone into that "strip".
To fix this you have two options:
1: Make the shadow map camera's FOV bigger, so that the shadow camera's pyramid becomes wider than your spotlight's cone.
2: Change the way you calculate shadows in the out-of-bounds area. Currently, when a sample falls outside the shadow texture, you apply shadow there. If you instead consider anything outside the shadow texture to be lit, this problem will disappear.
Edit:
I recommend the second option. A solution for that could be:
Insert the following line into FragmentShader::CalcShadowFactor(), just before the float Depth = ... part:
if (UVCoords.x < 0.0 || UVCoords.x > 1.0 || UVCoords.y < 0.0 || UVCoords.y > 1.0) return 1.0;
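Putting it together, CalcShadowFactor would read as follows, identical to the original apart from the added bounds check:
float CalcShadowFactor(vec4 LightSpacePos)
{
    vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
    vec2 UVCoords;
    UVCoords.x = 0.5 * ProjCoords.x + 0.5;
    UVCoords.y = 0.5 * ProjCoords.y + 0.5;
    float z = 0.5 * ProjCoords.z + 0.5;
    // Samples outside the shadow map are treated as fully lit.
    if (UVCoords.x < 0.0 || UVCoords.x > 1.0 || UVCoords.y < 0.0 || UVCoords.y > 1.0)
        return 1.0;
    float Depth = texture(shadowMap, UVCoords).x;
    if (Depth < z + 0.00001)
        return 0.5;
    else
        return 1.0;
}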
Note:
There is no universal way to say which option is better. In a terrain environment lit by sunlight, you may want to consider out-of-bounds regions as lit. However, when you use a flashlight that is in the user's hand, for example, you have to consider out-of-bounds regions as unlit.