I'm trying to render a video from OpenCV using OpenGL with the following vertices and indices:
static const GLint ImageIndices[] {
0, 1, 2,
0, 2, 3
};
static const GLfloat ImageVertices[] = {
// positions // texCoords
-1.0f, 1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 0.0f,
1.0f, -1.0f, 1.0f, 0.0f,
1.0f, 1.0f, 1.0f, 1.0f
};
and following vertex and fragment shader:
#version 330 core
layout(location = 0) in vec2 vert_pos;
layout(location = 1) in vec2 tex_pos;
uniform mat3 trans1;
uniform mat3 trans2;
out vec2 texPos;
void main()
{
vec3 pos = vec3(-vert_pos.y, vert_pos.x, 0.0f);
vec3 rst;
if(pos.y < 0.0f)
{
rst = pos;
texPos = tex_pos;
}
else if(pos.y > 0.0f)
{
rst = pos;
texPos = tex_pos;
}
gl_Position = vec4(rst.x, rst.y, 0.0f, 1.0f);
//texPos = tex_pos;
}
#version 330 core
in vec2 texPos;
out vec4 fragColor;
uniform sampler2D tex;
uniform float width;
uniform float height;
void main()
{
fragColor = texture(tex, texPos);
}
and everything works well:
However, since I want to rotate the image using different matrix on the top and the bottom part, I changed the vertex shader to debug the coordinates of the image where texPos is vec2(1.0f, 1.0f) when pos.y > 0.0f:
#version 330 core
layout(location = 0) in vec2 vert_pos;
layout(location = 1) in vec2 tex_pos;
uniform mat3 trans1;
uniform mat3 trans2;
out vec2 texPos;
void main()
{
vec3 pos = vec3(-vert_pos.y, vert_pos.x, 0.0f);
vec3 rst;
if(pos.y < 0.0f)
{
rst = pos;
texPos = tex_pos;
}
else if(pos.y > 0.0f)
{
rst = pos;
texPos = vec2(1.0f, 1.0f);
}
gl_Position = vec4(rst.x, rst.y, 0.0f, 1.0f);
//texPos = tex_pos;
}
and the output of the video is strange:
Why the video turned out to be like this and how can I fix it?
The vertex shader is executed per vertex, not per fragment. It is executed for the vertices of the 2 triangles (4 unique vertices referenced by the 6 indices). You have changed the texture coordinates of the vertices where pos.y > 0.0f.
Since pos = vec3(-vert_pos.y, vert_pos.x, 0.0), you have changed the texture coordinates of the vertices where vert_pos.x > 0.0:
x y u v x y u v
-1 1 0 1 -> -1 1 0 1
-1 -1 0 0 -> -1 -1 0 0
1 -1 1 0 -> 1 -1 1 1
1 1 1 1 -> 1 1 1 1
Actually only the texture coordinate of the vertex attribute with index 2 has changed (index 3 was assigned the same value it already had). Hence, just the 1st triangle is affected:
Related
So I have an AtlasTexture that contains all the tiles I need to draw a tile map.
Right now I pass the AtlasTexture through a uniform, and the idea is to change the texture coordinate to select just the portion I need from the atlas.
The issue is that I can only specify on the fragment shader to cut the texture from the zero origin, is it possible to specify an offsetX to tell to the shader where I want to start drawing?
float vertices[] = {
// aPosition // aTextureCoordinate
0.0f, 0.0f, 0.0f, 0.0f,
100.0f, 0.0f, 1.0f, 0.0f,
0.0f, 100.0f, 0.0f, 1.0f,
100.0f, 100.0f, 1.0f, 1.0f,
};
uint32_t indices[] = {0, 1, 2, 2, 3, 1};
Vertex shader
#version 330 core
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoordinate;
out vec2 textureCoordinate;
void main() {
gl_Position = vec4( aPosition.x, aPosition.y, 1.0f, 1.0f);
textureCoordinate = vec2(
aTextureCoordinate.x / 3.0f, // This selects the first tile in the uAtlasTexture
aTextureCoordinate.y
);
}
Fragment shader
#version 330 core
in vec2 textureCoordinate;
uniform sampler2D uAtlasTexture; // Has 3 tiles
out vec4 color;
void main() {
color = texture(uAtlasTexture, textureCoordinate);
}
Use a uniform variable for the offset. `vec2(1.0/3.0 + aTextureCoordinate.x / 3.0f, aTextureCoordinate.y)` selects the 2nd tile. Use a uniform instead of the hard-coded 1.0/3.0:
#version 330 core
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoordinate;
out vec2 textureCoordinate;
// Horizontal start of the selected tile, in normalized texture coordinates
// (e.g. 0.0, 1.0/3.0, 2.0/3.0 for a 3-tile atlas). Set from the application.
uniform float textureOffsetX;
void main() {
gl_Position = vec4( aPosition.x, aPosition.y, 1.0f, 1.0f);
// Scale the per-vertex coordinate down to one tile's width, then shift it
// to the tile chosen by textureOffsetX.
textureCoordinate = vec2(
textureOffsetX + aTextureCoordinate.x / 3.0f,
aTextureCoordinate.y
);
}
I am trying to implement god rays however I do not understand where it went wrong. The source of god rays is the center of the cube.
Vertex shader:
#version 330 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec2 aTexCoords;
out vec2 TexCoords;
void main()
{
gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0);
TexCoords = aTexCoords;
}
This is simple fragment shader just to show you how scene looks like when I did not add code for god rays to the fragment shader:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform sampler2D screenTexture;
void main()
{
FragColor = texture2D(screenTexture, TexCoords);
}
Scene without godrays:
Fragment shader when god rays code is added:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform vec2 lightPositionOnScreen;
uniform sampler2D screenTexture;
const float exposure = 0.3f;
const float decay = 0.96815;
const float density = 0.926;
const float weight = 0.587;
const int NUM_SAMPLES = 80;
void main()
{
// Calculate vector from pixel to light source in screen space.
vec2 deltaTexCoord = (TexCoords - lightPositionOnScreen.xy);
vec2 texCoord = TexCoords;
// Divide by number of samples and scale by control factor.
deltaTexCoord *= 1.0f / NUM_SAMPLES * density;
// Store initial sample.
vec3 color = texture2D(screenTexture, TexCoords);
// Set up illumination decay factor.
float illuminationDecay = 1.0f;
// Evaluate summation from Equation 3 NUM_SAMPLES iterations.
for (int i = 0; i < NUM_SAMPLES; i++)
{
// Step sample location along ray.
texCoord -= deltaTexCoord;
// Retrieve sample at new location.
vec3 sample = texture2D(screenTexture, texCoord);
// Apply sample attenuation scale/decay factors.
sample *= illuminationDecay * weight;
// Accumulate combined color.
color += sample;
// Update exponential decay factor.
illuminationDecay *= decay;
}
FragColor = vec4(color * exposure, 1.0);
}
How scene looks after godRays code:
This code is used to translate coordinates of cube center from world to window space position:
glm::vec4 clipSpacePos = projection * (view * glm::vec4(m_cubeCenter, 1.0));
glm::vec3 ndcSpacePos = glm::vec3(clipSpacePos.x / clipSpacePos.w, clipSpacePos.y / clipSpacePos.w, clipSpacePos.z / clipSpacePos.w);
glm::vec2 windowSpacePos;
windowSpacePos.x = (ndcSpacePos.x + 1.0) / 2.0;
windowSpacePos.y = 1.0f - (ndcSpacePos.y + 1.0) / 2.0;
wxMessageOutputDebug().Printf("test %f x position", windowSpacePos.x);
wxMessageOutputDebug().Printf("test %f y position", windowSpacePos.y);
shaderProgram.loadShaders("Shaders/godRays.vert", "Shaders/godRays.frag");
shaderProgram.use();
shaderProgram.setUniform("lightPositionOnScreen", windowSpacePos);
This is how I am setting up texture:
GLfloat vertices[] = {
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // bottom left
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f, // top left
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // top right
};
GLuint testBuffer;
glGenBuffers(1, &testBuffer);
glBindBuffer(GL_ARRAY_BUFFER, testBuffer);
glBufferData(GL_ARRAY_BUFFER, 30 * sizeof(GLfloat), &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), NULL);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)(3 * sizeof(float)));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, screenTexture);
glDrawArrays(GL_TRIANGLES, 0, 6);
shaderProgram.deleteProgram();
glDeleteBuffers(1, &testBuffer);
Here is the solution. The problem was in the lines vec3 color = texture2D(screenTexture, TexCoords); and vec3 sample = texture2D(screenTexture, texCoord); I replaced them with vec3 color = texture(screenTexture, TexCoords).rgb; vec3 sample = texture(screenTexture, texCoord).rgb; respectively.
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
// Light position projected to normalized window space ([0,1] x [0,1]),
// computed on the CPU side from the cube center.
uniform vec2 lightPositionOnScreen;
uniform sampler2D screenTexture;
// Radial-blur tuning constants (classic GPU Gems 3 "god rays" parameters).
const float exposure = 0.3f;
const float decay = 0.96815;
const float density = 0.926;
const float weight = 0.587;
const int NUM_SAMPLES = 100;
void main()
{
// Vector from this fragment toward the light, in texture space.
vec2 deltaTexCoord = vec2(TexCoords.xy - lightPositionOnScreen.xy);
vec2 texCoord = TexCoords;
// One step per sample, scaled by the density control factor.
deltaTexCoord *= 1.0f / NUM_SAMPLES * density;
// Initial sample; .rgb swizzle is required because texture() returns vec4.
vec3 color = texture(screenTexture, TexCoords).rgb;
float illuminationDecay = 1.0f;
// March toward the light, accumulating progressively attenuated samples.
// NOTE(review): `sample` is a reserved word in later GLSL versions and some
// drivers reject it even under #version 330 — consider renaming; confirm on
// the target drivers.
for (int i = 0; i < NUM_SAMPLES; i++)
{
texCoord -= deltaTexCoord;
vec3 sample = texture(screenTexture, texCoord).rgb;
sample *= illuminationDecay * weight;
color += sample;
// Exponential falloff along the ray.
illuminationDecay *= decay;
}
FragColor = vec4(color * exposure, 1.0);
}
I have a quad, composed by two triangles, defined like so:
glm::vec3 coords[] = {
glm::vec3(-1.0f, -1.0f, -0.1f),
glm::vec3( 1.0f, -1.0f, -0.1f),
glm::vec3( 1.0f, 1.0f, -0.1f),
glm::vec3(-1.0f, 1.0f, -0.1f)
};
glm::vec3 normals[] = {
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f)
};
glm::vec2 texCoords[] = {
glm::vec2(0.0f, 0.0f),
glm::vec2(1.0f, 0.0f),
glm::vec2(1.0f, 1.0f),
glm::vec2(0.0f, 1.0f)
};
unsigned int indices[] = {
0, 1, 2,
2, 3, 0
};
I'm trying to change the quad's 'height' via a black and white jpg so I wrote a vertex shader to do this, however the transformation is not being applied directly to all the points of the quad. Here's the jpg I'm using:
I expect a sudden constant bump where the image turns white, but this is what I'm getting: https://i.gyazo.com/639a699e7aa12cda2f644201d787c507.gif. It appears that only the top left corner is reaching the maximum height, and that somehow the whole entire left triangle is being distorted.
My vertex shader:
layout(location = 0) in vec3 vertex_position;
layout(location = 1) in vec3 vertex_normal;
layout(location = 2) in vec2 vertex_texCoord;
layout(location = 3) in vec4 vertex_color;
out vec2 v_TexCoord;
out vec4 v_Color;
out vec3 v_Position;
out vec3 v_Normal;
//model view projection matrix
uniform mat4 u_MVP;
uniform mat4 u_ModelMatrix;
uniform sampler2D u_Texture1_Height;
void main()
{
v_TexCoord = vertex_texCoord;
v_Color = vertex_color;
v_Normal = mat3(u_ModelMatrix) * vertex_normal;
vec4 texHeight = texture(u_Texture1_Height, v_TexCoord);
vec3 offset = vertex_normal * (texHeight.r + texHeight.g + texHeight.b) * 0.33;
v_Position = vec3(u_ModelMatrix * vec4(vertex_position + offset, 1.0f));
gl_Position = u_MVP * vec4(vertex_position + offset, 1.0f);
}
The bump map is just evaluated per vertex rather than per fragment, because you do the computation in the vertex shader. The vertex shader is just executed for each vertex (for each corner of the quad). Compare Vertex Shader and Fragment Shader.
It is not possible to displace the clip space coordinate for each fragment. You have to tessellate the geometry (the quad) into a lot of small quads. Since the vertex shader is executed for each vertex, the geometry is displaced for each corner point in the mesh. This is the common approach. See this simulation.
Another possibility is to implement parallax mapping, where a depth effect is accomplished by displacing the texture coordinates and distort the normal vectors in the fragment shader. See Normal, Parallax and Relief mapping respectively LearnOpenGL - Parallax Mapping or Bump Mapping with glsl.
I was using the following fragment shader to display a texture:
std::string const fragment_shader_source =
"#version 330 core\n"
""
"in vec4 fColor;\n"
"in vec2 fTexPos;\n"
"\n"
"out vec4 finalColor;\n"
"\n"
"uniform sampler2D textureUniform;\n"
"\n"
"void main() {\n"
" \n"
" vec4 textureColor = texture(textureUniform, fTexPos);\n"
" finalColor = fColor * textureColor;\n"
"}";
However, I wanted to be able to only display a clip of the image rather than the entire image. So, I added a call to textureSize to get the width and height of the texture so I could normalize the coordinates myself. But, it comes up with what looks like it's just repeating
std::string const fragment_shader_source =
"#version 330 core\n"
""
"in vec4 fColor;\n"
"in vec2 fTexPos;\n"
"\n"
"out vec4 finalColor;\n"
"\n"
"uniform sampler2D textureUniform;\n"
"\n"
"void main() {\n"
" \n"
" ivec2 samplerSize = textureSize(textureUniform, 0);\n"
" vec2 texturePos = vec2(fTexPos.x / float(samplerSize.x), fTexPos.y / float(samplerSize.y));\n"
" vec4 textureColor = texture(textureUniform, texturePos);\n"
" finalColor = fColor * textureColor;\n"
"}";
This is how I am uploading the data:
glBufferData(GL_ARRAY_BUFFER, sizeof(acorn::graphics::Vertex) * sprite_batch_.size() * 6, nullptr, GL_DYNAMIC_DRAW);
std::vector<Vertex> vertex_data;
for(auto const& sprite : sprite_batch_) {
GLfloat fw = (sprite.origin_x + sprite.u);
GLfloat bw = sprite.origin_x;
GLfloat fh = (sprite.origin_y + sprite.v);
GLfloat bh = sprite.origin_y;
// body color texture
// x y r g b a s t
vertex_data.push_back({0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, fw, fh});
vertex_data.push_back({-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, bw, fh});
vertex_data.push_back({0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, fw, bh});
vertex_data.push_back({-0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, bw, bh});
vertex_data.push_back({-0.5f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f, bw, fh});
vertex_data.push_back({0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 1.0f, fw, bh});
}
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(Vertex) * vertex_data.size(), static_cast<void*>(vertex_data.data()));
glDrawArrays(GL_TRIANGLES, 0, 6);
What you've done is taken the texture coordinates and remapped them to cover just the first pixel in the image. If you want to clip the input image, you'll need to specify to the shader the bounds of the area you wish to show. You could pass in a lower left and upper right coordinate and test against those. Something like this (not tested):
#version 330 core
in vec4 fColor;
in vec2 fTexPos;
out vec4 finalColor;   // fixed: the original was missing the terminating ';'
uniform sampler2D textureUniform;
// Clip rectangle in normalized texture coordinates ([0,1] range).
// Note: GLSL uses vec2; `float2` is an HLSL type and does not compile in GLSL.
uniform vec2 lowerLeft;
uniform vec2 upperRight;
void main()
{
    // Must be declared before use; the original referenced an undeclared name.
    vec4 textureColor;
    // Sample only inside the clip rectangle; everything outside is transparent black.
    if ((lowerLeft.x <= fTexPos.x) && (fTexPos.x <= upperRight.x) &&
        (lowerLeft.y <= fTexPos.y) && (fTexPos.y <= upperRight.y))
    {
        textureColor = texture(textureUniform, fTexPos);
    }
    else
    {
        textureColor = vec4(0.0);
    }
    // Tint by the interpolated vertex color, matching the original shader.
    finalColor = textureColor * fColor;
}
I've read several articles here at stackoverflow and in some books. But I can't find the error in my code. I have an deferred renderer and saved albedo, depth and normals (including spec) inside the g-buffer. Moreover created an shadow map from a light source from top of the scene.
It looks like this:
Albedo (R8G8B8A8):
Normal (R8G8B8A8):
Linear Depth (F32):
Shadow map from light source (linear depth) (500 meter above camera):
Ok. My shader code of the deferred with shadow mapping look like this:
I have uploaded row major matrices.
Vertex Shader:
layout(row_major) uniform UVSViewMatrix
{
mat4 m_ScreenMatrix;
};
layout(location = 0) in vec3 VertexPosition;
smooth out vec2 PSTexCoord;
void main()
{
vec4 Position = vec4(VertexPosition.xyz, 1.0f);
PSTexCoord = Position.xy;
gl_Position = Position * m_ScreenMatrix;
}
Fragment Shader:
#version 410
// -----------------------------------------------------------------------------
// Input from engine
// -----------------------------------------------------------------------------
layout(row_major) uniform UPSCameraProperties
{
mat4 m_ProjectionMatrix;
mat4 m_CameraView;
vec3 m_CameraPosition;
vec3 m_CameraDirection;
};
layout(row_major) uniform UPSLightProperties
{
mat4 m_LightProjection;
mat4 m_LightView;
vec4 m_LightPosition;
vec4 m_LightAmbientIntensity;
vec4 m_LightDiffuseIntensity;
vec4 m_LightSpecularIntensity;
};
uniform sampler2D PSTextureAlbedo;
uniform sampler2D PSTextureNormalSpecular;
uniform sampler2D PSTextureDepth;
uniform sampler2D PSTextureShadowMap;
// -----------------------------------------------------------------------------
// Input from vertex shader
// ----------------------------------------------------------------- ------------
smooth in vec2 PSTexCoord;
// -----------------------------------------------------------------------------
// Output to systembuffer
// -----------------------------------------------------------------------------
layout (location = 0) out vec4 PSOutput;
// -----------------------------------------------------------------------------
// Functions
// -----------------------------------------------------------------------------
vec3 GetViewSpacePositionFromDepth(float _Depth, vec2 _ScreenPosition, mat4 _InvertedProjectionMatrix)
{
// -----------------------------------------------------------------------------
// Information from:
// http://mynameismjp.wordpress.com/2009/03/10/reconstructing-position-from-depth/
// -----------------------------------------------------------------------------
vec4 ScreenPosition;
ScreenPosition.x = _ScreenPosition.x * 2.0f - 1.0f;
ScreenPosition.y = _ScreenPosition.y * 2.0f - 1.0f;
ScreenPosition.z = _Depth * 2.0f - 1.0f;
ScreenPosition.w = 1.0f;
// -----------------------------------------------------------------------------
// Transform by the inverse projection matrix
// -----------------------------------------------------------------------------
vec4 VSPosition = ScreenPosition * _InvertedProjectionMatrix;
// -----------------------------------------------------------------------------
// Divide by w to get the view-space position
// -----------------------------------------------------------------------------
return (VSPosition.xyz / VSPosition.w);
}
// -----------------------------------------------------------------------------
float GetShadowAtPosition(vec3 _WSPosition)
{
// -----------------------------------------------------------------------------
// Set world space coord into light projection by multiplying with light
// view and projection matrix;
// -----------------------------------------------------------------------------
vec4 LSPosition = vec4(_WSPosition, 1.0f) * m_LightView * m_LightProjection;
// -----------------------------------------------------------------------------
// Divide xyz by w to get the position in light view's clip space.
// -----------------------------------------------------------------------------
LSPosition.xyz /= LSPosition.w;
// -----------------------------------------------------------------------------
// Get uv texcoords for this position
// -----------------------------------------------------------------------------
vec3 ShadowCoord = LSPosition.xyz * 0.5f + 0.5f;
// -----------------------------------------------------------------------------
// Get final depth at this texcoord and compare it with the real
// position z value (do a manual depth test)
// -----------------------------------------------------------------------------
float DepthValue = texture( PSTextureShadowMap, vec2(ShadowCoord.x, ShadowCoord.y) ).r;
float Shadow = 1.0f;
if (ShadowCoord.z > DepthValue)
{
Shadow = 0.3f;
}
return Shadow;
}
// -----------------------------------------------------------------------------
// Main
// -----------------------------------------------------------------------------
void main()
{
// -----------------------------------------------------------------------------
// Get informations from g-buffer
// -----------------------------------------------------------------------------
vec2 TexCoord = vec2(PSTexCoord.s, 1.0f - PSTexCoord.t);
vec4 AlbedoColor = texture(PSTextureAlbedo , TexCoord);
vec4 NormalSpec = texture(PSTextureNormalSpecular, TexCoord);
float Depth = texture(PSTextureDepth , TexCoord).r;
vec3 VSPosition = GetViewSpacePositionFromDepth(Depth, TexCoord, inverse(m_ProjectionMatrix));
vec3 Normal = normalize(NormalSpec.xyz);
float SpecularExponent = NormalSpec.w;
vec4 WSPosition = vec4(VSPosition, 1.0f) * inverse(m_CameraView);
// -----------------------------------------------------------------------------
// Compute lighting (Light Accumulation)
// -----------------------------------------------------------------------------
vec3 CameraPosition = m_CameraPosition.xyz;
vec3 LightPosition = m_LightPosition.xyz;
vec3 EyeDirection = WSPosition.xyz - CameraPosition;
vec3 LightDirection = normalize(LightPosition - WSPosition.xyz);
vec3 LightReflection = normalize(-reflect(LightDirection, Normal));
vec4 AmbientColor = m_LightAmbientIntensity;
vec4 DiffuseColor = clamp(m_LightDiffuseIntensity * max(dot(Normal, LightDirection), 0.0f), 0.0f, 1.0f);
vec4 SpecularColor = clamp(m_LightSpecularIntensity * pow(max(dot(LightReflection, EyeDirection), 0.0f), SpecularExponent), 0.0f, 1.0f);
float Shadow = GetShadowAtPosition(WSPosition.xyz);
// -----------------------------------------------------------------------------
// Final result
// -----------------------------------------------------------------------------
PSOutput = vec4((AlbedoColor * (AmbientColor + DiffuseColor) * Shadow).xyz, 1.0f);
}
At the end my result look like this:
Did anyone can see my mistake?
Some measurement results:
ShadowCoord.xy is always 0.5, 0.5
ShadowCoord.z seems to be 1.0f
Here are some variable values:
LightProjection
(
1.29903805f, 0.0f, 0.0, 0.0f,
0.0f, 1.73205066f, 0.0f, 0.0f,
0.0f, 0.0f, -1.00024426f, -1.0f,
0.0f, 0.0f, -1.00024426f, 0.0f
);
LightView
(
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, -1500.0f, 1.0f
);
CameraProjection
(
1.29903805f, 0.0f, 0.0f, 0.0f,
0.0f, 1.73205066f, 0.0f, 0.0f,
0.0f, 0.0f, -1.00024426f, -1.0f,
0.0f, 0.0f, -1.00024426f, 0.0f
);
CameraView
(
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, -1000.0f, 1.0f
);
One issue I can see is a difference between linear and hyperbolic depth. It looks like your "Linear Depth (F32)" G-buffer depth is actually hyperbolic, which is fine because the code expects this. However the light's depth is in fact linear but is then compared against the clip space depth after perspective divide.
It'd probably be easiest to just make the light buffer's depth hyperbolic (gl_FragCoord.z), but changing to compare against eye-space Z should work too:
float ShadowCoordZ = -(vec4(_WSPosition, 1.0f) * m_LightView).z;
The bit about "ShadowCoord.xy is always 0.5, 0.5" is confusing. The G-buffer texture coord seems to work. I can't really tell with the shadow, but do the lighting equations work? If so maybe something's wrong with the matrices? I'd also pass in the inverse of your matrices as uniforms so time isn't spent computing it in the shader.