OpenGL 2D faded circle being stretched/compressed by the resolution - glsl

I'd like my faded lighting (based on distance from a point) to be a perfect circle no matter the resolution. Currently, the light is only a circle if the height and width of the window are equal.
This is what it looks like right now:
My fragment shader looks like this:
#ifdef GL_ES
precision mediump float;
#endif
#define MAX_LIGHTS 10
// varying input variables from our vertex shader
varying vec4 v_color;
varying vec2 v_texCoords;
// a special uniform for textures
uniform sampler2D u_texture;
uniform vec2 u_resolution;
uniform float u_time;
uniform vec2 lightsPos[MAX_LIGHTS];
uniform vec3 lightsColor[MAX_LIGHTS];
uniform float lightsSize[MAX_LIGHTS];
uniform vec2 cam;
uniform vec2 randPos;
uniform bool dark;
void main()
{
    vec4 lights = vec4(0.0, 0.0, 0.0, 1.0);
    float ratio = u_resolution.x / u_resolution.y;
    vec2 st = gl_FragCoord.xy / u_resolution;
    vec2 loc = vec2(0.5 + randPos.x, 0.5 + randPos.y);
    for(int i = 0; i < MAX_LIGHTS; i++)
    {
        if(lightsSize[i] != 0.0)
        {
            // trying to reshape the light
            // vec2 st2 = st;
            // st2.x *= ratio;
            float size = 2.0 / lightsSize[i];
            float dist = max(0.0, distance(lightsPos[i], st)); // st here was replaced with st2 when experimenting
            lights = lights + vec4(max(0.0, lightsColor[i].x - size * dist),
                                   max(0.0, lightsColor[i].y - size * dist),
                                   max(0.0, lightsColor[i].z - size * dist), 0.0);
        }
    }
    if(dark)
    {
        lights.r = max(lights.r, 0.075);
        lights.g = max(lights.g, 0.075);
        lights.b = max(lights.b, 0.075);
    }
    else
    {
        lights.r += 1.0;
        lights.g += 1.0;
        lights.b += 1.0;
    }
    gl_FragColor = texture2D(u_texture, v_texCoords) * lights;
}
I tried reshaping the light by multiplying the x value of the pixel by the ratio of the screen width to the screen height but that caused the lights to be out of place. I couldn't figure out anything that would put them back in their correct place while maintaining their shape.
EDIT: the displacement is determined by my camera's position in my libGDX scene.

What you need is to rescale the difference between the light position and the fragment position:
vec2 dr = st - lightsPos[i];
dr.x *= ratio;
float dist = length(dr);
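For context, a minimal sketch of where that goes in the loop from the question (same uniforms and variables; only the distance computation changes):
for(int i = 0; i < MAX_LIGHTS; i++)
{
    if(lightsSize[i] != 0.0)
    {
        float size = 2.0 / lightsSize[i];
        // rescale only the x component of the offset so one unit spans the
        // same number of pixels in x and y; the iso-distance contours then
        // become circles instead of ellipses
        vec2 dr = st - lightsPos[i];
        dr.x *= ratio;
        float dist = length(dr);
        lights.rgb += max(vec3(0.0), lightsColor[i] - size * dist);
    }
}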

Try normalizing st like this:
vec2 st = (gl_FragCoord.xy - 0.5 * u_resolution.xy) / min(u_resolution.x, u_resolution.y);
so that (for a landscape window) your fragment coordinates are in the range [-1/2; 1/2] for y and [-ratio/2; ratio/2] for x, where ratio = u_resolution.x/u_resolution.y.
You can also make it [0; 1] for y and [0; ratio] for x by doing
vec2 st = gl_FragCoord.xy / min(u_resolution.x, u_resolution.y);
but the former is more convenient in many cases.
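Whichever normalization you pick, the light positions must be expressed in the same space as st, or the lights drift out of place (the displacement described in the question). A minimal sketch, assuming lightsPos is uploaded in [0, 1] window coordinates:
float s = min(u_resolution.x, u_resolution.y);
vec2 st = gl_FragCoord.xy / s;             // landscape: x in [0, ratio], y in [0, 1]
vec2 lp = lightsPos[i] * u_resolution / s; // remap the light into the same space
float dist = distance(lp, st);             // isotropic, so the falloff stays circular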

Related

Circle in GLSL is being drawn in the wrong location

So, I have a circle in GLSL that is supposed to be drawn around the mouse, but the resulting circle is drawn in the wrong location.
I'm drawing the circle by taking step() of the distance between st and the mouse uniform.
I have no idea why this is happening.
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
void main() {
    vec2 st = gl_FragCoord.xy / u_resolution.xy;
    st.x *= u_resolution.x / u_resolution.y;
    float pct = 0.0;
    vec2 brightness = vec2(0.0);
    pct = step(distance(st, vec2(u_mouse / 100.0)), 0.5);
    vec3 color = vec3(0.);
    color = vec3(pct);
    brightness = vec2(1.0);
    gl_FragColor = vec4(color, brightness);
}
Here is an example using Shadertoy that can be trivially adapted to your OpenGL/GLSL code.
The code comes from a basic 2D Shadertoy tutorial on drawing a circle around the centre of the screen, by colouring a pixel based on whether it lies within a given Cartesian distance (i.e. the radius) of the centre. It is then modified to draw the circle around the mouse pointer instead:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 m = (iMouse.xy / iResolution.xy);
    vec2 uv = (fragCoord.xy / iResolution.xy);
    uv.x *= iResolution.x / iResolution.y;
    m.x *= iResolution.x / iResolution.y;
    float radius = 0.25;
    vec3 pixel;
    if( (uv.x - m.x) * (uv.x - m.x) + (uv.y - m.y) * (uv.y - m.y) < radius * radius ) {
        pixel = vec3(0.3, 0.3, 1.0);
    } else {
        pixel = vec3(1.0, 0.3, 0.2);
    }
    fragColor = vec4(pixel, 1.0);
}
Demo:
I would check that you are passing in the values you expect for the mouse and resolution uniforms, and take into account whether your window is fullscreen or not.
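If the mouse position is reported in pixels (the /100.0 in the question hints at a different unit, so treat this as an assumption), a minimal sketch of the fix is to normalize it exactly like st and apply the aspect correction to both points:
void main() {
    vec2 st = gl_FragCoord.xy / u_resolution.xy;
    vec2 m = u_mouse.xy / u_resolution.xy; // assumed: mouse in pixels, like gl_FragCoord
    float ratio = u_resolution.x / u_resolution.y;
    st.x *= ratio;                         // the same aspect correction ...
    m.x *= ratio;                          // ... applied to both points
    float pct = step(distance(st, m), 0.25);
    gl_FragColor = vec4(vec3(pct), 1.0);
}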

Weird Layered Effect During Parallax Mapping

I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to be working fine, except that my brick wall shows distinct visible layers, whereas the photos in the guide don't show any. I was also trying to use this code to parallax the topography of the world, and the same layers show up there, so I was hoping to find a fix.
Layered wall photo:
Photo of how it should look:
Here is my modified vertex shader:
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord;  // aTexCoords
in vec4 vNormal;   // aNormal
in vec4 vTangent;  // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void main()
{
    // Normal variables
    vN = normalize(model_view * vNormal).xyz;
    vT = normalize(model_view * vTangent).xyz;
    vec4 veyepos = model_view * vPosition;
    position = veyepos;
    ftexCoord = texCoord;
    // Displacement variables
    vec3 bi = cross(vT, vN);
    FragPos = vec3(model_view * vPosition).xyz;
    vec3 T = normalize(mat3(model_view) * vTangent.xyz);
    vec3 B = normalize(mat3(model_view) * bi);
    vec3 N = normalize(mat3(model_view) * vNormal.xyz);
    mat3 TBN = transpose(mat3(T, B, N));
    TangentLightPos = TBN * light_position.xyz;
    TangentViewPos = TBN * vPosition.xyz;
    TangentFragPos = TBN * FragPos;
    gl_Position = projection * model_view * vPosition;
}
And my modified fragment shader is here:
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; // parallel to surface in eye space
in vec3 vN; // perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
    while(currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }
    return currentTexCoords;
}
void main()
{
    // DO NORMAL MAPPING
    if (mode == 0) {
        vec3 T = normalize(vT);
        vec3 N = normalize(vN);
        vec3 bi = cross(T, N);
        mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
        vec3 L = normalize(light_position - position).xyz;
        vec3 E = normalize(-position).xyz;
        vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
        vec4 eye = changeOfCoord * text;
        vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
        vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
        fColor = amb + diff;
    } else if (mode == 1) { // DO PARALLAX MAPPING
        // offset texture coordinates with Parallax Mapping
        vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
        vec2 texCoords = ftexCoord;
        texCoords = ParallaxMapping(ftexCoord, viewDir);
        // discard samples outside of the default texture coordinate space
        if(texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
            discard;
        // obtain normal from normal map
        vec3 normal = texture(normalMap, texCoords).rgb;
        // values stored in the normal texture are in [0, 1] range; we need [-1, 1]
        normal = normalize(normal * 2.0 - 1.0);
        // get diffuse color
        vec3 color = texture(colorMap, texCoords).rgb;
        // ambient
        vec3 ambient = 0.1 * color;
        // diffuse
        vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
        float diff = max(dot(lightDir, normal), 0.0);
        vec3 diffuse = diff * color;
        // specular
        vec3 reflectDir = reflect(lightDir, normal);
        vec3 halfwayDir = normalize(lightDir + viewDir);
        float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
        vec3 specular = vec3(0.2) * spec;
        fColor = vec4(ambient + diffuse + 0.0, 1.0);
    }
}
Layers at acute viewing angles are a common artifact of steep parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;
    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
    while(currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }
    // get texture coordinates before collision (reverse operations)
    vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
    // get depth after and before collision for linear interpolation
    float afterDepth = currentDepthMapValue - currentLayerDepth;
    float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;
    // interpolation of texture coordinates
    float weight = afterDepth / (afterDepth - beforeDepth);
    vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);
    return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Commonly the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture:
vec3 bi = cross(vT, vN); // current: tangent x normal
vec3 bi = cross(vN, vT); // suggested: normal x tangent
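If your tangent attribute carries a handedness sign in its w component (a common convention in exported meshes, though nothing in the question's code guarantees it), a sketch of folding it in:
// hypothetical: vTangent.w stores +1.0 or -1.0 encoding the UV handedness
vec3 bi = cross(vN, vT) * vTangent.w;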
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo

Incorrect tracing with SSLR (Screen Space Local Reflections)

While implementing SSLR, I ran into a problem with objects being displayed incorrectly: they are projected infinitely "downward" and do not appear in the mirror at all. The code and a screenshot are below.
Fragment SSLR shader:
#version 330 core
uniform sampler2D normalMap; // in view space
uniform sampler2D depthMap;  // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform mat4 projection;
uniform mat4 inv_projection;
in vec2 texCoord;
layout (location = 0) out vec4 fragColor;
vec3 calcViewPosition(in vec2 texCoord) {
    // Combine UV & depth into XY & Z (NDC)
    vec3 rawPosition = vec3(texCoord, texture(depthMap, texCoord).r);
    // Convert from (0, 1) range to (-1, 1)
    vec4 ScreenSpacePosition = vec4(rawPosition * 2 - 1, 1);
    // Undo perspective transformation to bring into view space
    vec4 ViewPosition = inv_projection * ScreenSpacePosition;
    // Perform perspective divide and return
    return ViewPosition.xyz / ViewPosition.w;
}
vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
    dir *= 0.25f;
    for (int i = 0; i < 20; i++) {
        hitCoord += dir;
        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
        float depth = calcViewPosition(projectedCoord.xy).z;
        dDepth = hitCoord.z - depth;
        if(dDepth < 0.0) return projectedCoord.xy;
    }
    return vec2(-1.0);
}
void main() {
    vec3 normal = texture(normalMap, texCoord).xyz * 2.0 - 1.0;
    vec3 viewPos = calcViewPosition(texCoord);
    // Reflection vector
    vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));
    // Ray cast
    vec3 hitPos = viewPos;
    float dDepth;
    float minRayStep = 0.1f;
    vec2 coords = rayCast(reflected * max(minRayStep, -viewPos.z), hitPos, dDepth);
    if (coords != vec2(-1.0)) fragColor = mix(texture(colorMap, texCoord), texture(colorMap, coords), texture(reflectionStrengthMap, texCoord).r);
    else fragColor = texture(colorMap, texCoord);
}
Screenshot:
Also, the lamp is not reflected at all
I will be grateful for any help.
UPDATE:
colorMap:
normalMap:
depthMap:
UPDATE: I solved the problem with the wrong reflection, but there are still problems.
I solved it as follows: ViewPosition.y *= -1
Now, as you can see in the screenshot, the lower parts of the objects are not reflected for some reason.
The question still remains open.
I'm struggling to get a fine SSR too. I found two things that could help.
To get the view-space normals you have to keep only the camera's rotation and remove its translation. If you don't, the normals get stretched in the direction opposite to the camera's movement and no longer point the right way, even if you normalize them again. For a column-major mat4 you can do it like this:
mat4 viewNoTranslation = view;
viewNoTranslation[3] = vec4(0.0, 0.0, 0.0, 1.0);
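Equivalently, casting the view matrix to mat3 drops the translation column entirely, which is the usual way to transform a direction; a sketch with placeholder names:
// view and worldNormal are placeholder names; mat3(view) keeps only the
// upper-left 3x3 (rotation/scale), so the translation cannot leak in
vec3 viewNormal = normalize(mat3(view) * worldNormal);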
The depth sampled from the depth image is non-linear, and if you linearize it you will indeed get values from 0 to 1, but they will not be accurate enough for what is needed. I tried to get the depth value straight from the vertex shader instead:
gl_Position = ubo.projection * ubo.view * ubo.model * inPos;
depth = gl_Position.z;
I don't know if it is right, but the depth is now more accurate.
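For reference, if you do sample the non-linear depth buffer, a standard sketch for recovering view-space depth from an NDC z value (near and far being the projection's clip planes) is:
float linearizeDepth(float ndcZ, float near, float far) {
    // inverts the perspective projection's hyperbolic z mapping; ndcZ is in [-1, 1]
    return (2.0 * near * far) / (far + near - ndcZ * (far - near));
}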
If you make progress, please update :)

OpenGL Deferred Pixelated Lighting

I'm working on a 3-pass deferred lighting system for a voxel game, however I am having problems with pixelated lighting and ambient occlusion.
The first stage renders the color, position and normal of each pixel on the screen into separate textures. This part works correctly:
The second shader calculates an ambient occlusion value for each pixel on the screen and renders that to a texture. This part doesn't work correctly and is pixelated:
Raw occlusion data:
The third shader uses the color, position, normal and occlusion textures to render the game scene onto the screen. The lighting in this stage is also pixelated:
The SSAO (2nd pass) fragment shader comes from the www.LearnOpenGL.com tutorial for Screen Space Ambient Occlusion:
out float FragColor;
layout (binding = 0) uniform sampler2D gPosition; // World space position
layout (binding = 1) uniform sampler2D gNormal;   // Normalised normal values
layout (binding = 2) uniform sampler2D texNoise;
uniform vec3 samples[64]; // 64 random precalculated vectors (-0.1 to 0.1 magnitude)
uniform mat4 projection;
float kernelSize = 64.0;
float radius = 1.5;
in vec2 TexCoords;
const vec2 noiseScale = vec2(1600.0 / 4.0, 900.0 / 4.0);
void main()
{
    vec4 n = texture(gNormal, TexCoords);
    // The alpha value of the normal is used to determine whether to apply SSAO to this pixel
    if (int(n.a) > 0)
    {
        vec3 normal = normalize(n.rgb);
        vec3 fragPos = texture(gPosition, TexCoords).xyz;
        vec3 randomVec = normalize(texture(texNoise, TexCoords * noiseScale).xyz);
        // Some maths from www.learnopengl.com (Gram-Schmidt: builds a TBN basis
        // around the normal, randomised by the noise vector)
        vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
        vec3 bitangent = cross(normal, tangent);
        mat3 TBN = mat3(tangent, bitangent, normal);
        float occlusion = 0.0;
        // Test 64 points around the pixel
        for (int i = 0; i < kernelSize; i++)
        {
            vec3 sam = fragPos + TBN * samples[i] * radius;
            vec4 offset = projection * vec4(sam, 1.0);
            offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;
            // If the normals are different, increase the occlusion value
            float l = length(normal - texture(gNormal, offset.xy).rgb);
            occlusion += l * 0.3;
        }
        occlusion = 1.0 - (occlusion / kernelSize);
        FragColor = occlusion;
    }
}
The lighting and final fragment shader:
out vec4 FragColor;
in vec2 texCoords;
layout (binding = 0) uniform sampler2D gColor;    // Colour of each pixel
layout (binding = 1) uniform sampler2D gPosition; // World-space position of each pixel
layout (binding = 2) uniform sampler2D gNormal;   // Normalised normal of each pixel
layout (binding = 3) uniform sampler2D gSSAO;     // Red channel contains occlusion value of each pixel
// Each of these textures is 300 wide and 2 tall.
// The first row contains light positions. The second row contains light colours.
uniform sampler2D playerLightData; // Directional lights
uniform sampler2D mapLightData;    // Spherical lights
uniform float worldBrightness;
// Number of player and map lights
uniform float playerLights;
uniform float mapLights;
void main()
{
    vec4 n = texture(gNormal, texCoords);
    // BlockData: a = 4
    // ModelData: a = 2
    // SkyboxData: a = 0
    // Don't do lighting calculations on the skybox
    if (int(n.a) > 0)
    {
        vec3 Normal = n.rgb;
        vec3 FragPos = texture(gPosition, texCoords).rgb;
        vec3 Albedo = texture(gColor, texCoords).rgb;
        vec3 lighting = Albedo * worldBrightness * texture(gSSAO, texCoords).r;
        for (int i = 0; i < playerLights; i++)
        {
            vec3 pos = texelFetch(playerLightData, ivec2(i, 0), 0).rgb;
            vec3 direction = pos - FragPos;
            float l = length(direction);
            if (l < 40.0)
            {
                // Direction of the light to the position
                vec3 spotDir = normalize(direction);
                // Angle of the cone of the light
                float angle = dot(spotDir, -normalize(texelFetch(playerLightData, ivec2(i, 1), 0).rgb));
                // Crop the cone
                if (angle >= 0.95)
                {
                    float fade = (angle - 0.95) * 40.0;
                    lighting += (40.0 - l) / 40.0 * max(dot(Normal, spotDir), 0.0) * Albedo * fade;
                }
            }
        }
        for (int i = 0; i < mapLights; i++)
        {
            // Compare this pixel's position with the light's position
            vec3 difference = texelFetch(mapLightData, ivec2(i, 0), 0).rgb - FragPos;
            float l = length(difference);
            if (l < 7.0)
            {
                lighting += (7.0 - l) / 7.0 * max(dot(Normal, normalize(difference)), 0.0) * Albedo * texelFetch(mapLightData, ivec2(i, 1), 0).rgb;
            }
        }
        FragColor = vec4(lighting, 1.0);
    }
    else
    {
        FragColor = vec4(texture(gColor, texCoords).rgb, 1.0);
    }
}
The size of each block face in the game is 1x1 (in world-space units). I have tried splitting these faces up into smaller triangles, as illustrated below, however there wasn't much visible difference.
How can I increase the resolution of the lighting and SSAO data to reduce these pixelated artifacts? Thank you in advance.
Good news! Thanks to some_rand over at the GameDev Stack Exchange, I was able to fix this by upgrading the precision of my position buffer's internal format from GL_RGBA16F to GL_RGBA32F.
Here is his answer.

Shadowmapping always produces shadows beyond far plane

I am working on the beginnings of omnidirectional shadow mapping in my engine. For now I am only producing one shadowmap as a test. I am getting an odd result when using my current shaders. Here is a screenshot which shows the problem:
I am using a near value of 0.5 and a far value of 5.0 in the projection matrix for the shadowmap render. As near as I can tell, any value with a light-space z larger than my far plane distance is being computed by my fragment shader as in shadow.
This is my fragment shader:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
    // Reconstruct the position from the depth buffer
    vec3 clipSpaceCoords;
    clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
    clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;
    vec4 position = invProj * vec4(clipSpaceCoords, 1.0);
    position.xyz /= position.w;
    // Project into light space and compare against the shadowmap depth
    vec4 lightSpace = lightProj * vec4(position.xyz, 1.0);
    lightSpace.xyz /= lightSpace.w;
    lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
    float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;
    vec3 normal = texture(normalTexture, st).xyz; // texture() returns vec4; take .xyz
    vec3 diffuse = vec3(0.0); // initialise: only written in the lit branch below
    float shadowFactor = 1.0;
    if(lightSpace.w > 0.0 && lightSpace.z > lightDepth + 0.0042) {
        shadowFactor = 0.2;
    }
    else {
        float k = 0.00001;
        vec3 distanceToLight = lightPosition - position.xyz;
        float distanceLength = length(distanceToLight);
        float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
        float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
        diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
    }
    vec3 gamma = vec3(1.0 / 2.2);
    color = pow(texture(colorTexture, st).xyz * shadowFactor + diffuse, gamma);
}
How can I fix this issue (other than increasing my far plane distance)?
One other question, as this is the first time I have attempted shadowmapping: am I doing the lighting in relation to the shadows correctly?
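For what it's worth, this symptom usually means that fragments beyond the light's far plane project to lightSpace.z > 1.0, while the shadowmap can store at most 1.0, so the depth comparison always reports shadow there. A minimal sketch of the usual guard, reusing the condition from the shader above:
// skip the shadow test for fragments beyond the light's far plane
// (lightSpace.z > 1.0), where the shadowmap holds no usable depth
if (lightSpace.w > 0.0 && lightSpace.z <= 1.0 && lightSpace.z > lightDepth + 0.0042) {
    shadowFactor = 0.2;
}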