How can I apply antialiasing to a 2D grid in a shader? - glsl

I have the following fragment shader to draw a grid:
#version 450 core
layout(location = 0) in vec2 position;
layout(location = 0) out vec4 fragColor;
layout(binding = 0) uniform ubo
{
    mat4 uCameraView;
    vec4 uGridColor;
    float uTileSize;
    float uGridBorderSize;
};
void main()
{
    vec2 uv = mod(position, uTileSize);
    vec2 border = mod(uv + (uGridBorderSize / 2.0), uTileSize);
    border -= mod(uv - (uGridBorderSize / 2.0), uTileSize);
    if (length(border) > uTileSize - uGridBorderSize)
    {
        fragColor = uGridColor;
    }
    else
    {
        fragColor = vec4(0.0);
    }
}
This works fine until I change the zoom. The issue appears when the camera gets far away and uGridBorderSize becomes smaller than a pixel on the screen: I get an ugly effect where lines appear and disappear as the zoom changes.
So I wonder, is it possible to apply antialiasing to these lines so that they appear consistently?

In the end I found this fragment shader that draws antialiased grid lines:
void main()
{
    // https://madebyevan.com/shaders/grid/
    vec2 uv = fragCoord / uTileSize;
    vec2 grid = abs(fract(uv - 0.5) - 0.5) / fwidth(uv);
    float line = (min(grid.x, grid.y) * uGridBorderSize) / uScaleFactor;
    float color = 1.0 - min(line, 1.0);
    fragColor = uGridColor * color;
}
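The key here is fwidth(), which returns abs(dFdx(x)) + abs(dFdy(x)), i.e. roughly how much a value changes from one screen pixel to the next. Dividing the distance to the nearest grid line by fwidth(uv) expresses that distance in screen pixels, so the line can be faded out over about one pixel regardless of zoom, instead of being cut off by a hard threshold. For completeness, here is a self-contained sketch of the same idea with the declarations filled in; the uniform block is carried over from the question, and uScaleFactor (the camera zoom factor) is an assumption, since the snippet above does not show its declaration:
#version 450 core
layout(location = 0) in vec2 position; // world-space position, as in the question
layout(location = 0) out vec4 fragColor;
layout(binding = 0) uniform ubo
{
    mat4 uCameraView;
    vec4 uGridColor;
    float uTileSize;
    float uGridBorderSize;
    float uScaleFactor; // assumed: world units per screen pixel (zoom)
};

void main()
{
    // One uv unit per tile; fract(uv - 0.5) - 0.5 is the signed distance
    // to the nearest grid line in tile units.
    vec2 uv = position / uTileSize;
    // Dividing by fwidth(uv) converts that distance into screen pixels.
    vec2 grid = abs(fract(uv - 0.5) - 0.5) / fwidth(uv);
    float line = (min(grid.x, grid.y) * uGridBorderSize) / uScaleFactor;
    // Fade over roughly one pixel; lines thinner than a pixel just get
    // dimmer instead of flickering in and out.
    fragColor = uGridColor * (1.0 - min(line, 1.0));
}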

Related

GLSL shader that draws an outline around my object

I'm trying to make a GLSL shader. The shader should display a red outline around my character. The shader works, but on some specific characters there are red lines which I don't know how to fix.
This is how my fragment shader looks:
uniform mat4 u_Color;
varying vec2 v_TexCoord;
varying vec2 v_TexCoord2;
uniform sampler2D u_Tex0;
const float ALPHA_TOLERANCE = 0.01;
void main()
{
    vec4 baseColor = texture2D(u_Tex0, v_TexCoord);
    vec4 texcolor = texture2D(u_Tex0, v_TexCoord2);
    if (texcolor.r > 0.9) {
        baseColor *= texcolor.g > 0.9 ? u_Color[0] : u_Color[1];
    } else if (texcolor.g > 0.9) {
        baseColor *= u_Color[2];
    } else if (texcolor.b > 0.9) {
        baseColor *= u_Color[3];
    }
    vec4 pixel1 = texture2D(u_Tex0, vec2(v_TexCoord.x + 0.001, v_TexCoord.y));
    vec4 pixel2 = texture2D(u_Tex0, vec2(v_TexCoord.x - 0.001, v_TexCoord.y));
    vec4 pixel3 = texture2D(u_Tex0, vec2(v_TexCoord.x, v_TexCoord.y + 0.001));
    vec4 pixel4 = texture2D(u_Tex0, vec2(v_TexCoord.x, v_TexCoord.y - 0.001));
    bool neighbourColor = pixel4.a > ALPHA_TOLERANCE || pixel3.a > ALPHA_TOLERANCE ||
                          pixel2.a > ALPHA_TOLERANCE || pixel1.a > ALPHA_TOLERANCE;
    if (baseColor.a < ALPHA_TOLERANCE && neighbourColor) {
        baseColor.rgb = vec3(1.0, 0.0, 0.0);
        baseColor.a = 0.7;
    }
    gl_FragColor = baseColor;
    if (gl_FragColor.a < 0.01) discard;
}
This is how my vertex shader looks:
attribute vec2 a_Vertex;
attribute vec2 a_TexCoord;
uniform mat3 u_TextureMatrix;
varying vec2 v_TexCoord;
varying vec2 v_TexCoord2;
uniform mat3 u_TransformMatrix;
uniform mat3 u_ProjectionMatrix;
uniform vec2 u_Offset;
void main()
{
    gl_Position = vec4((u_ProjectionMatrix * u_TransformMatrix * vec3(a_Vertex.xy, 1.0)).xy, 1.0, 1.0);
    v_TexCoord = (u_TextureMatrix * vec3(a_TexCoord, 1.0)).xy;
    v_TexCoord2 = (u_TextureMatrix * vec3(a_TexCoord + u_Offset, 1.0)).xy;
}
The shader works like this: it checks if the pixel is transparent; if it is, it then checks whether a neighboring pixel has a real "color"; if so, the pixel is replaced with red. But for some reason there are some random red lines outside of my character. There aren't any "real" colors next to those lines, so I don't understand why those pixels are made red.
Example of a bugged character
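One thing worth checking (my assumption, not confirmed in the question): the neighbour lookups use a fixed offset of 0.001, which corresponds to exactly one texel only for a 1000-texel-wide texture. On a sprite sheet, a too-large offset can sample a neighbouring sprite and paint red lines where nothing visible is adjacent. A sketch that samples exactly one texel away instead (textureSize needs GLSL 1.30+; on older versions, pass the texture dimensions in as a uniform):
vec2 texelSize = 1.0 / vec2(textureSize(u_Tex0, 0)); // size of one texel in UV units
bool neighbourColor =
    texture2D(u_Tex0, v_TexCoord + vec2( texelSize.x, 0.0)).a > ALPHA_TOLERANCE ||
    texture2D(u_Tex0, v_TexCoord + vec2(-texelSize.x, 0.0)).a > ALPHA_TOLERANCE ||
    texture2D(u_Tex0, v_TexCoord + vec2(0.0,  texelSize.y)).a > ALPHA_TOLERANCE ||
    texture2D(u_Tex0, v_TexCoord + vec2(0.0, -texelSize.y)).a > ALPHA_TOLERANCE;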

Incorrect tracing with SSLR (Screen Space Local Reflections)

While implementing SSLR, I ran into the problem of objects being displayed incorrectly: they are projected infinitely "down" and do not appear in the mirror at all. I give the code and a screenshot below.
Fragment SSLR shader:
#version 330 core
uniform sampler2D normalMap; // in view space
uniform sampler2D depthMap; // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform mat4 projection;
uniform mat4 inv_projection;
in vec2 texCoord;
layout (location = 0) out vec4 fragColor;

vec3 calcViewPosition(in vec2 texCoord) {
    // Combine UV & depth into XY & Z (NDC)
    vec3 rawPosition = vec3(texCoord, texture(depthMap, texCoord).r);
    // Convert from (0, 1) range to (-1, 1)
    vec4 ScreenSpacePosition = vec4(rawPosition * 2 - 1, 1);
    // Undo perspective transformation to bring into view space
    vec4 ViewPosition = inv_projection * ScreenSpacePosition;
    // Perform perspective divide and return
    return ViewPosition.xyz / ViewPosition.w;
}

vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
    dir *= 0.25f;
    for (int i = 0; i < 20; i++) {
        hitCoord += dir;
        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
        float depth = calcViewPosition(projectedCoord.xy).z;
        dDepth = hitCoord.z - depth;
        if (dDepth < 0.0) return projectedCoord.xy;
    }
    return vec2(-1.0);
}

void main() {
    vec3 normal = texture(normalMap, texCoord).xyz * 2.0 - 1.0;
    vec3 viewPos = calcViewPosition(texCoord);
    // Reflection vector
    vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));
    // Ray cast
    vec3 hitPos = viewPos;
    float dDepth;
    float minRayStep = 0.1f;
    vec2 coords = rayCast(reflected * max(minRayStep, -viewPos.z), hitPos, dDepth);
    if (coords != vec2(-1.0)) fragColor = mix(texture(colorMap, texCoord), texture(colorMap, coords), texture(reflectionStrengthMap, texCoord).r);
    else fragColor = texture(colorMap, texCoord);
}
Screenshot:
Also, the lamp is not reflected at all
I will be grateful for any help.
UPDATE:
colorMap:
normalMap:
depthMap:
UPDATE: I solved the problem with the wrong reflection, but there are still problems.
I solved it as follows: ViewPosition.y *= -1
Now, as you can see in the screenshot, the lower parts of the objects are not reflected for some reason.
The question still remains open.
I'm struggling to get a fine SSR too. I found two things that could help.
To get the view-space normals you have to keep only the rotation of the camera and remove the translation; if you don't, the normals get stretched in the direction opposite to the camera movement and no longer have the right direction, even if you normalize them again. For a column-major mat4 you can do it like:
mat4 viewNoTranslation = view;
viewNoTranslation[3] = vec4(0.0, 0.0, 0.0, 1.0);
The depth sampled from the depth image is logarithmic, and if you linearize it you will indeed get values from 0 to 1, but they will be inaccurate at the needed precision. I tried to get the depth value straight from the vertex shader:
gl_Position = ubo.projection * ubo.view * ubo.model * inPos;
depth = gl_Position.z;
I don't know if it is right, but the depth is now more accurate.
If you make progress, please update :)
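For reference, this is the usual formula for turning a raw depth-buffer value back into linear view-space depth (a sketch, assuming a standard perspective projection; the near and far plane uniforms are not part of the shaders above):
uniform float near; // assumed: camera near plane
uniform float far;  // assumed: camera far plane

float linearizeDepth(float d) // d: raw [0,1] value from the depth buffer
{
    float ndc = d * 2.0 - 1.0; // back to NDC [-1,1]
    return (2.0 * near * far) / (far + near - ndc * (far - near));
}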

OpenGL Deferred Pixelated Lighting

I'm working on a 3-pass deferred lighting system for a voxel game; however, I am having problems with pixelated lighting and ambient occlusion.
The first stage renders the color, position and normal of each pixel on the screen into separate textures. This part works correctly:
The second shader calculates an ambient occlusion value for each pixel on the screen and renders that to a texture. This part doesn't work correctly and is pixelated:
Raw occlusion data:
The third shader uses the color, position, normal and occlusion textures to render the game scene onto the screen. The lighting in this stage is also pixelated:
The SSAO (2nd pass) fragment shader comes from the www.LearnOpenGL.com tutorial for Screen Space Ambient Occlusion:
out float FragColor;
layout (binding = 0) uniform sampler2D gPosition; // World space position
layout (binding = 1) uniform sampler2D gNormal; // Normalised normal values
layout (binding = 2) uniform sampler2D texNoise;
uniform vec3 samples[64]; // 64 random precalculated vectors (-0.1 to 0.1 magnitude)
uniform mat4 projection;
float kernelSize = 64;
float radius = 1.5;
in vec2 TexCoords;
const vec2 noiseScale = vec2(1600.0/4.0, 900.0/4.0);

void main()
{
    vec4 n = texture(gNormal, TexCoords);
    // The alpha value of the normal is used to determine whether to apply SSAO to this pixel
    if (int(n.a) > 0)
    {
        vec3 normal = normalize(n.rgb);
        vec3 fragPos = texture(gPosition, TexCoords).xyz;
        vec3 randomVec = normalize(texture(texNoise, TexCoords * noiseScale).xyz);
        // Some maths. I don't understand this bit, it's from www.learnopengl.com
        vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
        vec3 bitangent = cross(normal, tangent);
        mat3 TBN = mat3(tangent, bitangent, normal);
        float occlusion = 0.0;
        // Test 64 points around the pixel
        for (int i = 0; i < kernelSize; i++)
        {
            vec3 sam = fragPos + TBN * samples[i] * radius;
            vec4 offset = projection * vec4(sam, 1.0);
            offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;
            // If the normals are different, increase the occlusion value
            float l = length(normal - texture(gNormal, offset.xy).rgb);
            occlusion += l * 0.3;
        }
        occlusion = 1 - (occlusion / kernelSize);
        FragColor = occlusion;
    }
}
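For what it's worth, the www.learnopengl.com tutorial this shader is based on follows the SSAO pass with a small blur that averages the raw occlusion over the 4x4 noise tile; a minimal sketch of that pass (ssaoInput is an assumed name for the texture the shader above renders into):
out float FragColor;
in vec2 TexCoords;
uniform sampler2D ssaoInput; // the raw occlusion texture from the pass above

void main()
{
    vec2 texelSize = 1.0 / vec2(textureSize(ssaoInput, 0));
    float result = 0.0;
    // Average a 4x4 neighbourhood to cancel out the repeating noise pattern
    for (int x = -2; x < 2; ++x)
    {
        for (int y = -2; y < 2; ++y)
        {
            vec2 offset = vec2(float(x), float(y)) * texelSize;
            result += texture(ssaoInput, TexCoords + offset).r;
        }
    }
    FragColor = result / (4.0 * 4.0);
}
This hides the dither noise, but not the blocky artifacts from low-precision position data, which is what the accepted fix below addresses.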
The lighting and final fragment shader:
out vec4 FragColor;
in vec2 texCoords;
layout (binding = 0) uniform sampler2D gColor; // Colour of each pixel
layout (binding = 1) uniform sampler2D gPosition; // World-space position of each pixel
layout (binding = 2) uniform sampler2D gNormal; // Normalised normal of each pixel
layout (binding = 3) uniform sampler2D gSSAO; // Red channel contains occlusion value of each pixel
// Each of these textures is 300 wide and 2 tall.
// The first row contains light positions. The second row contains light colours.
uniform sampler2D playerLightData; // Directional lights
uniform sampler2D mapLightData; // Spherical lights
uniform float worldBrightness;
// Number of player and map lights
uniform float playerLights;
uniform float mapLights;

void main()
{
    vec4 n = texture(gNormal, texCoords);
    // BlockData: a = 4
    // ModelData: a = 2
    // SkyboxData: a = 0
    // Don't do lighting calculations on the skybox
    if (int(n.a) > 0)
    {
        vec3 Normal = n.rgb;
        vec3 FragPos = texture(gPosition, texCoords).rgb;
        vec3 Albedo = texture(gColor, texCoords).rgb;
        vec3 lighting = Albedo * worldBrightness * texture(gSSAO, texCoords).r;
        for (int i = 0; i < playerLights; i++)
        {
            vec3 pos = texelFetch(playerLightData, ivec2(i, 0), 0).rgb;
            vec3 direction = pos - FragPos;
            float l = length(direction);
            if (l < 40)
            {
                // Direction of the light to the position
                vec3 spotDir = normalize(direction);
                // Angle of the cone of the light
                float angle = dot(spotDir, -normalize(texelFetch(playerLightData, ivec2(i, 1), 0).rgb));
                // Crop the cone
                if (angle >= 0.95)
                {
                    float fade = (angle - 0.95) * 40;
                    lighting += (40.0 - l) / 40.0 * max(dot(Normal, spotDir), 0.0) * Albedo * fade;
                }
            }
        }
        for (int i = 0; i < mapLights; i++)
        {
            // Compare this pixel's position with the light's position
            vec3 difference = texelFetch(mapLightData, ivec2(i, 0), 0).rgb - FragPos;
            float l = length(difference);
            if (l < 7.0)
            {
                lighting += (7.0 - l) / 7.0 * max(dot(Normal, normalize(difference)), 0.0) * Albedo * texelFetch(mapLightData, ivec2(i, 1), 0).rgb;
            }
        }
        FragColor = vec4(lighting, 1.0);
    }
    else
    {
        FragColor = vec4(texture(gColor, texCoords).rgb, 1.0);
    }
}
The size of each block face in the game is 1x1 (world-space size). I have tried splitting these faces up into smaller triangles, as illustrated below; however, there wasn't much visible difference.
How can I increase the resolution of the lighting and SSAO data to reduce these pixelated artifacts? Thank you in advance.
Good news! Thanks to some_rand over at the GameDev Stack Exchange, I was able to fix this by increasing the precision of my position buffer from GL_RGBA16F to GL_RGBA32F.
Here is his answer.

Why does rendering and sampling from texture need an offset?

I have 2 similar versions of a vertex shader that produce slightly different textures.
The first such version is the following:
#version 450
layout(location = 0) in vec3 position; // (x,y,z) coordinates of a vertex
layout(location = 1) in vec3 norm; // a 3D vector representing the normal of the vertex
layout(location = 2) in vec2 texture_coordinate; // texture coordinates
layout(std430, binding = 3) buffer instance_buffer
{
    vec4 cubes_info[]; // first 3 values are the position of the object
};
out vec3 normalized_pos;
out vec3 normal;
out vec2 texture_coord;
uniform float width = 128;
uniform float depth = 128;
uniform float height = 128;

void main()
{
    texture_coord = texture_coordinate;
    vec4 pos = (vec4(position, 1.0) + vec4(vec3(cubes_info[gl_InstanceID]), 0));
    pos += ivec4(1, 1, 0, 0);
    pos.x = (2.f * pos.x - width) / (width);
    pos.y = (2.f * pos.y - depth) / (depth);
    pos.z = 0.9;
    gl_Position = pos;
    normalized_pos = vec3(pos);
    normal = normalize(norm);
}
Which produces:
Look at the border, especially at the bottom, and notice some isolated pixels near the bottom left.
Now the following version:
#version 450
layout(location = 0) in vec3 position; // (x,y,z) coordinates of a vertex
layout(location = 1) in vec3 norm; // a 3D vector representing the normal of the vertex
layout(location = 2) in vec2 texture_coordinate; // texture coordinates
layout(std430, binding = 3) buffer instance_buffer
{
    vec4 cubes_info[]; // first 3 values are the position of the object
};
out vec3 normalized_pos;
out vec3 normal;
out vec2 texture_coord;
uniform float width = 128;
uniform float depth = 128;
uniform float height = 128;

void main()
{
    texture_coord = texture_coordinate;
    vec4 pos = (vec4(position, 1.0) + vec4(vec3(cubes_info[gl_InstanceID]), 0));
    // pos += ivec4(1, 1, 0, 0); <- Look here, we're not offsetting anymore
    pos.x = (2.f * pos.x - width) / (width);
    pos.y = (2.f * pos.y - depth) / (depth);
    pos.z = 0.9;
    gl_Position = pos;
    normalized_pos = vec3(pos);
    normal = normalize(norm);
}
Produces the image:
Unsurprisingly, when we do not add that offset the image is shifted (the pixels at the bottom are gone and we now have an empty border on the top and right edges). My question is, why is that offset necessary to begin with?
Why doesn't the mapping
pos.x = (2.f * pos.x - width) / (width);
pos.y = (2.f * pos.y - depth) / (depth);
directly calculate the correct texel?
Edit:
Some clarifications based on a comment.
The images you are seeing are layers of a 3D texture. Although it may vary, we can assume that the x,y coordinates will be in the ranges [0, width-1] and [0, depth-1] (in this case 127 for both).
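For comparison, here is the usual pixel-center convention (a sketch of the math, not the poster's code): texel i out of N has its center at (i + 0.5)/N in [0,1] texture space, which maps to NDC as 2*(i + 0.5)/N - 1 = (2i + 1 - N)/N. In other words, a half-texel offset is built into the mapping, rather than the whole-unit offset used above:
// Map an integer texel index in [0, width-1] / [0, depth-1]
// to the NDC coordinate of that texel's center.
pos.x = (2.f * pos.x + 1.0 - width) / width;
pos.y = (2.f * pos.y + 1.0 - depth) / depth;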

Same shaders behaving differently on Nvidia and ATI cards

A friend and I are developing a CAD-like editor to use in our future game.
We are using the Qt framework and OpenGL.
The problem we are encountering is that on his laptop with an integrated nVidia card the shading works as expected and renders well, while on my laptop with an integrated ATI card, as well as on my desktop with a Radeon HD5850, the Phong lighting behaves slightly differently: there are more bright spots and dark spots and the image doesn't look good. We are also using a toon shader to draw a silhouette around the edges and to limit the number of shades a color can have.
The toon shader uses two-pass rendering: the first pass renders the object in black, slightly larger than the original (shifting each vertex slightly in the direction of its normal), to make the silhouette; the second pass then renders the object normally (only limiting the shade spectrum, so it looks more comic-like).
The images show the same scene on our two computers. The first difference I mentioned above; the second is that the silhouette is stretched out as it should be on my friend's computer, making an even silhouette around the object, but is shifted slightly up on mine, making a thick line on the top and no line on the bottom.
The other thing is the Phong lighting illuminating the cube within which the object is edited. Again, it renders well on my friend's computer but is almost all-black or all-white on mine.
First image (nVidia card):
Second image (ATI cards):
I understand that the code is long and maybe the problem lies in some Qt settings, not in the shaders, but if you see anything that strikes you as bad practice, please answer.
The code for the Phong shading follows:
#version 400
in vec4 aVertex;
in vec4 aNormal;
in vec2 aTexCoord;
uniform mat4 uPVM;
uniform mat4 uViewModel;
uniform mat4 uNormal;
uniform int uLightsOn;
out vec2 vTexCoord;
out vec3 vNormal;
flat out vec3 mEye;
flat out vec3 mLightDirection;
flat out vec4 mAxisColor;

void main(void)
{
    if (uLightsOn == 1) {
        mEye = (uViewModel * aVertex).xyz;
        mLightDirection = vec4(2.0, -2.0, 1.0, 0.0).xyz;
        vNormal = (uNormal * aNormal).xyz;
    }
    gl_Position = uPVM * aVertex;
    vTexCoord = aTexCoord;
    mAxisColor = aNormal;
}
The Phong fragment shader:
#version 400
uniform sampler2D uTexture0;
uniform int uLightsOn;
uniform vec3 uHighlightColor;
uniform int uTextured;
uniform int uAxisRender;
in vec2 vTexCoord;
in vec3 vNormal;
flat in vec3 mEye;
flat in vec3 mLightDirection;
out vec4 fragColor;
flat in vec4 mAxisColor;

struct TMaterial {
    vec4 diffuse;
    vec4 ambient;
    vec4 specular;
    float shininess;
};
TMaterial material;

void setup() {
    // setupMaterials
    material.ambient = vec4(0.4);
    material.diffuse = vec4(0.9);
    material.specular = vec4(0.0);
    material.shininess = 0.3;
}

void main(void)
{
    setup();
    vec3 finalHighlightColor = uHighlightColor;
    if (finalHighlightColor.x <= 0.0) finalHighlightColor.x = 0.1;
    if (finalHighlightColor.y <= 0.0) finalHighlightColor.y = 0.1;
    if (finalHighlightColor.z <= 0.0) finalHighlightColor.z = 0.1;
    if (uLightsOn == 0) {
        if (uAxisRender == 1) fragColor = mAxisColor;
        else fragColor = vec4(finalHighlightColor, 1.0);
        return;
    }
    vec4 diffuse;
    vec4 spec = vec4(0.0);
    vec4 ambient;
    vec3 L = normalize(mLightDirection - mEye);
    vec3 E = normalize(-mEye);
    vec3 R = normalize(reflect(-L, vNormal));
    ambient = material.ambient;
    float intens = max(dot(vNormal, L), 0.0);
    diffuse = clamp(material.diffuse * intens, 0.0, 1.0);
    if (intens > 0.0) spec = clamp(material.specular * pow(max(dot(R, E), 0.0), material.shininess), 0.0, 1.0);
    if (uTextured == 1) fragColor = (ambient + diffuse + spec) * texture(uTexture0, vTexCoord);
    else fragColor = (ambient + diffuse + spec) * vec4(finalHighlightColor, 1.0);
}
And the toon shaders:
#version 400
in vec4 aVertex;
in vec4 aNormal;
in vec2 aTexCoord;
uniform mat4 uPV;
uniform mat4 uM;
uniform mat4 uN;
uniform vec3 uLightPosition;
uniform vec3 uCameraPosition;
uniform int uSilhouetteMode;
uniform float uOffset;
// If this uniform is set, all the toon rendering is turned off and only simple axes are rendered;
// the last data in aNormal are the colors of those axes if everything was set properly.
uniform int uAxisRendering;
flat out vec4 fAxisColor;
out vec4 vNormal;
out vec2 vTexCoord;
out vec3 vDirectionToCamera;
out vec3 vDirectionToLight;

void silhouetteMode() {
    gl_Position = uPV * uM * vec4(aVertex.xyz + aNormal.xyz * uOffset, 1.0f);
}

void toonMode() {
    vec4 worldPosition = uM * aVertex;
    vDirectionToCamera = uCameraPosition - worldPosition.xyz;
    vDirectionToLight = uLightPosition - worldPosition.xyz;
    vNormal = uN * aNormal;
    gl_Position = uPV * worldPosition;
}

void axisMode() {
    fAxisColor = aNormal;
    gl_Position = uPV * uM * aVertex;
}

void main(void)
{
    vTexCoord = aTexCoord;
    if (uSilhouetteMode == 1) {
        silhouetteMode();
    } else {
        if (uAxisRendering == 1) axisMode();
        else toonMode();
    }
}
And the fragment shader:
#version 400
uniform sampler2D uTexture;
uniform vec3 uBaseColor;
uniform float uNumShades;
uniform int uSilhouetteMode;
uniform int uAxisRendering;
flat in vec4 fAxisColor;
in vec4 vNormal;
in vec2 vTexCoord;
in vec3 vDirectionToCamera;
in vec3 vDirectionToLight;
out vec4 outFragColor;

void main(void)
{
    if (uSilhouetteMode == 1) {
        outFragColor = vec4(uBaseColor, 1.0);
        return;
    }
    if (uAxisRendering == 1) {
        outFragColor = fAxisColor;
        return;
    }
    float l_ambient = 0.1;
    float l_diffuse = clamp(dot(vDirectionToLight, vNormal.xyz), 0.0, 1.0);
    float l_specular;
    vec3 halfVector = normalize(vDirectionToCamera + vDirectionToLight);
    if (dot(vDirectionToLight, vNormal.xyz) > 0.0) {
        l_specular = pow(clamp(dot(halfVector, vNormal.xyz), 0.0, 1.0), 64.0);
    } else {
        l_specular = 0.0;
    }
    float intensity = l_ambient + l_diffuse + l_specular;
    float shadeIntensity = ceil(intensity * uNumShades) / uNumShades;
    outFragColor = vec4(texture(uTexture, vTexCoord).xyz * shadeIntensity * uBaseColor, 1.0);
}
And finally, our OpenGLWindow initialization (in Qt):
OpenGLWindow::OpenGLWindow(QWindow *parent) :
    QWindow(parent), m_animating(false), m_initialized(false), m_animationTimer(NULL)
{
    setSurfaceType(QWindow::OpenGLSurface);
    QSurfaceFormat format;
    format.setDepthBufferSize( 24 );
    format.setMajorVersion( 4 );
    format.setMinorVersion( 0 );
    format.setSamples( 4 );
    format.setProfile( QSurfaceFormat::CoreProfile );
    setFormat( format );
    create();
    if (!m_context) {
        m_context = new QOpenGLContext(this);
        m_context->setFormat(requestedFormat());
        m_context->create();
        m_context->makeCurrent(this);
        initializeOpenGLFunctions();
    }
    m_animationTimer = new QTimer(this);
    connect(m_animationTimer, SIGNAL(timeout()), this, SLOT(renderer()));
    m_animationTimer->setInterval(16);
}
To my eyes the nVidia image seems to be using alpha whereas the AMD one is not. I also can't see an alpha buffer being requested in your Qt setup, e.g.
format.setAlphaBufferSize( 8 );
so it may be that.