I want to shade the quad with a checkerboard pattern:
f(P) = (floor(P.x) + floor(P.y)) mod 2
My quad is:
glBegin(GL_QUADS);
glVertex3f(0,0,0.0);
glVertex3f(4,0,0.0);
glVertex3f(4,4,0.0);
glVertex3f(0,4,0.0);
glEnd();
The vertex shader file:
varying float factor;
float x,y;
void main(){
x=floor(gl_Position.x);
y=floor(gl_Position.y);
factor = mod((x+y),2.0);
}
And the fragment shader file is:
varying float factor;
void main(){
gl_FragColor = vec4(factor,factor,factor,1.0);
}
But I'm getting this:
It seems that the mod function doesn't work, or maybe it's something else...
Any help?
It is better to calculate this effect in the fragment shader, something like this:
vertex program =>
varying vec2 texCoord;
void main(void)
{
gl_Position = vec4(gl_Vertex.xy, 0.0, 1.0);
gl_Position = sign(gl_Position);
texCoord = (vec2(gl_Position.x, gl_Position.y)
+ vec2(1.0)) / vec2(2.0);
}
fragment program =>
#extension GL_EXT_gpu_shader4 : enable
uniform sampler2D Texture0;
varying vec2 texCoord;
void main(void)
{
ivec2 size = textureSize2D(Texture0, 0);
float total = floor(texCoord.x * float(size.x)) +
floor(texCoord.y * float(size.y));
bool isEven = mod(total, 2.0) == 0.0;
vec4 col1 = vec4(0.0, 0.0, 0.0, 1.0);
vec4 col2 = vec4(1.0, 1.0, 1.0, 1.0);
gl_FragColor = (isEven) ? col1 : col2;
}
Output =>
Good luck!
Try this function in your fragment shader:
vec3 checker(in float u, in float v)
{
float checkSize = 2.0; // must be a float literal; GLSL does not implicitly convert int to float
float fmodResult = mod(floor(checkSize * u) + floor(checkSize * v), 2.0);
float fin = max(sign(fmodResult), 0.0);
return vec3(fin, fin, fin);
}
Then in main you can call it using:
vec3 check = checker(fs_vertex_texture.x, fs_vertex_texture.y);
And simply pass the x and y you are getting from the vertex shader. All you have to do after that is include it when calculating your vFragColor.
Keep in mind that you can change the checker size simply by modifying the checkSize value.
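For context, a minimal complete fragment shader around that function might look like this (a sketch; fs_vertex_texture is assumed to be the UV varying coming from your vertex shader):
varying vec2 fs_vertex_texture; // assumed UV varying

vec3 checker(in float u, in float v)
{
    float checkSize = 2.0;
    float fmodResult = mod(floor(checkSize * u) + floor(checkSize * v), 2.0);
    float fin = max(sign(fmodResult), 0.0);
    return vec3(fin, fin, fin); // black or white
}

void main()
{
    vec3 check = checker(fs_vertex_texture.x, fs_vertex_texture.y);
    gl_FragColor = vec4(check, 1.0);
}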
What your code does is calculate the factor 4 times (once for each vertex, since it's vertex shader code), interpolate those values across the quad (because they're written into a varying variable), and then output that interpolated variable as the color in the fragment shader.
So it doesn't work that way. You need to do that calculation directly in the fragment shader. You can get the fragment position using the gl_FragCoord built-in variable in the fragment shader.
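A minimal sketch of that fix, assuming a screen-space pattern (gl_FragCoord is in window coordinates, so to reproduce the original object-space checkers you would instead pass the untransformed position to the fragment shader through a varying):
// fragment shader
void main()
{
    float cellSize = 32.0; // assumed checker cell size in pixels
    vec2 cell = floor(gl_FragCoord.xy / cellSize);
    float factor = mod(cell.x + cell.y, 2.0);
    gl_FragColor = vec4(factor, factor, factor, 1.0);
}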
May I suggest the following:
float result = mod(dot(vec2(1.0), step(vec2(0.5), fract(v_uv * u_repeat))), 2.0);
v_uv is a vec2 of UV values,
u_repeat is a vec2 of how many times the pattern should be repeated for each axis.
result is 0 or 1, you can use it in mix function to provide colors, for example:
gl_FragColor = mix(vec4(1.0, 1.0, 1.0, 1.0), vec4(0.0, 0.0, 0.0, 1.0), result);
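Put together, a minimal fragment shader using this might look as follows (a sketch; v_uv and u_repeat are assumed to be supplied by your vertex shader and application respectively):
varying vec2 v_uv;      // assumed UV varying
uniform vec2 u_repeat;  // assumed repeat counts per axis

void main()
{
    // 0.0 or 1.0 depending on which half of each repeated cell the fragment is in
    float result = mod(dot(vec2(1.0), step(vec2(0.5), fract(v_uv * u_repeat))), 2.0);
    gl_FragColor = mix(vec4(1.0, 1.0, 1.0, 1.0), vec4(0.0, 0.0, 0.0, 1.0), result);
}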
Another nice way to do it is by just tiling a known pattern (zooming out). Assuming that you have a square canvas:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
vec2 uv = fragCoord/iResolution.xy;
uv -= 0.5; // moving the coordinate system to middle of screen
// Output to screen
fragColor = vec4(vec3(step(uv.x * uv.y, 0.)), 1.);
}
The code above gives you this kind of pattern.
The code below, by zooming in 4.5 times and taking the fractional part, repeats the pattern 4.5 times; since each repeat is a 2x2 block, that results in 9 squares per row.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
vec2 uv = fract(fragCoord/iResolution.xy * 4.5);
uv -= 0.5; // moving the coordinate system to middle of screen
// Output to screen
fragColor = vec4(vec3(step(uv.x * uv.y, 0.)), 1.);
}
Related
Based on the accepted answer for the question Is there a way to draw a circle with the fragment shader at the position of a point from the vertex shader?, I am having the following results when I try to draw 3 circles based on the points (0.0, -0.75, 0.0), (0.0, 0.0, 0.0) and (0.0, 0.75, 0.0). Both window and viewport are 418x418.
The question is: why is only the center point rendered properly, while the bottom circle is stretched and the top circle is shrunk, both on the Y-axis?
Vertex Shader
precision highp float;
attribute vec4 vPosition;
varying vec2 pointPos;
uniform vec2 windowSize; // = (window-width, window-height)
void main()
{
gl_Position = vPosition;
gl_PointSize = 900.0;
vec2 ndcPos = gl_Position.xy / gl_Position.w;
pointPos = windowSize * (ndcPos*0.5 + 0.5);
}
Fragment Shader
precision highp float;
varying vec2 pointPos;
uniform vec4 fColor;
const float threshold = 0.3;
const float aRadius = 10.0;
void main()
{
float dist = distance(pointPos, gl_FragCoord.xy);
if (dist > aRadius)
discard;
float d = dist / aRadius;
vec3 color = mix(fColor.rgb, vec3(0.0), step(1.0-threshold, d));
gl_FragColor = vec4(color, 1.0);
}
I am struggling to understand why the top and bottom circles are not being rendered properly, but I have not been able to figure it out yet.
The problem
I'm trying to make shaders for my game. In the fragment shaders, I don't want to set the color of the fragment but add to it. How do I do so? I'm new to this, so sorry for any mistakes.
The code
varying vec3 vN;
varying vec3 v;
varying vec4 color;
#define MAX_LIGHTS 1
void main (void)
{
vec3 N = normalize(vN);
vec4 finalColor = vec4(0.0, 0.0, 0.0, 0.0);
for (int i=0;i<MAX_LIGHTS;i++)
{
vec3 L = normalize(gl_LightSource[i].position.xyz - v);
vec3 E = normalize(-v); // we are in Eye Coordinates, so EyePos is (0,0,0)
vec3 R = normalize(-reflect(L,N));
vec4 Iamb = gl_LightSource[i].ambient;
vec4 Idiff = gl_LightSource[i].diffuse * max(dot(N,L), 0.0);
Idiff = clamp(Idiff, 0.0, 1.0);
vec4 Ispec = gl_LightSource[i].specular * pow(max(dot(R,E),0.0),0.3*gl_FrontMaterial.shininess);
Ispec = clamp(Ispec, 0.0, 1.0);
finalColor += Iamb + Idiff + Ispec;
}
gl_FragColor = color * finalColor;
}
Thanks in advance!
You cannot read the color of the fragment you're writing to. In simple scenarios (like yours), what you can do is enable blending and set the blending functions to achieve additive blending.
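On the application side, additive blending is just a couple of state calls (a minimal sketch; the draw call is a hypothetical placeholder):
/* dst = src * 1 + dst * 1, i.e. the shader output is added to the framebuffer */
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
drawLitGeometry();   /* hypothetical: your normal draw call, once per light */
glDisable(GL_BLEND);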
For more complex logic you'd create a framebuffer object, attach a texture to it, render your input (e.g. the scene) to that texture, then switch to and render with another framebuffer (or the default "screen" one); this way you can sample your scene from the texture and add the lighting on top. Read how to do this in detail on WebGLFundamentals.com
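A minimal sketch of that render-to-texture setup in desktop GL (width, height, and the two draw helpers are hypothetical placeholders):
GLuint fbo, sceneTex;

/* color texture that will receive the scene */
glGenTextures(1, &sceneTex);
glBindTexture(GL_TEXTURE_2D, sceneTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

/* framebuffer object with that texture as its color attachment */
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, sceneTex, 0);
/* check glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE here */

/* pass 1: render the scene into the texture */
drawScene();                        /* hypothetical helper */

/* pass 2: render to the default framebuffer, sampling the scene texture
   and adding the lighting on top in the shader */
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, sceneTex);
drawFullscreenQuadWithLighting();   /* hypothetical helper */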
I've got a shader that implements shadow mapping like this:
#version 430 core
out vec4 color;
in VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} fs_in;
layout(binding = 0) uniform sampler2DShadow shadow_tex;
uniform vec3 light_ambient_albedo = vec3(1.0);
uniform vec3 light_diffuse_albedo = vec3(1.0);
uniform vec3 light_specular_albedo = vec3(1.0);
uniform vec3 ambient_albedo = vec3(0.1, 0.1, 0.2);
uniform vec3 diffuse_albedo = vec3(0.4, 0.4, 0.8);
uniform vec3 specular_albedo = vec3(0.0, 0.0, 0.0);
uniform float specular_power = 128.0;
void main(void) {
//color = vec4(0.4, 0.4, 0.8, 1.0);
//normalize
vec3 N = normalize(fs_in.N);
vec3 L = normalize(fs_in.L);
vec3 V = normalize(fs_in.V);
//calculate R
vec3 R = reflect(-L, N);
//calculate ambient
vec3 ambient = ambient_albedo * light_ambient_albedo;
//calculate diffuse
vec3 diffuse = max(dot(N, L), 0.0) * diffuse_albedo * light_diffuse_albedo;
//calculate specular
vec3 specular = pow(max(dot(R, V), 0.0), specular_power) * specular_albedo * light_specular_albedo;
//write color
color = textureProj(shadow_tex, fs_in.shadow_coord) * vec4(ambient + diffuse + specular, 0.5);
//if in shadow, then multiply color by 0.5 ^^, except alpha
}
What I want to do is to check first if the fragment is indeed in the shadow, and only then change the color (halve it, such that it becomes halfway between fully black and original color).
However, how do I check whether the textureProj(...) result is indeed in shadow? As far as I know it returns a normalized float value.
Would something like textureProj(...) > 0.9999 suffice already? I know that it can return values other than zero or one if you are using multisampling, and I'd like behaviour that will not just break at one point.
The outputting vertex shader:
#version 430 core
layout(location = 0) in vec4 position;
layout(location = 0) uniform mat4 model_matrix;
layout(location = 1) uniform mat4 view_matrix;
layout(location = 2) uniform mat4 proj_matrix;
layout(location = 3) uniform mat4 shadow_matrix;
out VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} vs_out;
uniform vec4 light_pos = vec4(-20.0, 7.5, -20.0, 1.0);
void main(void) {
vec4 local_light_pos = view_matrix * light_pos;
vec4 p = view_matrix * model_matrix * position;
//normal
vs_out.N = vec3(0.0, 1.0, 0.0);
//light vector
vs_out.L = local_light_pos.xyz - p.xyz;
//view vector
vs_out.V = -p.xyz;
//light space coordinates
vs_out.shadow_coord = shadow_matrix * position;
gl_Position = proj_matrix * p;
}
Note that the fragment shader is for terrain and the vertex shader is for the floor, so there might be minor inconsistencies between the two, but they should be irrelevant.
shadow_matrix is a uniform passed in as bias_matrix * light_projection_matrix * light_view_matrix * light_model_matrix.
textureProj (...) does not return a normalized floating-point value. It does return a single float if you use it on a sampler<1D|2D|2DRect>Shadow, but this value represents the result of a depth test. 1.0 = pass, 0.0 = fail.
Now, the interesting thing to note here, and the reason returning a float for a shadow sampler is meaningful at all has to do with filtering the shadow map. If you use a GL_LINEAR filter mode on the shadow map together with a shadow sampler, GL will actually pick the 4 closest texels in the shadow map and perform 4 independent depth tests.
Each depth test still has a binary result, but GL will return a weighted average of the result of all 4 tests (based on distance from the ideal sample location). So if you use GL_LINEAR in conjunction with a shadow sampler, you will have a value that lies somewhere in-between 0.0 and 1.0 representing the average occlusion for the 4 nearest depth samples.
I should point out that your use of textureProj (...) looks potentially wrong to me. The coordinate it uses is a 4D vector consisting of (s,t,r) [projected coordinates] and (q) [depth value to test]. I do not see anywhere in your code where you are assigning q a depth value. If you could edit your question to include the vertex/geometry shader that is outputting shadow_coord, that would help.
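For what it's worth, once the q component is set up correctly, one way to use the averaged result without a hard threshold is to treat it as a visibility factor (a sketch based on the shader above):
float visibility = textureProj(shadow_tex, fs_in.shadow_coord); // 0.0 .. 1.0
vec3 lit = ambient + diffuse + specular;
// 0.5 when fully in shadow, 1.0 when fully lit, smooth on filtered edges
color = vec4(lit * mix(0.5, 1.0, visibility), 1.0);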
Try the following:
Get the distance from each vertex of your model to the light.
Send this distance to your fragment shader.
Compare the distance to the value stored in your shadow map sampler (I assume this texture stores the depth values of your scene from the camera's point of view?)
If the distance is greater than the sampled value, the point is in shadow; otherwise it is not (see the sketch below).
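A minimal fragment-shader sketch of that comparison (all names here are assumed, and the shadow map is assumed to store the scene's distance to the light, written in the same units during the shadow pass):
varying float v_lightDistance;  // assumed: vertex-to-light distance from the vertex shader
varying vec4 v_shadowCoord;     // assumed: position in the light's clip space
uniform sampler2D u_shadowMap;  // assumed: distance to the light, as seen from the light

void main()
{
    float storedDistance = texture2DProj(u_shadowMap, v_shadowCoord).r;
    // small bias to avoid self-shadowing ("shadow acne")
    float lit = (v_lightDistance > storedDistance + 0.005) ? 0.5 : 1.0;
    gl_FragColor = vec4(vec3(lit), 1.0);
}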
If this is confusing, here's a pair of tutorials that should help:
http://ogldev.atspace.co.uk/www/tutorial23/tutorial23.html
http://ogldev.atspace.co.uk/www/tutorial24/tutorial24.html
I have been trying to implement deferred rendering for the past 2 weeks. I have finally come to the spot lighting pass, using the stencil buffer and linearized depth. I hold 3 framebuffer textures: albedo, normal+depth (X, Y, Z, EyeViewLinearDepth), and a lighting texture. So I draw my light (sphere) and apply this fragment shader:
void main(void)
{
vec2 texCoord = gl_FragCoord.xy * u_inverseScreenSize.xy;
float linearDepth = texture2D(u_normalDepth, texCoord.st).a;
// vector to far plane
vec3 viewRay = vec3(v_vertex.xy * (-farClip/v_vertex.z), -farClip);
// scale viewRay by linear depth to get view space position
vec3 vertex = viewRay * linearDepth;
vec3 normal = texture2D(u_normalDepth, texCoord.st).xyz*2.0 - 1.0;
vec4 ambient = vec4(0.0, 0.0, 0.0, 1.0);
vec4 diffuse = vec4(0.0, 0.0, 0.0, 1.0);
vec4 specular = vec4(0.0, 0.0, 0.0, 1.0);
vec3 lightDir = lightpos - vertex;
vec3 R = normalize(reflect(lightDir, normal));
vec3 V = normalize(vertex);
float lambert = max(dot(normal, normalize(lightDir)), 0.0);
if (lambert > 0.0) {
float distance = length(lightDir);
if (distance <= u_lightRadius) {
//CLASSICAL LIGHTING COMPUTATION PART
}
}
vec4 final_color = vec4(ambient + diffuse + specular);
gl_FragColor = vec4(final_color.xyz, 1.0);
}
The variables you need to know: v_vertex is the eye-space position of the vertex (of the sphere), lightpos is the position/center of the light in eye space, and linearDepth is generated in the geometry pass, in eye space.
The problem is that the code fails the check if (distance <= u_lightRadius). The light is never computed unless I remove the distance check. I am sure that I pass these values correctly; the radius is 170.0, and the light position is only 40-50 units away from the model. There is definitely something wrong, but I can't find it. I have tried many combinations of radius and the other variables.
I'm having difficulties understanding the math between the different shader stages.
In the fragment shader from the light's perspective, I basically write out the fragment depth (gl_FragCoord.z) as an RGB color:
#version 330
out vec4 shader_fragmentColor;
void main()
{
shader_fragmentColor = vec4(gl_FragCoord.z, gl_FragCoord.z, gl_FragCoord.z, 1);
//shader_fragmentColor = vec4(1, 0.5, 0.5, 1);
}
When rendering the scene using the above shader, it displays the scene in an all-white color. I suppose that's because gl_FragCoord.z is bigger than 1; hopefully it's not maxed out at 1. But we can leave that question alone for now.
In the geometry shader from the camera's perspective, I basically turn all points into quads and write out the (probably incorrect) texture position to look up in the light texture. The math here is the question. I'm also a bit unsure whether the interpolated value will be correct in the next shader stage.
#version 330
#extension GL_EXT_geometry_shader4 : enable
uniform mat4 p1_modelM;
uniform mat4 p1_cameraPV;
uniform mat4 p1_lightPV;
out vec4 shader_lightTexturePosition;
void main()
{
float s = 10.00;
vec4 llCorner = vec4(-s, -s, 0.0, 0.0);
vec4 llWorldPosition = ((p1_modelM * llCorner) + gl_in[0].gl_Position);
gl_Position = p1_cameraPV * llWorldPosition;
shader_lightTexturePosition = p1_lightPV * llWorldPosition;
EmitVertex();
vec4 rlCorner = vec4(+s, -s, 0.0, 0.0);
vec4 rlWorldPosition = ((p1_modelM * rlCorner) + gl_in[0].gl_Position);
gl_Position = p1_cameraPV * rlWorldPosition;
shader_lightTexturePosition = p1_lightPV * rlWorldPosition;
EmitVertex();
vec4 luCorner = vec4(-s, +s, 0.0, 0.0);
vec4 luWorldPosition = ((p1_modelM * luCorner) + gl_in[0].gl_Position);
gl_Position = p1_cameraPV * luWorldPosition;
shader_lightTexturePosition = p1_lightPV * luWorldPosition;
EmitVertex();
vec4 ruCorner = vec4(+s, +s, 0.0, 0.0);
vec4 ruWorldPosition = ((p1_modelM * ruCorner) + gl_in[0].gl_Position);
gl_Position = p1_cameraPV * ruWorldPosition;
shader_lightTexturePosition = p1_lightPV * ruWorldPosition;
EmitVertex();
EndPrimitive();
}
In the fragment shader from the camera's perspective, I basically look up in the light texture what color would be shown from the light's perspective and write out the same color.
#version 330
uniform sampler2D p1_lightTexture;
in vec4 shader_lightTexturePosition;
out vec4 shader_fragmentColor;
void main()
{
vec4 lightTexel = texture2D(p1_lightTexture, shader_lightTexturePosition.xy);
shader_fragmentColor = lightTexel;
/*
if(lightTexel.x < shader_lightTexturePosition.z)
shader_fragmentColor = vec4(1, 0, 0, 1);
else
shader_fragmentColor = vec4(0, 1, 0, 1);
*/
//shader_fragmentColor = vec4(1, 1, 1, 1);
}
When rendering from the camera's perspective, I see the scene drawn as it should be, but with incorrect texture coordinates applied that repeat. The repeating texture is probably caused by the texture coordinates being outside the bounds of 0 to 1.
I've tried several things but still fail to understand what the math should be. Some of the commented-out code, and one example I'm unsure of, is:
shader_lightTexturePosition = normalize(p1_lightPV * llWorldPosition) / 2 + vec4(0.5, 0.5, 0.5, 0.5);
for the lower-left corner, with similar code for the other corners.
From the solution I expect the scene to be rendered from the camera's perspective with exactly the same color as from the light's perspective, with perhaps some precision error.
I figured out the texture-mapping bit myself; the depth-value bit is still a bit strange.
Convert the screen-projected coordinates to normalized device coordinates, then add 1 and divide by 2:
vec4 textureNormalizedCoords(vec4 screenProjected)
{
vec3 normalizedDeviceCoords = (screenProjected.xyz / screenProjected.w);
return vec4( (normalizedDeviceCoords.xy + 1.0) / 2.0, screenProjected.z * 0.005, 1/screenProjected.w);
}
void main()
{
float s = 10.00;
vec4 llCorner = vec4(-s, -s, 0.0, 0.0);
vec4 llWorldPosition = ((p1_modelM * llCorner) + gl_in[0].gl_Position);
gl_Position = p1_cameraPV * llWorldPosition;
shader_lightTextureCoords = textureNormalizedCoords(p1_lightPV * llWorldPosition);
EmitVertex();