I am trying to hide a specific face (say, face 0) using the discard keyword.
Vertex Shader:
uniform float v_triangleId;
varying vec3 normal;
void main() {
    v_triangleId = floor(gl_VertexID / 3.0);
    normal = normalize(gl_NormalMatrix * gl_Normal);
    gl_Position = ftransform();
}
Pixel Shader:
uniform float v_triangleId;
void main(void)
{
    float diff = abs(v_triangleId - 0.0); // I just want to hide face 0
    if (diff < 0.01) {
        discard;
    }
}
However, in this case all the faces are hidden, instead of just a single face. I was wondering what went wrong.
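A likely cause, for what it's worth: a uniform is read-only inside a shader, so the vertex shader cannot write to v_triangleId; the fragment shader then just sees the uniform's default value of 0, which makes the discard fire for every fragment. The id needs to travel as a flat vertex output instead. A minimal sketch, assuming a #version 130 or later context (needed for gl_VertexID) and non-indexed geometry, where gl_VertexID / 3 really does identify the triangle:
// Vertex shader
flat out int v_triangleId;           // flat: the id must not be interpolated
void main() {
    v_triangleId = gl_VertexID / 3;  // integer division, no floor() needed
    gl_Position = ftransform();
}
// Fragment shader
flat in int v_triangleId;
void main() {
    if (v_triangleId == 0)           // exact integer compare, no epsilon needed
        discard;
}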
I have the following fragment shader to draw a grid
#version 450 core

layout(location = 0) in vec2 position;
layout(location = 0) out vec4 fragColor;

layout(binding = 0) uniform ubo
{
    mat4 uCameraView;
    vec4 uGridColor;
    float uTileSize;
    float uGridBorderSize;
};

void main()
{
    vec2 uv = mod(position, uTileSize);
    vec2 border = mod(uv + (uGridBorderSize / 2.0), uTileSize);
    border -= mod(uv - (uGridBorderSize / 2.0), uTileSize);

    if (length(border) > uTileSize - uGridBorderSize)
    {
        fragColor = uGridColor;
    }
    else
    {
        fragColor = vec4(0.0);
    }
}
This works fine until I change the zoom. The issue appears when the camera gets far away and uGridBorderSize becomes smaller than a pixel on the screen: I get this ugly effect where lines appear and disappear as the zoom changes.
So I wonder, is it possible to apply antialiasing to these lines so they appear consistently?
In the end I found this fragment shader, which draws antialiased grid lines:
void main()
{
    // https://madebyevan.com/shaders/grid/
    vec2 uv = fragCoord / uTileSize;
    // Distance to the nearest grid line in x and y, measured in screen pixels
    vec2 grid = abs(fract(uv - 0.5) - 0.5) / fwidth(uv);
    // Scale the pixel distance so the border width and zoom factor are respected
    float line = (min(grid.x, grid.y) * uGridBorderSize) / uScaleFactor;
    float color = 1.0 - min(line, 1.0);
    fragColor = uGridColor * color;
}
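For context on why this holds up under zoom: fwidth(uv) is abs(dFdx(uv)) + abs(dFdy(uv)), i.e. roughly how much uv changes from one screen pixel to the next, so dividing by it converts the distance to the nearest grid line from tile units into pixels, which keeps the rendered line width constant on screen. A roughly equivalent smoothstep-based variant (just a sketch; uv and the uniforms are as above, and the 0.5/1.5 pixel edges are a tunable choice):
vec2 dist = abs(fract(uv - 0.5) - 0.5);               // distance to nearest line, in tile units
vec2 px = fwidth(uv);                                 // size of one screen pixel, in tile units
vec2 aa = 1.0 - smoothstep(0.5 * px, 1.5 * px, dist); // fade each line out over ~1 pixel
fragColor = uGridColor * max(aa.x, aa.y);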
In a similar way to this related question, I am trying to render complex shapes by means of ray tracing inside a cube: that is, 12 triangles are used to generate a bounding box, and each fragment is used to render the given shape by ray tracing.
For this example, I am using the simplest shape: a sphere.
The problem is that when the cube is rotated, the different triangle angles distort the sphere:
What I have attempted so far:
I tried doing the ray tracing in world space, and also in view space, as suggested in the related question.
I checked that the world coordinate of the fragment is correct by doing a reverse projection from gl_FragCoord, which gives the same output.
I switched to an orthographic projection, where the distortion is reversed:
My conclusion is that, as described in the related question, the interpolation of the coordinates and the projection are at the origin of the problem.
I could project the cube onto a plane perpendicular to the camera direction, but I would like to get to the bottom of the issue.
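One cheap experiment along these lines (a diagnostic only, not a fix; requires GLSL 1.30+): declaring the interpolated coordinate with the noperspective qualifier makes the hardware interpolate it linearly in window space instead of perspective-correctly, which quickly shows how much the interpolation mode contributes to the distortion:
// Vertex shader
noperspective out vec4 worldCoordLinear;
// Fragment shader
noperspective in vec4 worldCoordLinear;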
Related code:
Vertex shader:
#version 420 core

uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

in vec3 in_Position;
in vec3 in_Normal;

out Vertex
{
    vec4 worldCoord;
    vec4 worldNormal;
} v;

void main(void)
{
    mat4 mv = view * model;
    // Position
    v.worldCoord = model * vec4(in_Position, 1.0);
    gl_Position = projection * mv * vec4(in_Position, 1.0);
    // Normal
    v.worldNormal = normalize(vec4(in_Normal, 0.0));
}
Fragment shader:
#version 420 core

uniform mat4 view;
uniform vec3 cameraPosView;

in Vertex
{
    vec4 worldCoord;
    vec4 worldNormal;
} v;

out vec4 out_Color;

bool sphereIntersection(vec4 rayOrig, vec4 rayDirNorm, vec4 spherePos, float radius, out float t_out)
{
    float r2 = radius * radius;
    vec4 L = spherePos - rayOrig;
    float tca = dot(L, rayDirNorm);
    float d2 = dot(L, L) - tca * tca;
    if (d2 > r2)
    {
        return false;
    }
    float thc = sqrt(r2 - d2);
    float t0 = tca - thc;
    float t1 = tca + thc;
    if (t0 > 0)
    {
        t_out = t0;
        return true;
    }
    if (t1 > 0)
    {
        t_out = t1;
        return true;
    }
    return false;
}

void main()
{
    vec3 color = vec3(1);
    vec4 spherePos = vec4(0.0, 0.0, 0.0, 1.0);
    float radius = 1.0;
    float t_out = 0.0;
    vec4 cord = v.worldCoord;
    vec4 rayOrig = inverse(view) * vec4(-cameraPosView, 1.0);
    vec4 rayDir = normalize(cord - rayOrig);
    if (sphereIntersection(rayOrig, rayDir, spherePos, 0.3, t_out))
    {
        out_Color = vec4(1.0);
    }
    else
    {
        discard;
    }
}
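One thing worth ruling out (a sketch, not a confirmed fix): the ray origin can be derived from the view matrix alone, which removes any doubt about the sign convention of cameraPosView. For a standard view matrix the camera sits at the view-space origin, so its world-space position is the inverse view transform applied to the origin:
vec4 rayOrig = inverse(view) * vec4(0.0, 0.0, 0.0, 1.0); // camera position in world space
vec4 rayDir = normalize(v.worldCoord - rayOrig);         // through the interpolated fragment position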
I have some code I wrote in The Book of Shaders editor:
#ifdef GL_ES
precision mediump float;
#endif

uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;

float remap(float a, float b, float c, float d, float t) {
    return ((t - a) / (b - a)) * (d - c) + c;
}

float outline(vec2 st) {
    return smoothstep(0.99, 1.0, st.y) + smoothstep(0.99, 1.0, st.x) + smoothstep(0.01, 0.0, st.y) + smoothstep(0.01, 0.0, st.x);
}

float mouseFoo(vec2 scaledSt, vec2 u_mouse, float scaleVal) {
    vec2 scaledMouse = u_mouse * scaleVal;
    if (scaledSt.x < ceil(scaledMouse.x) && scaledSt.x > floor(scaledMouse.x) && scaledSt.y < ceil(scaledMouse.y) && scaledSt.y > floor(scaledMouse.y)) {
        // if(u_mouse.x < 100.0) {
        return 1.0;
    } else {
        return 0.0;
    }
}

void main() {
    vec2 st = gl_FragCoord.xy / u_resolution.xy;
    vec3 color = vec3(0.03, 0.07, 0.15);
    vec3 redColor = vec3(1.0, 0.0, 0.0);
    vec3 outlineColor = vec3(1.0);
    float floorSt;
    float scaleVal = 5.0;
    vec2 scaledSt = st * scaleVal;

    // tile
    st *= scaleVal;
    floorSt = floor(st.x);
    st = fract(st);

    // inner color
    color = mix(color, redColor, mouseFoo(scaledSt, u_mouse / u_resolution.xy, scaleVal));

    // outline
    color = mix(color, outlineColor, outline(st));

    gl_FragColor = vec4(color, 1.0);
}
I'm wondering if it's possible to have the red color tween back to the blue color when a box is hovered off? I think I might have an idea of how to do it if I were to write data to a texture and look that up, but even then I'm not entirely sure.
Use mix
Use mix to interpolate between red and blue. You need another variable that transitions from 0 to 1 to drive the blend; that variable is mix's third parameter.
ShaderToy example:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord / iResolution.xy;

    vec3 red = vec3(1, 0, 0);
    vec3 blue = vec3(0, 0, 1);

    // Output to screen
    fragColor = vec4(mix(red, blue, uv.x), 1.0);
}
which produces:
In your case, you'll want the third parameter (the alpha or lerp parameter) to be driven over some time (say, 0.2 seconds) after the mouse enters the hover area. You'll need to do one of the following (see the sketch below):
Detect at a higher level that the hover has started, and then pass the mouse-down time in as a uniform
Drive the 3rd parameter directly from a uniform
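A sketch of the first option (u_hoverChangeTime and u_hovered are hypothetical uniform names the application would have to set): record the time at which the hover state last flipped, then ramp the blend factor over 0.2 seconds in the shader:
uniform float u_time;            // current time in seconds (already provided by the editor)
uniform float u_hoverChangeTime; // set by the app whenever the hover state changes
uniform bool  u_hovered;         // current hover state

// in main():
float t = clamp((u_time - u_hoverChangeTime) / 0.2, 0.0, 1.0);
float blend = u_hovered ? t : 1.0 - t; // ramps up on enter, back down on leave
color = mix(color, redColor, blend);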
While implementing SSLR, I ran into a problem with objects being displayed incorrectly: they are projected infinitely "down" and do not appear in the mirror at all. I give the code and a screenshot below.
Fragment SSLR shader:
#version 330 core

uniform sampler2D normalMap; // in view space
uniform sampler2D depthMap;  // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform mat4 projection;
uniform mat4 inv_projection;

in vec2 texCoord;
layout (location = 0) out vec4 fragColor;

vec3 calcViewPosition(in vec2 texCoord) {
    // Combine UV & depth into XY & Z (NDC)
    vec3 rawPosition = vec3(texCoord, texture(depthMap, texCoord).r);
    // Convert from (0, 1) range to (-1, 1)
    vec4 ScreenSpacePosition = vec4(rawPosition * 2 - 1, 1);
    // Undo Perspective transformation to bring into view space
    vec4 ViewPosition = inv_projection * ScreenSpacePosition;
    // Perform perspective divide and return
    return ViewPosition.xyz / ViewPosition.w;
}

vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
    dir *= 0.25f;
    for (int i = 0; i < 20; i++) {
        hitCoord += dir;
        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
        float depth = calcViewPosition(projectedCoord.xy).z;
        dDepth = hitCoord.z - depth;
        if (dDepth < 0.0) return projectedCoord.xy;
    }
    return vec2(-1.0);
}

void main() {
    vec3 normal = texture(normalMap, texCoord).xyz * 2.0 - 1.0;
    vec3 viewPos = calcViewPosition(texCoord);

    // Reflection vector
    vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));

    // Ray cast
    vec3 hitPos = viewPos;
    float dDepth;
    float minRayStep = 0.1f;
    vec2 coords = rayCast(reflected * max(minRayStep, -viewPos.z), hitPos, dDepth);

    if (coords != vec2(-1.0))
        fragColor = mix(texture(colorMap, texCoord), texture(colorMap, coords), texture(reflectionStrengthMap, texCoord).r);
    else
        fragColor = texture(colorMap, texCoord);
}
Screenshot:
Also, the lamp is not reflected at all.
I will be grateful for any help.
UPDATE:
colorMap:
normalMap:
depthMap:
UPDATE: I solved the problem of the wrong reflection, but there are still issues.
I solved it as follows: ViewPosition.y *= -1
Now, as you can see in the screenshot, the lower parts of the objects are not reflected for some reason.
The question still remains open.
I'm struggling to get a good SSR too. I found two things that could help.
To get the view-space normals you have to keep only the rotation of the camera and remove the translation; if you don't, the normals get stretched in the direction opposite to the camera movement and no longer have the right direction, even if you normalize them again. For a column-major mat4 you can do it like this:
mat4 viewNoTranslation = view;
viewNoTranslation[3] = vec4(0.0, 0.0, 0.0, 1.0);
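For what it's worth, casting the matrix to mat3 drops the translation column as well, so an equivalent and shorter form (assuming a worldNormal vec3 at hand) is:
vec3 viewNormal = normalize(mat3(view) * worldNormal); // rotation/scale only, no translation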
The depth sampled from the depth image is non-linear, and if you linearize it you do get values from 0 to 1, but they are not accurate enough for the precision needed here. I tried getting the depth value straight from the vertex shader:
gl_Position = ubo.projection * ubo.view * ubo.model * inPos;
depth = gl_Position.z;
I don't know if it is right, but the depth is now more accurate.
If you make progress, please update :)
I've been having problems sending the shininess factor to my bump mapping shader.
The result always looks like this: http://i.imgur.com/unzdx.jpg
But if I hard-code the value to 0.0 inside the shader, it works just fine.
When I send 0.0 to the shader, it turns out like in the picture above.
Any ideas?
Here's my shader
#version 110

uniform sampler2D tex;
uniform sampler2D bmap;
uniform bool boolBump;
uniform vec4 vecColor;
uniform bool onlyColor;
uniform float fTransparencyThresh;
uniform float fShininess;
uniform float alpha;

varying vec3 vecLight;
varying vec3 vecEye;
varying vec3 vecNormal;

vec4 getLighting()
{
    // Ambient part
    vec4 color = (gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) + (gl_LightSource[0].ambient * gl_FrontMaterial.ambient);

    // For bump mapping, the normal comes from the bump map texture lookup
    vec3 n = normalize(vecNormal);
    if (boolBump)
    {
        n = normalize(texture2D(bmap, gl_TexCoord[0].st).xyz * 2.0 - 1.0);
    }

    vec3 l = normalize(vecLight);

    // Lambert term
    float NdotL = dot(n, l);
    if (NdotL > 0.0)
    {
        // Diffuse part
        color += gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * max(0.0, NdotL);

        // Specular part
        vec3 e = normalize(vecEye);
        vec3 r = normalize(-reflect(l, n));
        float spec = pow(max(0.0, dot(r, e)), fShininess);
        color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;
    }
    return color;
}

void main(void)
{
    vec4 texel = texture2D(tex, gl_TexCoord[0].st);
    if (texel.a < fTransparencyThresh)
        discard;

    // Get shading
    vec4 color = getLighting();

    // Color only mode?
    if (onlyColor)
    {
        color *= vecColor;
    }
    else
    {
        color *= texel;
    }

    // Set fragment color, alpha comes from MTL file
    gl_FragColor = vec4(color.xyz, alpha);
}
Edit, OpenGL code:
void MyClass::sendToShader(const OBJ::StelModel* pStelModel, Effect cur, bool& tangEnabled, int& tangLocation)
{
    int location;
    tangEnabled = false;

    if (cur != No)
    {
        location = curShader->uniformLocation("fTransparencyThresh");
        curShader->setUniform(location, fTransparencyThresh);

        location = curShader->uniformLocation("alpha");
        curShader->setUniform(location, pStelModel->pMaterial->alpha);

        location = curShader->uniformLocation("fShininess");
        curShader->setUniform(location, 0.0f);
        ...
Edit: Even this won't work:
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
glUniform1f(loc, 0.0f);
Note that pow(x, y) is undefined in GLSL when x == 0 and y <= 0. This means spec is undefined whenever max(0.0, dot(r, e)) is 0.0 (i.e. dot(r, e) <= 0.0) and fShininess is 0.0.
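A minimal guard (a sketch, assuming a specular factor of 1.0 is the intent at fShininess == 0, since x^0 == 1 for any x > 0):
float rDotE = max(0.0, dot(r, e));
// pow(x, y) is undefined in GLSL for x == 0 with y <= 0, so special-case it
float spec = (fShininess <= 0.0) ? 1.0 : pow(rDotE, fShininess);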
Do you make sure the program is actively bound when you call glUniform? Do you check glGetError anywhere?