I want to draw a smooth circle in GLSL but with a border of variable width, in a separate colour. Potentially, the interior of the circle could be transparent.
My original non-smooth shader:
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
uniform float radius;
uniform vec2 position;
uniform vec4 borderColor;
uniform float borderThickness;
void main()
{
    float distanceX = abs(gl_FragCoord.x - position.x);
    float distanceY = abs(gl_FragCoord.y - position.y);
    if(sqrt(distanceX * distanceX + distanceY * distanceY) > radius)
        discard;
    else if(sqrt(distanceX * distanceX + distanceY * distanceY) <= radius &&
            sqrt(distanceX * distanceX + distanceY * distanceY) >= radius-borderThickness)
        frag_colour = borderColor;
    else
        frag_colour = color;
}
This works, but is not smooth. I can draw smooth circles:
#version 330
layout(location=0) out vec4 frag_colour;
in vec4 color;
uniform float radius;
uniform vec2 position;
uniform vec4 borderColor;
uniform float borderThickness;
void main()
{
    vec2 uv = gl_FragCoord.xy - position;
    float d = sqrt(dot(uv,uv));
    float t = 1.0 - smoothstep(radius-borderThickness, radius, d);
    frag_colour = vec4(color.rgb, color.a*t);
}
But I can't work out how to add my border to the above.
You have to compute the absolute value of the difference between the radius and the distance, and interpolate between 0.0 and borderThickness:
float t = 1.0 - smoothstep(0.0, borderThickness, abs(radius-d));
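Dropped into your smooth shader, an outline-only version (transparent interior) could look like this; it is just a sketch reusing the uniforms from the question:
void main()
{
    vec2 uv = gl_FragCoord.xy - position;
    float d = length(uv);
    // fade the border in and out over the ring where |radius - d| < borderThickness
    float t = 1.0 - smoothstep(0.0, borderThickness, abs(radius - d));
    frag_colour = vec4(borderColor.rgb, borderColor.a * t);
}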
If you want to fill the circle, then you need two gradients: one for the transition between the inner circle and the border, and a second one for the alpha channel along the outer edge of the outline. Mix the colors by the former and set the alpha channel from the latter:
float t1 = 1.0 - smoothstep(radius-borderThickness, radius, d);
float t2 = 1.0 - smoothstep(radius, radius+borderThickness, d);
frag_colour = vec4(mix(borderColor.rgb, color.rgb, t1), t2);
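Putting it together, a filled circle with a smooth border could look roughly like this (again only a sketch built from the uniforms in the question; the alpha components of color and borderColor are ignored for brevity):
void main()
{
    vec2 uv = gl_FragCoord.xy - position;
    float d = length(uv);
    // t1 is 1.0 inside the inner disc and falls to 0.0 at the outer radius (fill -> border)
    float t1 = 1.0 - smoothstep(radius - borderThickness, radius, d);
    // t2 is 1.0 up to the outer radius and fades to 0.0 beyond it (overall alpha)
    float t2 = 1.0 - smoothstep(radius, radius + borderThickness, d);
    frag_colour = vec4(mix(borderColor.rgb, color.rgb, t1), t2);
}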
Related
So, I have a circle in GLSL that is supposed to be drawn around the mouse, but the resulting circle is drawn in the wrong location.
I'm drawing the circle by taking the step() of the distance between st and the vec2 built from the u_mouse uniform.
I have no idea why this is happening.
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
void main() {
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    st.x *= u_resolution.x/u_resolution.y;
    float pct = 0.0;
    vec2 brightness = vec2(0.0);
    pct = step(distance(st,vec2(u_mouse/100.0)),0.5);
    vec3 color = vec3(0.);
    color = vec3(pct);
    brightness = vec2(1.0);
    gl_FragColor = vec4(color,brightness);
}
Here is an example using Shadertoy that can be trivially adapted to your OpenGL/GLSL code.
The code comes from a basic 2D tutorial on Shadertoy on how to draw a circle around the centre of the screen, by coloring a pixel based on whether it lies within a given Cartesian distance (i.e. its radius) from the centre. It is then modified to instead draw the circle around the mouse pointer:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 m = (iMouse.xy / iResolution.xy);
    vec2 uv = (fragCoord.xy / iResolution.xy);
    uv.x *= iResolution.x/iResolution.y;
    m.x *= iResolution.x/iResolution.y;
    float radius = 0.25;
    vec3 pixel;
    if( (uv.x-m.x)*(uv.x-m.x) + (uv.y-m.y)*(uv.y-m.y) < radius*radius ) {
        pixel = vec3(0.3, 0.3, 1.0);
    } else {
        pixel = vec3(1.0, 0.3, 0.2);
    }
    fragColor = vec4(pixel, 1.0);
}
Demo:
I would check that you are passing in the values you expect for the mouse and resolution uniforms, and take into account whether your window is fullscreen or not.
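Applying the same idea to the shader from the question, the key is to normalize u_mouse by u_resolution (not by 100) and to apply the same aspect-ratio scale to the mouse coordinate as to st. A sketch of that fix, assuming u_mouse is given in pixels like gl_FragCoord:
void main() {
    vec2 st = gl_FragCoord.xy/u_resolution.xy;
    vec2 m  = u_mouse.xy/u_resolution.xy;   // put the mouse in the same 0..1 space as st
    st.x *= u_resolution.x/u_resolution.y;  // apply the aspect correction...
    m.x  *= u_resolution.x/u_resolution.y;  // ...to both coordinates
    float pct = step(distance(st, m), 0.5);
    gl_FragColor = vec4(vec3(pct), 1.0);
}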
I'd like my faded lighting (based on distance from a point) to be a perfect circle no matter the resolution. Currently, the light is only a circle if the height and width of the window are equal.
This is what it looks like right now:
My fragment shader looks like this:
#ifdef GL_ES
precision mediump float;
#endif
#define MAX_LIGHTS 10
// varying input variables from our vertex shader
varying vec4 v_color;
varying vec2 v_texCoords;
// a special uniform for textures
uniform sampler2D u_texture;
uniform vec2 u_resolution;
uniform float u_time;
uniform vec2 lightsPos[MAX_LIGHTS];
uniform vec3 lightsColor[MAX_LIGHTS];
uniform float lightsSize[MAX_LIGHTS];
uniform vec2 cam;
uniform vec2 randPos;
uniform bool dark;
void main()
{
vec4 lights = vec4(0.0,0.0,0.0,1.0);
float ratio = u_resolution.x/u_resolution.y;
vec2 st = gl_FragCoord.xy/u_resolution;
vec2 loc = vec2(.5 + randPos.x, 0.5 + randPos.y);
for(int i = 0; i < MAX_LIGHTS; i++)
{
if(lightsSize[i] != 0.0)
{
// trying to reshape the light
// vec2 st2 = st;
// st2.x *= ratio;
float size = 2.0/lightsSize[i];
float dist = max(0.0, distance(lightsPos[i], st)); // st here was replaced with st2 when experimenting
lights = lights + vec4(max(0.0, lightsColor[i].x - size * dist),
                       max(0.0, lightsColor[i].y - size * dist),
                       max(0.0, lightsColor[i].z - size * dist),
                       0.0);
}
}
if(dark)
{
lights.r = max(lights.r, 0.075);
lights.g = max(lights.g, 0.075);
lights.b = max(lights.b, 0.075);
}
else
{
lights.r += 1.0;
lights.g += 1.0;
lights.b += 1.0;
}
gl_FragColor = texture2D(u_texture, v_texCoords) * lights;
}
I tried reshaping the light by multiplying the x value of the pixel by the ratio of the screen width to the screen height but that caused the lights to be out of place. I couldn't figure out anything that would put them back in their correct place while maintaining their shape.
EDIT: the displacement is determined by my camera's position in my libgdx scene.
What you need is to rescale the difference between the light position and the fragment position:
vec2 dr = st-lightsPos[i];
dr.x*=ratio;
float dist = length(dr);
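Slotted into the loop from the question, that could look like this (a sketch; only the distance computation changes, so the lights stay centred on lightsPos):
for(int i = 0; i < MAX_LIGHTS; i++)
{
    if(lightsSize[i] != 0.0)
    {
        float size = 2.0/lightsSize[i];
        vec2 dr = st - lightsPos[i];   // offset from the light, in normalized coords
        dr.x *= ratio;                 // stretch x so the falloff is circular on screen
        float dist = length(dr);
        lights += vec4(max(0.0, lightsColor[i].x - size * dist),
                       max(0.0, lightsColor[i].y - size * dist),
                       max(0.0, lightsColor[i].z - size * dist),
                       0.0);
    }
}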
Try normalizing st like this:
vec2 st = (gl_FragCoord.xy - .5*u_resolution.xy) / min(u_resolution.x, u_resolution.y);
So that the coordinates of your fragments are in the range [-0.5, 0.5] for y and [-ratio/2, ratio/2] for x, where ratio = u_resolution.x/u_resolution.y (assuming the window is wider than it is tall).
You can also make it [0; 1] for y and [0; ratio] for x by doing
vec2 st = gl_FragCoord.xy / min(u_resolution.x, u_resolution.y);
But the former is more convenient in many cases
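For example, with the centred form a resolution-independent circular falloff around the screen centre is simply (the 0.2/0.5 radii here are arbitrary values for illustration):
vec2 st = (gl_FragCoord.xy - .5*u_resolution.xy) / min(u_resolution.x, u_resolution.y);
float d = length(st);                        // aspect-corrected distance from the centre
float light = 1.0 - smoothstep(0.2, 0.5, d); // stays circular at any resolution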
I decided to follow the classic guide for writing a basic GLSL water shader using the sum-of-sines method. I attempted to implement it inside of Processing 5, where I made a field of vertices in a PShape to make a mesh to mess with. I then overrode the default shaders with my own vertex and fragment shaders, and I dropped in a directional light so I could actually see the normals. I made sure the directional light was movable as well so I could see whether the normals work from all angles.
I got the waves to form height correctly and I had some form of normals working, but the normals are behaving really strangely. When my light passes across the center axis of my water plane, the normals seem to morph between the different waves and change based on the light angle. The gif I captured was too large to post inline, so I'm sure seeing it will explain better than my words:
https://imgur.com/PCznL7U
You should open the link at full size to see the whole picture. Note how, as the light pans from left to right, the normals of the waves seem to morph between two sets? This is especially apparent as it crosses the center. It's as if the normals are inconsistent depending on the direction the object is being lit from.
The sphere in the middle is a normal sphere using the standard Processing shader. I left it there as reference to see the waves as well as confirm where my lighting was and that it was working fine.
Any ideas what I did wrong? I know I did some math incorrectly somewhere.
EDIT: It was recommended that I add the (lengthy) source code [which I should have done from the start].
Vertex Shader:
#define PROCESSING_LIGHT_SHADER
#define MAXWAVES 6
const float pi = 3.14159;
uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
uniform float time; //Time since shader started
attribute vec4 position; //Position the vertex from Processing
attribute vec4 color; //Color of the vertex from Processing
attribute vec3 normal; //Normal of the vertex from Processing
attribute vec4 ambient;
attribute vec4 specular;
attribute vec4 emissive;
attribute float shininess;
varying vec4 vertColor; //Color passed on to fragment shader
varying vec4 backVertColor; //Color passed on to fragment shader
uniform float waveLength[MAXWAVES]; //Length of wave
uniform float speed[MAXWAVES]; //Cycle speed of wave
uniform float amplitude[MAXWAVES]; //Wave cycle height
uniform float xDirection[MAXWAVES];
uniform float yDirection[MAXWAVES]; //Flow vector of wave
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightAmbient[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec3 lightFalloff[8];
uniform vec2 lightSpot[8];
varying vec3 Normal;
varying vec3 FragPos;
varying vec3 Vec;
varying vec3 lightDir;
//Some constants that the processing shader used
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);
float falloffFactor(vec3 lightPos, vec3 vertPos, vec3 coeff) {
vec3 lpv = lightPos - vertPos;
vec3 dist = vec3(one_float);
dist.z = dot(lpv, lpv);
dist.y = sqrt(dist.z);
return one_float / dot(dist, coeff);
}
float spotFactor(vec3 lightPos, vec3 vertPos, vec3 lightNorm, float minCos, float spotExp) {
vec3 lpv = normalize(lightPos - vertPos);
vec3 nln = -one_float * lightNorm;
float spotCos = dot(nln, lpv);
return spotCos <= minCos ? zero_float : pow(spotCos, spotExp);
}
float lambertFactor(vec3 lightDir, vec3 vecNormal) {
return max(zero_float, dot(lightDir, vecNormal));
}
float blinnPhongFactor(vec3 lightDir, vec3 vertPos, vec3 vecNormal, float shine) {
vec3 np = normalize(vertPos);
vec3 ldp = normalize(lightDir - np);
return pow(max(zero_float, dot(ldp, vecNormal)), shine);
}
//Returns the height of a vertex given a single wave param
float WaveHeight(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
return amplitude[waveNumber] * sin(theta * frequency + time * phase);
}
//Returns height of a vertex given all the active waves
// and its current x/y value
float WaveSum(float x, float y)
{
float height = 0.0;
for(int i = 0; i < MAXWAVES; i++)
{
height += WaveHeight(i, x, y);
}
return height;
}
float getDy(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
float A = amplitude[waveNumber] * direction.y * frequency;
return A * cos(theta * frequency + time * phase);
}
float getDx(int waveNumber, float x, float y) {
vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
float frequency = 2.0*pi/waveLength[waveNumber];
float phase = speed[waveNumber] * frequency;
float theta = dot(direction, vec2(x, y));
float A = amplitude[waveNumber] * direction.x * frequency;
return A * cos(theta * frequency + time * phase);
}
//Returns the normal vector for each vertex
vec3 getNormal(float x, float y) {
float dx = 0.0;
float dy = 0.0;
//Sum for each wave
for (int i = 0; i < MAXWAVES; i++) {
dx += getDx(i, x, y);
dy += getDy(i, x, y);
}
vec3 n = vec3(-dx, -dy, 1.0);
return normalize(n);
}
void main() {
vec4 pos = position; //Grab the position from Processing bc it's read only
pos.z = WaveSum(pos.x, pos.y);
gl_Position = transform * pos; //Get clipping matrix for view
vec3 ecVertex = vec3(modelview * pos);
// Normal vector in eye coordinates
vec3 Normal = getNormal(pos.x, pos.y);
vec3 ecNormal = normalize(normalMatrix * Normal);
vec3 ecNormalInv = ecNormal * -one_float;
// Light calculations
vec3 totalAmbient = vec3(0, 0, 0);
vec3 totalFrontDiffuse = vec3(0, 0, 0);
vec3 totalFrontSpecular = vec3(0, 0, 0);
vec3 totalBackDiffuse = vec3(0, 0, 0);
vec3 totalBackSpecular = vec3(0, 0, 0);
for (int i = 0; i < 8; i++) {
if (lightCount == i) break;
vec3 lightPos = lightPosition[i].xyz;
bool isDir = lightPosition[i].w < one_float;
float spotCos = lightSpot[i].x;
float spotExp = lightSpot[i].y;
vec3 lightDir;
float falloff;
float spotf;
if (isDir) {
falloff = one_float;
lightDir = -one_float * lightNormal[i];
} else {
falloff = falloffFactor(lightPos, ecVertex, lightFalloff[i]);
lightDir = normalize(lightPos - ecVertex);
}
spotf = spotExp > zero_float ? spotFactor(lightPos, ecVertex, lightNormal[i],
spotCos, spotExp)
: one_float;
if (any(greaterThan(lightAmbient[i], zero_vec3))) {
totalAmbient += lightAmbient[i] * falloff;
}
if (any(greaterThan(lightDiffuse[i], zero_vec3))) {
totalFrontDiffuse += lightDiffuse[i] * falloff * spotf *
lambertFactor(lightDir, ecNormal);
totalBackDiffuse += lightDiffuse[i] * falloff * spotf *
lambertFactor(lightDir, ecNormalInv);
}
if (any(greaterThan(lightSpecular[i], zero_vec3))) {
totalFrontSpecular += lightSpecular[i] * falloff * spotf *
blinnPhongFactor(lightDir, ecVertex, ecNormal, shininess);
totalBackSpecular += lightSpecular[i] * falloff * spotf *
blinnPhongFactor(lightDir, ecVertex, ecNormalInv, shininess);
}
}
// Calculating final color as result of all lights (plus emissive term).
// Transparency is determined exclusively by the diffuse component.
vertColor =
vec4(totalFrontDiffuse, 1) * color;
backVertColor =
vec4(totalBackDiffuse, 1) * color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor; //Color from vertshader
varying vec4 backVertColor; //Color from vertshader
void main() {
gl_FragColor = gl_FrontFacing ? vertColor : backVertColor;
}
I've been following a two-part tutorial on shadow mapping from OGLDev (part 1, part 2) for a couple of days now, and I've nearly finished implementing it.
My problem is that when I enable shadow maps, the illuminated region in which shadows appear shows up as a "strip", and I'm unsure whether this is intended in the tutorials or not.
Perhaps images will help explain my problem.
However shadows are functional:
My vertex shader for spotlights:
#version 330
in vec2 textureCoordinate;
in vec4 vertex;
uniform mat4 mMatrix;
uniform mat4 pMatrix;
uniform mat4 vMatrix;
uniform mat4 lightV;
uniform mat4 lightP;
uniform vec3 normal;
uniform vec3 eye;
out vec2 TexCoord0;
out vec4 LightSpacePos;
out vec3 Normal0;
out vec3 WorldPos0;
out vec3 EyePos;
void main()
{
EyePos = eye;
TexCoord0 = textureCoordinate;
WorldPos0 = (mMatrix * vertex).xyz;
Normal0 = (mMatrix * vec4(normal, 0.0)).xyz;
mat4 lightMVP = lightP * inverse(lightV) * mMatrix;
LightSpacePos = lightMVP * vertex;
mat4 worldMVP = pMatrix * vMatrix * mMatrix;
gl_Position = worldMVP * vertex;
}
and the fragment shader:
#version 330
struct BaseLight
{
vec4 color;
float intensity;
};
struct Attenuation
{
float constant;
float linear;
float exponent;
};
struct PointLight
{
BaseLight base;
Attenuation atten;
vec3 position;
float range;
};
struct SpotLight
{
struct PointLight base;
vec3 direction;
float cutoff;
};
in vec2 TexCoord0;
in vec3 WorldPos0;
in vec3 EyePos;
in vec4 LightSpacePos;
out vec4 fragColor;
uniform sampler2D sampler;
uniform sampler2D shadowMap;
in vec3 Normal0;
uniform float specularIntensity;
uniform float specularPower;
uniform SpotLight spotLight;
float CalcShadowFactor(vec4 LightSpacePos)
{
vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
vec2 UVCoords;
UVCoords.x = 0.5 * ProjCoords.x + 0.5;
UVCoords.y = 0.5 * ProjCoords.y + 0.5;
float z = 0.5 * ProjCoords.z + 0.5;
float Depth = texture(shadowMap, UVCoords).x;
if (Depth < z + 0.00001)
return 0.5;
else
return 1.0;
}
vec4 CalcLightInternal(BaseLight Light, vec3 LightDirection, vec3 Normal, float ShadowFactor)
{
vec4 AmbientColor = Light.color * Light.intensity;
float DiffuseFactor = dot(Normal, -LightDirection);
vec4 DiffuseColor = vec4(0, 0, 0, 0);
vec4 SpecularColor = vec4(0, 0, 0, 0);
if (DiffuseFactor > 0) {
DiffuseColor = Light.color * Light.intensity * DiffuseFactor;
vec3 VertexToEye = normalize(EyePos - WorldPos0);
vec3 LightReflect = normalize(reflect(LightDirection, Normal));
float SpecularFactor = dot(VertexToEye, LightReflect);
SpecularFactor = pow(SpecularFactor, specularPower);
if (SpecularFactor > 0) {
SpecularColor = Light.color * specularIntensity * SpecularFactor;
}
}
return (AmbientColor + ShadowFactor * (DiffuseColor + SpecularColor));
}
vec4 CalcPointLight(PointLight l, vec3 Normal, vec4 LightSpacePos)
{
vec3 LightDirection = WorldPos0 - l.position;
float Distance = length(LightDirection);
LightDirection = normalize(LightDirection);
float ShadowFactor = CalcShadowFactor(LightSpacePos);
vec4 Color = CalcLightInternal(l.base, LightDirection, Normal, ShadowFactor);
float Attenuation = l.atten.constant +
l.atten.linear * Distance +
l.atten.exponent * Distance * Distance;
return Color / Attenuation;
}
vec4 CalcSpotLight(SpotLight l, vec3 Normal, vec4 LightSpacePos)
{
vec3 LightToPixel = normalize(WorldPos0 - l.base.position);
float SpotFactor = dot(LightToPixel, l.direction);
if (SpotFactor > l.cutoff) {
vec4 Color = CalcPointLight(l.base, Normal, LightSpacePos);
return Color * (1.0 - (1.0 - SpotFactor) * 1.0/(1.0 - l.cutoff));
}
else {
return vec4(0,0,0,0);
}
}
void main(void)
{
fragColor = texture2D(sampler, TexCoord0.xy) * CalcSpotLight(spotLight, normalize(Normal0), LightSpacePos);
}
This is intended. Since your shadow map covers a pyramid-like region in space, your spotlight's cone can be cut off by it. This happens because anything you render that lies outside the shadow camera's view is considered unlit, so the shadow camera's view pyramid becomes visible.
To fix this you have two options:
1: Make the shadow map camera's FOV bigger, so that the shadow camera's pyramid becomes wider than your spotlight's cone.
2: Change the way you calculate shadows in out-of-bounds areas. Currently, when the sample falls outside of the shadow texture, you apply shadow there. If you change this so that anything outside the shadow texture is considered lit, this problem will disappear.
Edit:
I recommend the second option. A solution for that could be:
Insert the following line into FragmentShader::CalcShadowFactor(), just before the float Depth = ... part
if (UVCoords.x < 0.0 || UVCoords.x > 1.0 || UVCoords.y < 0.0 || UVCoords.y > 1.0) return 1.0;
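For reference, the full function from your fragment shader with that check added would look like this:
float CalcShadowFactor(vec4 LightSpacePos)
{
    vec3 ProjCoords = LightSpacePos.xyz / LightSpacePos.w;
    vec2 UVCoords;
    UVCoords.x = 0.5 * ProjCoords.x + 0.5;
    UVCoords.y = 0.5 * ProjCoords.y + 0.5;
    float z = 0.5 * ProjCoords.z + 0.5;
    // Anything that projects outside the shadow map is treated as fully lit
    if (UVCoords.x < 0.0 || UVCoords.x > 1.0 || UVCoords.y < 0.0 || UVCoords.y > 1.0)
        return 1.0;
    float Depth = texture(shadowMap, UVCoords).x;
    if (Depth < z + 0.00001)
        return 0.5;
    else
        return 1.0;
}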
Note:
There is no universal way to say which is better. In a terrain environment involving sunlight, you may want to treat out-of-bounds regions as lit. However, when you use, for example, a flashlight that is in the user's hand, you have to treat out-of-bounds regions as unlit.
I am working on the beginnings of omnidirectional shadow mapping in my engine. For now I am only producing one shadowmap as a test. I am getting an odd result when using my current shaders. Here is a screenshot which shows the problem:
I am using a near value of 0.5 and a far value of 5.0 in the projection matrix for the shadowmap render. As near as I can tell, any value with a light-space z larger than my far plane distance is being computed by my fragment shader as in shadow.
This is my fragment shader:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
vec3 clipSpaceCoords;
clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;
vec4 position = invProj * vec4(clipSpaceCoords,1.0);
position.xyz /= position.w;
vec4 lightSpace = lightProj * vec4(position.xyz,1.0);
lightSpace.xyz /= lightSpace.w;
lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;
vec3 normal = texture(normalTexture, st).xyz;
vec3 diffuse;
float shadowFactor = 1.0;
if(lightSpace.w > 0.0 && lightSpace.z > lightDepth+0.0042) {
shadowFactor = 0.2;
}
else {
float k = 0.00001;
vec3 distanceToLight = lightPosition - position.xyz;
float distanceLength = length(distanceToLight);
float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
}
vec3 gamma = vec3(1.0/2.2);
color = pow(texture(colorTexture, st).xyz*shadowFactor+diffuse, gamma);
}
How can I fix this issue (Other than increasing my far plane distance)?
One other question, as this is the first time I have attempted shadowmapping: am I doing the lighting in relation to the shadows correctly?