How to create a 2D spotlight? (OpenGL)

Is it possible to modify my current lighting algorithm, which radiates light in all directions, so that it produces a cone-shaped spotlight instead?
Here is how I am calculating the light in the fragment shader:
uniform struct Light
{
    vec4 colour;
    vec3 position;
    vec2 radius;
    float intensity;
} allLights[MAX_LIGHTS];
vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // width / height
    light.radius.x /= aspectRatio; // compensate for the aspect ratio

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);
    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of the radius: black

    return light.intensity * (1.0 - length(lightDir / light.radius)) * light.colour;
}

A solution would be to pass in uniform variables for spotDir and spotAngle (in radians) and modify the shader as follows:
vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // width / height
    light.radius.x /= aspectRatio; // compensate for the aspect ratio

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);
    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of the radius: black
    if (dot(normalize(lightDir), normalize(light.spotDir.xy)) < cos(light.spotAngle / 2.0))
        return vec4(0, 0, 0, 1); // outside of the spotlight cone: black

    return light.intensity * (1.0 - length(lightDir / light.radius)) * light.colour;
}
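As a side note, the hard cutoff at the cone border can be softened with smoothstep. The following is only a minimal sketch of that variant, not part of the answer above: spotBlur is an assumed extra parameter (a small positive value that widens the transition band), and the aspect-ratio correction is omitted for brevity.

// Sketch: soft-edged spotlight. Assumes an extra member float spotBlur > 0.
vec4 calculateSpotLightSoft(Light light)
{
    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float radial = length(lightDir / light.radius);
    if (radial >= 1.0)
        return vec4(0.0, 0.0, 0.0, 1.0); // outside of the radius: black

    float cosAngle  = dot(normalize(lightDir), normalize(light.spotDir.xy));
    float cosCutoff = cos(light.spotAngle / 2.0);
    // 0 outside the cone, 1 well inside it, smooth in between.
    float cone = smoothstep(cosCutoff, cosCutoff + light.spotBlur, cosAngle);

    return light.intensity * (1.0 - radial) * cone * light.colour;
}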

Related

Drawing more than one of my glowing lights makes them share the light; can't split them

I am trying to create two independent glowing lights, but when I add the second one, the light stretches between the two.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = (fragCoord - .5*iResolution.xy)/iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(uv.x, uv.y+0.5);
    vec2 glowPos2 = vec2(uv.x+0.5, uv.y+0.0);
    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos2);
    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}
uv is the position of the fragment currently being processed, so the position of a light source must not depend on uv. For example:
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
The distance between two points is the length of the vector from one point to the other. That vector is computed by subtracting one point from the other, not by adding them:
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
The result for glowCol1 and glowCol2 can become negative. Thus, one light source would negatively affect the other. You must clamp the result in the range [0, 1]:
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
Complete and working shader:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = uv * 2.0 - 1.0;
    uv.x *= iResolution.x / iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(0.5, 0.5);
    vec2 glowPos2 = vec2(-0.5, -0.5);
    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
    glowCol1 = clamp(glowCol1, 0.0, 1.0);
    glowCol2 = clamp(glowCol2, 0.0, 1.0);
    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}

Compute Normals After Vertex Deformation?

I am writing a vertex and a fragment shader to distort the surface of some water and then compute Blinn-Phong lighting on that surface. I can successfully compute the deformed positions with a simple noise function, but how can I find the distorted normals? Since the deformation isn't a linear transformation, I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
    axis = normalize(axis);
    float s = sin(angle);
    float c = cos(angle);
    float oc = 1.0 - c;
    return mat4(oc * axis.x * axis.x + c,          oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
                oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c,          oc * axis.y * axis.z - axis.x * s, 0.0,
                oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c,          0.0,
                0.0,                               0.0,                               0.0,                               1.0);
}

vec4 rotateLightVec(float deg, vec4 LV) {
    mat4 R = rotationMatrix(vec3(0,0,1), deg);
    return R * LV;
}

float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}

vec3 random2( vec3 p ) {
    return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)),
                            dot(p, vec3(269.5, 183.3, 657.3)),
                            dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
    fs_Col = vs_Col;
    fs_UVs = vs_UV;
    fs_Anim = vs_Anim;
    fs_T2O = vs_T2O;

    mat3 invTranspose = mat3(u_ModelInvTr);
    fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);

    vec4 modelposition = u_Model * vs_Pos;

    if (vs_Anim != 0) { // if we want to animate this surface
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;

        if (water) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        } else if (lava) {
            // define an oscillating time so that model can transition back and forth
            float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5)/25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        }
    }

    fs_dimVal = random1(modelposition.xyz/100.f);
    fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
    fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
    fs_Pos = modelposition;

    gl_Position = u_ViewProj * modelposition; // gl_Position is a built-in variable of OpenGL which is
                                              // used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}

float random1b(vec3 p) {
    return fract(sin(dot(p, vec3(169.1, 355.7, 195.999))) * 95751.5453);
}

float mySmoothStep(float a, float b, float t) {
    t = smoothstep(0, 1, t);
    return mix(a, b, t);
}

float cubicTriMix(vec3 p) {
    vec3 pFract = fract(p);
    float llb = random1(floor(p) + vec3(0,0,0));
    float lrb = random1(floor(p) + vec3(1,0,0));
    float ulb = random1(floor(p) + vec3(0,1,0));
    float urb = random1(floor(p) + vec3(1,1,0));

    float llf = random1(floor(p) + vec3(0,0,1));
    float lrf = random1(floor(p) + vec3(1,0,1));
    float ulf = random1(floor(p) + vec3(0,1,1));
    float urf = random1(floor(p) + vec3(1,1,1));

    float mixLoBack  = mySmoothStep(llb, lrb, pFract.x);
    float mixHiBack  = mySmoothStep(ulb, urb, pFract.x);
    float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
    float mixHiFront = mySmoothStep(ulf, urf, pFract.x);

    float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
    float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);

    return mySmoothStep(mixLo, mixHi, pFract.y);
}

float fbm(vec3 p) {
    float amp = 0.5;
    float freq = 4.0;
    float sum = 0.0;
    for(int i = 0; i < 8; i++) {
        sum += cubicTriMix(p * freq) * amp;
        amp *= 0.5;
        freq *= 2.0;
    }
    return sum;
}
void main()
{
    vec4 diffuseColor = texture(textureSampler, fs_UVs);
    bool apply_lambert = true;
    float specularIntensity = 0;

    if (fs_Anim != 0) {
        // check region in texture to decide which animatable type is drawn
        bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;

        if (lava) {
            // slowly gyrate texture and lighten and darken with random dimVal from vert shader
            vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
                                  fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
            diffuseColor = texture(textureSampler, movingUVs);
            vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
            vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
            diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
            apply_lambert = false;
        } else if (water) {
            // blend between 3 different points in texture to create a wavy subtle change over time
            vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
            diffuseColor = texture(textureSampler, fs_UVs);
            vec4 altColor = texture(textureSampler, offsetUVs);

            altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
            altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
            altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);

            diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
            offsetUVs -= 0.25f/16.f;
            vec4 newColor = texture(textureSampler, offsetUVs);
            diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
            diffuseColor.a = 0.7;

            // ----------------------------------------------------
            // Blinn-Phong Shading
            // ----------------------------------------------------
            vec4 lightDir = normalize(fs_LightVec - fs_Pos);
            vec4 viewDir = normalize(fs_CamPos - fs_Pos);
            vec4 halfVec = normalize(lightDir + viewDir);
            float shininess = 400.f;
            specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
        }
    }

    // Calculate the diffuse term for Lambert shading
    float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
    // Avoid negative lighting values
    diffuseTerm = clamp(diffuseTerm, 0, 1);

    float ambientTerm = 0.3;
    float lightIntensity = diffuseTerm + ambientTerm; // Add a small float value to the color multiplier
                                                      // to simulate ambient lighting. This ensures that faces that are not
                                                      // lit by our point light are not completely black.
    vec3 col = diffuseColor.rgb;
    // Compute final shaded color
    if (apply_lambert) {
        col = col * lightIntensity + col * specularIntensity;
    }

    // Check the rare, special case where we draw a face between two different transparent blocks as opaque
    if (fs_T2O != 0) {
        out_Col = vec4(col, 1.f);
    } else {
        out_Col = vec4(col, diffuseColor.a);
    }

    // distance fog!
    vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
    float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
    float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
    out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply because of the wrong normals. I think this can be done with some sort of tangent and cross-product solution, but how can I find the tangent of the surface when I only know the vertex position?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
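For reference, the tangent/cross-product idea mentioned above usually works by re-evaluating the displacement at two nearby points and crossing the resulting edge vectors. Below is a minimal GLSL sketch under the assumption that the noise-based offset can be factored into a reusable displace(p) function; displace and deformedNormal are hypothetical helpers, not part of the shaders above.

// Hypothetical: the same noise-based offset applied to modelposition,
// factored into a function so it can be evaluated at neighboring points.
vec3 displace(vec3 p);

// Approximate the normal of the deformed surface at p.
// n is the undeformed normal; eps is a small step along the surface.
vec3 deformedNormal(vec3 p, vec3 n, float eps)
{
    // Build two tangent directions perpendicular to the original normal.
    vec3 t = normalize(abs(n.y) < 0.99 ? cross(n, vec3(0.0, 1.0, 0.0))
                                       : cross(n, vec3(1.0, 0.0, 0.0)));
    vec3 b = cross(n, t);

    // Displace the point and two neighbors, then cross the edge vectors.
    // Depending on the chosen tangent handedness, the result may need negating.
    vec3 p0 = displace(p);
    vec3 p1 = displace(p + eps * t);
    vec3 p2 = displace(p + eps * b);
    return normalize(cross(p1 - p0, p2 - p0));
}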

GLES3.2 Kawase Shader giving horrible artifacts

I am currently trying to implement a Kawase blur shader in GLES3.2.
I found the appropriate fragment shaders on Shadertoy and implemented them as follows:
Down:
#version 100
precision mediump float;
varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;
void main() {
    vec2 uv = v_texcoord * 2.0;

    vec4 sum = texture2D(tex, uv) * 4.0;
    sum += texture2D(tex, uv - halfpixel.xy * radius);
    sum += texture2D(tex, uv + halfpixel.xy * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius);
    sum += texture2D(tex, uv - vec2(halfpixel.x, -halfpixel.y) * radius);

    gl_FragColor = sum / 8.0;
}
up:
#version 100
precision mediump float;
varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;
void main() {
    vec2 uv = v_texcoord / 2.0;

    vec4 sum = texture2D(tex, uv + vec2(-halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, -halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, -halfpixel.y) * radius) * 2.0;

    gl_FragColor = sum / 12.0;
}
The thought process is simple: the texture I want to blur is in the Primary FB; it is first rendered with one down pass into the Mirror FB, then blurred down further and back up within the Mirror FB, and finally rendered later.
The down shader works great, and produces the expected result of a small image in the top-left corner, with streaks throughout the rest of the framebuffer.
[Screenshot: output of the down shader]
However, applying even one pass of the up shader produces horrible artifacts: blocks of pixels blink everywhere, and the screen is divided vertically into four sections, in each of which the original image gets bigger, blurrier and more glitchy.
[Screenshot: output of the up shader]
This result is with two passes of the down shader and one pass of the up shader.
The code:
const float fullVerts[] = {
    1, 0, // top right
    0, 0, // top left
    1, 1, // bottom right
    0, 1, // bottom left
};
// ...
{
    auto drawPass = [&](CShader* pShader) {
        glActiveTexture(GL_TEXTURE0);
        if (pShader == &m_shBLUR2)
            glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);
        glTexParameteri(PMIRRORFB->m_cTex.m_iTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

        glUseProgram(pShader->program);

        // prep two shaders
        glUniformMatrix3fv(pShader->proj, 1, GL_FALSE, glMatrix);
        glUniform1f(glGetUniformLocation(pShader->program, "radius"), BLURSIZE * (a / 255.f)); // this makes the blursize change with a
        if (pShader == &m_shBLUR1)
            glUniform2f(glGetUniformLocation(m_shBLUR1.program, "halfpixel"), 0.5f / (m_RenderData.pMonitor->vecSize.x / 2.f), 0.5f / (m_RenderData.pMonitor->vecSize.y / 2.f));
        else
            glUniform2f(glGetUniformLocation(m_shBLUR2.program, "halfpixel"), 0.5f / (m_RenderData.pMonitor->vecSize.x * 2.f), 0.5f / (m_RenderData.pMonitor->vecSize.y * 2.f));
        glUniform1i(pShader->tex, 0);

        glVertexAttribPointer(pShader->posAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);
        glVertexAttribPointer(pShader->texAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);

        glEnableVertexAttribArray(pShader->posAttrib);
        glEnableVertexAttribArray(pShader->texAttrib);

        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

        glDisableVertexAttribArray(pShader->posAttrib);
        glDisableVertexAttribArray(pShader->texAttrib);
    };

    // draw the things.
    // first draw is prim -> mirr
    PMIRRORFB->bind();
    clear(CColor(0,0,0,0));
    glBindTexture(m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTarget, m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTexID);
    drawPass(&m_shBLUR1);

    // now draw from mirror->mirror
    glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);
    for (int i = 1; i < BLURPASSES; ++i) {
        drawPass(&m_shBLUR1); // down
    }
    for (int i = BLURPASSES - 1; i >= 0; --i) {
        drawPass(&m_shBLUR2); // up
    }
}
What's causing the artifacts?

How do I detect whether a point is inside a circle-sector light? (FOV detection)

I am trying to implement a spotlight (cone-shaped) FOV detection for a game. I currently calculate the lighting using the following function inside my fragment shader:
vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // width / height
    light.radius.x /= aspectRatio; // compensate for the aspect ratio

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);
    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of the radius: black
    if (dot(normalize(lightDir), normalize(light.spotDir.xy)) < cos(light.spotAngle / 2.0))
        return vec4(0, 0, 0, 1); // outside of the spotlight cone: black

    return light.intensity * (1.0 - length(lightDir / light.radius)) * light.colour;
}
However, I would also like to use my light as a detection tool that represents the enemy's line of sight. Is there a way for me to do that? My LightComponent:
struct LightComponent
{
    Vec4 colour;
    Vec3 pos;
    Vec2 radius;
    float intensity;
    bool lightEnabled;

    //spotlight stuff
    float spotAngle;
    Vec3 spotDir;
    float DirectionAngle;
    bool takeVector = true;

    static Vec4 ambientLight;

    LightComponent();
    void ImGuiDraw();
};
Range here is the radius.
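For the detection side, the same cone test the shader performs can be factored into a standalone point-in-sector check and evaluated against a target's position outside the rendering code. The following is a minimal GLSL sketch (the same math ports directly to the Vec2 types of the component above); the function name and parameters are illustrative, not part of the original code, and it assumes the point does not coincide with the origin.

// Returns true if `point` lies inside the sector (FOV cone) defined by
// `origin`, the viewing direction `dir`, a full opening angle `angle`
// (radians) and a maximum distance `radius`. Mirrors the shader's cone test.
bool pointInSector(vec2 point, vec2 origin, vec2 dir, float angle, float radius)
{
    vec2 toPoint = point - origin;
    if (length(toPoint) >= radius)
        return false; // too far away

    // Inside the cone if the angle between `dir` and `toPoint`
    // is at most half of the full opening angle.
    return dot(normalize(toPoint), normalize(dir)) >= cos(angle / 2.0);
}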

How do I convert my point light into an oval/ellipse?

I am looking to turn my current circular light into an ellipse by having a vec2 radius which can have different x and y values. Is there any way to do this based on my current code in the fragment shader?
uniform struct Light
{
    vec4 colour;
    vec3 position;
    vec2 radius;
    float intensity;
} allLights[MAX_LIGHTS];
vec4 calculateLight(Light light)
{
    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);
    if (lightDistance >= light.radius.x)
    {
        return vec4(0, 0, 0, 1); // outside of the radius: black
    }
    return light.intensity * (1 - lightDistance / light.radius.x) * light.colour;
}
Divide the vector to the light source by the semi-axes of the ellipse and check whether the length of the resulting vector is greater than or equal to 1.0:
if (length(lightDir / light.radius) >= 1.0)
    return vec4(0, 0, 0, 1); // outside of the ellipse: black

return light.intensity * (1 - length(lightDir / light.radius)) * light.colour;
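Putting the pieces together, the whole function might look like the sketch below. This is just the snippets above assembled into one function; the aspect-ratio handling shown in the other questions is left out.

vec4 calculateLight(Light light)
{
    // Vector from the light to the fragment, divided component-wise by the
    // semi-axes of the ellipse. Its length is less than 1.0 inside the ellipse.
    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float d = length(lightDir / light.radius);

    if (d >= 1.0)
        return vec4(0, 0, 0, 1); // outside of the ellipse: black

    // Linear falloff from the center (d == 0) to the ellipse border (d == 1).
    return light.intensity * (1.0 - d) * light.colour;
}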