How do I convert my point light into an oval/ellipse? - c++

I am looking to turn my current circular light into an ellipse by having a vec2 radius which can have different x and y values. Is there any way to do this based on my current code in the fragment shader?
uniform struct Light
{
    vec4 colour;
    vec3 position;
    vec2 radius;
    float intensity;
} allLights[MAX_LIGHTS];
vec4 calculateLight(Light light)
{
    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);
    if (lightDistance >= light.radius.x)
    {
        return vec4(0, 0, 0, 1); // outside of radius, make it black
    }
    return light.intensity * (1 - lightDistance / light.radius.x) * light.colour;
}

Divide the vector between the light's position and the fragment componentwise by the semi-axes of the ellipse, and check whether the length of the result is greater than or equal to 1.0:
if (length(lightDir / light.radius) >= 1.0)
    return vec4(0, 0, 0, 1); // outside of radius, make it black
return light.intensity * (1 - length(lightDir / light.radius)) * light.colour;
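Putting it together, the whole function could look like this (a minimal sketch based on the code above; fragmentPosition and the Light struct are as declared in the question):

vec4 calculateLight(Light light)
{
    vec2 lightDir = fragmentPosition.xy - light.position.xy;

    // Scale the offset by the semi-axes; the ellipse boundary maps to length 1.0.
    vec2 scaledDir = lightDir / light.radius;
    float ellipseDistance = length(scaledDir);

    if (ellipseDistance >= 1.0)
    {
        return vec4(0, 0, 0, 1); // outside of the ellipse, make it black
    }

    // Linear falloff from the centre (1.0) to the ellipse boundary (0.0).
    return light.intensity * (1.0 - ellipseDistance) * light.colour;
}

The componentwise division maps the ellipse onto the unit circle, so the same distance test and falloff work unchanged.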

Related

GLES3.2 Kawase Shader giving horrible artifacts

I am currently trying to implement a Kawase Blur shader in GLES3.2.
I found the appropriate fragment shaders on shadertoy and implemented them as follows:
Down:
#version 100
precision mediump float;

varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;

void main() {
    vec2 uv = v_texcoord * 2.0;
    vec4 sum = texture2D(tex, uv) * 4.0;
    sum += texture2D(tex, uv - halfpixel.xy * radius);
    sum += texture2D(tex, uv + halfpixel.xy * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius);
    sum += texture2D(tex, uv - vec2(halfpixel.x, -halfpixel.y) * radius);
    gl_FragColor = sum / 8.0;
}
Up:
#version 100
precision mediump float;

varying mediump vec2 v_texcoord; // is in 0-1
uniform sampler2D tex;
uniform float radius;
uniform vec2 halfpixel;

void main() {
    vec2 uv = v_texcoord / 2.0;
    vec4 sum = texture2D(tex, uv + vec2(-halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(halfpixel.x * 2.0, 0.0) * radius);
    sum += texture2D(tex, uv + vec2(halfpixel.x, -halfpixel.y) * radius) * 2.0;
    sum += texture2D(tex, uv + vec2(0.0, -halfpixel.y * 2.0) * radius);
    sum += texture2D(tex, uv + vec2(-halfpixel.x, -halfpixel.y) * radius) * 2.0;
    gl_FragColor = sum / 12.0;
}
The thought process is simple: the texture I want to blur is in the Primary FB; it is first downsampled once into the Mirror FB, then blurred further down and back up within the Mirror FB, and finally rendered later.
The down shader works great, and produces the expected result of a small image in the top-left corner, with streaks throughout the rest of the framebuffer.
[Image of shader Down]
However, when trying to apply even one pass of the up shader, it produces horrible artifacts: blocks of pixels blink everywhere, and the screen is divided vertically into four sections, in each of which the original image gets bigger, blurrier, and more glitchy.
[Image of shader Up]
This result is with two passes of the down shader and one pass of the up shader.
The code:
const float fullVerts[] = {
    1, 0, // top right
    0, 0, // top left
    1, 1, // bottom right
    0, 1, // bottom left
};

// ...

{
    auto drawPass = [&](CShader* pShader) {
        glActiveTexture(GL_TEXTURE0);

        if (pShader == &m_shBLUR2)
            glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);

        glTexParameteri(PMIRRORFB->m_cTex.m_iTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

        glUseProgram(pShader->program);

        // prep two shaders
        glUniformMatrix3fv(pShader->proj, 1, GL_FALSE, glMatrix);
        glUniform1f(glGetUniformLocation(pShader->program, "radius"), BLURSIZE * (a / 255.f)); // this makes the blur size change with a

        if (pShader == &m_shBLUR1)
            glUniform2f(glGetUniformLocation(m_shBLUR1.program, "halfpixel"),
                        0.5f / (m_RenderData.pMonitor->vecSize.x / 2.f),
                        0.5f / (m_RenderData.pMonitor->vecSize.y / 2.f));
        else
            glUniform2f(glGetUniformLocation(m_shBLUR2.program, "halfpixel"),
                        0.5f / (m_RenderData.pMonitor->vecSize.x * 2.f),
                        0.5f / (m_RenderData.pMonitor->vecSize.y * 2.f));

        glUniform1i(pShader->tex, 0);

        glVertexAttribPointer(pShader->posAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);
        glVertexAttribPointer(pShader->texAttrib, 2, GL_FLOAT, GL_FALSE, 0, fullVerts);

        glEnableVertexAttribArray(pShader->posAttrib);
        glEnableVertexAttribArray(pShader->texAttrib);

        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

        glDisableVertexAttribArray(pShader->posAttrib);
        glDisableVertexAttribArray(pShader->texAttrib);
    };

    // draw the things.
    // first draw is prim -> mirr
    PMIRRORFB->bind();
    clear(CColor(0, 0, 0, 0));
    glBindTexture(m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTarget,
                  m_mMonitorRenderResources[m_RenderData.pMonitor].primaryFB.m_cTex.m_iTexID);
    drawPass(&m_shBLUR1);

    // now draw from mirror -> mirror
    glBindTexture(PMIRRORFB->m_cTex.m_iTarget, PMIRRORFB->m_cTex.m_iTexID);
    for (int i = 1; i < BLURPASSES; ++i) {
        drawPass(&m_shBLUR1); // down
    }
    for (int i = BLURPASSES - 1; i >= 0; --i) {
        drawPass(&m_shBLUR2); // up
    }
}
What's causing the artifacts?

How do I detect whether a point exists in circle sector light? (FOV detection)

I am trying to implement a spotlight (cone-shaped) detection FOV for a game. I am currently calculating the lighting using the following function inside my fragment shader.
vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // amount of width / height
    if (aspectRatio > 1.0)
        light.radius.x /= aspectRatio;
    else
        light.radius.x /= aspectRatio;

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);

    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of radius, make it black

    if (dot(normalize(lightDir), normalize(light.spotDir.xy)) < cos(light.spotAngle / 2))
        return vec4(0, 0, 0, 1); // outside of the cone, make it black

    return light.intensity * (1 - length(lightDir / light.radius)) * light.colour;
}
However, I would also like to use my light as a detection tool representing the enemy's line of sight. Is there a way for me to do that? My LightComponent:
struct LightComponent
{
    Vec4 colour;
    Vec3 pos;
    Vec2 radius;
    float intensity;
    bool lightEnabled;

    //spotlight stuff
    float spotAngle;
    Vec3 spotDir;
    float DirectionAngle;
    bool takeVector = true;

    static Vec4 ambientLight;

    LightComponent();
    void ImGuiDraw();
};
Range here is the radius.
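Yes: the visibility test the shader performs is pure math on the light's fields, so the same predicate can be evaluated outside the shader against an enemy's or player's position. A minimal GLSL-style sketch (the function name and the idea of factoring the test out are mine, not from the original post; the same few lines port directly to the C++ side using the LightComponent fields):

// True if 'point' lies inside the elliptical, cone-restricted light.
// Mirrors the two tests in calculateLight above.
bool isPointInSpotlight(vec2 point, Light light)
{
    vec2 toPoint = point - light.position.xy;

    // Range test: inside the ellipse when the radius-scaled length is below 1.0.
    if (length(toPoint / light.radius) >= 1.0)
        return false;

    // Angle test: within half the cone angle of the spot direction.
    return dot(normalize(toPoint), normalize(light.spotDir.xy)) >= cos(light.spotAngle / 2.0);
}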

How to create a 2D spotlight?

Is it possible to modify my current lighting algorithm, which radiates light in all directions, into something like a spotlight?
Here is how I am calculating my light in the fragment shader:
uniform struct Light
{
    vec4 colour;
    vec3 position;
    vec2 radius;
    float intensity;
} allLights[MAX_LIGHTS];

vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // amount of width / height
    if (aspectRatio > 1.0)
        light.radius.x /= aspectRatio;
    else
        light.radius.x /= aspectRatio;

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);

    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of radius, make it black

    return light.intensity * (1 - length(lightDir / light.radius)) * light.colour;
}
A solution would be to pass in uniform variables for spotDir and spotAngle (in radians), and to modify the shader to:
vec4 calculateLight(Light light)
{
    float aspectRatio = resolution.x / resolution.y; // amount of width / height
    if (aspectRatio > 1.0)
        light.radius.x /= aspectRatio;
    else
        light.radius.x /= aspectRatio;

    vec2 lightDir = fragmentPosition.xy - light.position.xy;
    float lightDistance = length(lightDir);

    if (length(lightDir / light.radius) >= 1.0)
        return vec4(0, 0, 0, 1); // outside of radius, make it black

    if (dot(normalize(lightDir), normalize(light.spotDir.xy)) < cos(light.spotAngle / 2))
        return vec4(0, 0, 0, 1); // outside of the cone, make it black

    return light.intensity * (1 - length(lightDir / light.radius)) * light.colour;
}
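As a possible refinement (my addition, not part of the answer above), the hard edge of the cone can be softened by replacing the second early-out with a smoothstep factor; spotBlur here is a hypothetical extra uniform controlling the width of the soft band:

// Instead of: if (dot(...) < cos(light.spotAngle/2)) return black;
float cosToFragment = dot(normalize(lightDir), normalize(light.spotDir.xy));
float cosCutoff = cos(light.spotAngle / 2.0);
float spotFactor = smoothstep(cosCutoff, cosCutoff + spotBlur, cosToFragment); // 0 at the edge, 1 well inside
return light.intensity * spotFactor * (1 - length(lightDir / light.radius)) * light.colour;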

OpenGL height based fog

I am reading Inigo Quilez's fog article and I just can't understand a few things when he talks about fog based on height.
He has a shader function for height-based fog, but I have trouble understanding how to make it work.
He uses this function to apply fog:
vec3 applyFog(in vec3 rgb,       // original color of the pixel
              in float distance) // camera to point distance
{
    float fogAmount = 1.0 - exp(-distance * b);
    vec3 fogColor = vec3(0.5, 0.6, 0.7);
    return mix(rgb, fogColor, fogAmount);
}
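For reference, this first function is just Beer-Lambert transmittance with a constant extinction coefficient $b$: the fraction of light surviving a path of length $d$ is $T(d) = e^{-b\,d}$, so the fraction replaced by fog is

$\text{fogAmount} = 1 - e^{-b\,d}.$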
Then he has another one that calculates fog based on height:
vec3 applyFog(in vec3 rgb,       // original color of the pixel
              in float distance, // camera to point distance
              in vec3 rayOri,    // camera position
              in vec3 rayDir)    // camera to point vector
{
    float fogAmount = c * exp(-rayOri.y * b) * (1.0 - exp(-distance * rayDir.y * b)) / rayDir.y;
    vec3 fogColor = vec3(0.5, 0.6, 0.7);
    return mix(rgb, fogColor, fogAmount);
}
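The height-based version follows from integrating an exponentially decaying fog density $a\,e^{-b y}$ along the view ray $p(t) = \text{rayOri} + t\,\text{rayDir}$ for $t \in [0, D]$, where $D$ is the camera-to-point distance and $o_y$, $r_y$ are the $y$ components of rayOri and rayDir:

$\int_0^{D} a\,e^{-b (o_y + t\,r_y)}\,dt = \frac{a}{b}\, e^{-b\,o_y}\, \frac{1 - e^{-b\,D\,r_y}}{r_y},$

which is exactly the fogAmount expression in the code, with the constant $c$ absorbing the factor $a/b$.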
I can understand how the shader works, but I don't know how to use it with mine. For now I am just trying to learn how the whole fog world in GLSL works, but it looks like there is a lot to learn. :D
#version 400 core

in vec3 Position;
in vec3 Normal;
//in vec4 positionToCamera;
//in float visibility;

uniform vec3 color;
uniform vec3 CameraPosition;
uniform float near;
uniform float far;
uniform vec3 fogColor;
uniform bool enableBlending;
uniform float c;
uniform float b;
uniform int fogType;

vec3 applyFogDepth(vec3 rgb,       // original color of the pixel
                   float distance, // camera to point distance
                   vec3 rayOri,    // camera position
                   vec3 rayDir)    // camera to point vector
{
    //float cc = 1.0;
    //float bb = 1.1;
    float fogAmount = c * exp(-rayOri.y * b) * (1.0 - exp(-distance * rayDir.y * b)) / rayDir.y;
    return mix(rgb, fogColor, fogAmount);
}

// Fog with Sun factor
vec3 applyFogSun(vec3 rgb,       // original color of the pixel
                 float distance, // camera to point distance
                 vec3 rayDir,    // camera to point vector
                 vec3 sunDir)    // sun light direction
{
    float fogAmount = 1.0 - exp(-distance * b);
    float sunAmount = max(dot(rayDir, sunDir), 0.0);
    vec3 fog = mix(fogColor,            // bluish
                   vec3(1.0, 0.9, 0.7), // yellowish
                   pow(sunAmount, 8.0));
    return mix(rgb, fog, fogAmount);
}

// Exponential fog
vec3 applyFog(vec3 rgb,       // original color of the pixel
              float distance) // camera to point distance
{
    float fogAmount = 1.0 - exp(-distance * b);
    return mix(rgb, fogColor, fogAmount);
    //return rgb * (exp(-distance * b)) + fogColor * (1.0 - exp(-distance * b));
}

float LinearizeDepth(float depth)
{
    float z = depth * 2.0 - 1.0; // back to NDC
    return (2.0 * near * far) / (far + near - z * (far - near));
}

out vec4 gl_FragColor;

void main(void) {
    vec3 fog = vec3(0.0);
    //-5.0f, 900.0f, 400.0f camera coord
    vec3 lightPosition = vec3(0.0, 1200.0, -6000.0);
    vec3 lightDirection = normalize(lightPosition - Position);
    vec3 direction = normalize(CameraPosition - Position);
    float depth = LinearizeDepth(gl_FragCoord.z) / far;

    switch (fogType) {
    case 0:
        fog = applyFog(color, depth);
        break;
    case 1:
        fog = applyFogSun(color, depth, direction, lightDirection);
        break;
    case 2:
        //fog = mix(applyFog(color, depth), applyFogDepth(color, depth, CameraPosition, CameraPosition - Position), 0.5);
        fog = applyFogDepth(color, depth, CameraPosition, CameraPosition - Position);
        break;
    }

    //calculate light
    float diff = max(dot(Normal, lightDirection), 0.0);
    vec3 diffuse = diff * color;

    float fogAmount = 1.0 - exp(-depth * b);
    vec3 finalColor = vec3(0.0);

    if (enableBlending)
        finalColor = mix(diffuse, fog, fogAmount);
    else
        finalColor = fog;

    gl_FragColor = vec4(finalColor, 1.0);
    //gl_FragColor = vec4(vec3(LinearizeDepth(visibility) / far), 1.0f);
}
The first image shows the result of the first fog function; the second image shows the second one.

Bump Mapping in OpenGL and GLSL

I'm trying to implement bump mapping on a cube using OpenGL and GLSL. However, when I rotate my cube around, only the left-facing and right-facing squares appear (that is, the faces in the negative-x and positive-x directions); the other four faces of the cube (top, bottom, front, back) are black. Here is an example:
My mesh class consists of an indexList and vertexList (the Vertex class contains x, y, z, etc). I'm using this simple cube.ply model, which contains s,t texture coordinates. Borrowing from this example, I calculate the tangent vectors as follows:
void Mesh::computeTangents() {
    for (size_t i = 0; i < m_indexList.size(); i += 3) {
        Vertex v1 = m_vertexList[m_indexList[i]];
        Vertex v2 = m_vertexList[m_indexList[i+1]];
        Vertex v3 = m_vertexList[m_indexList[i+2]];

        glm::vec3 pos1 = glm::vec3(v1.getX(), v1.getY(), v1.getZ());
        glm::vec3 pos2 = glm::vec3(v2.getX(), v2.getY(), v2.getZ());
        glm::vec3 pos3 = glm::vec3(v3.getX(), v3.getY(), v3.getZ());

        glm::vec2 tex1 = glm::vec2(v1.getS(), v1.getT());
        glm::vec2 tex2 = glm::vec2(v2.getS(), v2.getT());
        glm::vec2 tex3 = glm::vec2(v3.getS(), v3.getT());

        glm::vec3 edge1 = glm::normalize(pos2 - pos1);
        glm::vec3 edge2 = glm::normalize(pos3 - pos1);

        glm::vec2 texEdge1 = glm::normalize(tex2 - tex1);
        glm::vec2 texEdge2 = glm::normalize(tex3 - tex1);

        float det = (texEdge1.x * texEdge2.y) - (texEdge1.y * texEdge2.x);

        glm::vec3 tangent;
        if (fabsf(det) < 1e-6f) {
            tangent.x = 1.0;
            tangent.y = 0.0;
            tangent.z = 0.0;
        }
        else {
            det = 1.0 / det;
            tangent.x = (texEdge2.y * edge1.x - texEdge1.y * edge2.x) * det;
            tangent.y = (texEdge2.y * edge1.y - texEdge1.y * edge2.y) * det;
            tangent.z = (texEdge2.y * edge1.z - texEdge1.y * edge2.z) * det;
            glm::normalize(tangent);
        }

        m_vertexList[m_indexList[i]].setTanX(tangent.x);
        m_vertexList[m_indexList[i]].setTanY(tangent.y);
        m_vertexList[m_indexList[i]].setTanZ(tangent.z);
        m_vertexList[m_indexList[i+1]].setTanX(tangent.x);
        m_vertexList[m_indexList[i+1]].setTanY(tangent.y);
        m_vertexList[m_indexList[i+1]].setTanZ(tangent.z);
        m_vertexList[m_indexList[i+2]].setTanX(tangent.x);
        m_vertexList[m_indexList[i+2]].setTanY(tangent.y);
        m_vertexList[m_indexList[i+2]].setTanZ(tangent.z);
    }
}
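For reference, the loop above solves the standard tangent-space equations. Writing each triangle's position edges in terms of its texture-space edges,

$E_1 = \Delta u_1\,T + \Delta v_1\,B, \qquad E_2 = \Delta u_2\,T + \Delta v_2\,B,$

and solving the 2x2 system for the tangent gives

$T = \frac{\Delta v_2\,E_1 - \Delta v_1\,E_2}{\Delta u_1\,\Delta v_2 - \Delta u_2\,\Delta v_1},$

which is the texEdge2.y * edge1 - texEdge1.y * edge2 expression in the code, scaled by the inverse determinant.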
If I output the values of the tangent vector for each triangle, I get these values:
1, 0, 0
1, 0, 0
0, 0, -1
0, 0, -1
0, 0, 1
0, 0, 1
-1, 0, 0
-1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
If these are correct, then the problem is likely in the shader. My shader is as follows (mostly taken from a book):
Vert:
attribute vec4 vertexPosition;
attribute vec3 vertexNormal;
attribute vec2 vertexTexture;
attribute vec3 vertexTangent;

varying vec2 texCoord;
varying vec3 viewDirection;
varying vec3 lightDirection;

uniform vec3 diffuseColor;
uniform float shininess;
uniform vec4 lightPosition;
uniform mat4 modelViewMatrix;
uniform mat4 normalMatrix;
uniform mat4 MVP; // modelview projection

void main() {
    vec4 eyePosition = modelViewMatrix * vertexPosition;

    vec3 N = normalize(vec3(normalMatrix * vec4(vertexNormal, 1.0)));
    vec3 T = normalize(vec3(normalMatrix * vec4(vertexTangent, 1.0)));
    vec3 B = normalize(cross(N, T));

    vec3 v;
    v.x = dot(lightPosition.xyz, T);
    v.y = dot(lightPosition.xyz, B);
    v.z = dot(lightPosition.xyz, N);
    lightDirection = normalize(v);

    v.x = dot(eyePosition.xyz, T);
    v.y = dot(eyePosition.xyz, B);
    v.z = dot(eyePosition.xyz, N);
    viewDirection = normalize(v);

    texCoord = vertexTexture;
    gl_Position = MVP * vertexPosition;
}
Frag:
varying vec2 texCoord;
varying vec3 viewDirection;
varying vec3 lightDirection;

uniform vec3 diffuseColor;
uniform float shininess;

void main() {
    float bumpDensity = 16.0;
    float bumpSize = 0.15;

    vec2 c = bumpDensity * texCoord;
    vec2 p = fract(c) - vec2(0.5);

    float d, f;
    d = dot(p, p);
    f = 1.0 / sqrt(d + 1.0);

    if (d >= bumpSize) {
        p = vec2(0.0);
        f = 1.0;
    }

    vec3 normalDelta = vec3(p.x, p.y, 1.0) * f;
    vec3 litColor = diffuseColor * max(dot(normalDelta, lightDirection), 0.0);
    vec3 reflectDir = reflect(lightDirection, normalDelta);

    float spec = max(dot(viewDirection, reflectDir), 0.0);
    spec *= shininess;
    litColor = min(litColor + spec, vec3(1.0));

    gl_FragColor = vec4(litColor, 1.0);
}
Edit: Changed background to more clearly see the black faces. Also, I incorrectly noted which faces were actually appearing before.
Edit 2: I'm finding that the value of max(dot(normalDelta, lightDirection), 0.0) in the fragment shader returns 0 for those faces. However, I still don't know why.
Edit 3: Well, the problem turned out to be that I was passing the wrong offset for the tangent vectors in my Vertex class. That is, I had 10 in this line instead of 9:
glVertexAttribPointer(v3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)(sizeof( float ) * 9));
Okay, so the problem is that any face with a normal on the xz-plane isn't rendering properly, i.e. the ones that are rendering properly are the ones with normals along the y-axis (top and bottom faces).
And your list of tangents:
1, 0, 0
1, 0, 0
0, 0, -1
0, 0, -1
0, 0, 1
0, 0, 1
-1, 0, 0
-1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
are all along either the x- or the z-axis. So what I'm guessing might be happening is that your normals and tangents are pointing in the same (or opposite) directions, in which case this line in your vertex shader:
vec3 B = normalize(cross(N, T));
is going to result in vec3(0.0, 0.0, 0.0), which can't be normalized.
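One hedged way to guard against that degenerate case (my sketch, not part of the original answer) is to Gram-Schmidt-orthogonalize the tangent against the normal in the vertex shader before taking the cross product:

// Transform with w = 0.0 so these are treated as directions, not positions.
vec3 N = normalize(vec3(normalMatrix * vec4(vertexNormal, 0.0)));
vec3 T = normalize(vec3(normalMatrix * vec4(vertexTangent, 0.0)));

// Remove the component of T parallel to N; B is then well-defined
// whenever T is not exactly parallel to N.
T = normalize(T - N * dot(N, T));
vec3 B = cross(N, T);

This won't rescue tangents that are exactly parallel to the normal, but it makes near-parallel ones usable and keeps the TBN basis orthonormal.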
My recommendation is to try manually giving the x and z faces tangents along the y-axis and see if that makes a difference. If it helps, then you know that the problem is with your computation of the tangents.
If it doesn't help, you can troubleshoot by outputting values from the fragment shader as on-screen colours. Try displaying the viewDirection, lightDirection, normalDelta, reflectDir, and anything else you can think of to see which variable is causing the blackness.
Bonus: switch the glClear color to something other than black, so that it doesn't look like a face floating in space.