The title says it all: using OpenGL's built-in lighting system, the specular highlight does not grow or shrink with distance from the object, but with my shader implementation it does.
Vertex Shader:
#version 330
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
out vec2 texCoord0;
out vec3 normal0;
out vec3 worldPos0;
uniform mat4 transform;
uniform mat4 normalRotation;
uniform mat4 transformProjected;
void main()
{
gl_Position = transformProjected * vec4(position, 1.0);
texCoord0 = texCoord;
normal0 = normalize((normalRotation * vec4(normal, 0.0))).xyz;
worldPos0 = (transform * vec4(position, 1.0)).xyz;
}
Fragment Shader:
#version 330
in vec2 texCoord0;
in vec3 normal0;
in vec3 worldPos0;
out vec4 fragColor;
struct BaseLight
{
vec3 colorDiffuse;
vec3 colorSpecular;
float intensityDiffuse;
};
struct DirectionalLight
{
BaseLight base;
vec3 direction;
};
uniform vec3 tint;
uniform sampler2D sampler;
uniform vec3 eyePos; // camera pos
uniform vec3 ambientLight;
uniform vec3 emissiveLight;
//material
uniform float specularIntensity;
uniform float specularPower;
uniform DirectionalLight directionalLight;
vec4 calcLight(BaseLight base,vec3 direction, vec3 normal)
{
float diffuseFactor = dot(normal, -direction);
vec4 diffuseColorFinal = vec4(0,0,0,0);
vec4 specularColorFinal = vec4(0,0,0,0);
if(diffuseFactor > 0)
{
diffuseColorFinal = vec4(base.colorDiffuse,1) * diffuseFactor * base.intensityDiffuse;
vec3 directionToEye = normalize(eyePos - worldPos0);
vec3 reflectDirection = normalize(reflect(direction, normal));
float specularFactor = dot(directionToEye, reflectDirection);
specularFactor = pow(specularFactor, specularPower);
if(specularFactor > 0)
specularColorFinal = vec4(base.colorSpecular,1) * specularFactor * specularIntensity;
}
//
return diffuseColorFinal + specularColorFinal;
}
void main()
{
vec4 colorD = texture(sampler, texCoord0.xy) * vec4(tint,1);
vec3 normal = normal0;
vec4 totalLight = vec4(ambientLight,1) + vec4(emissiveLight,1);
totalLight += calcLight(directionalLight.base,-directionalLight.direction,normal);
fragColor = colorD * totalLight;
}
As you can see from the two images, the specular highlight takes up a larger surface area the farther the camera gets from the plane. In my test with OpenGL's built-in lighting, this doesn't happen. Is there a way to fix this? I'm new to lighting; maybe this is normal for directional light sources? Thanks for the help!
I'm also setting my eyePos uniform to my camera position; I don't know if that helps.
Basically you need the distance between the fragment and the light, dist. This can be a problem for a directional light, though, because you only have the direction and the distance is assumed to be infinite. Maybe switch to a point light?
Once you have dist, you use the attenuation formula
att = 1.0 / (Kc + Kl*dist + Kq*dist^2)
Kc - constant attenuation
Kl - linear attenuation
Kq - quadratic attenuation
A simpler version (Kc set to 1.0, the linear term dropped, only the quadratic Kq kept):
float attenuation = 1.0 / (1.0 + light.attenuation * pow(distanceToLight, 2));
Then, in the lighting equation, you multiply the calculated color by this attenuation factor:
vec4 finalColor = ambient + (diffuseColorFinal + specularColorFinal) * att;
http://www.ozone3d.net/tutorials/glsl_lighting_phong_p4.php#part_4
http://tomdalling.com/blog/modern-opengl/07-more-lighting-ambient-specular-attenuation-gamma/
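Applied to the fragment shader in the question, a minimal sketch of a replacement main() (plus two new uniforms) might look like this. It assumes the light becomes a point light with a world-space position and that the attenuation constants arrive as a uniform; both are additions, not something the original shader has:
// assumed new uniforms
uniform vec3 lightPosition;        // world-space point light position
uniform vec3 attenuationKcKlKq;    // x = Kc, y = Kl, z = Kq
void main()
{
    vec4 colorD = texture(sampler, texCoord0.xy) * vec4(tint, 1);
    vec3 normal = normal0;
    vec3 toLight = lightPosition - worldPos0;
    float dist = length(toLight);
    float att = 1.0 / (attenuationKcKlKq.x
                     + attenuationKcKlKq.y * dist
                     + attenuationKcKlKq.z * dist * dist);
    vec4 totalLight = vec4(ambientLight, 1) + vec4(emissiveLight, 1);
    // calcLight expects the light-to-surface direction, hence the minus sign
    totalLight += att * calcLight(directionalLight.base, -normalize(toLight), normal);
    fragColor = colorD * totalLight;
}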
Related
platform: Windows 10
context: OpenGL, GLEW, Win32
So I loaded two meshes (using a simple OBJ parser, which only reads triangulated meshes) with vertex position, UV, and normal data. The first mesh is lit okay, with no black faces. The second one looks like this.
The Strange Effects
my vertex shader:
#version 440
in vec3 pos;
in vec2 tex;
in vec3 nor;
uniform float Scale;
uniform mat4 perspective;
uniform mat4 model;
out vec3 normaldir;
out vec2 texOut;
out vec3 FragPos;
void main()
{
normaldir = normalize(mat3(transpose(inverse(model))) * nor);
gl_Position = perspective * model * vec4(pos.xyz, 1.0);
texOut = tex;
FragPos = vec3(model * vec4(pos, 1.0));
}
my fragment shader:
#version 440
uniform float Scale;
uniform sampler2D diffuse;
uniform sampler2D normal;
uniform vec3 viewPos;
//uniform sampler2D normalMap0;
in vec3 normaldir;
in vec2 texOut;
in vec3 FragPos;
layout(location = 0) out vec4 FragColor0;
void main()
{
vec3 lightPos = {2,6,0};
lightPos.x = sin(Scale)*5;
lightPos.z = cos(Scale)*5;
vec3 lightDir = normalize(lightPos - FragPos);
vec3 lightColor = {1.0,1.0,1.0};
float specularStrength = 1.6;
float diff = max(dot(normaldir, lightDir), 0.0);
vec3 diffuseD = diff * lightColor;
vec3 viewDir = normalize(viewPos - FragPos);
vec3 reflectDir = reflect(-lightDir, normaldir);
vec3 ambient = {0.0,0.2,0.4};
float spec = pow(max(dot(viewDir, reflectDir), 0.0), 25);
vec3 specular = specularStrength * spec * lightColor;
vec3 diffuseCol = texture(diffuse, texOut).xyz;
vec3 result = (ambient + diffuseD+ specular) * diffuseCol;
FragColor0 = vec4(result, 1.0);
}
Sorry, I made a very dumb mistake. Thank you for all your support @Rabbid76 (yes, I did invert the normals) and @paddy.
The problem was in binding the normal buffers: I was passing sizeof(glm::vec2) * size instead of sizeof(glm::vec3) * size for the normal buffer.
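For reference, a sketch of the kind of fix this implies; buffer and variable names are placeholders, not the asker's actual code:
// Upload the normal data with the correct element size: normals are vec3, not vec2.
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glBufferData(GL_ARRAY_BUFFER,
             normals.size() * sizeof(glm::vec3),   // was sizeof(glm::vec2) -- the bug
             normals.data(), GL_STATIC_DRAW);
glVertexAttribPointer(normalAttribLocation, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(normalAttribLocation);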
Recently I added deferred shading support to my engine; however, I ran into some attenuation issues:
As you can see, when I'm rendering the light volume (sphere), it doesn't blend nicely with the ambient part of the image!
Here is how I declare my point light:
PointLight pointlight;
pointlight.SetPosition(glm::vec3(0.0, 6.0, 0.0));
pointlight.SetIntensity(glm::vec3(1.0f, 1.0f, 1.0f));
Here is how I compute the light sphere radius:
Attenuation attenuation = pointLights[i].GetAttenuation();
float lightMax = std::fmaxf(std::fmax(pointLights[i].GetIntensity().r, pointLights[i].GetIntensity().g),
pointLights[i].GetIntensity().b);
float pointLightRadius = (-attenuation.linear +
std::sqrtf(std::pow(attenuation.linear, 2.0f) - 4.0f * attenuation.exponential *
(attenuation.constant - (256.0f / 5.0f) * lightMax))) / (2.0f * attenuation.exponential);
And finally, here is my PointLightPass fragment shader:
#version 450 core
struct BaseLight
{
vec3 intensities;//a.k.a color of light
float ambientCoeff;
};
struct Attenuation
{
float constant;
float linear;
float exponential;
};
struct PointLight
{
BaseLight base;
Attenuation attenuation;
vec3 position;
};
struct Material
{
float shininess;
vec3 specularColor;
float ambientCoeff;
};
layout (std140) uniform Viewport
{
uniform mat4 Projection;
uniform mat4 View;
uniform mat4 ViewProjection;
uniform vec2 scrResolution;
};
layout(binding = 0) uniform sampler2D gPositionMap;
layout(binding = 1) uniform sampler2D gAlbedoMap;
layout(binding = 2) uniform sampler2D gNormalMap;
layout(binding = 3) uniform sampler2D gSpecularMap;
uniform vec3 cameraPosition;
uniform PointLight pointLight;
out vec4 fragmentColor;
vec2 FetchTexCoord()
{
return gl_FragCoord.xy / scrResolution;
}
void main()
{
vec2 texCoord = FetchTexCoord();
vec3 gPosition = texture(gPositionMap, texCoord).xyz;
vec3 gSurfaceColor = texture(gAlbedoMap, texCoord).xyz;
vec3 gNormal = texture(gNormalMap, texCoord).xyz;
vec3 gSpecColor = texture(gSpecularMap, texCoord).xyz;
float gSpecPower = texture(gSpecularMap, texCoord).a;
vec3 totalLight = gSurfaceColor * 0.1; //TODO remove hardcoded ambient light
vec3 viewDir = normalize(cameraPosition - gPosition);
vec3 lightDir = normalize(pointLight.position - gPosition);
vec3 diffuse = max(dot(gNormal, lightDir), 0.0f) * gSurfaceColor *
pointLight.base.intensities;
vec3 halfWayDir = normalize(lightDir + viewDir);
float spec = pow(max(dot(gNormal, halfWayDir), 0.0f), 1.0f);
vec3 specular = pointLight.base.intensities * spec /** gSpecColor*/;
float distance = length(pointLight.position - gPosition);
float attenuation = 1.0f / (1.0f + pointLight.attenuation.linear * distance
+ pointLight.attenuation.exponential * distance * distance +
pointLight.attenuation.constant);
diffuse *= attenuation;
specular *= attenuation;
totalLight += diffuse + specular;
fragmentColor = vec4(totalLight, 1.0f);
}
So what can you suggest to deal with this issue?
EDIT: Here are more details.
For deferred shading:
I populate my G-buffer;
I make an ambient light pass where I render a fullscreen quad with the ambient colors:
#version 420 core
layout (std140) uniform Viewport
{
uniform mat4 Projection;
uniform mat4 View;
uniform mat4 ViewProjection;
uniform vec2 scrResolution;
};
layout(binding = 1) uniform sampler2D gAlbedoMap;
out vec4 fragmentColor;
vec2 FetchTexCoord()
{
return gl_FragCoord.xy / scrResolution;
}
void main()
{
vec2 texCoord = FetchTexCoord();
vec3 gSurfaceColor = texture(gAlbedoMap, texCoord).xyz;
vec3 totalLight = gSurfaceColor * 1.2; //TODO remove hardcoded ambient light
fragmentColor = vec4(totalLight, 1.0f);
}
Then I render my point lights (see the code above).
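(For reference, a typical blend state for accumulating these light passes on top of the ambient pass looks roughly like this; a sketch of the usual additive set-up, not the engine's actual code, which isn't shown above:)
// Additive accumulation of light volumes over the ambient pass.
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);   // destination += source for each light volume drawn
glDepthMask(GL_FALSE);         // the light volumes should not write depth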
The reason you're having this problem is that you're using a "light volume" (a fact that you didn't make entirely clear in this question, but was brought up in your other question).
You are using the normal light attenuation equation. Well, you'll notice that this equation does not magically stop at some arbitrary radius. It is defined for all distances from 0 to infinity.
The purpose of your light volume is to prevent lighting contributions beyond a certain distance. Well, if your light attenuation doesn't go to zero at that distance, then you're going to see a discontinuity at the edge of the light volume.
If you're going to use a light volume, you need to use a light attenuation equation that actually is guaranteed to reach zero at the edge of the volume. Or failing that, you should pick a radius for your volume such that the attenuated strength of the light is nearly zero. And your radius is too small for that.
Keep making your radius bigger until you can't tell it's there.
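One way to do that (a sketch, not the only option) is to window the attenuation so it is forced to zero at the same radius used to build the light volume; lightRadius here is an assumed uniform matching that radius:
// Windowed attenuation: behaves like 1/d^2 up close but reaches exactly 0 at lightRadius.
float dist   = length(pointLight.position - gPosition);
float window = clamp(1.0 - pow(dist / lightRadius, 4.0), 0.0, 1.0);
float att    = (window * window) / (1.0 + dist * dist);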
I am implementing a basic Phong lighting GLSL shader. I have looked some things up on the internet and found that the Phong effect is created by adding an ambient, a diffuse, and a specular layer on the object (see the image below, from Tom Dalling's site). The problem is that I have seen a lot of examples, and none of them really suits my GLSL set-up. Can any of you give me a code example of the correct way to implement the Phong effect that would fit my GLSL set-up?
PS: This question could be put on hold because it may seem opinion-based. In my mind it is not, because I would like to know the most effective and correct way of implementing it.
Here is my vertex shader:
#version 120
uniform mat4 modelView;
uniform mat4 MVP;
uniform float time;
attribute vec3 position;
attribute vec2 texCoord;
attribute vec3 normal;
varying vec3 position0;
varying vec2 texCoord0;
varying vec3 normal0;
varying mat4 modelView0;
void main()
{
//Updating varyings...
position0 = position;
texCoord0 = texCoord;
normal0 = (MVP * vec4(normal, 0.0)).xyz;
modelView0 = modelView;
//set position
gl_Position = MVP * vec4(position, 1.0);
}
and my fragment shader:
#version 120
varying vec3 position0;
varying vec2 texCoord0;
varying vec3 normal0;
varying mat4 modelView0;
uniform sampler2D diffuse;
void main()
{
vec4 surfaceColor = texture2D(diffuse, texCoord0);
gl_FragColor = (texture2D(diffuse, texCoord0))
* clamp(dot(-vec3(0.0, 0.5, 0.5), normal0), 0, 1.0);
}
try this:
void main()
{
vec4 texread = texture2D(diffuse, texCoord0);
vec3 normal = normalize(normal0);
vec3 material_kd = vec3(1.0,1.0,1.0);
vec3 material_ks = vec3(1.0,1.0,1.0);
vec3 material_ka = vec3(0.2,0.2,0.2);
vec3 material_ke = vec3(0.0,0.0,0.0);
float material_shininess = 60;
vec3 lightpos = vec3(0.0,10.0,5.0);
vec3 lightcolor = vec3(1.0,1.0,1.0);
vec3 lightdir = normalize(lightpos - worldPosition);
float shade = clamp(dot(lightdir, normal), 0.0, 1.0);
vec3 toWorldpos = normalize((worldPosition) - u_eyePos);
vec3 reflectDir = reflect( toWorldpos, normal );
vec4 specular = vec4(pow(clamp(dot(lightdir, reflectDir),0.0,1.0), material_shininess) * lightcolor * material_ks, 1.0);
vec4 shaded = texread * vec4(material_kd, 1.0) * vec4(lightcolor , 1.0) * shade;
vec4 ambient = texread * vec4(material_ka, 1.0);
vec4 emission = vec4(material_ke, 1.0);
gl_FragColor = shaded + specular + emission + ambient;
}
It may have some compilation errors though, as I didn't run it...
You may need to upload your eye position as a uniform (u_eyePos) and calculate the world position (worldPosition) in the vertex shader for it to work.
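A minimal sketch of that vertex-shader side, matching the asker's #version 120 set-up; the model matrix uniform (here just called model) is an assumption, since the original only has modelView and MVP:
#version 120
uniform mat4 model;   // assumed object-to-world matrix
uniform mat4 MVP;
attribute vec3 position;
attribute vec2 texCoord;
attribute vec3 normal;
varying vec3 worldPosition; // consumed by the fragment shader above
varying vec2 texCoord0;
varying vec3 normal0;
void main()
{
    worldPosition = (model * vec4(position, 1.0)).xyz;
    // w = 0 so translation is ignored; a proper normal matrix is needed for non-uniform scale
    normal0 = (model * vec4(normal, 0.0)).xyz;
    texCoord0 = texCoord;
    gl_Position = MVP * vec4(position, 1.0);
}
u_eyePos would then be uploaded from the application as the camera's world-space position (e.g. with glUniform3f).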
I made my own Phong shader; here is the code.
Fragment shader:
#version 150
uniform mat4 modelView;
uniform mat3 normalMatrix;
uniform vec3 cameraPosition;
uniform sampler2D materialTex;
uniform float materialShininess;
uniform vec3 materialSpecularColor;
uniform vec3 lightPosition;//light settings
uniform vec3 lightIntensities;
uniform float lightAttenuation;
uniform float lightAmbientCoeff;
in vec3 position0;
in vec2 texCoord0;
in vec3 normal0;
out vec4 fragmentColor;
void main()
{
//calculate normal in world coordinates
vec3 normal = normalize(normalMatrix * normal0);
//calculate the location of this fragment (pixel) in world coordinates
vec3 surfacePos = vec3(modelView * vec4(position0, 1));
//color of the current fragment
vec4 surfaceColor = texture(materialTex, texCoord0);
//calculate the vector from this pixels surface to the light source
vec3 surfaceToLight = normalize(lightPosition - surfacePos);
//cam distance
vec3 surfaceToCamera = normalize(cameraPosition - surfacePos);
///////////////////////////DIFUSE///////////////////////////////////////
//calculate the cosine of the angle of incidence
//float diffuseCoeff = dot(normal, surfaceToLight) / (length(surfaceToLight) * length(normal));
float diffuseCoeff = max(0.0, dot(normal, surfaceToLight));
vec3 diffuse = diffuseCoeff * surfaceColor.rgb * lightIntensities;
/////////////////////////AMBIENT////////////////////////////////////////
vec3 ambient = lightAmbientCoeff * surfaceColor.rgb * lightIntensities;
/////////////////////////SPECULAR//////////////////////////////////////
float specularCoeff = 0.0;
if(diffuseCoeff > 0.0)
specularCoeff = pow(max(0.0, dot(surfaceToCamera, reflect(-surfaceToLight, normal))), materialShininess);
vec3 specular = specularCoeff * materialSpecularColor * lightIntensities;
////////////////////////ATTENUATION///////////////////////////////////
float distanceToLight = length(lightPosition - surfacePos);
float attenuation = 1.0 / (1.0 + lightAttenuation * pow(distanceToLight, 2));
/////////////////////////////////FINAL/////////////////////////////////
vec3 linearColor = ambient + attenuation * (diffuse + specular);
//finalColor with gamma correction
vec3 gamma = vec3(1.0/2.2);
fragmentColor = vec4(pow(linearColor, gamma), surfaceColor.a);
//fragmentColor = vec4(diffuseCoeff * lightIntensities * surfaceColor.rgb, surfaceColor.a);
}
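The matching vertex shader isn't shown; a minimal sketch of one that would feed these inputs (the attribute and projection uniform names are assumptions) simply passes the model-space attributes through, since the fragment shader does the transforms itself with modelView and normalMatrix:
#version 150
in vec3 position;          // assumed attribute names
in vec2 texCoord;
in vec3 normal;
uniform mat4 projection;   // assumed uniform name
uniform mat4 modelView;    // same matrix the fragment shader uses
out vec3 position0;
out vec2 texCoord0;
out vec3 normal0;
void main()
{
    // pass raw model-space attributes through; the fragment shader transforms them
    position0 = position;
    texCoord0 = texCoord;
    normal0 = normal;
    gl_Position = projection * modelView * vec4(position, 1.0);
}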
I just started learning OpenGL 3.x and I'm trying to implement a basic ADS/Phong shader in OpenGL 4.4.
Unfortunately I get these weird dark spots underneath this low-poly version of the Stanford Bunny. After using some other models I've come to the conclusion that the culprit cannot be the bunny, so it is probably my shader.
Vertex Shader
#version 330
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec3 vertex_normal;
out vec3 lightIntensity;
uniform mat4 modelViewProjectionMatrix;
uniform mat4 modelMatrix;
// Diffuse
// K REFLECTIVITY, L SOURCE INTENSITY
// a AMBIENT, d DIFFUSE, s SPECULAR
struct Light{
vec3 position;
vec3 La;
vec3 Ld;
vec3 Ls;
};
uniform Light light;
struct Material{
float shininess;
vec3 Ka;
vec3 Kd;
vec3 Ks;
};
uniform Material material;
void main(){
vec4 vertex = vec4(vertexPosition_modelspace, 1.0f);
vec4 eyeCoords = modelMatrix * vertex;
vec3 n = normalize(vertex_normal); // Normal
vec3 s = normalize(light.position - eyeCoords.xyz); // Direction towards the light
vec3 v = normalize(eyeCoords.xyz);
vec3 r = reflect(-s, n);
float sDotN = max(dot(s, n), 0.0);
vec3 ambient = light.La * material.Ka;
vec3 diffuse = light.Ld * material.Kd * sDotN;
vec3 specular = vec3(0.0f);
if(sDotN > 0.0f){
specular = light.Ls * material.Ks * pow(max(dot(r, v), 0.0), material.shininess);
}
lightIntensity = ambient + diffuse + specular;
gl_Position = modelViewProjectionMatrix * vertex;
}
What is causing this and how do I fix it?
Turns out I was passing the vertices instead of the normals; I had completely broken my model loader. All is well now, and the above code works if the correct uniforms are passed in!
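For anyone hitting the same thing: the loader code isn't shown in the question, but the class of bug is pointing the normal attribute at the wrong buffer. A sketch of the correct attribute setup for this shader's layout locations (buffer names are placeholders):
glBindBuffer(GL_ARRAY_BUFFER, positionVBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0); // location 0: vertexPosition_modelspace
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);                     // must be the normal data, not the positions
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)0); // location 1: vertex_normal
glEnableVertexAttribArray(1);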
I have very strange behaviour with the specular (Phong light model) term: it seems to be appearing on both sides of all objects. Does anyone know what the issue could be?
The actual calculation seems to be all right, as I can see that the highlight changes its position as the object rotates.
#version 330
in vec4 CameraPos0;
in vec3 Pos0;
in vec4 Colour0;
in vec3 Normal0;
out vec4 FragColor;
// Ambient light parameters
uniform vec3 gAmbientLightIntensity;
// Directional light parameters
uniform vec3 gDirectionalLightIntensity;
uniform vec3 gDirectionalLightDirection;
// Specular light parameter
uniform vec3 gSpecularLightIntensity;
uniform vec3 gLightSourcePosition;
uniform vec3 gCameraPosition;
// Material constants
uniform float gKa;
uniform float gKd;
uniform float gKs;
void main()
{
// Calculate the ambient light intensity at the vertex
// Ia = Ka * ambientLightIntensity
vec4 ambientLightIntensity = gKa * vec4(gAmbientLightIntensity, 1.0);
// Setup the light direction and normalise it
vec3 lightDirection = normalize(-gDirectionalLightDirection);
//lightDirection = normalize(gDirectionalLightDirection);
// Id = kd * lightItensity * N.L
// Calculate N.L
float diffuseFactor = dot(Normal0, lightDirection);
diffuseFactor = clamp(diffuseFactor, 0.0, 1.0);
// N.L * light source colour * intensity
vec4 diffuseLightIntensity = gKd * vec4(gDirectionalLightIntensity, 1.0f) * diffuseFactor;
// Phong light
vec3 L = normalize(gLightSourcePosition - Pos0);
vec3 V = normalize(-Pos0);
vec3 R = normalize(2 * Normal0 * dot(Normal0, L) - L);
float specularFactor = pow(dot(R, V), 0.1f);
vec4 specularLightIntensity = gKs * vec4(gSpecularLightIntensity, 1.0f) * specularFactor;
specularLightIntensity = clamp(specularLightIntensity, 0.0, 1.0);
// Final vertex colour is the product of the vertex colour
// and the total light intensity at the vertex
vec4 lightedFragColor = Colour0 * (ambientLightIntensity + diffuseLightIntensity + specularLightIntensity);
FragColor = lightedFragColor;
}
Vertex Shader
#version 330
layout (location = 0) in vec3 Position;
layout (location = 1) in vec3 Normal;
layout (location = 2) in vec4 Colour;
out vec3 Pos0;
out vec4 Colour0;
out vec3 Normal0;
out vec4 CameraPos0;
uniform mat4 gModelToWorldTransform;
uniform mat4 gWorldToViewTransform;
uniform mat4 gProjectionTransform;
void main()
{
vec4 vertexPositionInModelSpace = vec4(Position, 1);
vec4 vertexInWorldSpace = gModelToWorldTransform * vertexPositionInModelSpace;
vec4 vertexInViewSpace = gWorldToViewTransform * vertexInWorldSpace;
vec4 vertexInHomogeneousClipSpace = gProjectionTransform * vertexInViewSpace;
gl_Position = vertexInHomogeneousClipSpace;
vec3 normalInWorldSpace = (gModelToWorldTransform * vec4(Normal, 0.0)).xyz;
normalInWorldSpace = normalize(normalInWorldSpace);
Normal0 = normalInWorldSpace;
CameraPos0 = vertexInViewSpace;
Pos0 = vertexInWorldSpace.xyz;
Colour0 = Colour;
}
You need to clamp (saturate) the result of the dot product, because on the back side it is negative, and pow of a negative number can come out positive (or undefined) instead of zero:
float specularFactor = pow(clamp(dot(R, V),0.0,1.0), 0.1f);
Edit:
Also, V should be a vector pointing from the fragment to the camera position, not toward the vertex position. Note that CameraPos0 actually holds the vertex's view-space position in this vertex shader, so use the camera-position uniform instead:
vec3 V = normalize(gCameraPosition - Pos0);