I've created a .brdf file, but when I try to open it in BRDF Explorer (brdf.exe), the following message appears:
BRDF Version 1.0.0
opening ./probes/beach.penv
Unrecognised OpenGL version
My code had some problems, but with help from the Stack Overflow community I fixed them; however, there is still one problem.
Can anyone advise how to fix this?
This is my code:
analytic
::begin parameters
float baseColor 0.001 .2 .1
float m 0.001 0.1 1.0
::end parameters
::begin shader
const float PI = 3.14;
float Fresnel(float baseColor, float dotLH)
{
    // Schlick's approximation, using dot(L, H) as the cosine term
    return baseColor + (1.0 - baseColor) * pow(1.0 - dotLH, 5.0);
}
float ggx_visib(float dotNX, float m2)
{
    // Schlick-GGX visibility term with the 1/dot factor already folded in
    return 1.0 / (dotNX * (1.0 - m2) + m2);
}
vec3 BRDF( vec3 N, vec3 V, vec3 L, float m, float baseColor)
{
    vec3 H = normalize(V + L);
    float dotNL = clamp(dot(N, L), 0.0, 1.0);
    float dotNV = clamp(dot(N, V), 0.0, 1.0);
    float dotLH = clamp(dot(L, H), 0.0, 1.0);
    float dotNH = clamp(dot(N, H), 0.0, 1.0);
    float m2 = m * m;
    // GGX/Trowbridge-Reitz normal distribution
    float D = m2 / (PI * pow(dotNH * dotNH * (m2 - 1.0) + 1.0, 2.0));
    float F = Fresnel(baseColor, dotLH);
    float g1i = ggx_visib(dotNL, m2);
    float g1o = ggx_visib(dotNV, m2);
    float G = g1i * g1o;
    // the 1/(4 * dotNL * dotNV) denominator is already contained in G above
    return vec3(D * G * F / 4.0);
}
}
::end shader
I keep having this bug where there's a black spot right where I would assume the model is supposed to be brightest. I pulled an all-nighter trying to get this to work, but to no avail.
I've been following this tutorial, https://learnopengl.com/PBR/Lighting, and referencing this code as well: https://github.com/Nadrin/PBR/blob/master/data/shaders/hlsl/pbr.hlsl
As far as I can tell, the math operations I'm doing are identical, but they don't produce the intended results. Along with the dark spots, roughness seems to have no effect on the end result whatsoever, even though I use it in several places that affect the final output.
Here's the code I'm using; all inputs are in world coordinates:
vec3 gammaCorrect(vec3 color)
{
color = color / (color + vec3(1.0));
return pow(color, vec3(1.0/2.2));
}
vec3 shadeDiffuse(vec3 color, vec3 position, vec3 normal)
{
vec3 lightHue = vec3(0,0,0);
for(uint i = 0; i < plb.numLights; ++i)
{
float sqrdist = distance(plb.lights[i].position, position);
sqrdist *= sqrdist;
float b = max(0, dot(normalize(plb.lights[i].position - position), normal) * max(0, plb.lights[i].color.a * (1 / sqrdist)));
lightHue += plb.lights[i].color.xyz * b;
}
color *= lightHue;
return gammaCorrect(color);
}
#ifndef PI
const float PI = 3.14159265359;
#endif
float DistributionGGX(vec3 normal, vec3 viewVec, float roughness)
{
float a2 = pow(roughness, 4);
float NdotH = max(dot(normal, viewVec), 0.0);
float denom = (NdotH*NdotH * (a2 - 1.0) + 1.0);
return a2 / (PI * denom * denom);
}
float GeometrySchlickGGX(float dotp, float roughness)
{
return dotp / (dotp * (1.0 - roughness) + roughness);
}
float GeometrySmith(vec3 normal, vec3 viewVec, vec3 lightVec, float roughness)
{
float r = (roughness + 1.0);
float k = (r * r) / 8.0;
return GeometrySchlickGGX(max(dot(normal, viewVec), 0.0), k) * GeometrySchlickGGX(max(dot(normal, lightVec), 0.0), k);
}
vec3 fresnelSchlick(float cosTheta, vec3 F0)
{
return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}
vec3 shadePBR(vec3 albedo, vec3 position, vec3 cameraPos, vec3 normal, float roughness, float metallic)
{
vec3 viewVec = normalize(cameraPos - position);
const vec3 F0 = mix(vec3(0.03), albedo, metallic);
vec3 lightHue = vec3(0);
for(uint i = 0; i < plb.numLights; ++i)
{
// radiance
vec3 lightVec = normalize(plb.lights[i].position - position);
vec3 halfVec = normalize(viewVec + lightVec);
float distance = length(plb.lights[i].position - position);
float attenuation = 1.0 / (distance * distance);
vec3 radiance = plb.lights[i].color.xyz * attenuation * max(plb.lights[i].color.a, 0);
// brdf
float NDF = DistributionGGX(halfVec, normal, roughness);
float G = GeometrySmith(normal, viewVec, lightVec, roughness);
vec3 F = fresnelSchlick(max(dot(halfVec, viewVec), 0.0), F0);
vec3 kD = mix(vec3(1)-F, vec3(0), metallic);
float viewDot = max(dot(normal, viewVec), 0.0);
float lightDot = max(dot(normal, lightVec), 0.0);
vec3 specular = (NDF * G * F) / (4.0 * max(viewDot * lightDot, 0.000001));
// add to hue
lightHue += (kD * albedo / PI + specular) * radiance * lightDot;
}
//Add in ambient here later
vec3 color = lightHue;
return gammaCorrect(color);
}
I'm going to go to sleep now; thanks in advance for any help.
So it turns out I was being very stupid. The problem was that I was trying to grab the camera position from the render matrix, and as I have found out, you can't really get a clean position out of it without fully decomposing it; just grabbing a few of its elements isn't enough. I passed the camera position with a uniform instead, and the code immediately worked perfectly.
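For reference, here is a minimal sketch of that fix, assuming the camera's world-space position is supplied through a uniform named u_cameraPos and a world-space v_position comes in from the vertex shader (both names are illustrative, not the exact names I used):
#version 430
// Minimal sketch: receive the camera's world-space position as a plain uniform
// instead of trying to read it out of a transform matrix.
in vec3 v_position;              // world-space fragment position
out vec4 f_color;
uniform vec3 u_cameraPos;        // world-space camera position, set by the application each frame
uniform mat4 u_view;             // only needed for the alternative shown below
void main()
{
    // Alternative: if only the view matrix is bound, the camera position is the
    // translation column of its inverse (inverse() requires GLSL 1.40+):
    // vec3 cameraPosFromView = inverse(u_view)[3].xyz;
    vec3 viewVec = normalize(u_cameraPos - v_position);
    f_color = vec4(viewVec * 0.5 + 0.5, 1.0);   // visualize the view vector for debugging
}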
I've got a problem with rendering hard shadows in a PBR pipeline.
I believe there is something wrong with the PBR calculations, because with a Blinn-Phong lighting model everything looks fine.
These are the lighting calculations - basic PBR:
struct DirectionalLight
{
vec3 direction;
};
layout(std140, binding = 2) uniform Scene
{
DirectionalLight directionalLight;
vec3 viewPosition;
} u_scene;
layout(std140, binding = 4) uniform Material
{
vec4 baseColor;
float roughness;
float metalness;
} u_material;
const float PI = 3.14159265359;
const float epsilon = 0.00001;
int lightCount = 1;
vec3 CalculateDirectionalLight(vec3 N, vec3 V, float NdotV, vec3 F0)
{
vec3 result = vec3(0.0f); // must be initialized before accumulating in the loop
for(int i = 0; i < lightCount; ++i) {
vec3 L = normalize(-u_scene.directionalLight.direction);
float NdotL = max(0.0f, dot(N, L));
vec3 H = normalize(V + L);
float NdotH = max(0.0f, dot(N, H));
vec3 F = FresnelSchlickRoughness(max(0.0f, dot(H, V)), F0, u_material.roughness);
float D = NDFGGX(NdotH, u_material.roughness);
float G = GeometrySmith(NdotL, NdotV, u_material.roughness);
vec3 kd = (1.0f - F) * (1.0f - u_material.metalness);
vec3 diffuse = kd * u_material.baseColor.rgb;
vec3 nominator = F * G * D;
float denominator = max(epsilon, 4.0f * NdotV * NdotL);
vec3 specular = nominator / denominator;
specular = clamp(specular, vec3(0.0f), vec3(10.0f));
result += (diffuse + specular) /* u_material.radiance */ * NdotL;
}
return result;
}
float NDFGGX(float NdotH, float roughness)
{
float alpha = roughness * roughness;
float alphaSq = alpha * alpha;
float denom = (NdotH * NdotH) * (alphaSq - 1.0) + 1.0;
return alphaSq / (PI * denom * denom);
}
float GeometrySchlickGGX(float Ndot, float k)
{
float nom = Ndot;
float denom = Ndot * (1.0 - k) + k;
return nom / denom;
}
float GeometrySmith(float NdotL, float NdotV, float roughness)
{
float r = (roughness + 1.0f);
float k = (r * r) / 8.0f;
float ggx2 = GeometrySchlickGGX(NdotV, k);
float ggx1 = GeometrySchlickGGX(NdotL, k);
return ggx1 * ggx2;
}
vec3 FresnelSchlick(float cosTheta, vec3 F0)
{
return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}
vec3 FresnelSchlickRoughness(float cosTheta, vec3 F0, float roughness)
{
return F0 + (max(vec3(1.0 - roughness), F0) - F0) * pow(1.0 - cosTheta, 5.0);
}
shadow functions
layout(binding = 2) uniform sampler2D u_shadowMap;
float ShadowFade = 1.0;
float GetShadowBias()
{
const float MINIMUM_SHADOW_BIAS = 0.002;
float bias = max(MINIMUM_SHADOW_BIAS * (1.0 - dot(normalize(v_normal), -normalize(u_scene.directionalLight.direction))), MINIMUM_SHADOW_BIAS);
return bias;
}
float HardShadows_DirectionalLight(vec4 fragPosLightSpace)
{
vec3 shadowCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
float bias = GetShadowBias();
float shadowMapDepth = texture(u_shadowMap, vec2(shadowCoords.xy * 0.5 + 0.5)).r;
return step(shadowCoords.z, shadowMapDepth + bias) * ShadowFade;
}
and the main function
void main()
{
vec3 F0 = vec3(0.04f);
F0 = mix(F0, u_material.baseColor.rgb, u_material.metalness);
vec3 N = normalize(v_normal);
vec3 V = normalize(u_scene.viewPosition - v_position);
float NdotV = max(0.0f, dot(N, V));
//v_positionFromLight is calculated in a vertex shader like this:
//v_positionFromLight = u_lightViewProjection * vec4(v_position, 1.0f);
//where v_position is modelMatrix * a_position;
//where a_position is a input position of a vertex
float shadow = HardShadows_DirectionalLight(v_positionFromLight);
vec3 ambient = u_material.baseColor.rgb * 0.3f;
vec3 lightContribution = ambient + CalculateDirectionalLight(N, V, NdotV, F0) * shadow;
f_color = vec4(lightContribution, 1.0);
}
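For completeness, here is a minimal sketch of the vertex-shader side described in the comments above; the attribute and uniform names other than u_lightViewProjection are assumptions, not necessarily my exact declarations:
#version 450
// Vertex shader sketch: v_positionFromLight is the world-space position transformed
// by the light's view-projection matrix, as described in the comments in main().
layout(location = 0) in vec3 a_position;
layout(location = 1) in vec3 a_normal;
uniform mat4 u_model;
uniform mat4 u_viewProjection;
uniform mat4 u_lightViewProjection;
out vec3 v_position;
out vec3 v_normal;
out vec4 v_positionFromLight;
void main()
{
    v_position = vec3(u_model * vec4(a_position, 1.0f));   // world-space position
    v_normal = mat3(u_model) * a_normal;                    // fine for uniform scaling
    v_positionFromLight = u_lightViewProjection * vec4(v_position, 1.0f);
    gl_Position = u_viewProjection * vec4(v_position, 1.0f);
}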
and this is what the scene looks like - there should be visible shadows, but there aren't any:
I've tested 2 things.
First - Blinn-Phong lighting model - shadows render just fine.
Second - outputting the shadow calculations without PBR lighting, like this:
void main()
{
float shadow = HardShadows_DirectionalLight(v_positionFromLight);
vec3 ambient = u_material.baseColor.rgb * 0.3f;
f_color = vec4(ambient * shadow, 1.0f);
}
and it also works (aside from the shadows not being placed in quite the right spot, but that is another topic):
Why does this PBR model not work with shadows?
How can I fix it?
I am trying to implement atmospheric scattering in GLSL version 4.10. I am adapting the shaders from this Shadertoy shader: https://www.shadertoy.com/view/lslXDr. The atmosphere in my program is created from a scaled version of the planet sphere.
I have the actual scattering equations working, but the inner radius of the atmosphere does not line up with the outer radius of the sphere for most camera positions. I know this is because the radius of the atmosphere is bigger than that of the planet sphere, but I cannot seem to get it to scale right.
My problem is best illustrated here. The model is scaled up in these pictures. As can be seen, the atmosphere inner radius does not match the radius of the planet (the dark blue sphere).
Here the model is scaled and translated. The atmosphere is off center from the camera and the inner atmosphere is still not lined up with the planet.
Here is the vertex shader, which is essentially a pass-through shader:
#version 410
in vec4 vPosition;
in vec3 vNormal;
out vec3 fPosition;
out mat3 m;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
fPosition = vec3(vPosition);
m = mat3(model);
gl_Position = projection*view*model*vPosition;
}
And the fragment shader.
#version 410
uniform float time;
uniform vec3 camPosition;
uniform float fInnerRadius;
uniform float fOuterRadius;
in vec3 fPosition;
in mat3 m;
out vec4 FragColor;
const float PI = 3.14159265359;
const float degToRad = PI / 180.0;
const float MAX = 10000.0;
float K_R = 0.166;
const float K_M = 0.0025;
const float E = 14.3;
const vec3 C_R = vec3(0.3, 0.7, 1.0);
const float G_M = -0.85;
float SCALE_H = 4.0 / (fOuterRadius - fInnerRadius);
float SCALE_L = 1.0 / (fOuterRadius - fInnerRadius);
const int numOutScatter = 10;
const float fNumOutScatter = 10.0;
const int numInScatter = 10;
const float fNumInScatter = 10.0;
vec3 rayDirection(vec3 camPosition) {
vec3 ray = m*fPosition - camPosition;
float far = length(ray);
return ray /= far;
}
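// Ray/sphere intersection against a sphere centred at the origin; dir is assumed normalized.
// Returns the near and far hit distances along the ray, or (MAX, -MAX) when the ray misses.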
vec2 rayIntersection(vec3 p, vec3 dir, float radius ) {
float b = dot( p, dir );
float c = dot( p, p ) - radius * radius;
float d = b * b - c;
if ( d < 0.0 ) {
return vec2( MAX, -MAX );
}
d = sqrt( d );
float near = -b - d;
float far = -b + d;
return vec2(near, far);
}
// Mie phase function
// g : ( -0.75, -0.999 )
// F = (3 * (1 - g^2)) / (2 * (2 + g^2)) * (1 + c^2) / (1 + g^2 - 2*g*c)^(3/2)
float miePhase( float g, float c, float cc ) {
float gg = g * g;
float a = ( 1.0 - gg ) * ( 1.0 + cc );
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt( b );
b *= 2.0 + gg;
return 1.5 * a / b;
}
// Rayleigh phase function
// g : 0
// F = 3/4 * (1 + c^2)
float rayleighPhase( float cc ) {
return 0.75 * ( 1.0 + cc );
}
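// Atmospheric density: exponential falloff with altitude above the inner (planet) radius.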
float density(vec3 p) {
return exp(-(length(p) - fInnerRadius) * SCALE_H);
}
float optic(vec3 p, vec3 q) {
vec3 step = (q - p) / fNumOutScatter;
vec3 v = p + step * 0.5;
float sum = 0.0;
for(int i = 0; i < numOutScatter; i++) {
sum += density(v);
v += step;
}
sum *= length(step)*SCALE_L;
return sum;
}
vec3 inScatter(vec3 o, vec3 dir, vec2 e, vec3 l) {
float len = (e.y - e.x) / fNumInScatter;
vec3 step = dir * len;
vec3 p = o + dir * e.x;
vec3 v = p + dir * (len * 0.5);
vec3 sum = vec3(0.0);
for(int i = 0; i < numInScatter; i++) {
vec2 f = rayIntersection(v, l, fOuterRadius);
vec3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u))*(PI * 4.0);
sum += density(v)* exp(-n * ( K_R * C_R + K_M ));
v += step;
}
sum *= len * SCALE_L;
float c = dot(dir, -l);
float cc = c * c;
return sum * ( K_R * C_R * rayleighPhase( cc ) + K_M * miePhase( G_M, c, cc ) ) * E;
}
void main (void)
{
vec3 dir = rayDirection(vec3(camPosition.x, 0.0, camPosition.z));
vec3 eye = vec3(camPosition.x, 0.0, camPosition.z);
vec3 l = normalize(vec3(0.0, 0.0, 1.0));
vec2 e = rayIntersection(eye, dir, fOuterRadius);
if ( e.x > e.y ) {
discard;
}
vec2 f = rayIntersection(eye, dir, fInnerRadius);
e.y = min(e.y, f.x);
vec3 I = inScatter(eye, dir, e, l);
FragColor = vec4(I, 1.0);
}
If needed, here is the code that draws the atmosphere. The code that draws the planet uses essentially the same transformations, minus the scaleFactor.
void drawAtmosphere()
{
glUseProgram(atmosphereShader);
v = getViewMatrix();
vec3 Position = getCameraPosition();
float scaleFactor = 1.25;
m = multiplymat4(translate(0.0, 0.0, -10), scale(fScale*scaleFactor));
float fOuter = (fScale*scaleFactor);
float fInner = fScale;
glUniform1f(glGetUniformLocation(atmosphereShader, "fInnerRadius"), fInner);
glUniform1f(glGetUniformLocation(atmosphereShader, "fOuterRadius"), fOuter);
glUniform3f(glGetUniformLocation(atmosphereShader, "camPosition"), Position.x, Position.y, Position.z);
glUniform1f(glGetUniformLocation(atmosphereShader, "time"), glfwGetTime());
initMVP(atmosphereShader, m, v);
glBindVertexArray (atmosphereVAO);
glDrawArrays( GL_TRIANGLES, 0, planet.vertexNumber);
glBindVertexArray(0);
}
Any help, or anything that can point me in the right direction is appreciated.
It turned out the problem was caused by an incorrect calculation of the camera position and by not taking the model space of the object into account. I uploaded a stripped-down version of the code here.
Hopefully this will help anyone trying to implement Sean O'Neil's atmosphere code.
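As a rough illustration of that fix (not the stripped-down code at the link), the idea is to express the eye position and the radii in one consistent space before doing the ray/sphere tests, for example by transforming the world-space camera position into the object's model space; the uniform and function names below are only for illustration:
// Illustrative sketch (standalone): bring the world-space camera position into the
// object's model space so the intersection tests and the radii share one space
// (the radii would then also be given in model space, e.g. 1.0 and 1.25 here).
uniform mat4 model;            // same model matrix used to draw the atmosphere
uniform vec3 camPositionWorld; // camera position in world space
vec3 rayDirectionModelSpace(vec3 positionModelSpace, out vec3 eye)
{
    eye = vec3(inverse(model) * vec4(camPositionWorld, 1.0));   // inverse() requires GLSL 1.40+
    vec3 ray = positionModelSpace - eye;
    return ray / length(ray);
}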
I have been trying to wrap my head around physically based rendering for the last 2.5 weeks, and so far I have managed to learn a lot, ask a lot of questions, and get some results, although I still have a few problems that I would like to fix. For the last few days I have been stuck: I want to keep working and learning, but now I don't know what else to do or how to proceed further, so I need some guidance :(
One of the first problems I cannot figure out happens when I get close to a shape: there is a cut-off problem with the BRDF functions I have implemented. The second and third rows are BRDF functions using the Spherical Gaussian / Schlick approximation for Fresnel; the second row uses the Beckmann distribution as the NDF and the third one uses GGX/Trowbridge-Reitz.
I started implementing this by referring to "Real Shading in Unreal Engine 4" and a few other posts found while Googling.
What I believe the remaining things to do are:
How to blend diffuse, reflection, and specular better (see the sketch after this list)
Fix the BRDF cut-off problem
Evaluate whether my shaders produce good results based on the equations (it is the first time I have gone this route, and some comments would be very helpful as a guide on how to proceed in tweaking things)
Fix the specular factor in the Phong (first row) shader; right now I use the material roughness as a blend factor when I mix Phong, skybox reflection, and diffuse
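On the blending point, a common split is to let the Fresnel term decide how much energy goes to the specular/reflection part and give the remainder to diffuse. A hedged sketch, with all names illustrative rather than taken from my current shaders:
// Sketch of an energy-preserving blend: the Fresnel term F gives the specular share,
// the remainder (scaled down for metals) feeds the diffuse term.
vec3 blendDiffuseSpecular(vec3 albedo, vec3 F, float metalness,
                          vec3 diffuseLight, vec3 specularLight)
{
    vec3 kS = F;                                      // specular / reflection share
    vec3 kD = (vec3(1.0) - kS) * (1.0 - metalness);   // metals have no diffuse term
    return kD * albedo * diffuseLight + kS * specularLight;
}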
The code I use for the BRDFs is:
// geometry term Cook Torrance
float G(float NdotH, float NdotV, float VdotH, float NdotL) {
float NH2 = 2.0 * NdotH;
float g1 = (NH2 * NdotV) / VdotH;
float g2 = (NH2 * NdotL) / VdotH;
return min(1.0, min(g1, g2));
}
// Fresnel reflection term, Schlick approximation
float R_Fresnel(float VdotH) {
return F0 + (1.0 - F0) * pow(2, (-5.55473 * (VdotH)-6.98316) * (VdotH));
}
// Normal distribution function, GGX/Trowbridge-Reitz
float D_GGX(float NdotH, float roughness2) {
float a = roughness2 * roughness2;
float a2 = a*a;
float t = ((NdotH * NdotH) * (a2 - 1.0) + 1.0);
return a2 / (PI * t * t);
}
// Normal distribution function, Beckmann distribution
float D_Beckmann(float NdotH, float mSquared) {
float r1 = 1.0 / (4.0 * mSquared * pow(NdotH, 4.0));
float r2 = (NdotH * NdotH - 1.0) / (mSquared * NdotH * NdotH);
return (r1 * exp(r2));
}
// COOK TORRANCE BRDF
vec4 cookTorrance(Light light, vec3 direction, vec3 normal) {
// do the lighting calculation for each fragment.
float NdotL = max(dot(normal, direction), 0.0);
float specular = 0.0;
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float matShininess = (material.shininess / 1000.0);
float mSquared = (0.99 - matShininess) * (0.99 - matShininess);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
float roughness = D_Beckmann(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0);
}
vec4 cookTorrance_GGX(Light light, vec3 direction, vec3 normal) {
// do the lighting calculation for each fragment.
float NdotL = max(dot(normal, direction), 0.0);
float specular = 0.0;
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float matShininess = (material.shininess / 1000.0);
float mSquared = (0.99 - matShininess) * (0.99 - matShininess);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
// NDF CHANGED TO GGX
float roughness = D_GGX(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0);
}
void main() {
//vec4 tempColor = vec4(material.diffuse, 1.0);
vec4 tempColor = vec4(0.1);
// interpolating normals will change the length of the normal, so renormalize the normal.
vec3 normal = normalize(Normal);
vec3 I = normalize(Position - cameraPosition);
vec3 R = reflect(I, normalize(Normal));
vec4 reflection = texture(skybox, R);
// fix blending
float shininess = (material.shininess / 1000.0);
vec4 tempFinalDiffuse = mix(tempColor, reflection, shininess);
vec4 finalValue = cookTorrance_GGX(directionalLight.light, directionalLight.position, normal) + tempFinalDiffuse;
// OR FOR COOK TORRANCE IN THE OTHER SHADER PROGRAM
//vec4 finalValue = cookTorrance(directionalLight.light, directionalLight.position, normal) + tempFinalDiffuse;
gl_FragColor = finalValue;
//gl_FragColor = vec4(1.0); // TESTING AND DEBUGGING FRAG OUT
}
The results I have so far are like in the pictures below.
EDIT :: I managed to solve a few problems and implement the environment sampling given in "Real Shading in Unreal Engine 4", but I still just can't figure out why I have that cut-off problem, and now I also have a problem with reflections after sampling. :(
I also moved the Phong shading that I learned from books and online tutorials to a Blinn-Phong BRDF for better comparison.
My shader now looks like this.
vec4 brdf_GGX(Light light, vec3 direction, vec3 normal) {
float specular = 0.0;
float matShininess = 1.0 - (material.shininess / 1000.0);
vec2 randomPoint;
vec4 finalColor = vec4(0.0);
vec4 totalLambert = vec4(0.0);
const uint numberSamples = 32;
for (uint sampleIndex = 0; sampleIndex < numberSamples; sampleIndex++)
{
randomPoint = hammersley2d(sampleIndex, numberSamples);
vec3 H = ImportanceSampleGGX(randomPoint, matShininess, normal);
vec3 L = 2.0 * dot(normal, H) * H - normal;
vec3 R = reflect(L, normalize(normal));
totalLambert += texture(skybox, -R);
}
totalLambert = totalLambert / numberSamples;
float NdotL = max(dot(normal, direction), 0.0);
if (NdotL > 0.0)
{
vec3 eyeDir = normalize(cameraPosition);
// calculate intermediary values
vec3 halfVector = normalize(direction + eyeDir);
float NdotH = max(dot(normal, halfVector), 0.0);
float NdotV = max(dot(normal, eyeDir), 0.0);
float VdotH = max(dot(eyeDir, halfVector), 0.0);
float mSquared = clamp(matShininess * matShininess, 0.01, 0.99);
float geoAtt = G(NdotH, NdotV, VdotH, NdotL);
float roughness = D_Beckmann(NdotH, mSquared);
float fresnel = R_Fresnel(VdotH);
specular = (fresnel * geoAtt * roughness) / (NdotV * NdotL * PI);
}
vec3 finalValue = light.color * NdotL * (k + specular * (1.0 - k));
return vec4(finalValue, 1.0) * totalLambert;
}
Current results look like this (NOTE: I used skybox sampling only in the third, GGX model; I will do the same for the other shaders tomorrow).
EDIT :: OK, I am figuring out what is happening, but I still cannot fix it. I have problems when sampling: I have no idea how to translate the normalized ray into a proper cube map reflection after sampling. As you can notice in the pictures, I lost the correct reflection of the environment map that the spheres had; now I just have a simple, flat texture on each sphere and I have no idea how to fix that.
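In "Real Shading in Unreal Engine 4", the direction used for the environment map lookup is not the reflected normal: each importance-sampled half-vector H reflects the view direction V, and the cube map is sampled along that direction. A hedged sketch of that construction, reusing the hammersley2d and ImportanceSampleGGX helpers from my shader (the function name and the NdotL weighting are illustrative):
// Sketch: build the cube map lookup direction by reflecting the view vector V about
// each importance-sampled half-vector H, then accumulate NdotL-weighted samples.
vec3 prefilterEnvMap(samplerCube envMap, vec3 N, vec3 V, float roughness)
{
    const uint numberSamples = 32u;
    vec3 result = vec3(0.0);
    float totalWeight = 0.0;
    for (uint sampleIndex = 0u; sampleIndex < numberSamples; sampleIndex++)
    {
        vec2 Xi = hammersley2d(sampleIndex, numberSamples);
        vec3 H  = ImportanceSampleGGX(Xi, roughness, N);
        vec3 L  = 2.0 * dot(V, H) * H - V;              // reflect V about H
        float NdotL = max(dot(N, L), 0.0);
        if (NdotL > 0.0)
        {
            result      += texture(envMap, L).rgb * NdotL;
            totalWeight += NdotL;
        }
    }
    return result / max(totalWeight, 0.0001);
}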
Over the past ~2-3 weeks, I've been learning about physically based shading, and I just cannot wrap my head around some of the problems I'm having.
Fragment Shader
#version 430
#define PI 3.14159265358979323846
// Inputs
in vec3 inputNormal;
vec3 fNormal;
// Material
float reflectance = 1.0; // 0 to 1
float roughness = 0.5;
vec3 specularColor = vec3(1.0, 1.0, 1.0); // f0
// Values
vec3 lightVector = vec3(1, 1, 1); // Light (l)
vec3 eyeVector = vec3(2.75, 1.25, 1.25); // Camera (v)
vec3 halfVector = normalize(lightVector + eyeVector); // L + V / |L + V|
out vec4 fColor; // Output Color
// Specular Functions
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
float alpha = roughness * roughness;
float alpha2 = alpha * alpha;
float NoH = dot(fNormal, h);
float finalTerm = ((NoH * NoH) * (alpha2 - 1.0) + 1.0);
return vec3(alpha2 / (PI * (finalTerm * finalTerm)));
}
vec3 Gsub(vec3 v) // Sub Function of G
{
float k = ((roughness + 1.0) * (roughness + 1.0)) / 8;
return vec3(dot(fNormal, v) / ((dot(fNormal, v)) * (1.0 - k) + k));
}
vec3 G(vec3 l, vec3 v, vec3 h) // Geometric Attenuation Term - Schlick Modified (k = a/2)
{
return Gsub(l) * Gsub(v);
}
vec3 F(vec3 v, vec3 h) // Fresnel - Schlick Modified (Spherical Gaussian Approximation)
{
vec3 f0 = specularColor; // right?
return f0 + (1.0 - f0) * pow(2, (-5.55473 * (dot(v, h)) - 6.98316) * (dot(v, h)));
}
vec3 specular()
{
return (D(halfVector) * F(eyeVector, halfVector) * G(lightVector, eyeVector, halfVector)) / 4 * ((dot(fNormal, lightVector)) * (dot(fNormal, eyeVector)));
}
vec3 diffuse()
{
float NoL = dot(fNormal, lightVector);
vec3 result = vec3(reflectance / PI);
return result * NoL;
}
void main()
{
fNormal = normalize(inputNormal);
fColor = vec4(diffuse() + specular(), 1.0);
//fColor = vec4(D(halfVector), 1.0);
}
So far I have been able to fix up some things and now I get a better result.
However, it now seems clear that the highlight is way too big; this originates from the normal distribution function (Specular D).
Your coding of GGX/Trowbridge-Reitz is wrong:
vec3 NxH = fNormal * h;
The star * means a term-by-term (component-wise) product, where you want a dot product.
Also
float alphaTerm = (alpha * alpha - 1.0) + 1.0;
This is not correct, since the formula multiplies n.m by (alpha * alpha - 1.0) before adding 1.0; your formula is just equal to alpha*alpha!
Try:
// Specular
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
float alpha = roughness * roughness;
float NxH = dot(fNormal,h);
float alpha2 = alpha*alpha;
float t = ((NxH * NxH) * (alpha2 - 1.0) + 1.0);
return vec3(alpha2 / (PI * t * t));
}
In many other places you use * where you should use dot; you need to correct all of these. Also, check your formulas - many of them seem incorrect.
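For example, your specular() divides by 4 and then multiplies by the two dot products instead of dividing by the whole 4 * (N.L) * (N.V) term. A hedged sketch of what it could look like with dot products and the grouped denominator, reusing your globals and your D/F/G functions (the small clamp only guards against division by zero):
// Sketch: Cook-Torrance specular with dot products and the full denominator grouped.
vec3 specular()
{
    float NoL = max(dot(fNormal, lightVector), 0.0);
    float NoV = max(dot(fNormal, eyeVector), 0.0);
    vec3 numerator = D(halfVector) * F(eyeVector, halfVector) * G(lightVector, eyeVector, halfVector);
    float denominator = 4.0 * NoL * NoV;
    return numerator / max(denominator, 0.000001);   // avoid division by zero at grazing angles
}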