There are two draw-object groups that I'd like to combine onto one canvas: a rain-effect layer and points forming a circle. These are their vertex shaders, taken from this website:
// Rain effect
const vs = `#version 300 es
uniform int numVerts;
uniform float time;
// hash function from https://www.shadertoy.com/view/4djSRW
// given a value between 0 and 1
// returns a value between 0 and 1 that *appears* kind of random
float hash(float p) {
vec2 p2 = fract(vec2(p * 5.3983, p * 5.4427));
p2 += dot(p2.yx, p2.xy + vec2(21.5351, 14.3137));
return fract(p2.x * p2.y * 95.4337);
}
void main() {
float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
float off = floor(time + u) / 1000.0; // changes once per second per vertex
float x = hash(u + off) * 2.0 - 1.0; // random position
float y = fract(time + u) * -2.0 + 1.0; // 1.0 -> -1.0
gl_Position = vec4(x, y, 0, 1);
gl_PointSize = 2.0;
}
`;
// Points forming a circle
const vs = `#version 300 es
uniform int numVerts;
uniform vec2 resolution;
#define PI radians(180.0)
void main() {
float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
float angle = u * PI * 2.0; // goes from 0 to 2PI
float radius = 0.8;
vec2 pos = vec2(cos(angle), sin(angle)) * radius;
float aspect = resolution.y / resolution.x;
vec2 scale = vec2(aspect, 1);
gl_Position = vec4(pos * scale, 0, 1);
gl_PointSize = 5.0;
}
`;
Both shaders assign different values to gl_Position and gl_PointSize, and I couldn't think of a way to combine them into one vertex shader without conflict. It doesn't help that the vertex data is generated entirely inside the shader instead of being passed in from a buffer.
Add a uniform variable to the shader that selects which algorithm to use, and branch on it in main():
#version 300 es
uniform int numVerts;
uniform float time;
uniform int mode; // 0 = rain, 1 = circle
uniform vec2 resolution; // needed by the circle branch
#define PI radians(180.0)
// hash function from the rain shader above
// [...]
void main() {
float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
vec2 pos;
float pointSize;
if (mode == 0) {
float off = floor(time + u) / 1000.0; // changes once per second per vertex
float x = hash(u + off) * 2.0 - 1.0; // random position
float y = fract(time + u) * -2.0 + 1.0; // 1.0 -> -1.0
pos = vec2(x, y);
pointSize = 2.0;
}
else {
float angle = u * PI * 2.0; // goes from 0 to 2PI
float radius = 0.8;
float aspect = resolution.y / resolution.x;
vec2 scale = vec2(aspect, 1);
pos = vec2(cos(angle), sin(angle)) * radius * scale;
pointSize = 5.0;
}
gl_Position = vec4(pos, 0, 1);
gl_PointSize = pointSize;
}
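With that in place, both groups can be drawn onto the same canvas with two draw calls, updating the uniforms in between: something like gl.uniform1i(modeLoc, 0) followed by gl.drawArrays(gl.POINTS, 0, rainVertCount) for the rain, then gl.uniform1i(modeLoc, 1), a matching update of numVerts, and a second gl.drawArrays for the circle (modeLoc and rainVertCount are illustrative names for the looked-up uniform location and the rain group's vertex count).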
I am coding a vertex and a fragment shader, trying to distort the surface of some water and then computing Blinn-Phong lighting on the surface. I can successfully compute the deformed positions with a simple noise function, but how can I find the distorted normals? Since the deformation isn't a linear transformation, I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c,           oc * axis.x * axis.y - axis.z * s,  oc * axis.z * axis.x + axis.y * s,  0.0,
            oc * axis.x * axis.y + axis.z * s,  oc * axis.y * axis.y + c,           oc * axis.y * axis.z - axis.x * s,  0.0,
            oc * axis.z * axis.x - axis.y * s,  oc * axis.y * axis.z + axis.x * s,  oc * axis.z * axis.z + c,           0.0,
            0.0,                                0.0,                                0.0,                                1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
mat4 R = rotationMatrix(vec3(0,0,1), deg);
return R * LV;
}
float random1(vec3 p) {
return fract(sin(dot(p, vec3(127.1, 311.7, 191.999)))*43758.5453);
}
vec3 random2( vec3 p ) {
return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)), dot(p, vec3(269.5, 183.3, 657.3)), dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
fs_Col = vs_Col;
fs_UVs = vs_UV;
fs_Anim = vs_Anim;
fs_T2O = vs_T2O;
mat3 invTranspose = mat3(u_ModelInvTr);
fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
vec4 modelposition = u_Model * vs_Pos;
if (vs_Anim != 0) { // if we want to animate this surface
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
if (water) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame; t oscillates over [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // now roughly [-0.02, 0.02]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
} else if (lava) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame; t oscillates over [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // now roughly [-0.02, 0.02]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
}
}
fs_dimVal = random1(modelposition.xyz/100.f);
fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
fs_Pos = modelposition;
gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is
// used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
return fract(sin(dot(p,vec3(127.1, 311.7, 191.999)))
*43758.5453);
}
float random1b(vec3 p) {
return fract(sin(dot(p,vec3(169.1, 355.7, 195.999)))
*95751.5453);
}
float mySmoothStep(float a, float b, float t) {
t = smoothstep(0, 1, t);
return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
vec3 pFract = fract(p);
float llb = random1(floor(p) + vec3(0,0,0));
float lrb = random1(floor(p) + vec3(1,0,0));
float ulb = random1(floor(p) + vec3(0,1,0));
float urb = random1(floor(p) + vec3(1,1,0));
float llf = random1(floor(p) + vec3(0,0,1));
float lrf = random1(floor(p) + vec3(1,0,1));
float ulf = random1(floor(p) + vec3(0,1,1));
float urf = random1(floor(p) + vec3(1,1,1));
float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
float amp = 0.5;
float freq = 4.0;
float sum = 0.0;
for(int i = 0; i < 8; i++) {
sum += cubicTriMix(p * freq) * amp;
amp *= 0.5;
freq *= 2.0;
}
return sum;
}
void main()
{
vec4 diffuseColor = texture(textureSampler, fs_UVs);
bool apply_lambert = true;
float specularIntensity = 0;
if (fs_Anim != 0) {
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
if (lava) {
// slowly gyrate texture and lighten and darken with random dimVal from vert shader
vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
diffuseColor = texture(textureSampler, movingUVs);
vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
apply_lambert = false;
} else if (water) {
// blend between 3 different points in texture to create a wavy subtle change over time
vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
diffuseColor = texture(textureSampler, fs_UVs);
vec4 altColor = texture(textureSampler, offsetUVs);
altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);
diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
offsetUVs -= 0.25f/16.f;
vec4 newColor = texture(textureSampler, offsetUVs);
diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
diffuseColor.a = 0.7;
// ----------------------------------------------------
// Blinn-Phong Shading
// ----------------------------------------------------
vec4 lightDir = normalize(fs_LightVec - fs_Pos);
vec4 viewDir = normalize(fs_CamPos - fs_Pos);
vec4 halfVec = normalize(lightDir + viewDir);
float shininess = 400.f;
specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0); // assign to the outer variable; redeclaring it with 'float' here would shadow it and the specular term would never reach the final color
}
}
// Calculate the diffuse term for Lambert shading
float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
// Avoid negative lighting values
diffuseTerm = clamp(diffuseTerm, 0, 1);
float ambientTerm = 0.3;
float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier
//to simulate ambient lighting. This ensures that faces that are not
//lit by our point light are not completely black.
vec3 col = diffuseColor.rgb;
// Compute final shaded color
if (apply_lambert) {
col = col * lightIntensity + col * specularIntensity;
}
// Check the rare, special case where we draw a face between two different transparent blocks as opaque
if (fs_T2O != 0) {
out_Col = vec4(col, 1.f);
} else {
out_Col = vec4(col, diffuseColor.a);
}
// distance fog!
vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing blinn-phong in the fragment shader, but I think it is wrong simple from the wrong normals. I think this can be done with some sort of tangent and cross product solution, but how can I know the tangent of the surface given we only know the vertex position?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
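Since the displacement here is procedural noise rather than a linear map, one practical route is finite differences: re-evaluate the displacement at two nearby points around the vertex, build tangent vectors from the differences, and take their cross product. A minimal sketch of that idea (displace and distortedNormal are hypothetical helpers, not part of the original shader; displace must reproduce the same offset logic the water branch applies to modelposition):
vec3 displace(vec3 p) {
    // same oscillating random offset the water branch applies to modelposition
    float t = (cos(u_Time * 0.05) + 1.0) / 2.0;
    vec3 temp = (random2(p) - 0.5) / 25.0;
    return mix(p - temp, p + temp, t);
}
vec4 distortedNormal(vec3 p, vec3 n, float eps) {
    // build an arbitrary tangent basis around the undistorted normal n
    vec3 t = normalize(abs(n.y) < 0.99 ? cross(n, vec3(0.0, 1.0, 0.0))
                                       : cross(n, vec3(1.0, 0.0, 0.0)));
    vec3 b = cross(n, t);
    // finite differences of the displaced surface along the two tangents
    vec3 p0 = displace(p);
    vec3 du = displace(p + eps * t) - p0;
    vec3 dv = displace(p + eps * b) - p0;
    return vec4(normalize(cross(du, dv)), 0.0);
}
fs_Nor would then be assigned from distortedNormal(...) inside the animated branches instead of the static invTranspose * vs_Nor. Two caveats: the cross-product order may need flipping so the result faces the same side as the original normal, and because random2 is a high-frequency hash rather than smooth noise, a small eps gives very noisy normals; a smooth noise (such as the cubicTriMix/fbm already in the fragment shader) behaves much better under finite differences.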
I'm trying to create a fish-eye effect, but only within a small radius around the mouse position. I've been able to modify this code to work around the mouse position (demo), but I can't figure out where the zooming is coming from. I expect the output to warp the image similarly to this (ignore the color inversion for the sake of this question):
Relevant code:
// Check if within given radius of the mouse
vec2 diff = myUV - u_mouse - 0.5;
float distance = dot(diff, diff); // square of distance, saves a square-root
// Add fish-eye
if(distance <= u_radius_squared) {
vec2 xy = 2.0 * (myUV - u_mouse) - 1.0;
float d = length(xy * maxFactor);
float z = sqrt(1.0 - d * d);
float r = atan(d, z) / PI;
float phi = atan(xy.y, xy.x);
myUV.x = d * r * cos(phi) + 0.5 + u_mouse.x;
myUV.y = d * r * sin(phi) + 0.5 + u_mouse.y;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor.rgb = tex;
This is my first shader, so other improvements besides fixing this issue are also welcome.
Compute the vector from the current fragment to the mouse and the length of the vector:
vec2 diff = myUV - u_mouse;
float distance = length(diff);
The new texture coordinate is the sum of the mouse position and the scaled direction vector:
myUV = u_mouse + normalize(diff) * u_radius * f(distance/u_radius);
For instance:
precision mediump float;
uniform sampler2D tMap;
uniform float u_radius;
uniform vec2 u_mouse;
varying vec2 vUV; // texture coordinate from the vertex shader (name assumed)
#define PI 3.141592653589793
void main()
{
    vec2 myUV = vUV; // local copy, since the coordinate is modified below
    vec2 diff = myUV - u_mouse;
    float distance = length(diff);
    if (distance <= u_radius)
    {
        // eases from 0 at the mouse position to 1 at the edge of the radius
        float scale = 1.0 - cos(distance / u_radius * PI * 0.5);
        myUV = u_mouse + normalize(diff) * u_radius * scale;
    }
    vec3 tex = texture2D(tMap, myUV).rgb;
    gl_FragColor = vec4(tex, 1.0);
}
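A nice property of this particular easing function: at distance == u_radius the factor 1.0 - cos(PI * 0.5) is exactly 1, so myUV works out to u_mouse + diff, i.e. the untouched texture coordinate. The distorted region therefore meets the rest of the image without a visible seam.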
I am trying to implement atmospheric scattering in GLSL version 4.10. I am adapting the shaders from this Shadertoy shader: https://www.shadertoy.com/view/lslXDr. The atmosphere in my program is created from a scaled version of the planet sphere.
I have the actual scattering equations working, but the inner radius of the atmosphere does not line up with the outer radius of the sphere for most camera positions. I know this is from the radius of the atmosphere being bigger than the planet sphere, but I cannot seem to get it to scale right.
My problem is best illustrated here. The model is scaled up in these pictures. As can be seen, the atmosphere inner radius does not match the radius of the planet (the dark blue sphere).
Here the model is scaled and translated. The atmosphere is off center from the camera and the inner atmosphere is still not lined up with the planet.
Here is the vertex shader, which is essentially a pass-through shader:
#version 410
in vec4 vPosition;
in vec3 vNormal;
out vec3 fPosition;
out mat3 m;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
fPosition = vec3(vPosition);
m = mat3(model);
gl_Position = projection*view*model*vPosition;
}
And the fragment shader.
#version 410
uniform float time;
uniform vec3 camPosition;
uniform float fInnerRadius;
uniform float fOuterRadius;
in vec3 fPosition;
in mat3 m;
out vec4 FragColor;
const float PI = 3.14159265359;
const float degToRad = PI / 180.0;
const float MAX = 10000.0;
float K_R = 0.166;
const float K_M = 0.0025;
const float E = 14.3;
const vec3 C_R = vec3(0.3, 0.7, 1.0);
const float G_M = -0.85;
float SCALE_H = 4.0 / (fOuterRadius - fInnerRadius);
float SCALE_L = 1.0 / (fOuterRadius - fInnerRadius);
const int numOutScatter = 10;
const float fNumOutScatter = 10.0;
const int numInScatter = 10;
const float fNumInScatter = 10.0;
vec3 rayDirection(vec3 camPosition) {
vec3 ray = m*fPosition - camPosition;
float far = length(ray);
return ray /= far;
}
vec2 rayIntersection(vec3 p, vec3 dir, float radius ) {
float b = dot( p, dir );
float c = dot( p, p ) - radius * radius;
float d = b * b - c;
if ( d < 0.0 ) {
return vec2( MAX, -MAX );
}
d = sqrt( d );
float near = -b - d;
float far = -b + d;
return vec2(near, far);
}
// Mie phase function
// g : ( -0.75, -0.999 )
// F = (3 * (1 - g^2) / (2 * (2 + g^2))) * ((1 + c^2) / (1 + g^2 - 2*g*c)^(3/2))
float miePhase( float g, float c, float cc ) {
float gg = g * g;
float a = ( 1.0 - gg ) * ( 1.0 + cc );
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt( b );
b *= 2.0 + gg;
return 1.5 * a / b;
}
// Rayleigh phase function
// g : 0
// F = 3/4 * ( 1 + c^2 )
float rayleighPhase( float cc ) {
return 0.75 * ( 1.0 + cc );
}
float density(vec3 p) {
return exp(-(length(p) - fInnerRadius) * SCALE_H);
}
float optic(vec3 p, vec3 q) {
vec3 step = (q - p) / fNumOutScatter;
vec3 v = p + step * 0.5;
float sum = 0.0;
for(int i = 0; i < numOutScatter; i++) {
sum += density(v);
v += step;
}
sum *= length(step)*SCALE_L;
return sum;
}
vec3 inScatter(vec3 o, vec3 dir, vec2 e, vec3 l) {
float len = (e.y - e.x) / fNumInScatter;
vec3 step = dir * len;
vec3 p = o + dir * e.x;
vec3 v = p + dir * (len * 0.5);
vec3 sum = vec3(0.0);
for(int i = 0; i < numInScatter; i++) {
vec2 f = rayIntersection(v, l, fOuterRadius);
vec3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u))*(PI * 4.0);
sum += density(v)* exp(-n * ( K_R * C_R + K_M ));
v += step;
}
sum *= len * SCALE_L;
float c = dot(dir, -l);
float cc = c * c;
return sum * ( K_R * C_R * rayleighPhase( cc ) + K_M * miePhase( G_M, c, cc ) ) * E;
}
void main (void)
{
vec3 dir = rayDirection(vec3(camPosition.x, 0.0, camPosition.z));
vec3 eye = vec3(camPosition.x, 0.0, camPosition.z);
vec3 l = normalize(vec3(0.0, 0.0, 1.0));
vec2 e = rayIntersection(eye, dir, fOuterRadius);
if ( e.x > e.y ) {
discard;
}
vec2 f = rayIntersection(eye, dir, fInnerRadius);
e.y = min(e.y, f.x);
vec3 I = inScatter(eye, dir, e, l);
FragColor = vec4(I, 1.0);
}
If needed here is the code that draws the atmosphere. The code that draws the planet has essentially the same transformations sans the scaleFactor.
void drawAtmosphere()
{
glUseProgram(atmosphereShader);
v = getViewMatrix();
vec3 Position = getCameraPosition();
float scaleFactor = 1.25;
m = multiplymat4(translate(0.0, 0.0, -10), scale(fScale*scaleFactor));
float fOuter = (fScale*scaleFactor);
float fInner = fScale;
glUniform1f(glGetUniformLocation(atmosphereShader, "fInnerRadius"), fInner);
glUniform1f(glGetUniformLocation(atmosphereShader, "fOuterRadius"), fOuter);
glUniform3f(glGetUniformLocation(atmosphereShader, "camPosition"), Position.x, Position.y, Position.z);
glUniform1f(glGetUniformLocation(atmosphereShader, "time"), glfwGetTime());
initMVP(atmosphereShader, m, v);
glBindVertexArray (atmosphereVAO);
glDrawArrays( GL_TRIANGLES, 0, planet.vertexNumber);
glBindVertexArray(0);
}
Any help, or anything that can point me in the right direction is appreciated.
Found it: the problem was caused by an incorrect calculation of the camera position and by not taking the model space of the object into account. I uploaded a stripped-down version of the code here.
Hopefully this will help anyone trying to implement Sean O'Neil's atmosphere code.
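The stripped-down code itself isn't reproduced here, but the posted shader shows two spots consistent with that diagnosis: rayDirection() rebuilds the ray with m = mat3(model), which silently drops the model translation, and main() zeroes out camPosition.y. A minimal sketch of the kind of correction this implies, assuming the world-space position is passed from the vertex shader and the planet's world-space centre is supplied as a uniform (fWorldPos and sphereCenter are illustrative names, not from the original code):
// vertex shader: pass the full world-space position instead of mat3(model)
fWorldPos = vec3(model * vPosition);
// fragment shader:
in vec3 fWorldPos;
uniform vec3 sphereCenter; // world-space centre of the planet, e.g. (0, 0, -10) from drawAtmosphere
void main(void)
{
    // express the camera relative to the planet so rayIntersection's
    // origin-centred sphere assumption holds
    vec3 eye = camPosition - sphereCenter;
    vec3 dir = normalize(fWorldPos - camPosition); // no zeroed y component
    vec2 e = rayIntersection(eye, dir, fOuterRadius);
    if (e.x > e.y) discard;
    vec2 f = rayIntersection(eye, dir, fInnerRadius);
    e.y = min(e.y, f.x);
    FragColor = vec4(inScatter(eye, dir, e, normalize(vec3(0.0, 0.0, 1.0))), 1.0);
}
Everything downstream (density, optic, inScatter) already measures length(p) from the origin, so working in this planet-centred frame keeps those functions unchanged; the fInnerRadius/fOuterRadius uniforms are already world-space sizes.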
I have a volume rendering program written with OpenGL and GLSL. Now I need to calculate a fragment's depth value. The following code is my fragment shader:
#version 120
// ... some needed variable declarations ...
float calculateDepthValue(float t, float entryPointsDepth, float exitPointsDepth)
{
// assign front value given in windows coordinates
float zw_front = entryPointsDepth;
// and convert it into eye coordinates
float ze_front = 1.0 / ((zw_front - const_to_z_e_1)*const_to_z_e_2);
// assign back value given in windows coordinates
float zw_back = exitPointsDepth;
// and convert it into eye coordinates
float ze_back = 1.0 / ((zw_back - const_to_z_e_1)*const_to_z_e_2);
// interpolate in eye coordinates
float ze_current = ze_front + t*(ze_back - ze_front);
// convert back to window coordinates
float zw_current = (1.0 / ze_current)*const_to_z_w_1 + const_to_z_w_2;
return zw_current;
}
float getDepthValue(float t, float tEnd, float entryPointsDepth, float exitPointsDepth)
{
if (t >= 0.0)
return calculateDepthValue(t / tEnd, entryPointsDepth, exitPointsDepth);
else
return 1.0;
}
vec4 compositeDVR(in vec4 curResult, in vec4 color,in float t, in float pvar,inout float tDepth)
{
vec4 result = curResult;
// apply opacity correction to accomodate for variable sampling intervals
color.a = 1.0 - pow(1.0 - color.a, pvar);
result.rgb = result.rgb + (1.0 - result.a) * color.a * color.rgb;
result.a = result.a + (1.0 - result.a) * color.a;
// save first hit ray parameter for depth value calculation
if (tDepth < 0.0)
tDepth = t;
return result;
}
void main()
{
vec2 p = gl_FragCoord.xy * screenDimRCP_;
vec3 start = texture2D(frontTex, p).rgb;
vec3 end = texture2D(texBack, p).rgb;
float entryDepth = texture2D(entryPointsDepth_, p).z;
float exitDepth = texture2D(exitPointsDepth_, p).z;
// ... some needed code ...
lookup = texture1D(globalTex, sample);
if (lookup.a > 0.0)
{
lookup.rgb = phongShading(N, texToPhysicalPos, position_, cPos, lookup.rgb, lookup.rgb, vec3(1.0, 1.0, 1.0));
//compositeDVR
result = compositeDVR(result, lookup, t, powAlp, tDepth);
}
gl_FragDepth = getDepthValue(tDepth, tEnd, entryDepth, exitDepth);
gl_FragColor = result;
}
In the fragment shader, some functions come from Voreen's code. My program's result is that gl_FragDepth always equals 1, which is wrong. Can anyone help?
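Two things are worth checking given that gl_FragDepth is constantly 1. First, getDepthValue returns exactly 1.0 whenever tDepth is still negative, i.e. when compositeDVR never ran because no sample had lookup.a > 0, so a constant 1 can simply mean the first hit is never recorded. Second, the conversion constants have to match the projection actually in use. A sketch of a self-consistent set, assuming a standard perspective projection with the default [0, 1] window depth range (n and f here are placeholders for your real near and far clip distances):
// eye-space distance from window-space depth: ze = 1.0 / ((zw - c1) * c2)
float const_to_z_e_1 = 0.5 + 0.5 * (f + n) / (f - n);
float const_to_z_e_2 = (n - f) / (f * n);
// window-space depth from eye-space distance: zw = (1.0 / ze) * c3 + c4
float const_to_z_w_1 = (f * n) / (n - f);
float const_to_z_w_2 = 0.5 + 0.5 * (f + n) / (f - n);
If these uniforms are never uploaded (and so default to zero), the division in calculateDepthValue degenerates and the clamped depth output can easily end up pinned at 1.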
Over the past 2-3 weeks, I've been learning about Physically Based Shading, and I just cannot wrap my head around some of the problems I'm having.
Fragment Shader
#version 430
#define PI 3.14159265358979323846
// Inputs
in vec3 inputNormal;
vec3 fNormal;
// Material
float reflectance = 1.0; // 0 to 1
float roughness = 0.5;
vec3 specularColor = vec3(1.0, 1.0, 1.0); // f0
// Values
vec3 lightVector = vec3(1, 1, 1); // Light (l)
vec3 eyeVector = vec3(2.75, 1.25, 1.25); // Camera (v)
vec3 halfVector = normalize(lightVector + eyeVector); // L + V / |L + V|
out vec4 fColor; // Output Color
// Specular Functions
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
float alpha = roughness * roughness;
float alpha2 = alpha * alpha;
float NoH = dot(fNormal, h);
float finalTerm = ((NoH * NoH) * (alpha2 - 1.0) + 1.0);
return vec3(alpha2 / (PI * (finalTerm * finalTerm)));
}
vec3 Gsub(vec3 v) // Sub Function of G
{
float k = ((roughness + 1.0) * (roughness + 1.0)) / 8;
return vec3(dot(fNormal, v) / ((dot(fNormal, v)) * (1.0 - k) + k));
}
vec3 G(vec3 l, vec3 v, vec3 h) // Geometric Attenuation Term - Schlick Modified (k = a/2)
{
return Gsub(l) * Gsub(v);
}
vec3 F(vec3 v, vec3 h) // Fresnel - Schlick Modified (Spherical Gaussian Approximation)
{
vec3 f0 = specularColor; // right?
return f0 + (1.0 - f0) * pow(2, (-5.55473 * (dot(v, h)) - 6.98316) * (dot(v, h)));
}
vec3 specular()
{
return (D(halfVector) * F(eyeVector, halfVector) * G(lightVector, eyeVector, halfVector)) / 4 * ((dot(fNormal, lightVector)) * (dot(fNormal, eyeVector)));
}
vec3 diffuse()
{
float NoL = dot(fNormal, lightVector);
vec3 result = vec3(reflectance / PI);
return result * NoL;
}
void main()
{
fNormal = normalize(inputNormal);
fColor = vec4(diffuse() + specular(), 1.0);
//fColor = vec4(D(halfVector), 1.0);
}
So far I have been able to fix up some things and now I get a better result.
However it now seems clear that the highlight is way too big; this originates from the normal distribution function (Specular D).
Your coding of GGX/Trowbridge-Reitz is wrong:
vec3 NxH = fNormal * h;
The star * denotes a component-wise product here, where what you want is a dot product.
Also
float alphaTerm = (alpha * alpha - 1.0) + 1.0;
This is not correct, since the formula multiplies (n.m)² by (alpha * alpha - 1.0) before adding 1.0; your formula is just equal to alpha * alpha!
Try:
// Specular
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
float alpha = roughness * roughness;
float NxH = dot(fNormal,h);
float alpha2 = alpha*alpha;
float t = ((NxH * NxH) * (alpha2 - 1.0) + 1.0);
return vec3(alpha2 / (PI * t * t)); // wrapped in vec3() to match the declared return type
}
In many other places you use * where a dot product is needed; you need to correct all of these. Also, double-check your formulas, as many seem incorrect.
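Following that advice, here is a hedged reconstruction (not code from the answer) of what the specular term can look like once the products are dot products, the vectors are normalized, and the Cook-Torrance denominator is parenthesised correctly; note that the question's / 4 * (...) divides by 4 and then multiplies by the dot products because of operator precedence:
vec3 specular()
{
    vec3 l = normalize(lightVector);
    vec3 v = normalize(eyeVector);
    vec3 h = normalize(l + v);
    float NoL = max(dot(fNormal, l), 0.0);
    float NoV = max(dot(fNormal, v), 0.0);
    // the whole 4 * NoL * NoV factor divides the numerator; max() guards
    // against division by zero at grazing angles
    return (D(h) * F(v, h) * G(l, v, h)) / max(4.0 * NoL * NoV, 1e-4);
}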