I need to get something like:
the close-up
zoomed out
So far, I have something like:
Fragment shader (full source)
Vertex shader (full source)
The most obvious problem is the bump map ... it's flat. Why is that? The relevant code (in my opinion):
vec2 c = TileDensity * gl_TexCoord[0].st;
vec2 p = fract( c ) - vec2( 0.5 );
// Some useful eye-space vectors.
vec3 ecNNormal = normalize( ecNormal );
vec3 ecViewVec = -normalize( ecPosition );
vec3 N = ecNNormal;
vec3 B = normalize( cross( N, ecTangent ) );
vec3 T = cross( B, N );
vec3 tanPerturbedNormal; // The perturbed normal vector in tangent space of fragment.
vec3 ecPerturbedNormal; // The perturbed normal vector in eye space.
vec3 ecReflectVec; // The mirror reflection vector in eye space.
...
vec3 lightPos = vec3( gl_LightSource[0].position ) / gl_LightSource[0].position.w;
vec3 lightVec = normalize( lightPos - ecPosition );
vec3 halfVector = normalize( lightVec + ecViewVec );
...
if (fractionalPart.s < TubeRadius
|| fractionalPart.s > 1-TubeRadius
|| fractionalPart.t < TubeRadius
|| fractionalPart.t > 1-TubeRadius) {
vec3 i = normalize(ecPosition);
vec3 n = normalize(vec3(p.x, p.y, 1.0));
tanPerturbedNormal.x = dot(T, n);
tanPerturbedNormal.y = dot(B, n);
tanPerturbedNormal.z = dot(N, n);
ecPerturbedNormal = n;
vec3 tanLightPos;
tanLightPos.x = dot(T, lightPos); // LightDir?
tanLightPos.y = dot(B, lightPos);
tanLightPos.z = dot(N, lightPos);
tanLightPos = normalize(tanLightPos);
ecReflectVec = reflect(-tanLightPos, tanPerturbedNormal);
float Ratio = F + (1.0 - F) * pow((1.0 - dot(-i, n)), FresnelPower);
vec3 reflectColor = vec3(textureCube(EnvMap, ecReflectVec));
gl_FragColor = vec4(reflectColor, 1.0);
}
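For reference, a generic sketch of the usual pattern for an environment-map lookup with a perturbed normal, built on the variable names above and kept entirely in eye space (this is only an illustration, not a fix verified against the full shader sources linked above; if EnvMap is defined in world space, the reflected vector would additionally need rotating out of eye space):
// Generic sketch: rotate the tangent-space bump normal back into eye space
// with the TBN basis, then reflect the eye-space view direction about it,
// so both arguments of reflect() and the cube-map lookup share one space.
vec3 n_tan = normalize( vec3( p.x, p.y, 1.0 ) );                   // bump normal, tangent space
vec3 n_ec  = normalize( n_tan.x * T + n_tan.y * B + n_tan.z * N ); // same normal, eye space
vec3 r_ec  = reflect( -ecViewVec, n_ec );                          // mirror direction, eye space
gl_FragColor = vec4( vec3( textureCube( EnvMap, r_ec ) ), 1.0 );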
This is the shader I am using to do chroma keying. The shader works well, but I need to feather the edges of the chroma mask.
How can I do that?
#version 430 core
uniform sampler2D u_tex;
vec4 keyRGBA = vec4(86.0 / 255.0 , 194.0 / 255.0, 46.0 / 255.0 , 1.0); // key color as rgba
vec2 keyCC; // the CC part of YCC color model of key color
uniform vec2 rangeSpill = vec2(0.1, .52); // the smoothstep range for spill detection
uniform vec2 range = vec2(0.05, 0.21); // the smoothstep range for chroma detection
in vec2 texCoord;
out vec4 FragColor;
vec2 RGBToCC(vec4 rgba) {
float Y = 0.299 * rgba.r + 0.587 * rgba.g + 0.114 * rgba.b;
return vec2((rgba.b - Y) * 0.565, (rgba.r - Y) * 0.713);
}
vec2 RGBAToCC (float r, float g, float b) {
float y = 0.299 * r + 0.587 * g + 0.114 * b;
return vec2((b - y) * 0.565, (r - y) * 0.713);
}
vec3 RGBToYCC( vec3 col )
{
float y = 0.299 * col.r + 0.587 * col.g + 0.114 * col.b;
return vec3( y ,(col.b - y) * 0.565, (col.r - y) * 0.713);
}
vec3 YCCToRGB( vec3 col )
{
float R = col.x + (col.z - 128) * 1.40200;
float G = col.x + (col.y - 128) * -0.34414 + (col.z - 128) * -0.71414;
float B = col.x + (col.y - 128) * 1.77200;
return vec3( R , G , B);
}
vec3 hueShift( vec3 color, float hueAdjust ){
vec3 kRGBToYPrime = vec3 (0.299, 0.587, 0.114);
vec3 kRGBToI = vec3 (0.596, -0.275, -0.321);
vec3 kRGBToQ = vec3 (0.212, -0.523, 0.311);
vec3 kYIQToR = vec3 (1.0, 0.956, 0.621);
vec3 kYIQToG = vec3 (1.0, -0.272, -0.647);
vec3 kYIQToB = vec3 (1.0, -1.107, 1.704);
float YPrime = dot (color, kRGBToYPrime);
float I = dot (color, kRGBToI);
float Q = dot (color, kRGBToQ);
float hue = atan (Q, I);
float chroma = sqrt (I * I + Q * Q);
hue += hueAdjust;
Q = chroma * sin (hue);
I = chroma * cos (hue);
vec3 yIQ = vec3 (YPrime, I, Q);
return vec3( dot (yIQ, kYIQToR), dot (yIQ, kYIQToG), dot (yIQ, kYIQToB) );
}
float GetYComponent( vec3 color){
vec3 kRGBToYPrime = vec3 (0.299, 0.587, 0.114);
vec3 kRGBToI = vec3 (0.596, -0.275, -0.321);
vec3 kRGBToQ = vec3 (0.212, -0.523, 0.311);
vec3 kYIQToR = vec3 (1.0, 0.956, 0.621);
vec3 kYIQToG = vec3 (1.0, -0.272, -0.647);
vec3 kYIQToB = vec3 (1.0, -1.107, 1.704);
float YPrime = dot (color, kRGBToYPrime);
return YPrime;
}
void main() {
vec4 src1Color = texture2D(u_tex, texCoord);
keyCC = RGBAToCC( keyRGBA.r , keyRGBA.g , keyRGBA.b );
vec2 CC = RGBToCC(src1Color);
float mask = sqrt(pow(keyCC.x - CC.x, 2.0) + pow(keyCC.y - CC.y, 2.0));
mask = smoothstep(rangeSpill.x + 0.5, rangeSpill.y, mask);
if (mask > 0.0 && mask < .8)
{
src1Color = vec4( hueShift(src1Color.rgb , 1.8 ) , src1Color.a ); // spill remover
}
// Now the spill is removed do the chroma
vec2 CC2 = RGBToCC(src1Color);
float mask2 = sqrt(pow(keyCC.x - CC2.x, 2.0) + pow(keyCC.y - CC2.y, 2.0));
mask2 = smoothstep(range.x, range.y, mask2);
if (mask2 == 0.0) { discard; }
else if (mask2 == 1.0)
{
FragColor = vec4(src1Color.rgb , mask2);
}
else
{
vec4 col = max(src1Color - (1.0 - mask2) * keyRGBA, 0.0);
FragColor = vec4(hueShift(col.rgb , 0.3 ) , col.a); // do color correction
}
}
This is the base image.
This is the result after chroma keying.
Also, there is not much information available about chroma keying, so it would help if someone could also give some information about adding more detail in the shader.
Effectively, you need to extrude the areas where the chroma key matched. While you could just sample in a pattern (instead of a single point) in a single render pass, that is not very efficient.
Instead, write the mask to a 1-bit (or however many bits you would like for transparency) mask texture first. Then you can run a simple 1D shader in the X and Y directions over that mask to extrude the already excluded areas by a fixed amount. You need a temporary texture for ping-ponging either way, and splitting the X and Y dimensions requires far fewer samples in total.
For example, take the minimum opacity within a range of 5 px, or apply a Gaussian blur with a scale/clamp so that already fully transparent pixels stay fully transparent.
Ultimately, combine your final mask with the source image as usual; a sketch of one such separable pass follows.
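A minimal sketch of one such 1D pass, assuming a single-channel chroma mask already rendered to a texture and two FBOs to ping-pong between; u_mask, u_direction and u_texelSize are placeholder names, not from the code above:
#version 430 core
uniform sampler2D u_mask;      // single-channel mask from the keying pass
uniform vec2 u_direction;      // (1, 0) for the X pass, (0, 1) for the Y pass
uniform vec2 u_texelSize;      // 1.0 / mask resolution
in vec2 texCoord;
out vec4 FragColor;
void main() {
    // Take the minimum opacity in a 5-texel window so the keyed-out area
    // grows outward; swapping min() for a Gaussian-weighted average (clamped
    // so fully transparent pixels stay at 0) gives a soft ramp instead.
    float m = 1.0;
    for (int i = -2; i <= 2; ++i) {
        m = min(m, texture(u_mask, texCoord + u_direction * u_texelSize * float(i)).r);
    }
    FragColor = vec4(vec3(m), 1.0);
}
Run it once in X and once in Y, then multiply the feathered mask into the alpha of the source image as the final combine step.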
I've been following along with the OpenGL 4 Shading Language Cookbook and have gotten a teapot rendering with Bezier surfaces. The next step I'm attempting is to draw a wireframe over the surfaces using a geometry shader. The directions can be found here on pages 228-230. Following the code that is given, I've gotten the wireframe to display; however, I also have multiple fragments that flicker in different shades of my material color.
An image of this can be seen below.
I have narrowed down the possible issues and discovered that, for some reason, my triangle-height calculations are getting varying side lengths: if I hard-code the edge-distance values for each vertex of the triangle within the geometry shader, the teapot no longer flickers, but no wireframe is displayed either (variables ha, hb, hc in the geometry shader below).
I was wondering if anyone has run into this issue before or is aware of a workaround.
Below are some sections of my code:
Geometry Shader:
/*
* Geometry Shader
*
* CSCI 499, Computer Graphics, Colorado School of Mines
*/
#version 410 core
layout( triangles ) in;
layout( triangle_strip, max_vertices = 3 ) out;
out vec3 GNormal;
out vec3 GPosition;
out vec3 ghalfwayVec;
out vec3 GLight;
noperspective out vec3 GEdgeDistance;
in vec4 TENormal[];
in vec4 TEPosition[];
in vec3 halfwayVec[];
in vec3 TELight[];
uniform mat4 ViewportMatrix;
void main() {
// Transform each vertex into viewport space
vec3 p0 = vec3(ViewportMatrix * (gl_in[0].gl_Position / gl_in[0].gl_Position.w));
vec3 p1 = vec3(ViewportMatrix * (gl_in[1].gl_Position / gl_in[1].gl_Position.w));
vec3 p2 = vec3(ViewportMatrix * (gl_in[2].gl_Position / gl_in[2].gl_Position.w));
// Find the altitudes (ha, hb and hc)
float a = length(p1 - p2);
float b = length(p2 - p0);
float c = length(p1 - p0);
float alpha = acos( (b*b + c*c - a*a) / (2.0*b*c) );
float beta = acos( (a*a + c*c - b*b) / (2.0*a*c) );
float ha = abs( c * sin( beta ) );
float hb = abs( c * sin( alpha ) );
float hc = abs( b * sin( alpha ) );
// Send the triangle along with the edge distances
GEdgeDistance = vec3( ha, 0, 0 );
GNormal = vec3(TENormal[0]);
GPosition = vec3(TEPosition[0]);
gl_Position = gl_in[0].gl_Position;
EmitVertex();
GEdgeDistance = vec3( 0, hb, 0 );
GNormal = vec3(TENormal[1]);
GPosition = vec3(TEPosition[1]);
gl_Position = gl_in[1].gl_Position;
EmitVertex();
GEdgeDistance = vec3( 0, 0, hc );
GNormal = vec3(TENormal[2]);
GPosition = vec3(TEPosition[2]);
gl_Position = gl_in[2].gl_Position;
EmitVertex();
EndPrimitive();
ghalfwayVec = halfwayVec[0];
GLight = TELight[0];
}
Fragment Shader:
/*
* Fragment Shader
*
* CSCI 441, Computer Graphics, Colorado School of Mines
*/
#version 410 core
in vec3 ghalfwayVec;
in vec3 GLight;
in vec3 GNormal;
in vec3 GPosition;
noperspective in vec3 GEdgeDistance;
layout( location = 0 ) out vec4 FragColor;
uniform vec3 mDiff, mAmb, mSpec;
uniform float shininess;
uniform light {
vec3 lAmb, lDiff, lSpec, lPos;
};
// The mesh line settings
uniform struct LineInfo {
float Width;
vec4 Color;
} Line;
vec3 phongModel( vec3 pos, vec3 norm ) {
vec3 lightVec2 = normalize(GLight);
vec3 normalVec2 = -normalize(GNormal);
vec3 halfwayVec2 = normalize(ghalfwayVec);
float sDotN = max( dot(lightVec2, normalVec2), 0.0 );
vec4 diffuse = vec4(lDiff * mDiff * sDotN, 1);
vec4 specular = vec4(0.0);
if( sDotN > 0.0 ) {
specular = vec4(lSpec * mSpec * pow( max( 0.0, dot( halfwayVec2, normalVec2 ) ), shininess ),1);
}
vec4 ambient = vec4(lAmb * mAmb, 1);
vec3 fragColorOut = vec3(diffuse + specular + ambient);
// vec4 fragColorOut = vec4(0.0,0.0,0.0,0.0);
return fragColorOut;
}
void main() {
// /*****************************************/
// /******* Final Color Calculations ********/
// /*****************************************/
// The shaded surface color.
vec4 color=vec4(phongModel(GPosition, GNormal), 1.0);
// Find the smallest distance
float d = min( GEdgeDistance.x, GEdgeDistance.y );
d = min( d, GEdgeDistance.z );
// Determine the mix factor with the line color
float mixVal = smoothstep( Line.Width - 1, Line.Width + 1, d );
// float mixVal = 1;
// Mix the surface color with the line color
FragColor = vec4(mix( Line.Color, color, mixVal ));
FragColor.a = 1;
}
I ended up stumbling across the solution to my issue. In the geometry shader, I was assigning the halfway vector and the light vector after ending the primitive, so the values of these vectors were never correctly sent to the fragment shader. Since no data was given to the fragment shader, garbage values were used and the Phong shading model computed the fragment color from random values. Moving the two lines that follow EndPrimitive() to the top of the main function in the geometry shader resolved the issue.
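For reference, a minimal sketch of how main looks with that fix applied (vertex 0 only; the altitude math and the other two vertices stay exactly as in the listing above):
void main() {
    // ... viewport-space points and ha, hb, hc computed exactly as before ...
    // The fix: write these before any EmitVertex() call. Output variables
    // become undefined once EmitVertex()/EndPrimitive() has run, so the old
    // placement after EndPrimitive() sent garbage to the fragment shader.
    // (Strictly, re-writing them before every EmitVertex() is safest.)
    ghalfwayVec = halfwayVec[0];
    GLight = TELight[0];
    GEdgeDistance = vec3( ha, 0, 0 );
    GNormal = vec3(TENormal[0]);
    GPosition = vec3(TEPosition[0]);
    gl_Position = gl_in[0].gl_Position;
    EmitVertex();
    // ... vertices 1 and 2 emitted exactly as before ...
    EndPrimitive();   // nothing is assigned after this point
}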
I am trying to implement screen space reflection with DDA.
http://casual-effects.blogspot.jp/2014/08/screen-space-ray-tracing.html
But it is not working well.
Below is my shader code.
This is the vertex shader code.
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color_0;
layout(location = 2) in vec3 normal;
uniform mat4 mtxL2W; // Local to World space.
uniform mat4 mtxW2C; // World to Clip space.
out vec4 varColor;
out vec3 varNormal;
void main()
{
gl_Position = mtxW2C * mtxL2W * position;
varColor = color_0;
varNormal = normalize(mtxL2W * vec4(normal, 0)).xyz;
}
This is the fragment shader code.
in vec4 varColor;
in vec3 varNormal;
layout(location = 0) out vec4 outColor;
uniform sampler2D s0; // color
uniform sampler2D s1; // linear depth.
uniform mat4 mtxW2V; // World to View(Camera) space.
uniform mat4 mtxV2C; // View(Camera) to Clip space.
uniform mat4 mtxC2V; // Clip to View(Camera) space.
uniform mat4 mtxV2W; // View(Camera) to World space.
uniform vec4 camPos; // Camera position (World space).
uniform float nearPlaneZ;
uniform float maxDistance;
uniform float zThickness;
uniform int maxSteps;
uniform float stride;
float squaredLength(vec2 a, vec2 b)
{
a -= b;
return dot(a, a);
}
bool intersectsDepthBuffer(float z, float minZ, float maxZ)
{
z += zThickness;
return (maxZ >= z) && (minZ - zThickness <= z);
}
bool traceScreenSpaceRay(
vec3 csOrig,
vec3 csDir,
out vec2 hitPixel,
out vec3 hitPoint)
{
// Clip to the near plane.
float rayLength = (csOrig.z + csDir.z * maxDistance) < nearPlaneZ
? (nearPlaneZ - csOrig.z) / csDir.z
: maxDistance;
vec3 csEndPoint = csOrig + csDir * rayLength;
// Project into homogeneous clip space.
vec4 H0 = mtxV2C * vec4(csOrig, 1);
vec4 H1 = mtxV2C * vec4(csEndPoint, 1);
float k0 = 1.0 / H0.w;
float k1 = 1.0 / H1.w;
// The interpolated homogeneous version of the camera-space points.
vec3 Q0 = csOrig * k0;
vec3 Q1 = csEndPoint * k1;
// Screen space point.
vec2 P0 = H0.xy * k0;
vec2 P1 = H1.xy * k1;
// [-1, 1] -> [0, 1]
P0 = P0 * 0.5 + 0.5;
P1 = P1 * 0.5 + 0.5;
ivec2 texsize = textureSize(s0, 0);
P0 *= vec2(texsize.xy);
P1 *= vec2(texsize.xy);
P1.x = min(max(P1.x, 0), texsize.x);
P1.y = min(max(P1.y, 0), texsize.y);
// If the line is degenerate, make it cover at least one pixel to avoid handling zero-pixel extent as a special case later.
P1 += squaredLength(P0, P1) < 0.0001
? vec2(0.01, 0.01)
: vec2(0.0);
vec2 delta = P1 - P0;
// Permute so that the primary iteration is in x to collapse all quadrant-specific DDA cases later.
bool permute = false;
if (abs(delta.x) < abs(delta.y))
{
permute = true;
delta = delta.yx;
P0 = P0.yx;
P1 = P1.yx;
}
float stepDir = sign(delta.x);
float invdx = stepDir / delta.x;
// Track the derivatives of Q and k.
vec3 dQ = (Q1 - Q0) / invdx;
float dk = (k1 - k0) / invdx;
// y is slope.
// slope = (y1 - y0) / (x1 - x0)
vec2 dP = vec2(stepDir, delta.y / invdx);
// Adjust end condition for iteration direction
float end = P1.x * stepDir;
int stepCount = 0;
float prevZMaxEstimate = csOrig.z;
float rayZMin = prevZMaxEstimate;
float rayZMax = prevZMaxEstimate;
float sceneZMax = rayZMax + 100.0f;
dP *= stride;
dQ *= stride;
dk *= stride;
vec4 PQk = vec4(P0, Q0.z, k0);
vec4 dPQk = vec4(dP, dQ.z, dk);
vec3 Q = Q0;
for (;
((PQk.x * stepDir) <= end)
&& (stepCount < maxSteps)
&& !intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax)
&& (sceneZMax != 0.0);
++stepCount)
{
rayZMin = prevZMaxEstimate;
rayZMax = (PQk.z + dPQk.z * 0.5) / (PQk.w + dPQk.w * 0.5);
prevZMaxEstimate = rayZMax;
if (rayZMin > rayZMax) {
float tmp = rayZMin;
rayZMin = rayZMax;
rayZMax = tmp;
}
hitPixel = permute ? PQk.yx : PQk.xy;
//hitPixel.y = texsize.y - hitPixel.y;
sceneZMax = texelFetch(s1, ivec2(hitPixel), 0).r;
PQk += dPQk;
}
// Advance Q based on the number of steps
Q.xy += dQ.xy * stepCount;
hitPoint = Q * (1.0f / PQk.w);
hitPoint = vec3(sceneZMax, rayZMin, rayZMax);
return intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax);
}
void main()
{
vec3 normal = normalize(varNormal);
float linearDepth = texelFetch(s1, ivec2(gl_FragCoord.xy), 0).r;
ivec2 texsize = textureSize(s0, 0);
// Ray origin is camera origin.
vec3 rayOrg = camPos.xyz;
// Screen coordinate.
vec4 pos = vec4(gl_FragCoord.xy / texsize, 0, 1);
// [0, 1] -> [-1, 1]
pos.xy = pos.xy * 2.0 - 1.0;
// Screen-space -> Clip-space
pos.xy *= linearDepth;
// Clip-space -> View-space
pos = mtxC2V * pos;
pos.z = linearDepth;
// View-space -> World-space.
vec3 worldPos = (mtxV2W * vec4(pos.xyz, 1)).xyz;
// Compute ray direction.
// From ray origin to world position.
vec3 rayDir = normalize(worldPos - rayOrg);
// Compute reflection vector.
vec3 refDir = reflect(rayDir, normal);
// Reflection vector origin is world position.
vec3 refOrg = worldPos;
// Transform to view coordinate.
refOrg = (mtxW2V * vec4(refOrg, 1)).xyz;
refDir = (mtxW2V * vec4(refDir, 0)).xyz;
vec2 hitPixel = vec2(0, 0);
vec3 hitPoint = vec3(0, 0, 0);
// Trace screen space ray.
bool isIntersect = traceScreenSpaceRay(refOrg, refDir, hitPixel, hitPoint);
vec2 uv = hitPixel / texsize.xy;
if (uv.x > 1.0 || uv.x < 0.0f || uv.y > 1.0 || uv.y < 0.0) {
isIntersect = false;
}
if (isIntersect) {
outColor = varColor * texture(s0, uv);
}
else {
outColor = vec4(1, 1, 1, 1);
}
}
I think Q0.z and Q1.z are always 1.0, so I think dQ.z is also always 0.0. And dk is always negative.
What is wrong?
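For what it's worth, that observation about Q0.z is consistent with the math in the trace function. A short sketch, assuming a standard perspective mtxV2C whose last row is (0, 0, -1, 0) (my assumption; the exact sign depends on your projection):
// H = mtxV2C * vec4(csPoint, 1)   =>  H.w = -csPoint.z
// k = 1.0 / H.w                   =>  k   = -1.0 / csPoint.z
// Q = csPoint * k                 =>  Q.z = csPoint.z * k = -1.0 (constant)
//
// So Q0.z and Q1.z are the same constant and dQ.z == 0 by construction.
// The camera-space depth along the ray is recovered from the interpolated
// pair instead, as (PQk.z + dPQk.z * 0.5) / (PQk.w + dPQk.w * 0.5), which
// is Q.z / k, not Q.z alone.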
I've written an LWJGL application that uses .obj files, reads them, and displays them (using display lists).
On my NVIDIA graphics card everything runs fine, but on an AMD graphics card I can't see the objects.
How I give data to the shaders:
glUseProgram(shaderEngine.obj);
glUniform1i(glGetUniformLocation(shaderEngine.obj, "inOrangeJuice"), inOrangeJuice ? 1 : 0);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.standard, "projectionMatrix"), camera.projectionMatrix);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.obj, "viewMatrix"), camera.viewMatrix);
ModelMatrix is loaded:
shaderEngine.createModelMatrix(new Vector3f(x, y, z), new Vector3f(rx, ry, rz), new Vector3f(1, 1, 1));
shaderEngine.loadModelMatrix(shaderEngine.obj);
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
uniform int inOrangeJuice;
bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
vec3 u = B - A;
vec3 v = C - A;
vec3 w = P - A;
vec3 vCrossW = cross(v, w);
vec3 vCrossU = cross(v, u);
if(dot(vCrossW, vCrossU) < 0)
{
return false;
}
vec3 uCrossW = cross(u, w);
vec3 uCrossV = cross(u, v);
if(dot(uCrossW, uCrossV) < 0)
{
return false;
}
float denom = length(uCrossV);
float r = length(vCrossW);
float t = length(uCrossW);
return (r + t <= 1);
}
vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
vec4 tl = texture2D(textureSampler, uv);
vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
vec2 f = fract( uv.xy * textureSize );
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
void main()
{
ivec2 textureSize2d = textureSize(tex,0);
textureSize = float(textureSize2d.x);
texelSize = 1.0 / textureSize;
//texture coordinate:
vec2 texCoord = (gl_TexCoord[0].st);
bool inOJ = false;
if(inOrangeJuice == 1)
{
float depth = gl_FragCoord.z / gl_FragCoord.w;//works only with perspective projection
depth = depth / 6;
if(depth > 1)
{
depth = 1;
}
inOJ = true;
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color * (1.0 - depth) + vec4(1.0, 0.5, 0.0, 1.0) * depth;
}
if(inOJ == false)
{
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
}
//Nothing is shown, inOrangeJuice should be 0
//gl_FragColor = vec4(inOrangeJuice,0,0,1);
//Always works:
//gl_FragColor = texture2D(tex, texCoord) * gl_Color;
}
How can I get this shader to have a smooth edge on the spotlight instead of a hard one? In addition, the shader has to cope with a variable value of GL_SPOT_CUTOFF. Note that not all of the lights are spotlights; GL_LIGHT0 is a point light.
varying vec3 N;
varying vec3 v;
#define MAX_LIGHTS 2
void main (void)
{
vec4 finalColour;
float spotEffect;
for (int i=0; i<MAX_LIGHTS; i++)
{
vec3 L = normalize(gl_LightSource[i].position.xyz - v);
vec3 E = normalize(-v);
vec3 R = normalize(-reflect(L,N));
spotEffect = dot(normalize(gl_LightSource[i].spotDirection),
normalize(-L));
if (spotEffect > gl_LightSource[i].spotCosCutoff) {
vec4 Iamb = gl_FrontLightProduct[i].ambient;
vec4 Idiff = gl_FrontLightProduct[i].diffuse * max(dot(N,L), 0.0);
Idiff = clamp(Idiff, 0.0, 1.0);
vec4 Ispec = gl_FrontLightProduct[i].specular
* pow(max(dot(R,E),0.0),0.3*gl_FrontMaterial.shininess);
Ispec = clamp(Ispec, 0.0, 1.0);
finalColour += Iamb + Idiff + Ispec;
}
}
gl_FragColor = gl_FrontLightModelProduct.sceneColor + finalColour;
}
The scene looks like this:
This shader from http://www.ozone3d.net/tutorials/glsl_lighting_phong_p3.php produces the soft spotlight edges you are after.
[Pixel_Shader]
varying vec3 normal, lightDir, eyeVec;
const float cos_outer_cone_angle = 0.8; // 36 degrees
void main (void)
{
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) +
(gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
vec3 L = normalize(lightDir);
vec3 D = normalize(gl_LightSource[0].spotDirection);
float cos_cur_angle = dot(-L, D);
float cos_inner_cone_angle = gl_LightSource[0].spotCosCutoff;
float cos_inner_minus_outer_angle =
cos_inner_cone_angle - cos_outer_cone_angle;
//****************************************************
// Don't need dynamic branching at all, precompute
// falloff(i will call it spot)
float spot = 0.0;
spot = clamp((cos_cur_angle - cos_outer_cone_angle) /
cos_inner_minus_outer_angle, 0.0, 1.0);
//****************************************************
vec3 N = normalize(normal);
float lambertTerm = max( dot(N,L), 0.0);
if(lambertTerm > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm * spot;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular * spot;
}
gl_FragColor = final_color;
}
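Since the question also asks about a variable GL_SPOT_CUTOFF and a mix of point and spot lights, here is a hedged sketch (not from the tutorial) of the same falloff written against the per-light fixed-function state; the 5-degree outer margin is an arbitrary choice:
float spotFalloff(int i, vec3 L)   // L points from the surface to the light
{
    // spotCutoff is 180 degrees for point lights: no cone, full intensity.
    if (gl_LightSource[i].spotCutoff >= 180.0)
        return 1.0;
    float cosInner = gl_LightSource[i].spotCosCutoff;
    // Arbitrary choice: put the outer cone 5 degrees beyond the cutoff.
    float cosOuter = cos(radians(min(gl_LightSource[i].spotCutoff + 5.0, 90.0)));
    float cosCur   = dot(normalize(gl_LightSource[i].spotDirection), normalize(-L));
    return clamp((cosCur - cosOuter) / max(cosInner - cosOuter, 1e-4), 0.0, 1.0);
}
Multiplying Idiff and Ispec by this factor in the question's loop, instead of gating them with the hard spotEffect > spotCosCutoff test, gives the same kind of soft edge as the tutorial code while following each light's own cutoff.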