WebGL Normal calculations from position texture - glsl

I am trying to create a procedural water puddle in WebGL with "water ripples" by vertex displacement.
The problem I'm having is that I get noise I can't explain.
Below is the first-pass vertex shader where I calculate the vertex positions, which I later render to a texture that I then use in the second pass.
void main() {
    float damping = 0.5;
    vNormal = normal;

    // wave radius
    float timemod = 0.55;
    float ttime = mod(time, timemod);
    float frequency = 2.0 * PI / waveWidth;
    float phase = frequency * 0.21;
    vec4 v = vec4(position, 1.0);

    // Loop through array of start positions
    for(int i = 0; i < 200; i++){
        float cCenterX = ripplePos[i].x;
        float cCenterY = ripplePos[i].y;
        vec2 center = vec2(cCenterX, cCenterY);

        if(center.x == 0.0 && center.y == 0.0)
            center = normalize(center);

        // wave width
        float tolerance = 0.005;
        radius = sqrt(pow(uv.x - center.x, 2.0) + pow(uv.y - center.y, 2.0));

        // Creating a ripple
        float w_height = (tolerance - min(tolerance, pow(ripplePos[i].z - radius*10.0, 2.0))) * (1.0 - ripplePos[i].z/timemod) * 5.82;

        // -2.07 at the end to keep the plane at the right height. Trial and error solution
        v.z += waveHeight * (1.0 + w_height/tolerance) / 2.0 - 2.07;
        vNormal = normal + v.z;
    }

    vPosition = v.xyz;
    gl_Position = projectionMatrix * modelViewMatrix * v;
}
And the first pass fragment shader that writes to the texture:
void main()
{
    vec3 p = normalize(vPosition);
    p.x = (p.x + 1.0) * 0.5;
    p.y = (p.y + 1.0) * 0.5;
    gl_FragColor = vec4(normalize(p), 1.0);
}
The second-pass vertex shader is a standard passthrough.
The second-pass fragment shader is where I try to calculate the normals to be used for the lighting calculations.
void main() {
    float w = 1.0 / 200.0;
    float h = 1.0 / 200.0;

    // Nearest neighbours
    vec3 p0 = texture2D(rttTexture, vUV).xyz;
    vec3 p1 = texture2D(rttTexture, vUV + vec2(-w, 0)).xyz;
    vec3 p2 = texture2D(rttTexture, vUV + vec2( w, 0)).xyz;
    vec3 p3 = texture2D(rttTexture, vUV + vec2( 0, h)).xyz;
    vec3 p4 = texture2D(rttTexture, vUV + vec2( 0, -h)).xyz;

    vec3 nVec1 = p2 - p0;
    vec3 nVec2 = p3 - p0;
    vec3 vNormal = cross(nVec1, nVec2);
    vec3 N = normalize(vNormal);

    float theZ = texture2D(rttTexture, vUV).r;
    //gl_FragColor = vec4(1., .0, 1., 1.);
    //gl_FragColor = texture2D(tDiffuse, vUV);
    gl_FragColor = vec4(vec3(N), 1.0);
}
The result is this:
The image displays the normal map, and the noise I'm referring to is the inconsistency of the blue.
Here is a live demonstration:
http://oskarhavsvik.se/jonasgerling_water_ripple/waterRTT-clean.html
I appreciate any tips and pointers, not only fixes for this problem but for the code in general; I'm here to learn.

After a brief look, it seems like your problem is in storing the x/y positions. Visualizing what actually ends up in the texture makes this clear:
gl_FragColor = vec4(vec3(p0*0.5+0.5), 1.0);
You don't need to store them anyway, because the texel position implicitly gives the x/y value. Just change your normal points to something like this...
vec3 p2 = vec3(1, 0, texture2D(rttTexture, vUV + vec2(w, 0)).z);
Rather than 1, 0 you will want to use a scale appropriate to the size of your displayed quad relative to the wave height. Anyway, the result now looks like this.
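For reference, the neighbouring sample points can all be built the same way, so the cross product mixes consistent units. This is only a sketch that reuses the question's w, h, rttTexture and vUV, reading just the stored height channel:
float hC = texture2D(rttTexture, vUV).z;  // centre height
vec3 p0 = vec3(0.0, 0.0, hC);
vec3 p2 = vec3(1.0, 0.0, texture2D(rttTexture, vUV + vec2(w, 0)).z);
vec3 p3 = vec3(0.0, 1.0, texture2D(rttTexture, vUV + vec2(0, h)).z);
vec3 N = normalize(cross(p2 - p0, p3 - p0));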
The height/z seems to be scaled by distance from the centre, so I went looking for a normalize() and removed it...
vec3 p = vPosition;
gl_FragColor = vec4(p*0.5+0.5, 1.0);
The normals now look like this...

Related

Drawing more than one of my glowing lights makes them share the light, can't split them

I am trying to create two independent glowing lights, but when I make the second one the light stretches between the two.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = (fragCoord - .5*iResolution.xy)/iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(uv.x, uv.y+0.5);
    vec2 glowPos2 = vec2(uv.x+0.5, uv.y+0.0);

    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos2);

    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}
uv is a position relative to the fragment currently being processed. So the position of a light source must not depend on uv. e.g.:
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
The distance between 2 points is the length of the vector from one point to the other. A vector between 2 points is calculated by subtracting one point from the other, but not by calculating the sum:
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
The result for glowCol1 and glowCol2 can become negative. Thus, one light source would negatively affect the other. You must clamp the result in the range [0, 1]:
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
Complete and working shader:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = uv * 2.0 - 1.0;
    uv.x *= iResolution.x / iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(0.5, 0.5);
    vec2 glowPos2 = vec2(-0.5, -0.5);

    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
    glowCol1 = clamp(glowCol1, 0.0, 1.0);
    glowCol2 = clamp(glowCol2, 0.0, 1.0);

    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}
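If you later need more than two lights, the same subtract/clamp/add pattern extends to a loop over an array. This is only a sketch; LIGHT_COUNT and lightPositions are names made up for illustration:
// at global scope (or at the top of mainImage):
#define LIGHT_COUNT 2
const vec2 lightPositions[LIGHT_COUNT] = vec2[](vec2(0.5, 0.5), vec2(-0.5, -0.5));

// inside mainImage, after computing uv, col, radius and glowSpeed as above:
for (int i = 0; i < LIGHT_COUNT; i++)
{
    float glow = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv - lightPositions[i]);
    col += vec3(clamp(glow, 0.0, 1.0), clamp(glow, 0.0, 1.0), 0.);
}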

Billboarding using Qt3D 2.0

I am looking for the best way to create a billboard in Qt3D. I would like a plane which faces the camera wherever it is and does not change size when the camera dollies forward or back. I have read how to do this using GLSL vertex and geometry shaders, but I am looking for the Qt3D way, unless custom shaders are the most efficient and best way of billboarding.
I have looked, and it appears I can set the matrix on a QTransform via properties, but it isn't clear to me how I would manipulate the matrix, or perhaps there is a better way? I am using the C++ API, but a QML answer would do; I could port it to C++.
If you want to draw just one billboard, you can add a plane and rotate it whenever the camera moves. However, if you want to do this efficiently with thousands or millions of billboards, I recommend using custom shaders. We did this to draw impostor spheres in Qt3D.
However, we didn't use a geometry shader because we were targeting systems that didn't support geometry shaders. Instead, we used only the vertex shader by placing four vertices at the origin and moving them in the shader. To create many copies, we used instanced drawing. We moved each set of four vertices according to the positions of the spheres. Finally, we moved each of the four vertices of each sphere such that they result in a billboard that is always facing the camera.
Start out by subclassing QGeometry and creating a buffer functor that generates four points, all at the origin (see spherespointgeometry.cpp). Give each point an ID that we can use later. If you use geometry shaders, the ID is not needed and you can get away with creating only one vertex.
class SpheresPointVertexDataFunctor : public Qt3DRender::QBufferDataGenerator
{
public:
    SpheresPointVertexDataFunctor()
    {
    }

    QByteArray operator ()() Q_DECL_OVERRIDE
    {
        const int verticesCount = 4;
        // vec3 pos + float vertexId
        const quint32 vertexSize = (3+1) * sizeof(float);

        QByteArray verticesData;
        verticesData.resize(vertexSize*verticesCount);
        float *verticesPtr = reinterpret_cast<float*>(verticesData.data());

        // Vertex 1
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 1
        *verticesPtr++ = 0.0;

        // Vertex 2
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 2
        *verticesPtr++ = 1.0;

        // Vertex 3
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 3
        *verticesPtr++ = 2.0;

        // Vertex 4
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 4
        *verticesPtr++ = 3.0;

        return verticesData;
    }

    bool operator ==(const QBufferDataGenerator &other) const Q_DECL_OVERRIDE
    {
        Q_UNUSED(other);
        return true;
    }

    QT3D_FUNCTOR(SpheresPointVertexDataFunctor)
};
For the real positions, we used a separate QBuffer. We also set color and scale, but I have omitted those here (see spheredata.cpp):
void SphereData::setPositions(QVector<QVector3D> positions, QVector3D color, float scale)
{
    QByteArray ba;
    ba.resize(positions.size() * sizeof(QVector3D));
    QVector3D *vboData = reinterpret_cast<QVector3D *>(ba.data());
    for(int i = 0; i < positions.size(); i++) {
        QVector3D &position = vboData[i];
        position = positions[i];
    }
    m_buffer->setData(ba);
    m_count = positions.count();
}
Then, in QML, we connected the geometry with the buffer in a QGeometryRenderer. This can also be done in C++, if you prefer (see Spheres.qml):
GeometryRenderer {
    id: spheresMeshInstanced
    primitiveType: GeometryRenderer.TriangleStrip
    enabled: instanceCount != 0
    instanceCount: sphereData.count
    geometry: SpheresPointGeometry {
        attributes: [
            Attribute {
                name: "pos"
                attributeType: Attribute.VertexAttribute
                vertexBaseType: Attribute.Float
                vertexSize: 3
                byteOffset: 0
                byteStride: (3 + 3 + 1) * 4
                divisor: 1
                buffer: sphereData ? sphereData.buffer : null
            }
        ]
    }
}
Finally, we created custom shaders to draw the billboards. Note that because we were drawing impostor spheres, the billboard size was increased to handle raytracing in the fragment shader from awkward angles. You likely do not need the 2.0*0.6 factor in general.
Vertex shader:
#version 330

in vec3 vertexPosition;
in float vertexId;
in vec3 pos;
in vec3 col;
in float scale;

uniform vec3 eyePosition = vec3(0.0, 0.0, 0.0);
uniform mat4 modelMatrix;
uniform mat4 mvp;

out vec3 modelSpherePosition;
out vec3 modelPosition;
out vec3 color;
out vec2 planePosition;
out float radius;

vec3 makePerpendicular(vec3 v) {
    if(v.x == 0.0 && v.y == 0.0) {
        if(v.z == 0.0) {
            return vec3(0.0, 0.0, 0.0);
        }
        return vec3(0.0, 1.0, 0.0);
    }
    return vec3(-v.y, v.x, 0.0);
}

void main() {
    vec3 position = vertexPosition + pos;
    color = col;
    radius = scale;
    modelSpherePosition = (modelMatrix * vec4(position, 1.0)).xyz;

    vec3 view = normalize(position - eyePosition);
    vec3 right = normalize(makePerpendicular(view));
    vec3 up = cross(right, view);

    float texCoordX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
    float texCoordY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
    planePosition = vec2(texCoordX, texCoordY);

    position += 2*0.6*(-up - right)*(scale*float(vertexId==0.0));
    position += 2*0.6*(-up + right)*(scale*float(vertexId==1.0));
    position += 2*0.6*( up - right)*(scale*float(vertexId==2.0));
    position += 2*0.6*( up + right)*(scale*float(vertexId==3.0));

    vec4 modelPositionTmp = modelMatrix * vec4(position, 1.0);
    modelPosition = modelPositionTmp.xyz;
    gl_Position = mvp*vec4(position, 1.0);
}
Fragment shader:
#version 330

in vec3 modelPosition;
in vec3 modelSpherePosition;
in vec3 color;
in vec2 planePosition;
in float radius;

out vec4 fragColor;

uniform mat4 modelView;
uniform mat4 inverseModelView;
uniform mat4 inverseViewMatrix;
uniform vec3 eyePosition;
uniform vec3 viewVector;

void main(void) {
    vec3 rayDirection = eyePosition - modelPosition;
    vec3 rayOrigin = modelPosition - modelSpherePosition;

    vec3 E = rayOrigin;
    vec3 D = rayDirection;

    // Sphere equation
    //     x^2 + y^2 + z^2 = r^2
    // Ray equation is
    //     P(t) = E + t*D
    // We substitute ray into sphere equation to get
    //     (Ex + Dx * t)^2 + (Ey + Dy * t)^2 + (Ez + Dz * t)^2 = r^2
    float r2 = radius*radius;
    float a = D.x*D.x + D.y*D.y + D.z*D.z;
    float b = 2.0*E.x*D.x + 2.0*E.y*D.y + 2.0*E.z*D.z;
    float c = E.x*E.x + E.y*E.y + E.z*E.z - r2;

    // discriminant of sphere equation
    float d = b*b - 4.0*a*c;
    if(d < 0.0) {
        discard;
    }

    float t = (-b + sqrt(d))/(2.0*a);
    vec3 sphereIntersection = rayOrigin + t * rayDirection;
    vec3 normal = normalize(sphereIntersection);
    vec3 normalDotCamera = color*dot(normal, normalize(rayDirection));

    float pi = 3.1415926535897932384626433832795;
    vec3 position = modelSpherePosition + sphereIntersection;

    // flat red
    fragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
It has been some time since we first implemented this, and there might be easier ways to do it now, but this should give you an idea of the pieces you need.
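For a plain billboard without the impostor-sphere raytracing, a stripped-down variant of the vertex shader above might look like the sketch below. It keeps the same attribute layout (four origin vertices with vertexId 0..3, plus instanced pos and scale); the fixed world-up vector is an assumption of this sketch and degenerates when looking straight up or down, which is what makePerpendicular guards against in the original:
#version 330

in vec3 vertexPosition;   // four vertices, all at the origin
in float vertexId;        // 0, 1, 2, 3
in vec3 pos;              // per-instance billboard centre
in float scale;           // per-instance half-size

uniform vec3 eyePosition;
uniform mat4 mvp;

out vec2 texCoord;

void main() {
    vec3 center = vertexPosition + pos;
    vec3 view = normalize(center - eyePosition);
    // assumes the view direction is never exactly parallel to world up
    vec3 right = normalize(cross(vec3(0.0, 1.0, 0.0), view));
    vec3 up = cross(view, right);

    // map vertexId 0..3 to the corners (-1,-1), (1,-1), (-1,1), (1,1)
    vec2 corner = vec2(float(vertexId == 1.0 || vertexId == 3.0) * 2.0 - 1.0,
                       float(vertexId == 2.0 || vertexId == 3.0) * 2.0 - 1.0);
    texCoord = corner * 0.5 + 0.5;

    vec3 position = center + (corner.x * right + corner.y * up) * scale;
    gl_Position = mvp * vec4(position, 1.0);
}
Drawn as a triangle strip with instanceCount set as in the QML above, each instance becomes one camera-facing quad.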

Oren-Nayar lighting in OpenGL (how to calculate view direction in fragment shader)

I'm trying to implement Oren-Nayar lighting in the fragment shader as shown here.
However, I'm getting some strange lighting effects on the terrain as shown below.
I am currently sending the shader the 'view direction' uniform as the camera's 'front' vector. I am not sure if this is correct, as moving the camera around changes the artifacts.
Multiplying the 'front' vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from some angles. It is particularly noticeable in dark areas and around the edges of the screen.
What could be causing this effect?
Artifact example
How the scene should look
Vertex Shader
#version 450

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;

out VS_OUT {
    vec3 normal;
} vert_out;

void main() {
    vert_out.normal = normal;
    gl_Position = vec4(position, 1.0);
}
Tessellation Control Shader
#version 450

layout(vertices = 3) out;

in VS_OUT {
    vec3 normal;
} tesc_in[];

out TESC_OUT {
    vec3 normal;
} tesc_out[];

void main() {
    if(gl_InvocationID == 0) {
        gl_TessLevelInner[0] = 1.0;
        gl_TessLevelInner[1] = 1.0;
        gl_TessLevelOuter[0] = 1.0;
        gl_TessLevelOuter[1] = 1.0;
        gl_TessLevelOuter[2] = 1.0;
        gl_TessLevelOuter[3] = 1.0;
    }
    tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation Evaluation Shader
#version 450

layout(triangles, equal_spacing) in;

in TESC_OUT {
    vec3 normal;
} tesc_in[];

out TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} tesc_out;

uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;

vec3 lerp(vec3 v0, vec3 v1, vec3 v2) {
    return (
        (vec3(gl_TessCoord.x) * v0) +
        (vec3(gl_TessCoord.y) * v1) +
        (vec3(gl_TessCoord.z) * v2)
    );
}

vec4 lerp(vec4 v0, vec4 v1, vec4 v2) {
    return (
        (vec4(gl_TessCoord.x) * v0) +
        (vec4(gl_TessCoord.y) * v1) +
        (vec4(gl_TessCoord.z) * v2)
    );
}

void main() {
    gl_Position = lerp(
        gl_in[0].gl_Position,
        gl_in[1].gl_Position,
        gl_in[2].gl_Position
    );
    tesc_out.normal = normal_matrix * lerp(
        tesc_in[0].normal,
        tesc_in[1].normal,
        tesc_in[2].normal
    );
    tesc_out.height = gl_Position.y;
    tesc_out.shadow_position = depth_matrix * gl_Position;
    gl_Position = model_view_perspective * gl_Position;
}
Fragment Shader
#version 450

in TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} frag_in;

out vec4 colour;

uniform vec3 view_direction;
uniform vec3 light_position;

#define PI 3.141592653589793

void main() {
    const vec3 ambient = vec3(0.1, 0.1, 0.1);
    const float roughness = 0.8;

    const vec4 water = vec4(0.0, 0.0, 0.8, 1.0);
    const vec4 sand = vec4(0.93, 0.87, 0.51, 1.0);
    const vec4 grass = vec4(0.0, 0.8, 0.0, 1.0);
    const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
    const vec4 snow = vec4(0.9, 0.9, 0.9, 1.0);

    if(frag_in.height == 0.0) {
        colour = water;
    } else if(frag_in.height < 0.2) {
        colour = sand;
    } else if(frag_in.height < 0.575) {
        colour = grass;
    } else if(frag_in.height < 0.8) {
        colour = ground;
    } else {
        colour = snow;
    }

    vec3 normal = normalize(frag_in.normal);
    vec3 view_dir = normalize(view_direction);
    vec3 light_dir = normalize(light_position);

    float NdotL = dot(normal, light_dir);
    float NdotV = dot(normal, view_dir);
    float angleVN = acos(NdotV);
    float angleLN = acos(NdotL);

    float alpha = max(angleVN, angleLN);
    float beta = min(angleVN, angleLN);
    float gamma = dot(view_dir - normal * dot(view_dir, normal), light_dir - normal * dot(light_dir, normal));

    float roughnessSquared = roughness * roughness;
    float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));

    // calculate C1, C2 and C3
    float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
    float C2 = 0.45 * roughnessSquared9;

    if(gamma >= 0.0) {
        C2 *= sin(alpha);
    } else {
        C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
    }

    float powValue = (4.0 * alpha * beta) / (PI * PI);
    float C3 = 0.125 * roughnessSquared9 * powValue * powValue;

    // now calculate both main parts of the formula
    float A = gamma * C2 * tan(beta);
    float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);

    // put it all together
    float L1 = max(0.0, NdotL) * (C1 + A + B);

    // also calculate interreflection
    float twoBetaPi = 2.0 * beta / PI;
    float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) * (1.0 - gamma * twoBetaPi * twoBetaPi);

    colour = vec4(colour.xyz * (L1 + L2), 1.0);
}
First, I plugged your fragment shader into my renderer with my own view/normal/light vectors and it works perfectly, so the problem has to be in the way you calculate those vectors.
Next, you say that you set view_dir to your camera's front vector. I assume you meant "the camera's front vector in world space", which would be incorrect. Since you calculate the dot products with vectors in camera space, view_dir must be in camera space too. Trying vec3(0,0,1) would be an easy way to check that; if it works, we have found your problem.
However, using (0,0,1) for the view direction is not strictly correct when you do perspective projection, because the direction from the fragment to the camera then depends on the location of the fragment on the screen. The correct formula then would be view_dir = normalize(-pos) where pos is the fragment's position in camera space (that is with model-view matrix applied without the projection). Further, this quantity now depends only on the fragment location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
flen is the focal length of your camera, which you can calculate as flen = cot(fovx/2).
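With the shaders above, one way to wire this up is to pass the camera-space position down from the tessellation evaluation shader instead of using a uniform. This is only a sketch; the view_position member is a name made up here and is not part of the original code:
// Tessellation evaluation shader: add `vec3 view_position;` to the TESE_OUT
// block and fill it in before the projection is applied (model_view is the
// existing uniform):
tesc_out.view_position = (model_view * gl_Position).xyz;

// Fragment shader: add the matching member to the input block and use it
// in place of the view_direction uniform:
vec3 view_dir = normalize(-frag_in.view_position);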
I know this is a long dead thread, but I've been having the same problem (for several years), and finally found the solution...
It can be partially solved by fixing the orientation of the surface normals to match the polygon winding direction, but you can also get rid of the artifacts in the shader by changing the following two lines (cos_nv and cos_nl correspond to NdotV and NdotL in the question's shader)...
float angleVN = acos(cos_nv);
float angleLN = acos(cos_nl);
to this...
float angleVN = acos(clamp(cos_nv, -1.0, 1.0));
float angleLN = acos(clamp(cos_nl, -1.0, 1.0));
Tada!

Smooth normals using only the vertex shader after displacement using Perlin noise

I am displacing vertices to form a 3D planet, using a random noise function:
float hash( float n ) { return fract(sin(n)*753.5453123); }

float snoise( in vec3 x )
{
    vec3 p = floor(x);
    vec3 f = fract(x);
    f = f*f*(3.0-2.0*f);

    float n = p.x + p.y*157.0 + 113.0*p.z;
    return mix(mix(mix( hash(n+  0.0), hash(n+  1.0), f.x),
                   mix( hash(n+157.0), hash(n+158.0), f.x), f.y),
               mix(mix( hash(n+113.0), hash(n+114.0), f.x),
                   mix( hash(n+270.0), hash(n+271.0), f.x), f.y), f.z);
}
As the points are calculated on the GPU, I have no way of calculating smooth normals (apart from flat normals using a geometry shader). I have found various methods of doing this, e.g. the neighbour method, but this leaves lots of artefacts on the terrain when performing the lighting calculations.
I am currently calculating the normals using this function with various theta values, but the lighting has lots of patchy areas of bright light:
vec3 calcNormal(vec3 pos)
{
    float theta = Theta;
    vec3 vecTangent = normalize(cross(pos, vec3(1.0, 0.0, 0.0))
                              + cross(pos, vec3(0.0, 1.0, 0.0)));
    vec3 vecBitangent = normalize(cross(vecTangent, pos));
    vec3 ptTangentSample = getPos(normalize(pos + theta * normalize(vecTangent)));
    vec3 ptBitangentSample = getPos(normalize(pos + theta * normalize(vecBitangent)));

    return normalize(cross(ptTangentSample - pos, ptBitangentSample - pos));
}

GLSL OpenGL Each Light Added Gets Darker

I have a scene that works perfectly with one light. However, when I add two more, each new addition becomes dimmer until it is almost invisible. Are the attenuation factors wrong, or could it be something else?
int i = 0;
for(i = 0; i < 3; i++){
    if (lights[i].enabled == 1.0){
        //Lighting Attributes
        vec4 light_position = vec4(lights[i].position, 1.0);
        vec4 light_ambient = lights[i].ambient;
        vec4 light_diffuse = lights[i].diffuse;
        vec4 light_specular = lights[i].specular;
        float light_att_constant = 1.0;
        float light_att_linear = 0.0;
        float light_att_quadratic = 0.01;
        float light_shine = 1.0;

        //Object Attributes
        vec3 obj_position = n_vertex;
        vec3 obj_normals = n_normal;
        vec4 obj_color = n_colors;

        //Calc Distance
        vec3 distance_LO = (obj_position - light_position.xyz);
        float distance = length(distance_LO);

        //Normalize some attributes
        vec3 n_light_position = normalize(distance_LO);

        //Apply ambience
        finalColor *= light_ambient * global_ambient;

        //Calc Cosine of Normal and Light
        float NdotL = max(dot(obj_normals, n_light_position), 0.0);

        //Calc Eye Vector (negated position)
        vec3 eye_view = -obj_position;

        //Check if Surface is facing the Light
        if (NdotL > 0){
            //Apply lambertian reflection
            finalColor += obj_color * light_diffuse * NdotL;

            //Calc the half-vector
            vec3 half_vector = normalize(light_position.xyz + eye_view);

            //Calc angle between normal and half-vector
            //See the engine notebook for a diagram.
            float NdotHV = max(dot(obj_normals, half_vector), 0.0);

            //Apply Specularity
            finalColor += obj_color * light_specular * pow(NdotHV, light_shine);
        }

        //Calc Attenuation
        float attenuation = light_att_constant / ((1 + light_att_linear * distance) *
            1 + light_att_quadratic * distance * distance);

        //Apply Attenuation
        finalColor = finalColor * attenuation;
    }
}
color = vec4(finalColor.rgb, 1.0);
You multiply in your colours. This means that shadows will get darker.
If you have an area at some relative brightness of 1/2 and you multiply it by 1/2 (the contribution from that light), you'll get 1/4.
If you have Photoshop or GIMP you can test this yourself with the Multiply blending mode: overlap three circles of pure red, pure green and pure blue. Compare Multiply to Linear Dodge (the plus operation in Photoshop).
Here's an example.
You'll most certainly want an additive effect, that is, add the terms together.
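A sketch of the loop rewritten with additive accumulation, reusing the question's inputs (n_vertex, n_normal, n_colors, lights, global_ambient). Note it also flips the light vector so it points from the surface towards the light, which is the usual convention for the diffuse term:
vec3 obj_position = n_vertex;
vec3 obj_normals = n_normal;
vec4 obj_color = n_colors;

float light_att_linear = 0.0;
float light_att_quadratic = 0.01;
float light_shine = 1.0;

// global ambient applied once, not per light
vec4 finalColor = obj_color * global_ambient;

for (int i = 0; i < 3; i++) {
    if (lights[i].enabled != 1.0)
        continue;

    // direction and distance from the surface to the light
    vec3 toLight = lights[i].position - obj_position;
    float dist = length(toLight);
    vec3 L = toLight / dist;

    // start with this light's ambient term
    vec4 contribution = lights[i].ambient * obj_color;

    float NdotL = max(dot(obj_normals, L), 0.0);
    if (NdotL > 0.0) {
        // Lambertian diffuse
        contribution += obj_color * lights[i].diffuse * NdotL;

        // Blinn-Phong specular using the half-vector
        vec3 half_vector = normalize(L + normalize(-obj_position));
        float NdotHV = max(dot(obj_normals, half_vector), 0.0);
        contribution += obj_color * lights[i].specular * pow(NdotHV, light_shine);
    }

    float attenuation = 1.0 / (1.0 + light_att_linear * dist + light_att_quadratic * dist * dist);

    // add each light's attenuated contribution; never multiply the running total
    finalColor += contribution * attenuation;
}

color = vec4(finalColor.rgb, 1.0);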