Currently I am working on an OSG project for my studies, for which I wrote a cel-shading shader (alongside a simple fog shader). I first render the scene with the cel shader, along with the depth buffer, to a texture, and then apply the fog shader. Everything works fine on my AMD Radeon HD 7950 and on my Intel HD 4400 (although it is slow on the latter), both running Windows. However, on a Quadro 600 running Linux, the shader compiles without error but the result is still wrong: the light is dulled, and since some light spots are missing, it seems that not every light in the scene is used. The whole toon effect is also gone.
I confirmed the shader works on another AMD card, an ATI Mobility HD 3400.
But on other NVIDIA cards, like a GTX 670, 660 Ti, or 560 Ti (this time on Windows), the shader is not working. At first it was totally messed up because of non-uniform flow control, but even after I fixed that, it still does not work.
I have had this problem for some days now and it is giving me a headache. I do not know what I am missing: why does it work on a simple Intel HD 4400 but not on high-end NVIDIA cards?
Strangely, the fogShader is working perfectly on every system and gives me the nice fog I want.
Does anyone have an idea? The uniform for toonTex is set, but texture0 is not, because the model is UV-mapped in Blender; still, the textures seem to work just fine (look at the pony in the screenshots). I am assuming the sampler falls back to texture unit 0 for texture0, which is perfectly valid as far as I know. Here is a video showing the shader on a GTX 660 Ti. Something seems to work if there is only one light, but it is not how it should look; on a Radeon HD 7950 it looks like this (ignore the black border, that is a screenshot issue).
The light is clearly different.
EDIT: I just did another test: on the Intel HD 4400 under Windows it works, but the same system running Linux shows only a lot of white with some outlines and no textures at all.
Does anyone have a suggestion?
The sources for the shaders are here:
celShader.vert
#version 120
varying vec3 normalModelView;
varying vec4 vertexModelView;
uniform bool zAnimation;
uniform float osg_FrameTime;
void main()
{
    normalModelView = gl_NormalMatrix * gl_Normal;
    vertexModelView = gl_ModelViewMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;

    vec4 vertexPos = gl_Vertex;
    if(zAnimation){
        vertexPos.z = sin(5.0*vertexPos.z + osg_FrameTime)*0.25; //+ vertexPos.z;
    }
    gl_Position = gl_ModelViewProjectionMatrix * vertexPos;
}
celShader.frag
#version 120
#define NUM_LIGHTS 5
uniform sampler2D texture0;
uniform sampler2D toonTex;
uniform float osg_FrameTime;
uniform bool tex;
varying vec3 normalModelView;
varying vec4 vertexModelView;
vec4 calculateLightFromLightSource(int lightIndex, bool front){
    vec3 lightDir;
    vec3 eye = normalize(-vertexModelView.xyz);
    vec4 curLightPos = gl_LightSource[lightIndex].position;
    //curLightPos.z = sin(10*osg_FrameTime)*4+curLightPos.z;
    lightDir = normalize(curLightPos.xyz - vertexModelView.xyz);

    float dist = distance( gl_LightSource[lightIndex].position, vertexModelView );
    float attenuation = 1.0 / (gl_LightSource[lightIndex].constantAttenuation
                             + gl_LightSource[lightIndex].linearAttenuation * dist
                             + gl_LightSource[lightIndex].quadraticAttenuation * dist * dist);
    float z = length(vertexModelView);

    vec4 color = vec4(0.0); // initialize explicitly; accumulating into an uninitialized variable is undefined
    vec3 n = normalize(normalModelView);
    vec3 nBack = normalize(-normalModelView);

    float intensity = dot(n,lightDir);         //NdotL, Lambert
    float intensityBack = dot(nBack,lightDir); //NdotL, Lambert

    //Phong model
    vec3 reflected = normalize(reflect( -lightDir, n));
    float specular = pow(max(dot(reflected, eye), 0.0), gl_FrontMaterial.shininess);
    vec3 reflectedBack = normalize(reflect( -lightDir, nBack));
    float specularBack = pow(max(dot(reflectedBack, eye), 0.0), gl_BackMaterial.shininess);

    //Toon shading
    //2D toon: http://www.cs.rpi.edu/~cutler/classes/advancedgraphics/S12/final_projects/hutchins_kim.pdf
    vec4 toonColor = texture2D(toonTex,vec2(intensity,specular));
    vec4 toonColorBack = texture2D(toonTex,vec2(intensityBack,specularBack));

    if(front){
        color += gl_FrontMaterial.ambient * gl_LightSource[lightIndex].ambient[lightIndex];
        if(intensity > 0.0){
            color += gl_FrontMaterial.diffuse * gl_LightSource[lightIndex].diffuse * intensity * attenuation;
            color += gl_FrontMaterial.specular * gl_LightSource[lightIndex].specular * specular * attenuation;
        }
        return color * toonColor;
    } else { //back
        color += gl_BackMaterial.ambient * gl_LightSource[lightIndex].ambient[lightIndex];
        if(intensity > 0.0){
            color += gl_BackMaterial.diffuse * gl_LightSource[lightIndex].diffuse * intensityBack * attenuation;
            color += gl_BackMaterial.specular * gl_LightSource[lightIndex].specular * specularBack * attenuation;
        }
        return color * toonColorBack;
    }
}
void main(void) {
    vec4 color = vec4(0.0);
    bool front = true;
    //non-uniform flow control correction:
    //see http://www.opengl.org/wiki/GLSL_Sampler#Non-uniform_flow_control
    //and http://gamedev.stackexchange.com/questions/32543/glsl-if-else-statement-unexpected-behaviour
    vec4 texColor = texture2D(texture0,gl_TexCoord[0].xy);
    if(!gl_FrontFacing)
        front = false;
    for(int i = 0; i < NUM_LIGHTS; i++){
        color += calculateLightFromLightSource(i,front);
    }
    if(tex)
        gl_FragColor = color * texColor;
    else
        gl_FragColor = color;
}
fogShader.vert
#version 120
varying vec4 vertexModelView;
void main()
{
    gl_Position = ftransform();
    vertexModelView = gl_ModelViewMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;
}
fogShader.frag
#version 120
varying vec4 vertexModelView;
uniform sampler2D texture0;
uniform sampler2D deepth;
uniform vec3 fogColor;
uniform float zNear;
uniform float zFar;
float linearDepth(float z){
    return (2.0 * (zNear+zFar)) / ((zFar + zNear) - z * (zFar - zNear)); // -1.0;
}
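For comparison, a more common linearization first maps the stored depth from [0,1] to NDC and uses the product of the planes in the numerator; this is a reference sketch (linearDepthNdc is a made-up name), not the formula the code above uses:
float linearDepthNdc(float depth){
    float ndcZ = depth * 2.0 - 1.0; // [0,1] depth-buffer value to NDC [-1,1]
    return (2.0 * zNear * zFar) / (zFar + zNear - ndcZ * (zFar - zNear));
}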
void main(void){
    //Literature:
    //http://www.ozone3d.net/tutorials/glsl_fog/p04.php and the depth_of_field example from the OSG Cookbook
    vec2 deepthPoint = gl_TexCoord[0].xy;
    float z = texture2D(deepth, deepthPoint).x;
    //fogFactor = (end - z) / (end - start)
    z = linearDepth(z);
    float fogFactor = (4000*4 - z) / (4000*4 - 30*4);
    fogFactor = clamp(fogFactor, 0.0, 1.0);
    vec4 texColor = texture2D(texture0, gl_TexCoord[0].xy);
    gl_FragColor = mix(vec4(fogColor,1.0), texColor, fogFactor);
}
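The magic numbers in the fogFactor line are easier to audit as named constants; a minimal equivalent sketch (fogStart and fogEnd are simply the 30*4 and 4000*4 values read off the code above):
const float fogStart = 30.0 * 4.0;
const float fogEnd   = 4000.0 * 4.0;
float fogFactor = clamp((fogEnd - z) / (fogEnd - fogStart), 0.0, 1.0);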
ProgramLinking
osg::ref_ptr<osg::Shader> toonFrag = osgDB::readShaderFile("../Shader/celShader.frag");
osg::ref_ptr<osg::Shader> toonVert = osgDB::readShaderFile("../Shader/" + _vertSource);
osg::ref_ptr<osg::Program> celShadingProgram = new osg::Program;
celShadingProgram->addShader(toonFrag);
celShadingProgram->addShader(toonVert);
osg::ref_ptr<osg::Texture2D> toonTex = new osg::Texture2D;
toonTex->setImage(osgDB::readImageFile("../BlenderFiles/Texturen/toons/" + _toonTex));
toonTex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::NEAREST);
toonTex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::NEAREST);
osg::ref_ptr<osg::StateSet> ss = new osg::StateSet;
ss->setTextureAttributeAndModes(1, toonTex, osg::StateAttribute::OVERRIDE | osg::StateAttribute::ON);
ss->addUniform(new osg::Uniform("toonTex", 1));
ss->setAttributeAndModes(celShadingProgram, osg::StateAttribute::OVERRIDE | osg::StateAttribute::ON);
//TODO: needed?
ss->setTextureMode(1, GL_TEXTURE_1D, osg::StateAttribute::OVERRIDE | osg::StateAttribute::OFF);
ss->addUniform(new osg::Uniform("tex", true));
ss->addUniform(new osg::Uniform("zAnimation", false));
Okay, I finally found the error.
There was a faulty line in my shader, present since version zero, which I overlooked for a whole week (and I am surprised my AMD driver did not give me an error; it was just plain wrong!
EDIT: not wrong at all, see the comment below!).
These two lines were broken:
color += gl_FrontMaterial.ambient * gl_LightSource[lightIndex].ambient[lightIndex];
color += gl_BackMaterial.ambient * gl_LightSource[lightIndex].ambient[lightIndex];
ambient is of course not an array: the trailing [lightIndex] indexes a component of the vec4 itself (which is legal GLSL, hence the edit above), but it is certainly not what was intended...
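The fix is simply to drop the stray index:
color += gl_FrontMaterial.ambient * gl_LightSource[lightIndex].ambient;
color += gl_BackMaterial.ambient * gl_LightSource[lightIndex].ambient;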
Related
I've been trying to implement the Blinn-Phong lighting model to project lighting onto an imported Wavefront OBJ model, loaded through Assimp (GitHub link).
The model seems to be loaded correctly; however, the lighting appears to be "cut off" near the middle of the model.
Image of the imported model with and without lighting enabled.
As you can see on the left of the image above, there is a region in the middle of the model where the light effectively gets "split up", which is not what is intended. The side facing the light source appears brighter than normal, and the side facing away appears darker than normal, with no easing between the two.
I believe there might be something wrong with how I've implemented the lighting model in the fragment shader, but I cannot say for sure why this is happening.
Vertex shader:
#version 330 core
layout (location = 0) in vec3 vertPos;
layout (location = 1) in vec3 vertNormal;
layout (location = 2) in vec2 vertTexCoords;
out vec3 fragPos;
out vec3 fragNormal;
out vec2 fragTexCoords;
uniform mat4 proj, view, model;
uniform mat3 normalMat;
void main() {
    fragPos = vec3(model * vec4(vertPos, 1));
    gl_Position = proj * view * vec4(fragPos, 1);
    fragTexCoords = vertTexCoords;
    fragNormal = normalMat * vertNormal;
}
Fragment shader:
#version 330 core
in vec3 fragPos;
in vec3 fragNormal;
in vec2 fragTexCoords;
out vec4 FragColor;
const int noOfDiffuseMaps = 1;
const int noOfSpecularMaps = 1;
struct Material {
    sampler2D diffuseMaps[noOfDiffuseMaps], specularMaps[noOfSpecularMaps];
    float shininess;
};

struct Light {
    vec3 direction;
    vec3 ambient, diffuse, specular;
};
uniform Material material;
uniform Light light;
uniform vec3 viewPos;
const float pi = 3.14159265;
uniform float gamma = 2.2;
float near = 0.1;
float far = 100;
float LinearizeDepth(float depth)
{
    float z = depth * 2 - 1;
    return (2 * near * far) / (far + near - z * (far - near));
}
void main() {
    vec3 normal = normalize(fragNormal);
    vec3 calculatedColor = vec3(0);

    for (int i = 0; i < noOfDiffuseMaps; i++) {
        vec3 diffuseTexel = texture(material.diffuseMaps[i], fragTexCoords).rgb;

        // Ambient lighting
        vec3 ambient = diffuseTexel * light.ambient;

        // Diffuse lighting
        float diff = max(dot(light.direction, normal), 0);
        vec3 diffuse = diffuseTexel * light.diffuse * diff;

        calculatedColor += ambient + diffuse;
    }

    for (int i = 0; i < noOfSpecularMaps; i++) {
        vec3 specularTexel = texture(material.specularMaps[0], fragTexCoords).rgb;
        vec3 viewDir = normalize(viewPos - fragPos);
        vec3 halfWayDir = normalize(viewDir + light.direction);
        float energyConservation = (8 + material.shininess) / (8 * pi);

        // Specular lighting
        float spec = pow(max(dot(halfWayDir, normal), 0), material.shininess);
        vec3 specular = specularTexel * light.specular * spec * energyConservation;

        calculatedColor += specular;
    }

    float depthColor = 1 - LinearizeDepth(gl_FragCoord.z) / far;
    FragColor = vec4(pow(calculatedColor, vec3(1 / gamma)) * depthColor, 1);
}
Make sure your textures and colors are also linear (it is a simple pow 2.2), because you are doing gamma encoding at the end.
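For instance, assuming the diffuse maps are stored gamma-encoded (an assumption; the question does not say), the decode is one pow per texel:
// Assumption: diffuse maps are gamma-encoded; decode to linear before
// lighting, since the final pow(1.0 / gamma) re-encodes the result.
vec3 diffuseTexel = pow(texture(material.diffuseMaps[i], fragTexCoords).rgb, vec3(gamma));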
Also note that a harsh terminator is expected.
http://filmicworlds.com/blog/linear-space-lighting-i-e-gamma/
Beyond that, if you expect soft falloff, it must come from an area light. For that you can implement wrap lighting or proper area lights; a wrap-lighting sketch follows.
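This is a minimal wrap-lighting sketch; the wrap factor is a made-up tunable, not something from the original code:
// Wrap lighting: remap N.L from [-wrap, 1] into [0, 1] to soften the terminator.
const float wrap = 0.5; // hypothetical softness in [0, 1]
float diff = max((dot(light.direction, normal) + wrap) / (1.0 + wrap), 0.0);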
I'm creating a terrain mesh and, following this SO answer, I'm trying to migrate my CPU-computed normals to a shader-based version, in order to improve performance by reducing my mesh resolution and using a normal map computed in the fragment shader.
I'm using Mapbox height maps for the terrain data. Tiles look like this:
And elevation at each pixel is given by the following formula:
const elevation = -10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1);
My original code first creates a dense mesh (256×256 squares of 2 triangles each) and then computes triangle and vertex normals. To get a visually satisfying result I was dividing the elevation by 5000 to match the tile's width and height in my scene (in the future I'll do a proper computation to display the real elevation).
I was drawing with these simple shaders:
Vertex shader:
uniform mat4 u_Model;
uniform mat4 u_View;
uniform mat4 u_Projection;
attribute vec3 a_Position;
attribute vec3 a_Normal;
attribute vec2 a_TextureCoordinates;
varying vec3 v_Position;
varying vec3 v_Normal;
varying mediump vec2 v_TextureCoordinates;
void main() {
    v_TextureCoordinates = a_TextureCoordinates;
    v_Position = vec3(u_View * u_Model * vec4(a_Position, 1.0));
    v_Normal = vec3(u_View * u_Model * vec4(a_Normal, 0.0));
    gl_Position = u_Projection * u_View * u_Model * vec4(a_Position, 1.0);
}
Fragment shader:
precision mediump float;
varying vec3 v_Position;
varying vec3 v_Normal;
varying mediump vec2 v_TextureCoordinates;
uniform sampler2D texture;
void main() {
    vec3 lightVector = normalize(-v_Position);
    float diffuse = max(dot(v_Normal, lightVector), 0.1);
    highp vec4 textureColor = texture2D(texture, v_TextureCoordinates);
    gl_FragColor = vec4(textureColor.rgb * diffuse, textureColor.a);
}
It was slow but gave visually satisfying results:
Now, I removed all the CPU based normals computation code, and replaced my shaders by those:
Vertex shader:
#version 300 es
precision highp float;
precision highp int;
uniform mat4 u_Model;
uniform mat4 u_View;
uniform mat4 u_Projection;
in vec3 a_Position;
in vec2 a_TextureCoordinates;
out vec3 v_Position;
out vec2 v_TextureCoordinates;
out mat4 v_Model;
out mat4 v_View;
void main() {
    v_TextureCoordinates = a_TextureCoordinates;
    v_Model = u_Model;
    v_View = u_View;
    v_Position = vec3(u_View * u_Model * vec4(a_Position, 1.0));
    gl_Position = u_Projection * u_View * u_Model * vec4(a_Position, 1.0);
}
Fragment shader:
#version 300 es
precision highp float;
precision highp int;
in vec3 v_Position;
in vec2 v_TextureCoordinates;
in mat4 v_Model;
in mat4 v_View;
uniform sampler2D u_dem;
uniform sampler2D u_texture;
out vec4 color;
const vec2 size = vec2(2.0,0.0);
const ivec3 offset = ivec3(-1,0,1);
float getAltitude(vec4 pixel) {
    float red = pixel.x;
    float green = pixel.y;
    float blue = pixel.z;
    return (-10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1)) * 6.0; // Why * 6 and not / 5000 ??
}
void main() {
    float s01 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.xy));
    float s21 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.zy));
    float s10 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yx));
    float s12 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yz));

    vec3 va = vec3(size.xy, s21 - s01);
    vec3 vb = vec3(size.yx, s12 - s10);
    vec3 normal = normalize(cross(va, vb));
    vec3 transformedNormal = normalize(vec3(v_View * v_Model * vec4(normal, 0.0)));

    vec3 lightVector = normalize(-v_Position);
    float diffuse = max(dot(transformedNormal, lightVector), 0.1);
    highp vec4 textureColor = texture(u_texture, v_TextureCoordinates);
    color = vec4(textureColor.rgb * diffuse, textureColor.a);
}
It now loads nearly instantly, but something is wrong:
- in the fragment shader I had to multiply the elevation by 6 rather than divide by 5000 to get something close to my original code
- the result is not as good; especially when I tilt the scene, the shadows are very dark (the more I tilt, the darker they get):
Can you spot what causes that difference?
EDIT: I created two JSFiddles:
first version, with CPU-computed vertex normals: http://jsfiddle.net/tautin/tmugzv6a/10
second version, with GPU-computed normal map: http://jsfiddle.net/tautin/8gqa53e1/42
The problem appears when you play with the tilt slider.
There were three problems I could find.
One you saw and fixed by trial and error: the scale of your height calculation was wrong. On the CPU your color coordinates vary from 0 to 255, but in GLSL texture values are normalized to the range 0 to 1, so the correct height calculation is:
return (-10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1 * 256.0)) / Z_SCALE;
But for this shader's purposes, the -10000.0 does not matter, so you can do:
return (red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1 * 256.0 / Z_SCALE;
The second problem is that the scale of your x and y coordinates was also wrong. In the CPU code the distance between two neighboring points is (SIZE * 2.0 / (RESOLUTION + 1)), but on the GPU you had set it to 1. The correct way to define your size variable is:
const float SIZE = 2.0;
const float RESOLUTION = 255.0;
const vec2 size = vec2(2.0 * SIZE / (RESOLUTION + 1.0), 0.0);
Notice that I increased the resolution to 255 because I assume this is what you want (one less than the texture resolution). This is also needed to match the value of offset, which you defined as:
const ivec3 offset = ivec3(-1,0,1);
To use a different RESOLUTION value, you will have to adjust offset accordingly; e.g. for RESOLUTION == 127, offset = ivec3(-2,0,2). That is, the offset must be <real texture resolution>/(RESOLUTION + 1), which limits the possible RESOLUTION values, since offset must be an integer.
The third problem is that you used a different normal-calculation algorithm on the GPU, which strikes me as having lower resolution than the CPU one, because it uses the four outer pixels of a cross but ignores the central one. That doesn't seem to be the full story, though, and I can't explain why the two are so different. I tried to implement the exact CPU algorithm as I understood it, but it yielded different results. Instead, I had to use the following algorithm, which is similar but not exactly the same, to get an almost identical result (if you increase the CPU resolution to 255):
float s11 = getAltitude(texture(u_dem, v_TextureCoordinates));
float s21 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.zy));
float s10 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yx));
vec3 va = (vec3(size.xy, s21 - s11));
vec3 vb = (vec3(size.yx, s10 - s11));
vec3 normal = normalize(cross(va, vb));
This is the original CPU solution, but with RESOLUTION=255: http://jsfiddle.net/k0fpxjd8/
This is the final GPU solution: http://jsfiddle.net/7vhpuqd8/
I am having a very strange occurrence where glDisableVertexAttribArray works in one of my solutions, but when I fetch the solution from my Perforce repository, it doesn't run and throws an assert.
I checked out this forum question but unfortunately it didn't solve my problem. This is for the shadow mapping I have been working on: when I try to render things to the depth buffer and then disable the vertex attributes, it throws an error.
Here's how my code is laid out:
glUseProgram(shaderProgram);
glUniform1i(u_diffuseTextureLocation, 0);
glUniform1i(u_shadowMapLocation, 1);
[...]
glUseProgram(shaderProgram);
[Render some stuff to depth buffer]
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation); // This gives the GL_INVALID_OPERATION enum
And here's the vertex shader in that program:
#version 430 core
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_model;
uniform mat4 u_lightSpaceMat;
in vec3 a_position;
in vec3 a_normal;
in vec2 a_texture;
out VS_OUT {
    vec3 v_fragPos;
    vec3 v_normal;
    vec2 v_texCoords;
    vec4 v_fragPosLightSpace;
} vs_out;

void main()
{
    gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
    vs_out.v_fragPos = (u_model * vec4(a_position, 1.0)).xyz;
    vs_out.v_normal = transpose(inverse(mat3(u_model))) * a_normal;
    vs_out.v_texCoords = a_texture;
    vs_out.v_fragPosLightSpace = u_lightSpaceMat * vec4(vs_out.v_fragPos, 1.0);
}
And the fragment shader in the program:
#version 430 core
uniform sampler2D u_shadowMap;
uniform sampler2D u_diffuseTexture;
uniform vec3 u_lightPos;
uniform vec3 u_viewPos;
in VS_OUT {
    vec3 v_fragPos;
    vec3 v_normal;
    vec2 v_texCoords;
    vec4 v_fragPosLightSpace;
} fs_in;

out vec4 fragColor;

float shadowCalculation(vec4 fragPosLightSpace, vec3 normal, vec3 lightDir)
{
    // perform perspective divide
    vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
    // transform to [0,1] range
    projCoords = projCoords * 0.5 + 0.5;
    // Get closest depth value from light's perspective (using [0,1] range
    // fragPosLight as coords)
    float closestDepth = texture(u_shadowMap, projCoords.xy).r;
    // Get depth of current fragment from light's perspective
    float currentDepth = projCoords.z;
    float bias = max(0.05 * (1.0 - dot(normal, lightDir)), 0.005);
    // Percentage-closer filtering
    float shadow = 0.0;
    vec2 texelSize = 1.0 / textureSize(u_shadowMap, 0);
    for (int x = -1; x <= 1; ++x)
    {
        for (int y = -1; y <= 1; ++y)
        {
            float pcfDepth = texture(u_shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
            shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
        }
    }
    shadow /= 9.0;
    return shadow;
}

void main()
{
    vec3 color = texture(u_diffuseTexture, fs_in.v_texCoords).rgb;
    vec3 normal = normalize(fs_in.v_normal);
    vec3 lightColor = vec3(1.0);
    // ambient
    vec3 ambient = 0.15 * color;
    // diffuse
    vec3 lightDir = normalize(u_lightPos - fs_in.v_fragPos);
    float diff = max(dot(lightDir, normal), 0.0);
    vec3 diffuse = diff * lightColor;
    // specular
    vec3 viewDir = normalize(u_viewPos - fs_in.v_fragPos);
    float spec = 0.0;
    vec3 halfWayDir = normalize(lightDir + viewDir);
    spec = pow(max(dot(normal, halfWayDir), 0.0), 64.0);
    vec3 specular = spec * lightColor;
    // calculate shadow
    float shadow = shadowCalculation(fs_in.v_fragPosLightSpace, normal, lightDir);
    vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
    fragColor = vec4(lighting, 1.0);
}
What I'm really confused about is that the program runs when I'm using my local files, but when I pull the files from the Perforce repository and try to run it, it throws the exception. I checked, and all the necessary files are uploaded to Perforce. It would seem that something is going wrong with which attributes are actually active? I'm not sure. Just scratching my head here...
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation);
glDisableVertexAttribArray modifies the current VAO. You just removed the current VAO by setting it to 0, which in a core profile means no VAO at all. In the compatibility profile there is a VAO 0, which is probably why it works elsewhere: you're getting a compatibility profile on the other machine.
However, if you're using VAOs, it's not clear why you want to disable an attribute array at all. The whole point of VAOs is that you don't have to call the attribute array functions every frame. You just bind the VAO and go.
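A minimal sketch of that pattern (the buffer setup and the draw call are placeholders, not code from the question):
// At init time: record the attribute state once, inside the VAO.
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer); // hypothetical buffer
glEnableVertexAttribArray(a_normalAttribLocation);
glVertexAttribPointer(a_normalAttribLocation, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindVertexArray(0);

// Per frame: bind, draw, unbind. No glDisableVertexAttribArray needed.
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, vertexCount); // placeholder draw call
glBindVertexArray(0);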
I am working on the beginnings of omnidirectional shadow mapping in my engine. For now I am only producing one shadow map as a test. I am getting an odd result with my current shaders. Here is a screenshot which shows the problem:
I am using a near value of 0.5 and a far value of 5.0 in the projection matrix for the shadow-map render. As near as I can tell, any fragment with a light-space z larger than my far-plane distance is being computed by my fragment shader as in shadow.
This is my fragment shader:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
    vec3 clipSpaceCoords;
    clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
    clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;

    vec4 position = invProj * vec4(clipSpaceCoords, 1.0);
    position.xyz /= position.w;

    vec4 lightSpace = lightProj * vec4(position.xyz, 1.0);
    lightSpace.xyz /= lightSpace.w;
    lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
    float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;

    vec3 normal = texture(normalTexture, st).xyz; // .xyz added: texture() returns a vec4
    vec3 diffuse = vec3(0.0); // initialized: the shadowed branch leaves it unwritten otherwise
    float shadowFactor = 1.0;
    if(lightSpace.w > 0.0 && lightSpace.z > lightDepth + 0.0042) {
        shadowFactor = 0.2;
    }
    else {
        float k = 0.00001;
        vec3 distanceToLight = lightPosition - position.xyz;
        float distanceLength = length(distanceToLight);
        float attenuation = 1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength));
        float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
        diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
    }
    vec3 gamma = vec3(1.0/2.2);
    color = pow(texture(colorTexture, st).xyz * shadowFactor + diffuse, gamma);
}
How can I fix this issue (other than increasing my far-plane distance)?
One other question, as this is the first time I have attempted shadow mapping: am I handling the lighting in relation to the shadows correctly?
When I run the program on my computer, it works exactly as I expect. However, when I try to run it on my campus lab computers, the fragment shader behaves very strangely.
Right now it's just a simple Phong lighting calculation with a point light at the origin. On the lab computers, however, it looks more like some strange cross between cel shading and a flashlight. I run an ATI graphics card, while the lab computers run NVIDIA. The shading works as expected on Macs as well (no idea about their graphics cards).
The NVIDIA cards support up to OpenGL 3.1, though I run it on this Linux distribution at 2.1. I've tried clamping the shader version to GLSL 1.2, among a slew of other things, but the results are the same. The strangest thing is that when I do vertex shading rather than per-pixel shading, the result is the same on both computers... I've exhausted my ideas about how to fix this.
Here's the vertex shader:
#version 120
attribute vec2 aTexCoord;
attribute vec3 aPosition;
attribute vec3 aNormal;
attribute vec3 camLoc;
attribute float mat;
varying vec3 vColor;
varying vec2 vTexCoord;
varying vec3 normals;
varying vec3 lightPos;
varying vec3 camPos;
varying float material;
uniform mat4 uProjMatrix;
uniform mat4 uViewMatrix;
uniform mat4 uModelMatrix;
uniform mat4 uNormMatrix;
uniform vec3 uLight;
uniform vec3 uColor;
void main()
{
    //set up object position in world space
    vec4 vPosition = uModelMatrix * vec4(aPosition, 1.0);
    vPosition = uViewMatrix * vPosition;
    vPosition = uProjMatrix * vPosition;
    gl_Position = vPosition;

    //set up light vector in world space
    vec4 vLight = vec4(uLight, 1.0) * uViewMatrix;
    lightPos = vLight.xyz - vPosition.xyz;

    //set up normal vector in world space
    normals = (vec4(aNormal,1.0) * uNormMatrix).xyz;

    //set up view vector in world space
    camPos = camLoc.xyz - vPosition.xyz;

    //set up material shininess
    material = mat;

    //pass color and vertex
    vColor = uColor;
    vTexCoord = aTexCoord;
}
And the fragment shader:
#version 120
varying vec3 lightPos;
varying vec3 normals;
varying vec3 camPos;
varying vec2 vTexCoord;
varying vec3 vColor;
varying float material;
uniform sampler2D uTexUnit;
uniform mat4 uProjMatrix;
uniform mat4 uViewMatrix;
uniform mat4 uModelMatrix;
void main(void)
{
    float diffuse;
    float diffuseRed, diffuseBlue, diffuseGreen;
    float specular;
    float specRed, specBlue, specGreen;
    vec3 lightColor = vec3(0.996, 0.412, 0.706); //color of light (HOT PINK) **UPDATE WHEN CHANGED**
    vec4 L; //light vector
    vec4 N; //normal vector
    vec4 V; //view vector
    vec4 R; //reflection vector
    vec4 H; //halfway vector
    float red;
    float green;
    float blue;

    vec4 texColor1 = texture2D(uTexUnit, vTexCoord);

    //diffuse calculations
    L = vec4(normalize(lightPos),0.0);
    N = vec4(normalize(normals),0.0);
    N = uModelMatrix * N;

    //calculate RGB of diffuse light
    diffuse = max(dot(N,L),0.0);
    diffuseRed = diffuse*lightColor[0];
    diffuseBlue = diffuse*lightColor[1];
    diffuseGreen = diffuse*lightColor[2];

    //specular calculations
    V = vec4(normalize(camPos),0.0);
    V = uModelMatrix * V;
    R = vec4(-1.0 * L.x, -1.0 * L.y, -1.0 * L.z, 0.0);
    float temp = 2.0*dot(L,N);
    vec3 tempR = vec3(temp * N.x, temp * N.y, temp * N.z);
    R = vec4(R.x + tempR.x, R.y + tempR.y, R.z + tempR.z, 0.0);
    R = normalize(R);
    H = normalize(L + V);
    specular = dot(H,R);
    specular = pow(specular,material);
    specRed = specular*lightColor[0];
    specBlue = specular*lightColor[1];
    specGreen = specular*lightColor[2];

    //set new colors
    //textures
    red = texColor1[0]*diffuseRed + texColor1[0]*specRed*0.7 + texColor1[0]*.05;
    green = texColor1[1]*diffuseBlue + texColor1[1]*specBlue*0.7 + texColor1[1]*.05;
    blue = texColor1[2]*diffuseGreen + texColor1[2]*specGreen*0.7 + texColor1[2]*.05;

    //colors
    red = vColor[0]*diffuseRed + vColor[0]*specRed*0.7 + vColor[0]*.05;
    green = vColor[1]*diffuseBlue + vColor[1]*specBlue*0.7 + vColor[1]*.05;
    blue = vColor[2]*diffuseGreen + vColor[2]*specGreen*0.7 + vColor[2]*.05;

    gl_FragColor = vec4(red, green, blue, 1.0);
}
Your code looks wrong in many, many ways.
The following code is wrong. The comment is misleading (it's in clip space, not world space). But the major problem is that you overwrite vPosition with the clip-space coordinates, and then use it several lines later as if it were still in view space.
//set up object position in world space
vec4 vPosition = uModelMatrix * vec4(aPosition, 1.0);
vPosition = uViewMatrix * vPosition;
vPosition = uProjMatrix * vPosition;
gl_Position = vPosition;
The following code is wrong, too. First, you need matrix * vector, not vector * matrix. But also, the comment says world space, yet you compute vLight in view space and then subtract vPosition, which is in clip space!
//set up light vector in world space
vec4 vLight = vec4(uLight, 1.0) * uViewMatrix;
lightPos = vLight.xyz - vPosition.xyz;
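A corrected sketch for this part, assuming the lighting is meant to happen in view space (vViewPos is a hypothetical varying holding uViewMatrix * uModelMatrix * vec4(aPosition, 1.0)):
// matrix * vector, and subtract the view-space position, not the clip-space one
vec4 vLight = uViewMatrix * vec4(uLight, 1.0);
lightPos = vLight.xyz - vViewPos.xyz;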
Again here, matrix * vector:
//set up normal vector in world space
normals = (vec4(aNormal,1.0) * uNormMatrix).xyz;
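With the multiplication order fixed, and with w = 0.0 so the normal is unaffected by translation, this would read:
// matrix * vector; w = 0.0 keeps the normal translation-free
normals = (uNormMatrix * vec4(aNormal, 0.0)).xyz;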
Now what is this? camPos was computed in world coordinates, yet you apply the model matrix, which converts from model space to world space.
//specular calculations
V = vec4(normalize(camPos),0.0);
V = uModelMatrix * V;
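If camPos is already the surface-to-camera vector in the intended lighting space, no matrix belongs here at all (sketch):
// camPos is already in the lighting space; just normalize it
V = vec4(normalize(camPos), 0.0);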
I have no idea why your shader performs differently on different computers, but I am pretty sure none of them shows anything remotely close to the expected result.
You really need to read through your shaders again. Each time you see a vector, ask yourself: in what coordinate space is this vector meaningful? And each time you see a matrix, ask yourself: what coordinate spaces does this matrix convert from and to?