I've successfully rendered my scene from my light's point of view onto a depth cubemap, but I don't quite understand how I can actually project it onto my scene.
Here's a short clip of the current situation: http://youtu.be/54WXDWxqmXw
I found an implementation example on how to do it over here:
http://www.opengl.org/discussion_boards/showthread.php/174093-GLSL-cube-shadows-projecting?p=1219162&viewfull=1#post1219162
It seemed fairly easy to understand, so I figured it would be a good starting point, but I'm having some difficulties with the matrices (as shown in the video above).
My Vertex Shader:
#version 330 core
layout(std140) uniform ViewProjection
{
mat4 V;
mat4 P;
};
layout(location = 0) in vec3 vertexPosition;
layout(location = 1) in vec2 vertexUV;
out vec2 UV;
out vec4 posCs;
uniform mat4 M;
uniform mat4 lightView;
void main()
{
mat4 MVP = P *V *M;
gl_Position = MVP *vec4(vertexPosition,1);
UV = vertexUV;
posCs = V *M *vec4(vertexPosition,1);
}
Fragment Shader:
#version 330 core
in vec2 UV;
in vec4 posCs;
out vec4 color;
// Diffuse texture
uniform sampler2D renderTexture;
uniform samplerCubeShadow shadowCubeMap;
uniform mat4 lightView;
uniform mat4 lightProjection;
uniform mat4 camViewInv;
void main()
{
color = texture(renderTexture,UV).rgba;
mat4 lView = mat4(1); // The light is currently at the world origin, so we'll skip the transformation for now (The less potential error sources the better)
vec4 posLs = lView *camViewInv *posCs;
vec4 posAbs = abs(posLs);
float fs_z = -max(posAbs.x,max(posAbs.y,posAbs.z));
vec4 clip = lightProjection *vec4(0.0,0.0,fs_z,1.0);
float depth = (clip.z /clip.w) *0.5 +0.5;
float r = texture(shadowCubeMap,vec4(posLs.xyz,depth));
color *= r;
}
(I've only posted the relevant parts)
lightProjection is the same projection matrix that I've used to render the scene into the cubemap.
I'm not entirely sure about 'camViewInv'; from the example I linked above, I came up with this:
glm::mat4 camViewInv(
camView[0][0],camView[1][0],camView[2][0],0.0f,
camView[0][1],camView[1][1],camView[2][1],0.0f,
camView[0][2],camView[1][2],camView[2][2],0.0f,
camPos[0],camPos[1],camPos[2],1.0f
);
camView being the camera's view matrix, and camPos the camera's worldspace position.
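For comparison, GLM can also compute the inverse directly; the hand-built matrix above should only match it when the view matrix is a pure rotation plus translation (no scaling):

glm::mat4 camViewInv = glm::inverse(camView); // full inverse of the camera's view matrix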
Everything else should be self-explanatory I believe.
I can't see anything wrong with the shaders myself, and I'm fairly certain the scene is rendered correctly into the cubemap (as shown in the video above). Maybe someone more versed than me can spot the issue.
// Update:
Some additional information about the creation / usage of the shadow cubemap:
Creating the cubemap texture:
unsigned int frameBuffer;
glGenFramebuffers(1,&frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER,frameBuffer);
unsigned int texture;
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_CUBE_MAP,texture);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_FUNC,GL_LEQUAL);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_R,GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_S,GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_WRAP_T,GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_CUBE_MAP,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
for(int i=0;i<6;i++)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,0,GL_DEPTH_COMPONENT,size,size,0,GL_DEPTH_COMPONENT,GL_FLOAT,0);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,texture,0);
glDrawBuffer(GL_NONE);
}
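It can't hurt to verify that the framebuffer is actually complete after attaching a face; a minimal check:

GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
{
    // handle the error; 'status' holds the reason, e.g. GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT
}
glBindFramebuffer(GL_FRAMEBUFFER,0); // leave the default framebuffer bound until the depth pass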
The light's matrices:
glm::perspective<float>(90.f,1.f,2.f,m_distance); // Projection Matrix
// View Matrices
glm::vec3 pos = GetPosition(); // Light worldspace position
glm::lookAt(pos,pos +glm::vec3(1,0,0),glm::vec3(0,1,0));
glm::lookAt(pos,pos +glm::vec3(-1,0,0),glm::vec3(0,1,0));
glm::lookAt(pos,pos +glm::vec3(0,1,0),glm::vec3(0,0,-1))
glm::lookAt(pos,pos +glm::vec3(0,-1,0),glm::vec3(0,0,1))
glm::lookAt(pos,pos +glm::vec3(0,0,1),glm::vec3(0,1,0))
glm::lookAt(pos,pos +glm::vec3(0,0,-1),glm::vec3(0,1,0))
Vertex Shader:
#version 330 core
layout(location = 0) in vec4 vertexPosition;
uniform mat4 shadowMVP;
void main()
{
gl_Position = shadowMVP *vertexPosition;
}
Fragment Shader:
#version 330 core
layout(location = 0) out float fragmentDepth;
void main()
{
fragmentDepth = gl_FragCoord.z;
}
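For reference, the depth pass is driven by a loop roughly along these lines (a simplified sketch: depthProgram, views[] holding the six lookAt matrices above, shadowMVPLoc, and RenderScene() are placeholders for my actual code):

glUseProgram(depthProgram); // program containing the two shaders above
glBindFramebuffer(GL_FRAMEBUFFER,frameBuffer);
glViewport(0,0,size,size);
glm::mat4 lightProj = glm::perspective<float>(90.f,1.f,2.f,m_distance);
for(int i=0;i<6;i++)
{
    // Render into one cubemap face at a time
    glFramebufferTexture2D(GL_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_TEXTURE_CUBE_MAP_POSITIVE_X +i,texture,0);
    glClear(GL_DEPTH_BUFFER_BIT);
    glm::mat4 shadowMVP = lightProj *views[i] *M; // M = model matrix of the object being drawn
    glUniformMatrix4fv(shadowMVPLoc,1,GL_FALSE,glm::value_ptr(shadowMVP)); // glm::value_ptr needs <glm/gtc/type_ptr.hpp>
    RenderScene(); // placeholder for the actual draw calls
}
glBindFramebuffer(GL_FRAMEBUFFER,0);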
I would suggest doing this in world space: light positions are typically defined in world space, and keeping everything there reduces the workload. I removed a number of uniforms that you do not need once you work in world space.
Compute the lighting direction and depth in the vertex shader:
#version 330 core
layout(std140) uniform ViewProjection
{
mat4 V;
mat4 P;
};
layout(location = 0) in vec4 vertexPosition; // W is automatically assigned 1, if missing.
layout(location = 1) in vec2 vertexUV;
out vec2 UV;
out vec4 lightDirDepth; // Direction = xyz, Depth = w
uniform mat4 M;
uniform vec3 lightPos; // World Space Light Pos
uniform vec2 shadowZRange; // Near / Far clip plane distances for shadow's camera
float vecToDepth (vec3 Vec)
{
vec3 AbsVec = abs (Vec);
float LocalZcomp = max (AbsVec.x, max (AbsVec.y, AbsVec.z));
float n = shadowZRange [0]; // Near plane when the shadow map was built (not const: it comes from a uniform)
float f = shadowZRange [1]; // Far plane when the shadow map was built
float NormZComp = (f+n) / (f-n) - (2.0*f*n)/(f-n)/LocalZcomp;
return (NormZComp + 1.0) * 0.5;
}
void main()
{
mat4 MVP = P *V *M;
gl_Position = MVP *vertexPosition;
UV = vertexUV;
vec3 lightDir = lightPos - (M *vertexPosition).xyz;
float lightDepth = vecToDepth (lightDir);
lightDirDepth = vec4 (lightDir, lightDepth);
}
Modified Fragment Shader (sample cubemap using light dir, and test against depth):
#version 330 core
in vec2 UV;
in vec4 lightDirDepth; // Direction = xyz, Depth = w
out vec4 color;
// Diffuse texture
uniform sampler2D renderTexture;
uniform samplerCubeShadow shadowCubeMap;
void main()
{
const float bias = 0.0001; // Prevent shadow acne
color = texture (renderTexture,UV).rgba;
float r = texture (shadowCubeMap, vec4 (lightDirDepth.xyz, lightDirDepth.w + bias));
color *= r;
}
I added two new uniforms:
lightPos -- World space position of your light
shadowZRange -- The values of your near and far plane when you built your shadow cube map, packed into a vec2
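Setting them from the application side would look roughly like this (the location and program variables are placeholders; the near value 2.0 and far value m_distance come from your glm::perspective call):

GLint lightPosLoc = glGetUniformLocation(program,"lightPos");
GLint shadowZRangeLoc = glGetUniformLocation(program,"shadowZRange");
glUseProgram(program);
glUniform3fv(lightPosLoc,1,glm::value_ptr(lightWorldPos)); // world-space light position
glUniform2f(shadowZRangeLoc,2.0f,m_distance); // near / far used when the cubemap was built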
Let me know if you need me to explain anything or if this does not produce meaningful results.
Related
Using C++, OpenGL, and GLAD. I have a light which is supposed to produce some diffuse lighting in the scene. The problem is that when I rotate the object being rendered around the Y (up) axis, the light seems to move along with the object.
The movement of the light is not synchronized with the rotation of the object either.
Why is this happening and how do I fix it?
These are the shaders.
The Vertex Shader
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 coords;
layout (location = 2) in vec3 normals;
out vec2 Texture_Coords;
out vec3 normal;
out vec3 toLightVector;
uniform mat4 p;
uniform mat4 m;
uniform mat4 v;
uniform vec3 light_position;
void main(){
vec4 world_position = m * vec4(pos,1.0);
gl_Position = p * v * world_position;
Texture_Coords = coords;
normal = (vec4(normals,1.0) * m).xyz;
toLightVector = light_position - world_position.xyz;
}
The Fragment Shader
#version 330 core
out vec4 Pixel;
in vec2 Texture_Coords;
in vec3 normal;
in vec3 toLightVector;
uniform vec4 color;
uniform sampler2D Texture;
uniform float ambient;
uniform vec3 light_color;
void main(){
vec3 unitNormal = normalize(normal);
vec3 unitToLightVector = normalize(toLightVector);
float light_factor = dot(unitNormal, unitToLightVector);
float brightness = max(light_factor, ambient);
vec3 diffuse = brightness * light_color;
Pixel = vec4(diffuse,1.0) * texture(Texture, Texture_Coords);
}
Matrix multiplication is not commutative; v * m is not the same as m * v. In GLSL, vector * matrix treats the vector as a row vector, which is equivalent to multiplying by the transposed matrix, so for a rotation your normals end up rotated the opposite way. Change

normal = (vec4(normals,1.0) * m).xyz;

to

normal = mat3(m) * normals;
I also recommend reading Why is the transposed inverse of the model view matrix used to transform the normal vectors? and Why transforming normals with the transpose of the inverse of the modelview matrix?:
normal = transpose(inverse(mat3(m))) * normals;
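If you would rather not evaluate inverse() per vertex in the shader, the same normal matrix can be computed once per draw with GLM on the CPU and uploaded as its own mat3 uniform (a sketch; normalMatrixLoc is assumed to be the queried uniform location):

glm::mat3 normalMatrix = glm::transpose(glm::inverse(glm::mat3(m))); // m = model matrix
glUniformMatrix3fv(normalMatrixLoc,1,GL_FALSE,glm::value_ptr(normalMatrix));
// In the vertex shader this becomes: normal = normalMatrix * normals;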
I've been trying to implement a simple light/shading system, specifically a simple Phong lighting model without specular highlights. It basically works, except it has some (in my opinion) nasty artifacts.
My first thought was that maybe this is a problem of the texture mipmaps, but disabling them didn't work. My next best guess would be a shader issue, but I can't seem to find the error.
Has anybody ever experienced a similar issue, or does anyone have an idea of how to solve this?
Image of the artifacts
Vertex shader:
#version 330 core
// Vertex shader
layout(location = 0) in vec3 vpos;
layout(location = 1) in vec2 vuv;
layout(location = 2) in vec3 vnormal;
out vec2 uv; // UV coordinates
out vec3 normal; // Normal in camera space
out vec3 pos; // Position in camera space
out vec3 light[3]; // Vertex -> light vector in camera space
uniform mat4 mv; // View * model matrix
uniform mat4 mvp; // Proj * View * Model matrix
uniform mat3 nm; // Normal matrix for transforming normals into c-space
void main() {
// Pass uv coordinates
uv = vuv;
// Adjust normals
normal = nm * vnormal;
// Calculation of vertex in camera space
pos = (mv * vec4(vpos, 1.0)).xyz;
// Vector vertex -> light in camera space
light[0] = (mv * vec4(0.0,0.3,0.0,1.0)).xyz - pos;
light[1] = (mv * vec4(-6.0,0.3,0.0,1.0)).xyz - pos;
light[2] = (mv * vec4(0.0,0.3,4.8,1.0)).xyz - pos;
// Pass position after projection transformation
gl_Position = mvp * vec4(vpos, 1.0);
}
Fragment shader:
#version 330 core
// Fragment shader
layout(location = 0) out vec3 color;
in vec2 uv; // UV coordinates
in vec3 normal; // Normal in camera space
in vec3 pos; // Position in camera space
in vec3 light[3]; // Vertex -> light vector in camera space
uniform sampler2D tex;
uniform float flicker;
void main() {
vec3 n = normalize(normal);
// Ambient
color = 0.05 * texture(tex, uv).rgb;
// Diffuse lights
for (int i = 0; i < 3; i++) {
vec3 l = normalize(light[i]);
float cos = clamp(dot(n,l), 0.0, 1.0);
float length = length(light[i]);
color += 0.6 * texture(tex, uv).rgb * cos / pow(length, 2);
}
}
As the first comment says, it looks like your color computation is using insufficient precision. Try using mediump or highp floats.
Additionally, the length = length(light[i]); pow(length,2) expression is quite inefficient, and could also be a source of the observed banding; you should use dot(light[i],light[i]) instead.
So I found information about my problem, which is described as "gradient banding" and also discussed here. The problem appears to lie in the nature of my textures: both the plain "white" texture and the real texture are mostly grey/white, and there are effectively only 256 levels of grey when using 8 bits per color channel.
The solution would be to implement post-processing dithering or to use better textures.
In the past few days I've been trying to implement parallax mapping in my engine, but it doesn't seem to work. I have looked at at least 15 examples, and I'm still not able to get it working.
Here is an image:
As you can see, only the base color shows up; the height map has no visible effect.
Here are my shaders:
Fragment Shader
#version 330 core
uniform sampler2D DiffuseTextureSampler;
uniform sampler2D HeightTextureSampler;
vec2 scaleBias = vec2(0.5,0.5);
in vec3 EyeDirection_tangentspace;
in vec2 UV;
void main()
{
float height = texture2D(HeightTextureSampler, vec2 (UV.x, -UV.y)).r;
//Our heightmap only has one color channel.
float v = height * scaleBias.r - scaleBias.g;
vec3 eye = EyeDirection_tangentspace;
vec2 newCoords = UV + (eye.xy * v);
vec3 rgb = texture2D(DiffuseTextureSampler, vec2 (newCoords.x, -newCoords.y)).rgb;
gl_FragColor = vec4(rgb, 1.0);
}
Vertex Shader
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec2 vertexUV;
layout(location = 2) in vec3 vertexNormal_modelspace;
layout(location = 3) in vec3 vertexTangent_modelspace;
layout(location = 4) in vec3 vertexBitangent_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out vec3 LightDirection_tangentspace;
out vec3 EyeDirection_tangentspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 MV3x3;
uniform vec3 LightPosition_worldspace;
void main()
{
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
Position_worldspace = (M * vec4(vertexPosition_modelspace,1)).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( V * M * vec4(vertexPosition_modelspace,1)).xyz;
EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;
UV = vertexUV;
vec3 vertexTangent_cameraspace = MV3x3 * vertexTangent_modelspace;
vec3 vertexBitangent_cameraspace = MV3x3 * vertexBitangent_modelspace;
vec3 vertexNormal_cameraspace = MV3x3 * vertexNormal_modelspace;
mat3 TBNMatrix = transpose(mat3(vertexTangent_cameraspace, vertexBitangent_cameraspace, vertexNormal_cameraspace));
EyeDirection_tangentspace = Position_worldspace - vertexPosition_modelspace.xyz;
EyeDirection_tangentspace *= TBNMatrix;
}
A couple of things:
Set your scale to 1; there's no point in halving your height scale if you can't see the effect at all.
(Your current problem) You are sampling your texture coordinates with -UV.y. Negative texture coordinates fall outside the normal [0, 1] range, so depending on the wrap mode you will pull clamped edge texels or, worse, a mirrored/repeated texture instead of the texel you expect.
(Your next problem) Normalize your eye vector before calculating the new coordinates in the fragment shader. If you don't normalize, the XY components of the vector will be huge, so your new texture coordinates end up with massive offsets.
Try these shaders; they are very simple and they work. You will have to add lighting after you get the parallax working.
Vertex shader
attribute vec3 tangent;
attribute vec3 binormal;
uniform vec3 CAMERA_POSITION;
varying vec3 eyeVec;
void main()
{
gl_Position = ftransform();
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
mat3 TBNMatrix = mat3(tangent, binormal, gl_Normal);
eyeVec = CAMERA_POSITION - gl_Vertex.xyz;
eyeVec *= TBNMatrix;
}
fragment shader
uniform sampler2D basetex;
uniform sampler2D heightMap;
uniform vec2 scaleBias;
varying vec3 eyeVec;
void main()
{
float height = texture2D(heightMap, gl_TexCoord[0].st).r;
float v = height * scaleBias.r - scaleBias.g;
vec3 eye = normalize(eyeVec);
vec2 newCoords = gl_TexCoord[0].st + (eye.xy * v);
vec3 rgb = texture2D(basetex, newCoords).rgb;
gl_FragColor = vec4(rgb, 1.0);
}
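Host-side, the shaders above only need the two extra attributes and a couple of uniforms hooked up; roughly like this (the location queries and variable names here are assumptions, not code from your engine):

GLint tangentLoc = glGetAttribLocation(program,"tangent");
GLint binormalLoc = glGetAttribLocation(program,"binormal");
// ...set up glEnableVertexAttribArray/glVertexAttribPointer for both, alongside the position/normal/texcoord arrays
glUseProgram(program);
glUniform3fv(glGetUniformLocation(program,"CAMERA_POSITION"),1,glm::value_ptr(cameraWorldPos));
glUniform2f(glGetUniformLocation(program,"scaleBias"),0.04f,0.02f); // typical scale/bias; exaggerate the scale while debugging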
I'm getting some pretty freaky results from my tangent-space normal mapping shader :). In the scene shown here, the teapot and checkered walls are being shaded with my ordinary Blinn-Phong shader (obviously the teapot's backface culling gives it a slightly ephemeral look and feel :-) ). I've tried to add normal mapping to the sphere, with psychedelic results:
The light is coming from the right (just about visible as a black blob). The normal map I'm using on the sphere looks like this:
I'm using AssImp to process the input models, so it calculates tangents and binormals for each vertex automatically for me.
The pixel and vertex shaders are below. I'm not too sure what's going wrong, but it wouldn't surprise me if the tangent basis matrix is somehow wrong. I assume I have to compute things in eye space and then transform the eye and light vectors into tangent space, and that this is the correct way to go about it. Note that the light position comes into the shader already in view space.
// Vertex Shader
#version 420
// Uniform Buffer Structures
// Camera.
layout (std140) uniform Camera
{
mat4 Camera_Projection;
mat4 Camera_View;
};
// Matrices per model.
layout (std140) uniform Model
{
mat4 Model_ViewModelSpace;
mat4 Model_ViewModelSpaceInverseTranspose;
};
// Spotlight.
layout (std140) uniform OmniLight
{
float Light_Intensity;
vec3 Light_Position; // Already in view space.
vec4 Light_Ambient_Colour;
vec4 Light_Diffuse_Colour;
vec4 Light_Specular_Colour;
};
// Streams (per vertex)
layout(location = 0) in vec3 attrib_Position;
layout(location = 1) in vec3 attrib_Normal;
layout(location = 2) in vec3 attrib_Tangent;
layout(location = 3) in vec3 attrib_BiNormal;
layout(location = 4) in vec2 attrib_Texture;
// Output streams (per vertex)
out vec3 attrib_Fragment_Normal;
out vec4 attrib_Fragment_Position;
out vec3 attrib_Fragment_Light;
out vec3 attrib_Fragment_Eye;
// Shared.
out vec2 varying_TextureCoord;
// Main
void main()
{
// Compute normal.
attrib_Fragment_Normal = (Model_ViewModelSpaceInverseTranspose * vec4(attrib_Normal, 0.0)).xyz;
// Compute position.
vec4 position = Model_ViewModelSpace * vec4(attrib_Position, 1.0);
// Generate matrix for tangent basis.
mat3 tangentBasis = mat3( attrib_Tangent,
attrib_BiNormal,
attrib_Normal);
// Light vector.
attrib_Fragment_Light = tangentBasis * normalize(Light_Position - position.xyz);
// Eye vector.
attrib_Fragment_Eye = tangentBasis * normalize(-position.xyz);
// Return position.
gl_Position = Camera_Projection * position;
}
... and the pixel shader looks like this:
// Pixel Shader
#version 420
// Samplers
uniform sampler2D Map_Normal;
// Global Uniforms
// Material.
layout (std140) uniform Material
{
vec4 Material_Ambient_Colour;
vec4 Material_Diffuse_Colour;
vec4 Material_Specular_Colour;
vec4 Material_Emissive_Colour;
float Material_Shininess;
float Material_Strength;
};
// Spotlight.
layout (std140) uniform OmniLight
{
float Light_Intensity;
vec3 Light_Position;
vec4 Light_Ambient_Colour;
vec4 Light_Diffuse_Colour;
vec4 Light_Specular_Colour;
};
// Input streams (per vertex)
in vec3 attrib_Fragment_Normal;
in vec4 attrib_Fragment_Position; // matches the vec4 output from the vertex shader
in vec3 attrib_Fragment_Light;
in vec3 attrib_Fragment_Eye;
// Shared.
in vec2 varying_TextureCoord;
// Result
out vec4 Out_Colour;
// Main
void main(void)
{
// Compute normals.
vec3 N = normalize(texture(Map_Normal, varying_TextureCoord).xyz * 2.0 - 1.0);
vec3 L = normalize(attrib_Fragment_Light);
vec3 V = normalize(attrib_Fragment_Eye);
vec3 R = normalize(-reflect(L, N));
// Compute products.
float NdotL = max(0.0, dot(N, L));
float RdotV = max(0.0, dot(R, V));
// Compute final colours.
vec4 ambient = Light_Ambient_Colour * Material_Ambient_Colour;
vec4 diffuse = Light_Diffuse_Colour * Material_Diffuse_Colour * NdotL;
vec4 specular = Light_Specular_Colour * Material_Specular_Colour * (pow(RdotV, Material_Shininess) * Material_Strength);
// Final colour.
Out_Colour = ambient + diffuse + specular;
}
Edit: 3D Studio render of the scene (to show the UVs are OK on the sphere):
I think your shaders are okay, but your texture coordinates on the sphere are totally off. It's as if they got distorted towards the poles along the longitude.
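For reference, the usual latitude/longitude mapping for a sphere computes the coordinates roughly like this (one common convention, sketched on the CPU side with GLM):

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/constants.hpp>

// dir: normalized direction from the sphere's centre to the vertex.
glm::vec2 sphereUV(const glm::vec3 &dir)
{
    float u = 0.5f + std::atan2(dir.z, dir.x) / (2.0f * glm::pi<float>());
    float v = 0.5f - std::asin(dir.y) / glm::pi<float>();
    return glm::vec2(u, v);
}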
I've been learning OpenGL for the past couple of weeks and I've run into some trouble implementing a Phong shader. It appears to do no interpolation between vertices despite my use of the smooth qualifier. Am I missing something here? To give credit where credit is due, the code for the vertex and fragment shaders cribs heavily from the OpenGL SuperBible, Fifth Edition. I would highly recommend this book!
Vertex Shader:
#version 330
in vec4 vVertex;
in vec3 vNormal;
uniform mat4 mvpMatrix; // mvp = ModelViewProjection
uniform mat4 mvMatrix; // mv = ModelView
uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
smooth out vec3 vVaryingNormal;
smooth out vec3 vVaryingLightDir;
void main(void) {
vVaryingNormal = normalMatrix * vNormal;
vec4 vPosition4 = mvMatrix * vVertex;
vec3 vPosition3 = vPosition4.xyz / vPosition4.w;
vVaryingLightDir = normalize(vLightPosition - vPosition3);
gl_Position = mvpMatrix * vVertex;
}
Fragment Shader:
#version 330
out vec4 vFragColor;
uniform vec4 ambientColor;
uniform vec4 diffuseColor;
uniform vec4 specularColor;
smooth in vec3 vVaryingNormal;
smooth in vec3 vVaryingLightDir;
void main(void) {
float diff = max(0.0, dot(normalize(vVaryingNormal), normalize(vVaryingLightDir)));
vFragColor = diff * diffuseColor;
vFragColor += ambientColor;
vec3 vReflection = normalize(reflect(-normalize(vVaryingLightDir),normalize(vVaryingNormal)));
float spec = max(0.0, dot(normalize(vVaryingNormal), vReflection));
if(diff != 0) {
float fSpec = pow(spec, 32.0);
vFragColor.rgb += vec3(fSpec, fSpec, fSpec);
}
}
This (public domain) image from Wikipedia shows exactly what sort of image I'm getting and what I'm aiming for -- I'm getting the "flat" image but I want the "Phong" image.
Any help would be greatly appreciated. Thank you!
edit: If it makes a difference, I'm using PyOpenGL 3.0.1 and Python 2.6.
edit2:
Solution
It turns out the problem was with my geometry; Kos was correct. For anyone else that's having this problem with Blender models, Kos pointed out that doing Edit->Faces->Set Smooth does the trick. I found that Wings 3D worked "out of the box."
As an addition to this answer, here is a simple geometry shader which will let you visualize your normals. Modify the accompanying vertex shader as needed based on your attribute locations and how you send your matrices.
But first, a picture of a giant bunny head from our friend the Stanford bunny as an example of the result!
Major warning: do note that I get away with transforming the normals with the modelview matrix instead of a proper normal matrix. This won't work correctly if your modelview contains non-uniform scaling. Also, the length of your normals won't be correct, but that matters little if you just want to check their direction.
Vertex shader:
#version 330
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 normal;
layout(location = 2) in mat4 mv;
out Data
{
vec4 position;
vec4 normal;
vec4 color;
mat4 mvp;
} vdata;
uniform mat4 projection;
void main()
{
vdata.mvp = projection * mv;
vdata.position = position;
vdata.normal = normal;
}
Geometry shader:
#version 330
layout(triangles) in;
layout(line_strip, max_vertices = 6) out;
in Data
{
vec4 position;
vec4 normal;
vec4 color;
mat4 mvp;
} vdata[3];
out Data
{
vec4 color;
} gdata;
void main()
{
const vec4 green = vec4(0.0f, 1.0f, 0.0f, 1.0f);
const vec4 blue = vec4(0.0f, 0.0f, 1.0f, 1.0f);
for (int i = 0; i < 3; i++)
{
gl_Position = vdata[i].mvp * vdata[i].position;
gdata.color = green;
EmitVertex();
gl_Position = vdata[i].mvp * (vdata[i].position + vdata[i].normal);
gdata.color = blue;
EmitVertex();
EndPrimitive();
}
}
Fragment shader:
#version 330
in Data
{
vec4 color;
} gdata;
out vec4 outputColor;
void main()
{
outputColor = gdata.color;
}
Hmm... You're interpolating the normal as a varying variable, so the fragment shader should receive the correct per-pixel normal.
The only explanation I can think of for the result in your left image is that every fragment on a given face ultimately receives the same normal. You can confirm this with a fragment shader like:
void main() {
vFragColor = vec4(normalize(vVaryingNormal), 1.0);
}
If that's the case, the question remains: why? The vertex shader looks OK.
So maybe there's something wrong with your geometry? What data do you send to the shader? Are you sure you have correctly calculated per-vertex normals, and not just per-face normals?
The orange lines are normals of the diagonal face, the red lines are normals of the horizontal face.
If your data looks like the above image, then even with a correct shader you'll get flat shading. Make sure that you have correct per-vertex normals like on the lower image. (They are really simple to calculate for a sphere.)
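For completeness, the per-vertex normal of a sphere is just the normalized direction from the centre to the vertex; a minimal sketch with GLM:

#include <glm/glm.hpp>

// Outward unit normal of a sphere centred at 'centre', evaluated at 'vertex'.
glm::vec3 sphereNormal(const glm::vec3 &vertex, const glm::vec3 &centre)
{
    return glm::normalize(vertex - centre);
}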