I've been having problems sending the shininess factor to my bump mapping shader.
The result always looks like this: http://i.imgur.com/unzdx.jpg
But if I hard-code the value to 0.0 inside the shader, it works just fine. When I send 0.0 to the shader instead, it turns out like in the picture above.
Any ideas?
Here's my shader
#version 110
uniform sampler2D tex;
uniform sampler2D bmap;
uniform bool boolBump;
uniform vec4 vecColor;
uniform bool onlyColor;
uniform float fTransparencyThresh;
uniform float fShininess;
uniform float alpha;
varying vec3 vecLight;
varying vec3 vecEye;
varying vec3 vecNormal;
vec4 getLighting()
{
//Ambient part
vec4 color = (gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) + (gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
//For bump mapping, the normal comes from the bump map texture lookup
vec3 n = normalize(vecNormal);
if(boolBump)
{
n = normalize(texture2D(bmap, gl_TexCoord[0].st).xyz * 2.0 - 1.0);
}
vec3 l = normalize(vecLight);
//Lambert term
float NdotL = dot(n, l);
if(NdotL > 0.0)
{
//Diffuse part
color += gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * max(0.0, NdotL);
//Specular part
vec3 e = normalize(vecEye);
vec3 r = normalize(-reflect(l,n));
float spec = pow(max(0.0, dot(r, e)), fShininess);
color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;
}
return color;
}
void main(void)
{
vec4 texel = texture2D(tex, gl_TexCoord[0].st);
if(texel.a < fTransparencyThresh)
discard;
//Get shading
vec4 color = getLighting();
//Color only mode?
if(onlyColor)
{
color *= vecColor;
}
else
{
color *= texel;
}
//Set fragment color, alpha comes from MTL file
gl_FragColor = vec4(color.xyz, alpha);
}
Edit, OpenGL code:
void MyClass::sendToShader(const OBJ::StelModel* pStelModel, Effect cur, bool& tangEnabled, int& tangLocation)
{
int location;
tangEnabled = false;
if(cur != No)
{
location = curShader->uniformLocation("fTransparencyThresh");
curShader->setUniform(location, fTransparencyThresh);
location = curShader->uniformLocation("alpha");
curShader->setUniform(location, pStelModel->pMaterial->alpha);
location = curShader->uniformLocation("fShininess");
curShader->setUniform(location, 0.0f);
...
Edit: Even this won't work:
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
glUniform1f(loc, 0.0f);
Note that pow(0, 0) is undefined. This means spec is undefined whenever dot(r, e) <= 0 (so the clamped base is 0.0) and fShininess is 0.0.
Do you make sure the program is actively bound when you call glUniform? Do you check glGetError anywhere?
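For example, a quick sanity check around the upload might look like this (a sketch only; curShader->program is assumed to be the raw GL program handle, and 32.0 is just an arbitrary non-zero shininess):
GLint current = 0;
glGetIntegerv(GL_CURRENT_PROGRAM, &current);      // which program is bound right now?
if (current != (GLint)curShader->program)
    glUseProgram(curShader->program);             // bind it before uploading the uniform
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
// loc == -1 means the uniform was optimized out or the name is misspelled
glUniform1f(loc, 32.0f);                          // any value > 0.0 also sidesteps the pow(0, 0) case
GLenum err = glGetError();                        // GL_INVALID_OPERATION here usually means no (or the wrong) program was bound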
Related
Does anyone know why I keep getting the error that says:
The ♦ shader uses varying _I;DATA;g_mapCoord, but previous shader does not write to it.
The ♦ shader uses varying _I;DATA;worldPosition, but previous shader does not write to it.
Take a look at my shaders here.
Vertex
#version 430
layout (location = 0) in vec2 position0;
out DATA {
vec2 v_mapCoord;
vec3 worldPosition;
} Out;
uniform vec3 u_cameraPosition;
uniform mat4 u_localMatrix;
uniform mat4 u_worldMatrix;
uniform float u_scaleY;
uniform int u_lod;
uniform vec2 u_index;
uniform float u_gap;
uniform vec2 u_location;
uniform sampler2D s_heightmap;
uniform int u_lodMorphArea[8];
float morphLatitude(vec2 position)
{
//not important code
return 0;
}
float morphLongitude(vec2 position)
{
//not important code
return 0;
}
vec2 morph(vec2 localPosition, int morph_area){
//not important code
return vec2(0);
}
void main()
{
vec2 localPosition = (u_localMatrix * vec4(position0.x,0,position0.y,1)).xz;
if (u_lod > 0) {
localPosition += morph(localPosition, u_lodMorphArea[u_lod-1]); // Translate position by morphing vector
}
float height = texture(s_heightmap, localPosition).r;
Out.v_mapCoord = localPosition;
vec4 _worldPosition = u_worldMatrix * vec4(localPosition.x, height, localPosition.y,1);
Out.worldPosition = _worldPosition.xyz;
gl_Position = u_worldMatrix * vec4(localPosition.x, height, localPosition.y,1);
}
Fragment
#version 430
layout (location = 0) out vec4 outputColor;
in DATA {
vec2 g_mapCoord;
vec3 worldPosition;
} In;
const vec3 lightDirection = vec3(-0.2, -1.0, -0.2);
const float intensity = 1.2;
uniform sampler2D s_textureNormal;
uniform sampler2D s_textureWater;
uniform sampler2D s_textureLand;
float diffuse(vec3 direction, vec3 normal, float intensity)
{
return max(0.01, dot(normal, -direction) * intensity);
}
void main()
{
vec3 normal = texture(s_textureNormal, In.g_mapCoord).rgb;
float diff = diffuse(lightDirection, normal, intensity);
outputColor = vec4(1,0,0,1);
}
Geom
#version 430
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
in vec2 te_mapCoord[];
out vec2 g_mapCoord;
uniform mat4 u_viewProjection;
void main() {
for (int i = 0; i < gl_in.length(); ++i)
{
vec4 position = gl_in[i].gl_Position;
gl_Position = u_viewProjection * position;
g_mapCoord = te_mapCoord[i];
EmitVertex();
}
EndPrimitive();
}
TCS
#version 430
layout(vertices = 16) out;
in DATA {
vec2 v_mapCoord;
vec3 worldPosition;
} In[];
out vec2 tc_mapCoord[];
const int AB = 2;
const int BC = 3;
const int CD = 0;
const int DA = 1;
uniform int u_tessellationFactor;
uniform float u_tessellationSlope;
uniform float u_tessellationShift;
uniform vec3 u_cameraPosition;
// Calculate tessellation levels
float lodFactor(float dist)
{
float tessellationLevel = max(0.0, u_tessellationFactor/pow(dist, u_tessellationSlope) + u_tessellationShift);
return tessellationLevel;
}
void main()
{
if (gl_InvocationID == 0){
// Calculate mid points of the edges of the quad
vec3 abMid = vec3(gl_in[0].gl_Position + gl_in[3].gl_Position)/2.0; //Bottom left, Bottom right
vec3 bcMid = vec3(gl_in[3].gl_Position + gl_in[15].gl_Position)/2.0; //Bottom right Top right
vec3 cdMid = vec3(gl_in[15].gl_Position + gl_in[12].gl_Position)/2.0; //Top right, Top left
vec3 daMid = vec3(gl_in[12].gl_Position + gl_in[0].gl_Position)/2.0; //Top left, Bottom left
// Calculate distance between camera and mid points of the edges of the quad
float distanceAB = distance(abMid, u_cameraPosition);
float distanceBC = distance(bcMid, u_cameraPosition);
float distanceCD = distance(cdMid, u_cameraPosition);
float distanceDA = distance(daMid, u_cameraPosition);
// Tesselation levels used by tessellation primitive generator (define how much tessellation to apply to the patch). Value between 1 and gl_MaxTessGenLevel, depending on lodFactor.
gl_TessLevelOuter[AB] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceAB));
gl_TessLevelOuter[BC] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceBC));
gl_TessLevelOuter[CD] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceCD));
gl_TessLevelOuter[DA] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceDA));
gl_TessLevelInner[0] = (gl_TessLevelOuter[BC] + gl_TessLevelOuter[DA])/4;
gl_TessLevelInner[1] = (gl_TessLevelOuter[AB] + gl_TessLevelOuter[CD])/4;
}
tc_mapCoord[gl_InvocationID] = In[gl_InvocationID].v_mapCoord; // Just pass to the next stage
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
TES
#version 430
layout(quads, fractional_odd_spacing, cw) in;
in vec2 tc_mapCoord[];
out vec2 te_mapCoord;
uniform sampler2D s_heightmap;
uniform float u_scaleY;
void main(){
float u = gl_TessCoord.x;
float v = gl_TessCoord.y;
// Compute new position for each tessellated vertex within the patch. gl_in with index 12, 0, 3, 15 are corners of the patch.
vec4 position = ((1 - u) * (1 - v) * gl_in[12].gl_Position + u * (1 - v) * gl_in[0].gl_Position + u * v * gl_in[3].gl_Position +(1 - u) * v * gl_in[15].gl_Position);
vec2 mapCoord = ((1 - u) * (1 - v) * tc_mapCoord[12] + u * (1 - v) * tc_mapCoord[0] + u * v * tc_mapCoord[3] +(1 - u) * v * tc_mapCoord[15]);
float height = texture(s_heightmap, mapCoord).r;
height *= u_scaleY;
position.y = height;
te_mapCoord = mapCoord;
gl_Position = position;
}
Can anyone help me find the error that is causing this message?
When you introduce a geometry shader, you need to pass the varyings for the fragment shader from the geometry shader, not from the vertex shader.
You can see how your geometry shader's output:
out vec2 g_mapCoord;
is incompatible with what your fragment shader expects:
in DATA {
vec2 g_mapCoord;
vec3 worldPosition;
} In;
To fix it, either emit a matching interface block from the geometry shader (and forward worldPosition through it), or change the fragment shader to take a plain in vec2 g_mapCoord; so that every input it declares is actually written by the previous stage.
Related question and subsequent answers here.
I'm designing a sprite class, and I would like to display only a color if no texture is loaded.
Here is my vertex shader:
#version 330 core
layout (location = 0) in vec4 vertex; // <vec2 pos, vec2 tex>
out vec2 vs_tex_coords;
uniform mat4 model;
uniform mat4 projection;
void main()
{
vs_tex_coords = vertex.zw;
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
And the fragment shader:
#version 330 core
in vec2 vs_tex_coords;
out vec4 fs_color;
uniform sampler2D image;
uniform vec3 sprite_color;
void main()
{
fs_color = vec4(sprite_color, 1.0) * texture(image, vs_tex_coords);
}
My problem is that if I don't bind a texture, it displays only a black sprite. I think the problem is that the texture function in my fragment shader returns 0 and screws up the whole formula.
Is there a way to know if the sampler2D is not initialized or null, and just return the sprite_color?
A sampler cannot be "empty". A valid texture must be bound to the texture units referenced by each sampler in order for rendering to have well-defined behavior.
But that doesn't mean you have to read from the texture that's bound there. It's perfectly valid to use a uniform value to tell the shader whether to read from the texture or not.
But you still have to bind some texture there, even if it is just a simple 1x1 texture. Alternatively, you can use textureSize on the sampler; if it reports a 1x1 texture, don't bother to read from it. Note that this might be slower than using a uniform.
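For example, creating and binding such a 1x1 dummy texture on the application side could look roughly like this (variable names are placeholders, not from your code):
GLuint whiteTex = 0;
const unsigned char whitePixel[4] = { 255, 255, 255, 255 };
glGenTextures(1, &whiteTex);
glBindTexture(GL_TEXTURE_2D, whiteTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, whitePixel);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// later, when drawing a sprite that has no texture of its own:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, whiteTex);
Since the texel is pure white, vec4(sprite_color, 1.0) * texture(image, vs_tex_coords) then reduces to just the sprite color.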
Below are the two versions, with and without an if ... else ... conditional statement. The conditional statement avoids having to sample the texture when it is not used.
The uniform int textureSample is set to 1 to show the texture and to 0 to show the color instead. Both uniform variables are normally set by the program, not by the shader.
uniform int textureSample = 1;
uniform vec3 color = vec3(1.0, 1.0, 0.0);
void main() { // without if... else...
// ...
vec3 materialDiffuseColor = textureSample * texture( textureSampler, fragmentTexture ).rgb - (textureSample - 1) * color;
// ...
}
void main() { // with if... else...
// ...
if (textureSample == 1) { // 1 if texture, 0 if color
vec3 materialDiffuseColor = textureSample * texture( textureSampler, fragmentTexture ).rgb;
vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
gl_FragColor = brightness *
(materialAmbientColor +
materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000) / distanceLight2);
}
else {
vec3 materialDiffuseColor = color;
vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
gl_FragColor = brightness *
(materialAmbientColor +
materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000) / distanceLight2);
}
// ...
}
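On the application side the switch could be driven roughly like this (a sketch; program and spriteHasTexture are assumed names, while the uniform names match the snippet above):
GLint locSample = glGetUniformLocation(program, "textureSample");
GLint locColor  = glGetUniformLocation(program, "color");
glUseProgram(program);
if (spriteHasTexture) {
    glUniform1i(locSample, 1);                    // sample the bound texture
} else {
    glUniform1i(locSample, 0);                    // ignore the texture, use the flat color
    glUniform3f(locColor, 1.0f, 1.0f, 0.0f);      // whatever color the sprite should get
}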
I'd check the length of the RGB value from the diffuse texture. This won't work on a specular map, though:
vec3 texDiffuseCol = texture2D(diffuseTex, TexCoord).rgb;
if(length(texDiffuseCol) == 0.0)
{
//Texture not present
}else
{
//Texture present
}
I am having a very strange occurrence where glDisableVertexAttribArray works in my local solution, but when I get the solution from my Perforce repository, it doesn't run and throws an assert.
I checked out this forum question but it, unfortunately, didn't solve my problem. This is for the shadow mapping I have been working on: when I try to render things to the depth buffer and then disable the vertex attributes, it throws an error.
Here's how my code is laid out:
glUseProgram(shaderProgram);
glUniform1i(u_diffuseTextureLocation, 0);
glUniform1i(u_shadowMapLocation, 1);
[...]
glUseProgram(shaderProgram);
[Render some stuff to depth buffer]
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation); // This gives the GL_INVALID_OPERATION enum
And here's the vertex shader in that program:
#version 430 core
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_model;
uniform mat4 u_lightSpaceMat;
in vec3 a_position;
in vec3 a_normal;
in vec2 a_texture;
out VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} vs_out;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
vs_out.v_fragPos = (u_model * vec4(a_position, 1.0)).xyz;
vs_out.v_normal = transpose(inverse(mat3(u_model))) * a_normal;
vs_out.v_texCoords = a_texture;
vs_out.v_fragPosLightSpace = u_lightSpaceMat * vec4(vs_out.v_fragPos, 1.0);
}
And the fragment shader in the program:
#version 430 core
uniform sampler2D u_shadowMap;
uniform sampler2D u_diffuseTexture;
uniform vec3 u_lightPos;
uniform vec3 u_viewPos;
in VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} fs_in;
out vec4 fragColor;
float shadowCalculation(vec4 fragPosLightSpace, vec3 normal, vec3 lightDir)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// Get closest depth value from light's perspective (using [0,1] range
// fragPosLight as coords)
float closestDepth = texture(u_shadowMap, projCoords.xy).r;
// Get depth of current fragment from lights perspective
float currentDepth = projCoords.z;
float bias = max(0.05 * (1.0 - dot(normal, lightDir)), 0.005);
// Percentage closer filtering
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(u_shadowMap, 0);
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(u_shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
return shadow;
}
void main()
{
vec3 color = texture(u_diffuseTexture, fs_in.v_texCoords).rgb;
vec3 normal = normalize(fs_in.v_normal);
vec3 lightColor = vec3(1.0);
// ambient
vec3 ambient = 0.15 * color;
// diffuse
vec3 lightDir = normalize(u_lightPos - fs_in.v_fragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(u_viewPos - fs_in.v_fragPos);
float spec = 0.0;
vec3 halfWayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfWayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = shadowCalculation(fs_in.v_fragPosLightSpace, normal, lightDir);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
fragColor = vec4(lighting, 1.0);
}
What I'm really confused about is that the program runs when I'm using my local files. But when I pull the files from the Perforce repository and try and run it, then it throws the exception. I checked and all the necessary files are uploaded to Perforce. It would seem that there is something going wrong with which attributes are actually active? I'm not sure. Just scratching my head here...
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation);
glDisableVertexAttribArray modifies the current VAO. You just removed the current VAO, setting it to 0. Which, in a core profile, means no VAO at all. In the compatibility profile, there is a VAO 0, which is probably why it works elsewhere: you're getting the compatibility profile on a different machine.
However, if you're using VAOs, it's not clear why you want to disable an attribute array at all. The whole point of VAOs is that you don't have to call the attribute array functions every frame. You just bind the VAO and go.
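If you really do want to disable the array, rebind the VAO first so the call has something to modify. A sketch, assuming vao is the vertex array object used for the draw:
glBindVertexArray(vao);                              // make the VAO current again
glDisableVertexAttribArray(a_normalAttribLocation);  // now this modifies that VAO instead of erroring
glBindVertexArray(0);
// Or simply leave the attribute enabled: the enable/disable state is stored
// per-VAO, so it cannot leak into other VAOs or later frames.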
I'm searching for a way to interpolate / smooth a texture which is read from a texture atlas.
The problem is that if I set the MIN_FILTER to GL_LINEAR, I get bleeding at the edges (the pixels of the neighboring texture are used for interpolation).
I tried solving this by writing a shader which does the interpolation manually, but to detect which pixels it has to ignore, the shader has to know the texCoords of the vertices so that it can calculate whether the sampled pixel is inside or outside of the triangle. To get the texCoords into the shader I tried to use uniforms, but I use display lists (I considered switching to VBOs, but my first implementation used them and it was extremely slow), which makes it very complicated.
How can I smooth my textures without bleeding?
Another problem is that the method that checks if a point is in a triangle doesn't seem to work.
Vertex Shader:
#version 130
void main()
{
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
gl_Position = ftransform();
}
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
vec3 u = B - A;
vec3 v = C - A;
vec3 w = P - A;
vec3 vCrossW = cross(v, w);
vec3 vCrossU = cross(v, u);
if(dot(vCrossW, vCrossU) < 0)
{
return false;
}
vec3 uCrossW = cross(u, w);
vec3 uCrossV = cross(u, v);
if(dot(uCrossW, uCrossV) < 0)
{
return false;
}
float denom = length(uCrossV);
float r = length(vCrossW);
float t = length(uCrossW);
return (r + t <= 1);
}
vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
vec4 tl = texture2D(textureSampler, uv);
vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
vec2 f = fract( uv.xy * textureSize );
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
void main()
{
ivec2 textureSize2d = textureSize(tex,0);
textureSize = float(textureSize2d.x);
texelSize = 1.0 / textureSize;
//texture coordinate:
vec2 texCoord = (gl_TexCoord[0].st);
//vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
//TEST IF METHOD WORKS
if(pointInTriangle(vec3(1,1,0),vec3(0,0,0),vec3(3,0,0),vec3(0,3,0)))
{
//pointInTriangle DOES NOT WORK!
gl_FragColor = gl_Color;
}
}
Me and a friend are developing an editor (CAD-like) to use in our future game.
We are using the Qt framework and OpenGL.
The problem we are encountering is that on his laptop with an integrated nVidia card, the shading works as expected and renders well. On my laptop with an integrated ATI card, as well as on my desktop with a Radeon HD5850, the Phong lighting behaves slightly differently: there are more bright spots and dark spots, and the image doesn't look good. We are also using a toon shader to draw a silhouette around the edges and to limit the number of shades a color can have.
The toon shader uses two-pass rendering: the first pass renders the object in black, slightly larger than the original (shifting each vertex slightly along its normal) to create the silhouette, and the second pass renders the object normally (only limiting the shade spectrum, so it looks more comic-like).
The images show the same scene on our two computers. The first difference is the one I mentioned above; the second is that the silhouette is stretched out as it should be on my friend's computer, forming an even outline around the object, but is shifted slightly up on my computer, making a thick line on the top and no line on the bottom.
The other thing is the Phong lighting illuminating the cube within which the object is edited. Again, it renders well on my friend's computer but is almost all black or all white on mine.
First image (nVidia card):
Second image (ATI cards):
I understand that the code is long and maybe the problem lies in some Qt settings rather than in the shaders, but if you see anything that strikes you as bad practice, please answer.
Code for the Phong shading follows:
#version 400
in vec4 aVertex;
in vec4 aNormal;
in vec2 aTexCoord;
uniform mat4 uPVM;
uniform mat4 uViewModel;
uniform mat4 uNormal;
uniform int uLightsOn;
out vec2 vTexCoord;
out vec3 vNormal;
flat out vec3 mEye;
flat out vec3 mLightDirection;
flat out vec4 mAxisColor;
void main(void)
{
if(uLightsOn == 1) {
mEye = (uViewModel * aVertex).xyz;
mLightDirection = vec4(2.0,-2.0,1.0,0.0).xyz;
vNormal = (uNormal * aNormal).xyz;
}
gl_Position = uPVM * aVertex;
vTexCoord = aTexCoord;
mAxisColor = aNormal;
}
The Phong fragment shader:
#version 400
uniform sampler2D uTexture0;
uniform int uLightsOn;
uniform vec3 uHighlightColor;
uniform int uTextured;
uniform int uAxisRender;
in vec2 vTexCoord;
in vec3 vNormal;
flat in vec3 mEye;
flat in vec3 mLightDirection;
out vec4 fragColor;
flat in vec4 mAxisColor;
struct TMaterial {
vec4 diffuse;
vec4 ambient;
vec4 specular;
float shininess;
};
TMaterial material;
void setup() {
// setupMaterials
material.ambient = vec4(0.4);
material.diffuse = vec4(0.9);
material.specular = vec4(0.0);
material.shininess = 0.3;
}
void main(void)
{
setup();
vec3 finalHighlightColor = uHighlightColor;
if(finalHighlightColor.x <= 0.0) finalHighlightColor.x = 0.1;
if(finalHighlightColor.y <= 0.0) finalHighlightColor.y = 0.1;
if(finalHighlightColor.z <= 0.0) finalHighlightColor.z = 0.1;
if(uLightsOn == 0) {
if(uAxisRender == 1) fragColor = mAxisColor;
else fragColor = vec4(finalHighlightColor,1.0);
return;
}
vec4 diffuse;
vec4 spec = vec4(0.0);
vec4 ambient;
vec3 L = normalize(mLightDirection - mEye);
vec3 E = normalize(-mEye);
vec3 R = normalize(reflect(-L,vNormal));
ambient = material.ambient;
float intens = max(dot(vNormal,L), 0.0);
diffuse = clamp( material.diffuse * intens , 0.0, 1.0 );
if(intens > 0.0) spec = clamp ( material.specular * pow(max(dot(R,E),0.0),material.shininess) , 0.0, 1.0 );
if(uTextured == 1) fragColor = (ambient + diffuse + spec) * texture(uTexture0,vTexCoord);
else fragColor = (ambient + diffuse + spec) * vec4(finalHighlightColor,1.0);
}
And the toon shaders:
#version 400
in vec4 aVertex;
in vec4 aNormal;
in vec2 aTexCoord;
uniform mat4 uPV;
uniform mat4 uM;
uniform mat4 uN;
uniform vec3 uLightPosition;
uniform vec3 uCameraPosition;
uniform int uSilhouetteMode;
uniform float uOffset;
// if this uniform is set, all the toon rendering is turned off and only simple axes are rendered
// the last data in aNormal are the colors of those axes if everything was set properly.
uniform int uAxisRendering;
flat out vec4 fAxisColor;
out vec4 vNormal;
out vec2 vTexCoord;
out vec3 vDirectionToCamera;
out vec3 vDirectionToLight;
void silhouetteMode() {
gl_Position = uPV * uM * vec4(aVertex.xyz + aNormal.xyz * uOffset,1.0f);
}
void toonMode() {
vec4 worldPosition = uM * aVertex;
vDirectionToCamera = uCameraPosition - worldPosition.xyz;
vDirectionToLight = uLightPosition - worldPosition.xyz;
vNormal = uN * aNormal;
gl_Position = uPV * worldPosition;
}
void axisMode() {
fAxisColor = aNormal;
gl_Position = uPV * uM * aVertex;
}
void main(void)
{
vTexCoord = aTexCoord;
if(uSilhouetteMode == 1) {
silhouetteMode();
} else {
if(uAxisRendering == 1) axisMode();
else toonMode();
}
}
And the fragment shader:
#version 400
uniform sampler2D uTexture;
uniform vec3 uBaseColor;
uniform float uNumShades;
uniform int uSilhouetteMode;
uniform int uAxisRendering;
flat in vec4 fAxisColor;
in vec4 vNormal;
in vec2 vTexCoord;
in vec3 vDirectionToCamera;
in vec3 vDirectionToLight;
out vec4 outFragColor;
void main(void)
{
if(uSilhouetteMode == 1) {
outFragColor = vec4(uBaseColor,1.0);
return;
}
if(uAxisRendering == 1) {
outFragColor = fAxisColor;
return;
}
float l_ambient = 0.1;
float l_diffuse = clamp(dot(vDirectionToLight,vNormal.xyz),0.0,1.0);
float l_specular;
vec3 halfVector = normalize(vDirectionToCamera + vDirectionToLight);
if(dot(vDirectionToLight,vNormal.xyz) > 0.0) {
l_specular = pow(clamp(dot(halfVector,vNormal.xyz),0.0,1.0),64.0);
} else {
l_specular = 0.0;
}
float intensity = l_ambient + l_diffuse + l_specular;
float shadeIntensity = ceil(intensity * uNumShades) / uNumShades;
outFragColor = vec4(texture(uTexture,vTexCoord).xyz * shadeIntensity * uBaseColor, 1.0);
}
And finally, our OpenGLWindow initialization (in Qt)
OpenGLWindow::OpenGLWindow(QWindow *parent) :
QWindow(parent),m_animating(false), m_initialized(false), m_animationTimer(NULL)
{
setSurfaceType(QWindow::OpenGLSurface);
QSurfaceFormat format;
format.setDepthBufferSize( 24 );
format.setMajorVersion( 4 );
format.setMinorVersion( 0 );
format.setSamples( 4 );
format.setProfile( QSurfaceFormat::CoreProfile );
setFormat( format );
create();
if(!m_context) {
m_context = new QOpenGLContext(this);
m_context->setFormat(requestedFormat());
m_context->create();
m_context->makeCurrent(this);
initializeOpenGLFunctions();
}
m_animationTimer = new QTimer(this);
connect(m_animationTimer, SIGNAL(timeout()), this, SLOT(renderer()));
m_animationTimer->setInterval(16);
}
To my eyes, the nVidia image seems to be using alpha whereas the AMD one is not. I also can't see an alpha buffer being requested, e.g.
format.setAlphaBufferSize( 8 );
in your Qt setup, so it may be that.
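For reference, a sketch of the same format setup with an alpha channel explicitly requested; only the setAlphaBufferSize line is new, and whether it removes the vendor difference is a guess:
QSurfaceFormat format;
format.setDepthBufferSize( 24 );
format.setAlphaBufferSize( 8 );   // request destination alpha
format.setMajorVersion( 4 );
format.setMinorVersion( 0 );
format.setSamples( 4 );
format.setProfile( QSurfaceFormat::CoreProfile );
setFormat( format );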