Interpolate Texture in TextureAtlas - OpenGL

I'm searching for a way to interpolate / smooth a texture which is read from a texture atlas.
The problem is that if I set the MIN_FILTER to GL_LINEAR I get bleeding at the edges (the pixels of the neighboring texture are used for the interpolation).
I tried to solve this by writing a shader which does the interpolation manually, but to know which pixels to ignore, the shader needs the texCoords of the vertices so it can check whether a sampled pixel lies inside or outside of the triangle. To get the texCoords into the shader I tried a uniform, but I use display lists (I considered switching to VBOs, but my first implementation used them and it was extremely slow), which makes this very complicated.
How can I smooth my textures without bleeding?
Another problem is that the method that checks whether a point is inside a triangle doesn't seem to work.
Vertex Shader:
#version 130

void main()
{
    gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
    gl_FrontColor = gl_Color;
    gl_Position = ftransform();
}
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;

bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
    vec3 u = B - A;
    vec3 v = C - A;
    vec3 w = P - A;
    vec3 vCrossW = cross(v, w);
    vec3 vCrossU = cross(v, u);
    if(dot(vCrossW, vCrossU) < 0)
    {
        return false;
    }
    vec3 uCrossW = cross(u, w);
    vec3 uCrossV = cross(u, v);
    if(dot(uCrossW, uCrossV) < 0)
    {
        return false;
    }
    float denom = length(uCrossV);
    float r = length(vCrossW);
    float t = length(uCrossW);
    return (r + t <= 1);
}

vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
    vec4 tl = texture2D(textureSampler, uv);
    vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
    vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
    vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
    vec2 f = fract( uv.xy * textureSize );
    vec4 tA = mix( tl, tr, f.x );
    vec4 tB = mix( bl, br, f.x );
    return mix( tA, tB, f.y );
}

void main()
{
    ivec2 textureSize2d = textureSize(tex,0);
    textureSize = float(textureSize2d.x);
    texelSize = 1.0 / textureSize;

    //texture coordinate:
    vec2 texCoord = (gl_TexCoord[0].st);
    //vec4 color = texture2D(tex,gl_TexCoord[0].st);
    gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;

    //TEST IF METHOD WORKS
    if(pointInTriangle(vec3(1,1,0),vec3(0,0,0),vec3(3,0,0),vec3(0,3,0)))
    {
        //pointInTriangle DOES NOT WORK!
        gl_FragColor = gl_Color;
    }
}
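
A common way to reduce the bleeding, independent of knowing the triangle, is to keep every bilinear tap at least half a texel inside the atlas tile before sampling. A minimal sketch (assuming the tile's corner UVs are available, e.g. from the existing texCoord[] uniform; tileMin and tileMax are hypothetical parameters):
vec4 texture2DBilinearClamped(sampler2D textureSampler, vec2 uv, vec2 tileMin, vec2 tileMax)
{
    // keep all four taps at least half a texel away from the tile border
    vec2 lo = tileMin + vec2(0.5 * texelSize);
    vec2 hi = tileMax - vec2(0.5 * texelSize);
    vec2 uv00 = clamp(uv, lo, hi);
    vec2 uv10 = clamp(uv + vec2(texelSize, 0.0), lo, hi);
    vec2 uv01 = clamp(uv + vec2(0.0, texelSize), lo, hi);
    vec2 uv11 = clamp(uv + vec2(texelSize, texelSize), lo, hi);
    vec2 f = fract(uv * textureSize);
    vec4 tA = mix(texture2D(textureSampler, uv00), texture2D(textureSampler, uv10), f.x);
    vec4 tB = mix(texture2D(textureSampler, uv01), texture2D(textureSampler, uv11), f.x);
    return mix(tA, tB, f.y);
}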

Related

Problem with Shader "The shader uses varying --- but previous shader does not write to it"

Does anyone know why I keep getting the error that says:
The shader uses varying _I;DATA;g_mapCoord, but previous shader does not write to it.
The shader uses varying _I;DATA;worldPosition, but previous shader does not write to it.
Take a look at my shaders here.
Vertex
#version 430

layout (location = 0) in vec2 position0;

out DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} Out;

uniform vec3 u_cameraPosition;
uniform mat4 u_localMatrix;
uniform mat4 u_worldMatrix;
uniform float u_scaleY;
uniform int u_lod;
uniform vec2 u_index;
uniform float u_gap;
uniform vec2 u_location;
uniform sampler2D s_heightmap;
uniform int u_lodMorphArea[8];

float morphLatitude(vec2 position)
{
    //not important code
    return 0;
}

float morphLongitude(vec2 position)
{
    //not important code
    return 0;
}

vec2 morph(vec2 localPosition, int morph_area){
    //not important code
    return vec2(0);
}

void main()
{
    vec2 localPosition = (u_localMatrix * vec4(position0.x,0,position0.y,1)).xz;
    if (u_lod > 0) {
        localPosition += morph(localPosition, u_lodMorphArea[u_lod-1]); // Translate position by morphing vector
    }
    float height = texture(s_heightmap, localPosition).r;

    Out.v_mapCoord = localPosition;
    vec4 _worldPosition = u_worldMatrix * vec4(localPosition.x, height, localPosition.y,1);
    Out.worldPosition = _worldPosition.xyz;

    gl_Position = u_worldMatrix * vec4(localPosition.x, height, localPosition.y,1);
}
Fragment
#version 430

layout (location = 0) out vec4 outputColor;

in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;

const vec3 lightDirection = vec3(-0.2, -1.0, -0.2);
const float intensity = 1.2;

uniform sampler2D s_textureNormal;
uniform sampler2D s_textureWater;
uniform sampler2D s_textureLand;

float diffuse(vec3 direction, vec3 normal, float intensity)
{
    return max(0.01, dot(normal, -direction) * intensity);
}

void main()
{
    vec3 normal = texture(s_textureNormal, In.g_mapCoord).rgb;
    float diff = diffuse(lightDirection, normal, intensity);
    outputColor = vec4(1,0,0,1);
}
Geom
#version 430

layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;

in vec2 te_mapCoord[];
out vec2 g_mapCoord;

uniform mat4 u_viewProjection;

void main() {
    for (int i = 0; i < gl_in.length(); ++i)
    {
        vec4 position = gl_in[i].gl_Position;
        gl_Position = u_viewProjection * position;
        g_mapCoord = te_mapCoord[i];
        EmitVertex();
    }
    EndPrimitive();
}
TCS
#version 430

layout(vertices = 16) out;

in DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} In[];

out vec2 tc_mapCoord[];

const int AB = 2;
const int BC = 3;
const int CD = 0;
const int DA = 1;

uniform int u_tessellationFactor;
uniform float u_tessellationSlope;
uniform float u_tessellationShift;
uniform vec3 u_cameraPosition;

// Calculate tessellation levels
float lodFactor(float dist)
{
    float tessellationLevel = max(0.0, u_tessellationFactor/pow(dist, u_tessellationSlope) + u_tessellationShift);
    return tessellationLevel;
}

void main()
{
    if (gl_InvocationID == 0){
        // Calculate mid points of the edges of the quad
        vec3 abMid = vec3(gl_in[0].gl_Position + gl_in[3].gl_Position)/2.0;   //Bottom left, Bottom right
        vec3 bcMid = vec3(gl_in[3].gl_Position + gl_in[15].gl_Position)/2.0;  //Bottom right Top right
        vec3 cdMid = vec3(gl_in[15].gl_Position + gl_in[12].gl_Position)/2.0; //Top right, Top left
        vec3 daMid = vec3(gl_in[12].gl_Position + gl_in[0].gl_Position)/2.0;  //Top left, Bottom left

        // Calculate distance between camera and mid points of the edges of the quad
        float distanceAB = distance(abMid, u_cameraPosition);
        float distanceBC = distance(bcMid, u_cameraPosition);
        float distanceCD = distance(cdMid, u_cameraPosition);
        float distanceDA = distance(daMid, u_cameraPosition);

        // Tesselation levels used by tessellation primitive generator (define how much tessellation to apply to the patch). Value between 1 and gl_MaxTessGenLevel, depending on lodFactor.
        gl_TessLevelOuter[AB] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceAB));
        gl_TessLevelOuter[BC] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceBC));
        gl_TessLevelOuter[CD] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceCD));
        gl_TessLevelOuter[DA] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceDA));
        gl_TessLevelInner[0] = (gl_TessLevelOuter[BC] + gl_TessLevelOuter[DA])/4;
        gl_TessLevelInner[1] = (gl_TessLevelOuter[AB] + gl_TessLevelOuter[CD])/4;
    }

    tc_mapCoord[gl_InvocationID] = In[gl_InvocationID].v_mapCoord; // Just pass to the next stage
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
TES
#version 430

layout(quads, fractional_odd_spacing, cw) in;

in vec2 tc_mapCoord[];
out vec2 te_mapCoord;

uniform sampler2D s_heightmap;
uniform float u_scaleY;

void main(){
    float u = gl_TessCoord.x;
    float v = gl_TessCoord.y;

    // Compute new position for each tessellated vertex within the patch. gl_in with index 12, 0, 3, 15 are corners of the patch.
    vec4 position = ((1 - u) * (1 - v) * gl_in[12].gl_Position + u * (1 - v) * gl_in[0].gl_Position + u * v * gl_in[3].gl_Position +(1 - u) * v * gl_in[15].gl_Position);
    vec2 mapCoord = ((1 - u) * (1 - v) * tc_mapCoord[12] + u * (1 - v) * tc_mapCoord[0] + u * v * tc_mapCoord[3] +(1 - u) * v * tc_mapCoord[15]);

    float height = texture(s_heightmap, mapCoord).r;
    height *= u_scaleY;
    position.y = height;

    te_mapCoord = mapCoord;
    gl_Position = position;
}
Can anyone help me find the error that causes these messages?
When you introduce a geometry shader you need to pass the varyings for the fragment shader from the geometry shader, not the vertex shader.
You can see that this output declaration in your geometry shader:
out vec2 g_mapCoord;
is incompatible with the input your fragment shader expects:
in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;
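One possible fix (a sketch, not the only option) is to make the fragment shader read plain in variables that match what the geometry shader actually writes; if worldPosition is still needed, the geometry shader has to pass it through as well (g_worldPosition below is a hypothetical name for that pass-through):
// geometry shader outputs
out vec2 g_mapCoord;
out vec3 g_worldPosition; // hypothetical pass-through of the world position

// matching fragment shader inputs (replacing the DATA block)
in vec2 g_mapCoord;
in vec3 g_worldPosition;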
Related question and subsequent answers here.

Porting ShaderToy Chromakey example to P5.js

I'm trying to port the ShaderToy chromakey example to p5.js with the webcam as the video source. After a lot of time reading shader documentation, my code still doesn't seem to work. I need some help.
I followed this guide to port the code to p5.
Fragment shader code:
#ifdef GL_ES
precision mediump float;
#endif

uniform sampler2D tex0;
uniform sampler2D tex1;

mat4 RGBtoYUV = mat4(0.257,  0.439, -0.148, 0.0,
                     0.504, -0.368, -0.291, 0.0,
                     0.098, -0.071,  0.439, 0.0,
                     0.0625, 0.500,  0.500, 1.0);

vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);

float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
    float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
    if (tmp < tol.x)
        return 0.0;
    else if (tmp < tol.y)
        return (tmp - tol.x)/(tol.y - tol.x);
    else
        return 1.0;
}

void main()
{
    vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
    vec4 texColor0 = texture(text0, fragPos);
    vec4 texColor1 = texture(text1, fragPos);

    vec4 keyYUV = RGBtoYUV * chromaKey;
    vec4 yuv = RGBtoYUV * texColor0;

    float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
    gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
P5 sketch code:
let theShader;
let cam;
let img;

function preload(){
    theShader = loadShader('webcam.vert', 'webcam.frag');
    img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
}

function setup() {
    pixelDensity(1);
    createCanvas(windowWidth, windowHeight, WEBGL);
    noStroke();
    cam = createCapture(VIDEO);
    cam.size(windowWidth, windowHeight);
    cam.hide();
}

function draw() {
    // shader() sets the active shader with our shader
    shader(theShader);
    // passing cam as a texture
    theShader.setUniform('tex0', cam);
    theShader.setUniform('tex1', img);
    // rect gives us some geometry on the screen
    theShader.rect(0,0,width,height);
}
Test on Glitch
Shadertoy chromakey original fragment shader
The major issue is that you didn't declare and set the uniform variable iResolution. There are also some further issues in the shader code (the samplers are named tex0 and tex1, but the lookups use text0 and text1).
Fragment shader:
precision mediump float;

uniform sampler2D tex0;
uniform sampler2D tex1;
uniform vec2 iResolution;

mat4 RGBtoYUV = mat4(0.257,  0.439, -0.148, 0.0,
                     0.504, -0.368, -0.291, 0.0,
                     0.098, -0.071,  0.439, 0.0,
                     0.0625, 0.500,  0.500, 1.0);

vec4 chromaKey = vec4(0.05, 0.63, 0.14, 1);
vec2 maskRange = vec2(0.005, 0.26);

float colorclose(vec3 yuv, vec3 keyYuv, vec2 tol)
{
    float tmp = sqrt(pow(keyYuv.g - yuv.g, 2.0) + pow(keyYuv.b - yuv.b, 2.0));
    if (tmp < tol.x)
        return 0.0;
    else if (tmp < tol.y)
        return (tmp - tol.x)/(tol.y - tol.x);
    else
        return 1.0;
}

void main()
{
    vec2 fragPos = gl_FragCoord.xy / iResolution.xy;
    vec4 texColor0 = texture2D(tex0, fragPos);
    vec4 texColor1 = texture2D(tex1, fragPos);

    vec4 keyYUV = RGBtoYUV * chromaKey;
    vec4 yuv = RGBtoYUV * texColor0;

    float mask = 1.0 - colorclose(yuv.rgb, keyYUV.rgb, maskRange);
    gl_FragColor = max(texColor0 - mask * chromaKey, 0.0) + texColor1 * mask;
}
Script:
let theShader;
let cam;
let img;

function setup() {
    createCanvas(windowWidth, windowHeight, WEBGL);
    theShader = loadShader('webcam.vert', 'webcam.frag');
    img = loadImage('http://www.quadrochave.com/wp-content/uploads/elementor/thumbs/nodulo_bannersite_ptodu%C3%A7%C3%A3o2-mpe2nvmu8s8o2uqcd7b2oh3mnuv9up05ubby33shz4.png');
    pixelDensity(1);
    noStroke();
    cam = createCapture(VIDEO);
    cam.size(windowWidth, windowHeight);
    cam.hide();
}

function draw() {
    // shader() sets the active shader with our shader
    shader(theShader);
    // passing cam as a texture
    theShader.setUniform('tex0', cam);
    theShader.setUniform('tex1', img);
    theShader.setUniform('iResolution', [width, height]);
    // rect gives us some geometry on the screen
    rect(0,0,width,height);
}
If the vertex shader provides the texture coordinate:
// our vertex data
attribute vec3 aPosition;
attribute vec2 aTexCoord;

// lets get texcoords just for fun!
varying vec2 vTexCoord;

void main() {
    // copy the texcoords
    vTexCoord = aTexCoord;

    // copy the position data into a vec4, using 1.0 as the w component
    vec4 positionVec4 = vec4(aPosition, 1.0);
    positionVec4.xy = positionVec4.xy * 2.0 - 1.0;

    // send the vertex information on to the fragment shader
    gl_Position = positionVec4;
}
then you can use this coordinate instead of gl_FragCoord.xy / iResolution.xy:
varying vec2 vTexCoord;

// [...]

void main() {
    vec2 fragPos = vTexCoord.xy;

    // [...]
}

Weird Layered Effect During Parallax Mapping

I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to work fine except that my brick wall shows distinct visible layers, whereas the photos in the guide don't show any. I was trying to use this code to parallax the topography of the world, but the weird layers show up there too, so I'm hoping to find a fix.
Layered wall photo
Photo of how it should look
Here is my modified vertex shader
#version 300 es

in vec4 vPosition; // aPos
in vec2 texCoord;  // aTexCoords
in vec4 vNormal;   // aNormal
in vec4 vTangent;  // aTangent

uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;

out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;

out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;

void main()
{
    // Normal variables
    vN = normalize(model_view * vNormal).xyz;
    vT = normalize(model_view * vTangent).xyz;

    vec4 veyepos = model_view*vPosition;
    position = veyepos;
    ftexCoord = texCoord;

    // Displacement variables
    vec3 bi = cross(vT, vN);
    FragPos = vec3(model_view * vPosition).xyz;
    vec3 T = normalize(mat3(model_view) * vTangent.xyz);
    vec3 B = normalize(mat3(model_view) * bi);
    vec3 N = normalize(mat3(model_view) * vNormal.xyz);
    mat3 TBN = transpose(mat3(T, B, N));

    TangentLightPos = TBN * light_position.xyz;
    TangentViewPos = TBN * vPosition.xyz;
    TangentFragPos = TBN * FragPos;

    gl_Position = projection * model_view * vPosition;
}
and my modified fragment shader is here
#version 300 es
precision highp float;

in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;

in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;

uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;

uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;

out vec4 fColor;

// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;

    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;

    while(currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }

    return currentTexCoords;
}

void main()
{
    // DO NORMAL MAPPING
    if (mode == 0) {
        vec3 T = normalize(vT);
        vec3 N = normalize(vN);
        vec3 bi = cross(T, N);
        mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));

        vec3 L = normalize(light_position - position).xyz;
        vec3 E = normalize(-position).xyz;

        vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
        vec4 eye = changeOfCoord * text;
        vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
        vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);

        fColor = amb + diff;
    } else if (mode == 1) { // DO PARALLAX MAPPING
        // offset texture coordinates with Parallax Mapping
        vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
        vec2 texCoords = ftexCoord;
        texCoords = ParallaxMapping(ftexCoord, viewDir);

        // discard samples outside of the default texture coordinate space
        if(texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
            discard;

        // obtain normal from normal map
        vec3 normal = texture(normalMap, texCoords).rgb;
        //values stored in normal texture is [0,1] range, we need [-1, 1] range
        normal = normalize(normal * 2.0 - 1.0);

        // get diffuse color
        vec3 color = texture(colorMap, texCoords).rgb;
        // ambient
        vec3 ambient = 0.1 * color;
        // diffuse
        vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
        float diff = max(dot(lightDir, normal), 0.0);
        vec3 diffuse = diff * color;
        // specular
        vec3 reflectDir = reflect(lightDir, normal);
        vec3 halfwayDir = normalize(lightDir + viewDir);
        float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
        vec3 specular = vec3(0.2) * spec;

        fColor = vec4(ambient + diffuse + 0.0, 1.0);
    }
}
Layers at acute viewing angles are a common artifact of steep parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// PARALLAX OCCLUSION MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
    // number of depth layers
    const float minLayers = 8.0;
    const float maxLayers = 32.0;
    float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
    // calculate the size of each layer
    float layerDepth = 1.0 / numLayers;
    // depth of current layer
    float currentLayerDepth = 0.0;
    // the amount to shift the texture coordinates per layer (from vector P)
    vec2 P = viewDir.xy / viewDir.z * 0.1;
    vec2 deltaTexCoords = P / numLayers;

    // get initial values
    vec2 currentTexCoords = texCoords;
    float currentDepthMapValue = texture(depthMap, currentTexCoords).r;

    while(currentLayerDepth < currentDepthMapValue)
    {
        // shift texture coordinates along direction of P
        currentTexCoords -= deltaTexCoords;
        // get depthmap value at current texture coordinates
        currentDepthMapValue = texture(depthMap, currentTexCoords).r;
        // get depth of next layer
        currentLayerDepth += layerDepth;
    }

    // get texture coordinates before collision (reverse operations)
    vec2 prevTexCoords = currentTexCoords + deltaTexCoords;

    // get depth after and before collision for linear interpolation
    float afterDepth = currentDepthMapValue - currentLayerDepth;
    float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;

    // interpolation of texture coordinates
    float weight = afterDepth / (afterDepth - beforeDepth);
    vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);

    return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Commonly the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture.
vec3 bi = cross(vT, vN); // current: tangent x normal
vec3 bi = cross(vN, vT); // suggested: normal x tangent
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo

LWJGL Objects not shown on ATI graphics card

I've written an LWJGL application that loads .obj files, reads them and displays them (using display lists).
On my NVIDIA graphics card everything runs fine, but on an AMD graphics card I can't see the objects.
How I pass data to the shaders:
glUseProgram(shaderEngine.obj);
glUniform1i(glGetUniformLocation(shaderEngine.obj, "inOrangeJuice"), inOrangeJuice ? 1 : 0);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.standard, "projectionMatrix"), camera.projectionMatrix);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.obj, "viewMatrix"), camera.viewMatrix);
ModelMatrix is loaded:
shaderEngine.createModelMatrix(new Vector3f(x, y, z), new Vector3f(rx, ry, rz), new Vector3f(1, 1, 1));
shaderEngine.loadModelMatrix(shaderEngine.obj);
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
uniform int inOrangeJuice;

bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
    vec3 u = B - A;
    vec3 v = C - A;
    vec3 w = P - A;
    vec3 vCrossW = cross(v, w);
    vec3 vCrossU = cross(v, u);
    if(dot(vCrossW, vCrossU) < 0)
    {
        return false;
    }
    vec3 uCrossW = cross(u, w);
    vec3 uCrossV = cross(u, v);
    if(dot(uCrossW, uCrossV) < 0)
    {
        return false;
    }
    float denom = length(uCrossV);
    float r = length(vCrossW);
    float t = length(uCrossW);
    return (r + t <= 1);
}

vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
    vec4 tl = texture2D(textureSampler, uv);
    vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
    vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
    vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
    vec2 f = fract( uv.xy * textureSize );
    vec4 tA = mix( tl, tr, f.x );
    vec4 tB = mix( bl, br, f.x );
    return mix( tA, tB, f.y );
}

void main()
{
    ivec2 textureSize2d = textureSize(tex,0);
    textureSize = float(textureSize2d.x);
    texelSize = 1.0 / textureSize;

    //texture coordinate:
    vec2 texCoord = (gl_TexCoord[0].st);

    bool inOJ = false;
    if(inOrangeJuice == 1)
    {
        float depth = gl_FragCoord.z / gl_FragCoord.w; //works only with perspective projection
        depth = depth / 6;
        if(depth > 1)
        {
            depth = 1;
        }
        inOJ = true;
        gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color * (1.0 - depth) + vec4(1.0, 0.5, 0.0, 1.0) * depth;
    }
    if(inOJ == false)
    {
        gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
    }

    //Nothing is shown, inOrangeJuice should be 0
    //gl_FragColor = vec4(inOrangeJuice,0,0,1);

    //Always works:
    //gl_FragColor = texture2D(tex, texCoord) * gl_Color;
}

Passing float to GLSL shader does nothing

I've been having problems sending the shininess factor to my bump mapping shader.
The result always looks like this: http://i.imgur.com/unzdx.jpg
But if I hard-code the value inside the shader to 0.0 it works just fine.
When I send 0.0 to the shader it turns out like in the picture above.
Any ideas?
Here's my shader
#version 110

uniform sampler2D tex;
uniform sampler2D bmap;
uniform bool boolBump;
uniform vec4 vecColor;
uniform bool onlyColor;
uniform float fTransparencyThresh;
uniform float fShininess;
uniform float alpha;

varying vec3 vecLight;
varying vec3 vecEye;
varying vec3 vecNormal;

vec4 getLighting()
{
    //Ambient part
    vec4 color = (gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) + (gl_LightSource[0].ambient * gl_FrontMaterial.ambient);

    //For bump mapping, the normal comes from the bump map texture lookup
    vec3 n = normalize(vecNormal);
    if(boolBump)
    {
        n = normalize(texture2D(bmap, gl_TexCoord[0].st).xyz * 2.0 - 1.0);
    }

    vec3 l = normalize(vecLight);

    //Lambert term
    float NdotL = dot(n, l);

    if(NdotL > 0.0)
    {
        //Diffuse part
        color += gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * max(0.0, NdotL);

        //Specular part
        vec3 e = normalize(vecEye);
        vec3 r = normalize(-reflect(l,n));
        float spec = pow(max(0.0, dot(r, e)), fShininess);
        color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;
    }

    return color;
}

void main(void)
{
    vec4 texel = texture2D(tex, gl_TexCoord[0].st);
    if(texel.a < fTransparencyThresh)
        discard;

    //Get shading
    vec4 color = getLighting();

    //Color only mode?
    if(onlyColor)
    {
        color *= vecColor;
    }
    else
    {
        color *= texel;
    }

    //Set fragment color, alpha comes from MTL file
    gl_FragColor = vec4(color.xyz, alpha);
}
Edit, OpenGL code:
void MyClass::sendToShader(const OBJ::StelModel* pStelModel, Effect cur, bool& tangEnabled, int& tangLocation)
{
    int location;
    tangEnabled = false;

    if(cur != No)
    {
        location = curShader->uniformLocation("fTransparencyThresh");
        curShader->setUniform(location, fTransparencyThresh);

        location = curShader->uniformLocation("alpha");
        curShader->setUniform(location, pStelModel->pMaterial->alpha);

        location = curShader->uniformLocation("fShininess");
        curShader->setUniform(location, 0.0f);
        ...
Edit: Even this won't work:
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
glUniform1f(loc, 0.0f);
Note that pow(0, 0) is undefined. This means spec is undefined if dot(r, e) == 0 and fShininess == 0.
Do you make sure the program is actively bound when you call glUniform? Do you check glGetError anywhere?
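If you want to rule out the undefined pow(0.0, 0.0) case on the shader side, one option (a minimal sketch of the specular lines in getLighting(), not a fix for the uniform upload itself) is to clamp both arguments before the call:
//Specular part, guarded against pow(0.0, 0.0), which is undefined in GLSL
vec3 e = normalize(vecEye);
vec3 r = normalize(-reflect(l, n));
float specBase = max(0.0, dot(r, e));
float spec = pow(max(specBase, 1e-4), max(fShininess, 1e-4));
color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;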