I'm designing a sprite class, and I would like to display only a color if no texture is loaded.
Here is my vertex shader:
#version 330 core
layout (location = 0) in vec4 vertex; // <vec2 pos, vec2 tex>
out vec2 vs_tex_coords;
uniform mat4 model;
uniform mat4 projection;
void main()
{
vs_tex_coords = vertex.zw;
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
And the fragment shader:
#version 330 core
in vec2 vs_tex_coords;
out vec4 fs_color;
uniform sampler2D image;
uniform vec3 sprite_color;
void main()
{
fs_color = vec4(sprite_color, 1.0) * texture(image, vs_tex_coords);
}
My problem is that if I don't bind a texture, it displays only a black sprite. I think the problem is that the texture function in my fragment shader returns 0 and screws up the whole formula.
Is there a way to know if the sampler2D is not initialized or null, and just return the sprite_color?
A sampler cannot be "empty". A valid texture must be bound to the texture units referenced by each sampler in order for rendering to have well-defined behavior.
But that doesn't mean you have to read from the texture that's bound there. It's perfectly valid to use a uniform value to tell the shader whether to read from the texture or not.
But you still have to bind something, even if it is just a simple 1x1 texture. Indeed, you can then use textureSize on the sampler: if it reports a 1x1 texture, don't bother to read from it. Note that this might be slower than using a uniform.
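For the textureSize approach, a minimal fragment-shader sketch for the sprite shader above could look like the following (it assumes a 1x1 placeholder texture is kept bound whenever no real sprite texture is loaded):
#version 330 core
in vec2 vs_tex_coords;
out vec4 fs_color;
uniform sampler2D image;
uniform vec3 sprite_color;
void main()
{
    // A 1x1 texture is treated as "no texture loaded" (assumption: the
    // placeholder bound by the application is exactly 1x1).
    if (textureSize(image, 0) == ivec2(1, 1))
        fs_color = vec4(sprite_color, 1.0);
    else
        fs_color = vec4(sprite_color, 1.0) * texture(image, vs_tex_coords);
}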
Below are the two versions, with and without an if... else... conditional statement. The conditional version avoids sampling the texture when it is not used.
The uniform int textureSample is set to 1 to show the texture and to 0 to show the color. Both uniform variables are normally set by the application, not the shader.
uniform int textureSample = 1;
uniform vec3 color = vec3(1.0, 1.0, 0.0);
uniform sampler2D textureSampler;
in vec2 fragmentTexture; // texture coordinates passed in from the vertex shader
void main() { // without if... else...
    // ...
    // textureSample == 1 selects the texture, textureSample == 0 selects the uniform color
    vec3 materialDiffuseColor = textureSample * texture(textureSampler, fragmentTexture).rgb
                              - (textureSample - 1) * color;
    // ...
}
void main() { // with if... else...
    // ...
    if (textureSample == 1) { // 1 if texture, 0 if color
        vec3 materialDiffuseColor = texture(textureSampler, fragmentTexture).rgb;
        vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
        vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
        gl_FragColor = vec4(brightness *
            (materialAmbientColor +
             materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
             materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000.0) / distanceLight2), 1.0);
    }
    else {
        vec3 materialDiffuseColor = color;
        vec3 materialAmbientColor = vec3(0.5, 0.5, 0.5) * materialDiffuseColor;
        vec3 materialSpecularColor = vec3(0.3, 0.3, 0.3);
        gl_FragColor = vec4(brightness *
            (materialAmbientColor +
             materialDiffuseColor * lightPowerColor * cosTheta / distanceLight2 +
             materialSpecularColor * lightPowerColor * pow(cosAlpha, 10000.0) / distanceLight2), 1.0);
    }
    // ...
}
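On the application side, the selection might be driven like this (a sketch only; shaderProgram and whitePixelTexture are assumed names, the uniform names match the shader above):
// Sketch: switch to color-only rendering.
glUseProgram(shaderProgram);
glUniform1i(glGetUniformLocation(shaderProgram, "textureSample"), 0); // 0 = show the color
glUniform3f(glGetUniformLocation(shaderProgram, "color"), 1.0f, 1.0f, 0.0f);
// Keep a 1x1 white placeholder bound so the sampler always reads valid data
// (the "without if... else..." version always samples the texture).
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, whitePixelTexture); // assumed 1x1 placeholder texture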
I'd check the length of the rgb value sampled from the diffuse texture. This won't work on a specular map, though:
vec3 texDiffuseCol = texture(diffuseTex, TexCoord).rgb;
if (length(texDiffuseCol) == 0.0)
{
    // Texture not present (or the texel is pure black)
}
else
{
    // Texture present
}
Currently I am rendering mesh triangles like this:
// draw the same polygons again
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
shader.setVec3("objectColor", obj_color);
glDrawElements(GL_TRIANGLES, static_cast<unsigned int>(indices.size()), GL_UNSIGNED_INT, 0);
The problem with this code is that I am setting the object color inside the shader for the whole mesh.
What would be a good way to render a single mesh whose faces have different colors?
For now I only know how to set vertex colors and pass them to the fragment shader.
What are the most common ways to set individual face colors?
The only approach I can think of is duplicating mesh vertices to avoid vertex color interpolation.
My current shader looks like this:
Vertex Shader:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormal;
out vec3 FragPos;
out vec3 Normal;
out vec3 LightPos;
uniform vec3 lightPos;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(aPos, 1.0);
FragPos = vec3(view * model * vec4(aPos, 1.0));
Normal = mat3(transpose(inverse(view * model))) * aNormal;
LightPos = vec3(view * vec4(lightPos, 1.0)); // Transform the world-space light position to view space
// FragPos = vec3(model * vec4(aPos, 1.0));
//Normal = mat3(transpose(inverse(model))) * aNormal;
// gl_Position = projection * view * vec4(FragPos, 1.0);
}
Fragment Shader:
#version 330 core
out vec4 FragColor;
in vec3 FragPos;
in vec3 Normal;
in vec3 LightPos;
// extra in variable, since we need the light position in view space we calculate this in the vertex shader
uniform vec3 lightColor;
uniform vec3 objectColor;
uniform float f;
uniform float transparency;
void main()
{
//flat shading
// vec3 x_ = dFdx(FragPos);
// vec3 y_= dFdy(FragPos);
// vec3 normal_ = cross(x_, y_);
// vec3 norm_ = normalize(normal_);
// ambient
float ambientStrength = 0.75;
vec3 ambient = ambientStrength * lightColor;
// diffuse
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(LightPos - FragPos);
float diff = max(dot(norm, lightDir), 0.0);//change "norm_" to "norm" avoid the performance warning and have unwelded view
vec3 diffuse = diff * lightColor;
// specular
float specularStrength = 0.01;
vec3 viewDir = normalize(-FragPos); // the viewer is always at (0,0,0) in view-space, so viewDir is (0,0,0) - Position => -Position
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
vec3 specular = specularStrength * spec * lightColor;
vec3 shading = (ambient + diffuse + specular)*objectColor;
//float f = 0.75;
float r_interpolated = shading[0] + f * (objectColor[0] - shading[0]);
float g_interpolated = shading[1] + f * (objectColor[1] - shading[1]);
float b_interpolated = shading[2] + f * (objectColor[2] - shading[2]);
vec3 result = vec3(r_interpolated,g_interpolated,b_interpolated);
FragColor = vec4(result, transparency);
}
You can use the flat Interpolation qualifier:
The value will not be interpolated. The value given to the fragment shader is the value from the Provoking Vertex for that primitive.
Vertex shader
// [...]
layout (location = 0) in vec3 aColor;
flat out vec3 vColor;
void main()
{
vColor = aColor;
// [...]
}
Fragment shader
// [...]
flat in vec3 vColor;
void main()
{
FragColor = vec4(vColor, 1.0);
}
With this implementation, the entire triangle primitive is rendered in a single color. If you come up with a clever scheme for assigning the color attributes to the vertices, you can render all triangles with different colors, e.g. 2 triangles with the indices 0-1-2 and 1-2-3. Since by default the provoking vertex is the last vertex of a primitive, the color attribute of vertex 2 defines the color of the first triangle and the color attribute of vertex 3 defines the color of the 2nd triangle.
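If it is more convenient to take the flat color from the first index of each triangle instead, the provoking-vertex convention can be switched on the application side (a one-line sketch, assuming an OpenGL 3.2+ context):
// Make flat attributes come from the first vertex of each primitive
// instead of the default last vertex.
glProvokingVertex(GL_FIRST_VERTEX_CONVENTION);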
An alternative way would be to create an array of colors, one per triangle primitive, and store this color array in a Shader Storage Buffer Object. Use gl_VertexID to address the color in the vertex shader. Note that with indexed rendering gl_VertexID is the vertex index, so the gl_VertexID / 3 mapping below assumes vertices are not shared between triangles (e.g. glDrawArrays or fully duplicated vertices).
layout(std430, binding = 0) buffer primitiveColors
{
    vec4 colors[];
};

flat out vec3 vColor;

void main()
{
    vColor = colors[gl_VertexID / 3].rgb;
    // [...]
}
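For completeness, a sketch of how the application side could fill and bind that buffer (names such as triangleColors and the use of glm are assumptions, not code from the question; shader storage buffers require OpenGL 4.3 or the corresponding extension, whereas the question targets 3.3):
// One vec4 color per triangle; std430 lays out vec4[] tightly, so this matches the shader block.
std::vector<glm::vec4> triangleColors(triangleCount, glm::vec4(1.0f));

GLuint ssbo = 0;
glGenBuffers(1, &ssbo);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
glBufferData(GL_SHADER_STORAGE_BUFFER,
             triangleColors.size() * sizeof(glm::vec4),
             triangleColors.data(), GL_STATIC_DRAW);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo); // binding = 0 matches the shader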
I am having a very strange occurrence where glDisableVertexAttribArray works in my local solution, but when I get the solution from my Perforce repository, it doesn't run and triggers an assert.
I checked out this forum question, but unfortunately it didn't solve my problem. This is for the shadow mapping I have been working on: when I try to render things to the depth buffer and then disable the vertex attributes, it throws an error.
Here's how my code is laid out:
glUseProgram(shaderProgram);
glUniform1i(u_diffuseTextureLocation, 0);
glUniform1i(u_shadowMapLocation, 1);
[...]
glUseProgram(shaderProgram);
[Render some stuff to depth buffer]
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation); // This gives the GL_INVALID_OPERATION error
And here's the vertex shader in that program:
#version 430 core
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_model;
uniform mat4 u_lightSpaceMat;
in vec3 a_position;
in vec3 a_normal;
in vec2 a_texture;
out VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} vs_out;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
vs_out.v_fragPos = (u_model * vec4(a_position, 1.0)).xyz;
vs_out.v_normal = transpose(inverse(mat3(u_model))) * a_normal;
vs_out.v_texCoords = a_texture;
vs_out.v_fragPosLightSpace = u_lightSpaceMat * vec4(vs_out.v_fragPos, 1.0);
}
And the fragment shader in the program:
#version 430 core
uniform sampler2D u_shadowMap;
uniform sampler2D u_diffuseTexture;
uniform vec3 u_lightPos;
uniform vec3 u_viewPos;
in VS_OUT {
vec3 v_fragPos;
vec3 v_normal;
vec2 v_texCoords;
vec4 v_fragPosLightSpace;
} fs_in;
out vec4 fragColor;
float shadowCalculation(vec4 fragPosLightSpace, vec3 normal, vec3 lightDir)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// Get closest depth value from light's perspective (using [0,1] range
// fragPosLight as coords)
float closestDepth = texture(u_shadowMap, projCoords.xy).r;
// Get depth of current fragment from lights perspective
float currentDepth = projCoords.z;
float bias = max(0.05 * (1.0 - dot(normal, lightDir)), 0.005);
// Percentage closer filtering
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(u_shadowMap, 0);
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(u_shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
return shadow;
}
void main()
{
vec3 color = texture(u_diffuseTexture, fs_in.v_texCoords).rgb;
vec3 normal = normalize(fs_in.v_normal);
vec3 lightColor = vec3(1.0);
// ambient
vec3 ambient = 0.15 * color;
// diffuse
vec3 lightDir = normalize(u_lightPos - fs_in.v_fragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(u_viewPos - fs_in.v_fragPos);
float spec = 0.0;
vec3 halfWayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfWayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = shadowCalculation(fs_in.v_fragPosLightSpace, normal, lightDir);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
fragColor = vec4(lighting, 1.0);
}
What I'm really confused about is that the program runs when I'm using my local files. But when I pull the files from the Perforce repository and try to run it, it throws the exception. I checked, and all the necessary files are uploaded to Perforce. It seems as though something is going wrong with which attributes are actually active, but I'm not sure. Just scratching my head here...
glBindVertexArray(0);
glDisableVertexAttribArray(a_normalAttribLocation);
glDisableVertexAttribArray modifies the current VAO. You just removed the current VAO, setting it to 0. Which, in a core profile, means no VAO at all. In the compatibility profile, there is a VAO 0, which is probably why it works elsewhere: you're getting the compatibility profile on a different machine.
However, if you're using VAOs, it's not clear why you want to disable an attribute array at all. The whole point of VAOs is that you don't have to call the attribute array functions every frame. You just bind the VAO and go.
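In other words, if the attribute really needs to be disabled, do it while the VAO that owns it is still bound. A sketch of the corrected ordering (identifiers taken from the question; unbinding GL_ELEMENT_ARRAY_BUFFER while the VAO is bound would also detach the VAO's index buffer, so that call is left out here):
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(a_normalAttribLocation); // legal: the owning VAO is still bound
glBindVertexArray(0);

// ... or, simpler, rely on the VAO's stored state and drop the disable call entirely:
glBindVertexArray(0);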
I'm using OpenGL 3.3 and having some odd lighting issue, I'll first show two screenshots at different angles and then give the shader code.
First angle:
Second angle:
What you see here is:
A cube, with its middle on the origin;
A directional light source, coming from the yellow point through the origin;
In cyan you see the normals of the vertices.
I know the normals of the vertices are "wrong", but that is exactly what I was trying to debug.
What I expected was a color that varies from top to bottom on every face, depending on the position of the "sun" and the camera.
But what I get is two parts of the cube (upper and lower) that both have varying colors, but not in the way I expected.
There is code for shadows in the shader, but I deliberately disabled them here to avoid confusion.
Vertex shader:
#version 430 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 0) uniform mat4 model_matrix;
layout(location = 1) uniform mat4 view_matrix;
layout(location = 2) uniform mat4 proj_matrix;
layout(location = 3) uniform mat4 shadow_matrix;
out VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} vs_out;
uniform vec4 light_pos = vec4(-20.0, 7.5, -20.0, 1.0);
void main(void) {
vec4 local_light_pos = view_matrix * light_pos;
vec4 p = view_matrix * model_matrix * position;
//normal
vs_out.N = normalize(normal);
//light vector
vs_out.L = local_light_pos.xyz - p.xyz;
//view vector
vs_out.V = -p.xyz;
//light space coordinates
vs_out.shadow_coord = shadow_matrix * position;
gl_Position = proj_matrix * p;
}
Fragment shader:
#version 430 core
out vec4 color;
in VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} fs_in;
layout(binding = 0) uniform sampler2DShadow shadow_tex;
uniform vec3 light_ambient_albedo = vec3(1.0);
uniform vec3 light_diffuse_albedo = vec3(1.0);
uniform vec3 light_specular_albedo = vec3(1.0);
uniform vec3 ambient_albedo = vec3(0.0, 0.2, 0.0);
uniform vec3 diffuse_albedo = vec3(0.2, 0.7, 0.2);
uniform vec3 specular_albedo = vec3(0.0, 0.0, 0.0);
uniform float specular_power = 128.0;
vec3 rgb_to_grayscale_luminosity(vec3 color) {
float value = color.r * 0.21 + color.g * 0.71 + color.b * 0.07;
return vec3(value);
}
void main(void) {
//normalize
vec3 N = normalize(fs_in.N);
vec3 L = normalize(fs_in.L);
vec3 V = normalize(fs_in.V);
//calculate R
vec3 R = reflect(-L, N);
//calculate ambient
vec3 ambient = ambient_albedo * light_ambient_albedo;
//calculate diffuse
vec3 diffuse = max(dot(N, L), 0.0) * diffuse_albedo * light_diffuse_albedo;
//calculate specular
vec3 specular = pow(max(dot(R, V), 0.0), specular_power) * specular_albedo * light_specular_albedo;
//apply shadow and write color
float shadow_value = textureProj(shadow_tex, fs_in.shadow_coord);
if (shadow_value > 0.0001 || true) {
//no shadow
color = vec4(ambient + diffuse + specular, 1.0);
}
else {
//in shadow
//color = vec4(rgb_to_grayscale_luminosity((ambient + diffuse) * (1 - shadow_value)), 0.5);
//color = vec4(vec3(shadow_value), 0.5);
color = vec4((ambient + diffuse) * (1 - shadow_value) * 0.5, 1.0);
}
}
What could be going wrong here?
Assuming your normals only point upwards/downwards (x=0 and z=0 in the OpenGL coordinate system), what you see is the expected behavior (no bug concerning the shaders/graphics pipeline).
During the rasterization stage of the graphics pipeline, the vertex attributes are interpolated across the primitive (barycentric coordinates).
Assuming that all normals above the plane y=0 are vec3(0, 1, 0) and all normals below this plane are vec3(0, -1, 0), the interpolated normal for every fragment will be vec3(0, *, 0), where * is >0 above the y=0 plane and <0 below it.
In your fragment shader you normalize all normals, so they all become vec3(0, 1, 0) again for fragments above the y=0 plane and vec3(0, -1, 0) for fragments below it.
This results in one uniform color for everything above the y=0 plane and another for everything below it.
You can check this by removing the normal normalization in the fragment shader, or by adding a minimal offset to the x or z coordinate of some normals, e.g.
vec3(0.0000001, +/-1, 0)
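One quick way to see this interpolation in action is to output the raw interpolated normal as a color instead of the lighting result (a debug sketch for the fragment shader above, not a fix):
// Debug: visualize the interpolated (un-normalized) normal.
// Components are remapped from [-1, 1] to [0, 1] so negative values stay visible.
color = vec4(fs_in.N * 0.5 + 0.5, 1.0);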
I've got a shader that implements shadow mapping like this:
#version 430 core
out vec4 color;
in VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} fs_in;
layout(binding = 0) uniform sampler2DShadow shadow_tex;
uniform vec3 light_ambient_albedo = vec3(1.0);
uniform vec3 light_diffuse_albedo = vec3(1.0);
uniform vec3 light_specular_albedo = vec3(1.0);
uniform vec3 ambient_albedo = vec3(0.1, 0.1, 0.2);
uniform vec3 diffuse_albedo = vec3(0.4, 0.4, 0.8);
uniform vec3 specular_albedo = vec3(0.0, 0.0, 0.0);
uniform float specular_power = 128.0;
void main(void) {
//color = vec4(0.4, 0.4, 0.8, 1.0);
//normalize
vec3 N = normalize(fs_in.N);
vec3 L = normalize(fs_in.L);
vec3 V = normalize(fs_in.V);
//calculate R
vec3 R = reflect(-L, N);
//calculate ambient
vec3 ambient = ambient_albedo * light_ambient_albedo;
//calculate diffuse
vec3 diffuse = max(dot(N, L), 0.0) * diffuse_albedo * light_diffuse_albedo;
//calculate specular
vec3 specular = pow(max(dot(R, V), 0.0), specular_power) * specular_albedo * light_specular_albedo;
//write color
color = textureProj(shadow_tex, fs_in.shadow_coord) * vec4(ambient + diffuse + specular, 0.5);
//if in shadow, then multiply color by 0.5 ^^, except alpha
}
What I want to do is to first check whether the fragment is indeed in shadow, and only then change the color (halve it, such that it becomes halfway between fully black and the original color).
However, how do I check whether the textureProj(...) result means the fragment is in shadow? As far as I know it returns a normalized float value.
Would something like textureProj(...) > 0.9999 already suffice? I know that it can return values other than zero or one if you are using multisampling, and I'd like behaviour that will not just break at some point.
The outputting vertex shader:
#version 430 core
layout(location = 0) in vec4 position;
layout(location = 0) uniform mat4 model_matrix;
layout(location = 1) uniform mat4 view_matrix;
layout(location = 2) uniform mat4 proj_matrix;
layout(location = 3) uniform mat4 shadow_matrix;
out VS_OUT {
vec3 N;
vec3 L;
vec3 V;
vec4 shadow_coord;
} vs_out;
uniform vec4 light_pos = vec4(-20.0, 7.5, -20.0, 1.0);
void main(void) {
vec4 local_light_pos = view_matrix * light_pos;
vec4 p = view_matrix * model_matrix * position;
//normal
vs_out.N = vec3(0.0, 1.0, 0.0);
//light vector
vs_out.L = local_light_pos.xyz - p.xyz;
//view vector
vs_out.V = -p.xyz;
//light space coordinates
vs_out.shadow_coord = shadow_matrix * position;
gl_Position = proj_matrix * p;
}
Note that the fragment shader is for the terrain and the vertex shader is for the floor, so there might be minor inconsistencies between the two, but they should not be relevant.
shadow_matrix is a uniform passed in as bias_matrix * light_projection_matrix * light_view_matrix * light_model_matrix.
textureProj (...) does not return a normalized floating-point value. It does return a single float if you use it on a sampler<1D|2D|2DRect>Shadow, but this value represents the result of a depth test. 1.0 = pass, 0.0 = fail.
Now, the interesting thing to note here, and the reason returning a float for a shadow sampler is meaningful at all has to do with filtering the shadow map. If you use a GL_LINEAR filter mode on the shadow map together with a shadow sampler, GL will actually pick the 4 closest texels in the shadow map and perform 4 independent depth tests.
Each depth test still has a binary result, but GL will return a weighted average of the result of all 4 tests (based on distance from the ideal sample location). So if you use GL_LINEAR in conjunction with a shadow sampler, you will have a value that lies somewhere in-between 0.0 and 1.0 representing the average occlusion for the 4 nearest depth samples.
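For reference, this filtered comparison only happens when the depth texture is set up as a comparison texture with linear filtering; a sketch of that texture state (the texture object name is an assumption):
glBindTexture(GL_TEXTURE_2D, shadowDepthTexture); // assumed name of the shadow depth texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);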
I should point out that your use of textureProj (...) looks potentially wrong to me. The coordinate it takes is a 4D vector: the s and t components (divided by q) locate the texel, and the r component (also divided by q) is the depth value that gets tested against it. I do not see anywhere in your code where a meaningful depth value ends up in that coordinate. If you could edit your question to include the vertex/geometry shader that is outputting shadow_coord, that would help.
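In the meantime, since the returned value is an average occlusion in [0, 1] rather than a strict 0 or 1, one option for the "half brightness in shadow" effect is to use it as a continuous factor instead of testing it against a threshold like > 0.9999 (a sketch based on your fragment shader, not a drop-in fix):
float lit = textureProj(shadow_tex, fs_in.shadow_coord); // 0.0 = fully occluded, 1.0 = fully lit
// Scale smoothly between half brightness (in shadow) and full brightness (lit).
color = vec4((ambient + diffuse + specular) * mix(0.5, 1.0, lit), 1.0);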
Try the following:
Get the distance from each vertex of your model to the light.
Send this distance to your fragment shader.
Compare the distance to the value stored in your shadow map sampler (I assume this texture stores the depth values of your scene from the light's point of view?)
If the distance is greater than the stored value, the point is in shadow. Otherwise, it is not (a sketch of this comparison follows below).
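A sketch of that last comparison (hypothetical names; it assumes the shadow map was written with the scene's depth as seen from the light):
// Fragment shader sketch: distanceToLight and shadowCoords are assumed to be
// passed in from the vertex shader; shadowMap is a plain sampler2D depth texture.
float storedDepth = texture(shadowMap, shadowCoords.xy).r;  // written during the shadow pass
float shadow = (distanceToLight > storedDepth) ? 1.0 : 0.0; // 1.0 = in shadow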
If this is confusing, here's a pair of tutorials that should help:
http://ogldev.atspace.co.uk/www/tutorial23/tutorial23.html
http://ogldev.atspace.co.uk/www/tutorial24/tutorial24.html
I've been having problems sending the shininess factor to my bump mapping shader.
The result always looks like this: http://i.imgur.com/unzdx.jpg
But if I hard-code the value to 0.0 inside the shader, it works just fine.
When I send 0.0 to the shader, it turns out like in the picture above.
Any ideas?
Here's my shader
#version 110
uniform sampler2D tex;
uniform sampler2D bmap;
uniform bool boolBump;
uniform vec4 vecColor;
uniform bool onlyColor;
uniform float fTransparencyThresh;
uniform float fShininess;
uniform float alpha;
varying vec3 vecLight;
varying vec3 vecEye;
varying vec3 vecNormal;
vec4 getLighting()
{
//Ambient part
vec4 color = (gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) + (gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
//For bump mapping, the normal comes from the bump map texture lookup
vec3 n = normalize(vecNormal);
if(boolBump)
{
n = normalize(texture2D(bmap, gl_TexCoord[0].st).xyz * 2.0 - 1.0);
}
vec3 l = normalize(vecLight);
//Lambert term
float NdotL = dot(n, l);
if(NdotL > 0.0)
{
//Diffuse part
color += gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * max(0.0, NdotL);
//Specular part
vec3 e = normalize(vecEye);
vec3 r = normalize(-reflect(l,n));
float spec = pow(max(0.0, dot(r, e)), fShininess);
color += gl_LightSource[0].specular * gl_FrontMaterial.specular * spec;
}
return color;
}
void main(void)
{
vec4 texel = texture2D(tex, gl_TexCoord[0].st);
if(texel.a < fTransparencyThresh)
discard;
//Get shading
vec4 color = getLighting();
//Color only mode?
if(onlyColor)
{
color *= vecColor;
}
else
{
color *= texel;
}
//Set fragment color, alpha comes from MTL file
gl_FragColor = vec4(color.xyz, alpha);
}
Edit, OpenGL code:
void MyClass::sendToShader(const OBJ::StelModel* pStelModel, Effect cur, bool& tangEnabled, int& tangLocation)
{
int location;
tangEnabled = false;
if(cur != No)
{
location = curShader->uniformLocation("fTransparencyThresh");
curShader->setUniform(location, fTransparencyThresh);
location = curShader->uniformLocation("alpha");
curShader->setUniform(location, pStelModel->pMaterial->alpha);
location = curShader->uniformLocation("fShininess");
curShader->setUniform(location, 0.0f);
...
Edit: Even this won't work:
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
glUniform1f(loc, 0.0f);
Note that pow(0, 0) is undefined. This means spec is undefined whenever fShininess == 0 and max(0.0, dot(r, e)) is 0, i.e. whenever dot(r, e) <= 0.
Do you make sure the program is actively bound when you call glUniform? Do you check glGetError anywhere?
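A sketch of what those checks could look like around the call from the question (the error handling is illustrative only):
glUseProgram(curShader->program); // the program must be current when glUniform* is called
GLint loc = glGetUniformLocation(curShader->program, "fShininess");
if (loc == -1)
{
    // Uniform not found (misspelled name, or optimized out because it is unused).
}
glUniform1f(loc, 0.0f);

GLenum err = glGetError();
if (err != GL_NO_ERROR)
{
    // Log err here; GL_INVALID_OPERATION typically means no (or the wrong) program was bound.
}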