GLSL: How to show normals with a geometry shader?

I have this vertex shader:
#version 330 core
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec2 VertexUV;
layout(location = 2) in vec3 VertexNormal;
out VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform mat4 proj_matrix;
uniform mat4 model_matrix;
void main(void)
{
gl_Normal = VertexNormal;
gl_Position = proj_matrix * vec4(VertexPosition, 1.0);
vertex_out.UV = VertexUV; //VertexPosition.xy;
vertex_out.vs_worldpos = gl_Position.xyz;
vertex_out.vs_normal = mat3(model_matrix) * gl_Normal;
}
and this fragment shader:
#version 330 core
in GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in;
// Values that stay constant for the whole mesh.
uniform sampler2D sampler0;
uniform sampler2D sampler1;
uniform sampler2D sampler2;
uniform sampler2D sampler3;
//uniform sampler2D alphamap0;
uniform sampler2D alphamap1;
uniform sampler2D alphamap2;
uniform sampler2D alphamap3;
uniform int tex_count;
uniform vec4 color_ambient = vec4(0.75, 0.75, 0.75, 1.0);
uniform vec4 color_diffuse = vec4(0.25, 0.25, 0.25, 1.0);
//uniform vec4 color_specular = vec4(1.0, 1.0, 1.0, 1.0);
uniform vec4 color_specular = vec4(0.1, 0.1, 0.1, 0.25);
uniform float shininess = 5.0f;
uniform vec3 light_position = vec3(12.0f, 32.0f, 560.0f);
void main(){
vec3 light_direction = normalize(light_position - vertex_in.vs_worldpos);
vec3 normal = normalize(vertex_in.vs_normal);
vec3 half_vector = normalize(light_direction + normalize(vertex_in.vs_worldpos));
float diffuse = max(0.0, dot(normal, light_direction));
float specular = pow(max(0.0, dot(vertex_in.vs_normal, half_vector)), shininess);
gl_FragColor = texture( sampler0, vertex_in.UV ) * color_ambient + diffuse * color_diffuse + specular * color_specular;
// http://www.opengl.org/wiki/Texture_Combiners
// GL_MODULATE = *
// GL_INTERPOLATE Blend tex0 and tex1 based on a blending factor = mix(texel0, texel1, BlendFactor)
// GL_INTERPOLATE Blend tex0 and tex1 based on alpha of tex0 = mix(texel0, texel1, texel0.a)
// GL_ADD = clamp(texel0 + texel1, 0.0, 1.0)
if (tex_count > 0){
vec4 temp = texture( sampler1, vertex_in.UV );
vec4 amap = texture( alphamap1, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 1){
vec4 temp = texture( sampler2, vertex_in.UV );
vec4 amap = texture( alphamap2, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 2){
vec4 temp = texture( sampler3, vertex_in.UV );
vec4 amap = texture( alphamap3, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
}
It takes an indexed GL_TRIANGLE_STRIP as input:
glBindBuffer(GL_ARRAY_BUFFER, tMt.vertex_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_POSITION, 3, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(0);
{ chunk tex position }
glBindBuffer(GL_ARRAY_BUFFER, chunkTexPositionBO);
glVertexAttribPointer(VERTEX_LAYOUT_TEX_UV, 2, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, tMt.normal_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_NORMAL, 3, GL_FLOAT, true, 0, pointer(0));
glEnableVertexAttribArray(2);
{ index buffer }
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, chunkIndexBO);
for i := 0 to tMt.texCount - 1 do begin
bt := tMt.texture_buf_id[cx, cy][i];
if bt = nil then
break;
glUniform1i(proj_tex_count_loc, i);
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, bt.id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if i > 0 then begin
// this time, use blending:
glActiveTexture(GL_TEXTURE4 + 1);
glBindTexture(GL_TEXTURE_2D, tMt.alphamaps[cx, cy][i - 1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
end;
end;
glDrawElements(GL_TRIANGLE_STRIP, length(chunkIndexArr), GL_UNSIGNED_SHORT, nil);
The code works as intended, except I'm not sure my normals are arranged properly: they were stored as bytes (converted to GLfloat as b / 0xFF), the xyz coordinates were reordered, and some probably need negation.
Can someone show me a geometry shader that displays the normals as lines, as shown at http://blogs.agi.com/insight3d/index.php/2008/10/23/geometry-shader-for-debugging-normals/ ? (Those shaders don't work at all for me, and it seems the out/in data gets lost between the vertex and fragment shaders.)
P.S. I'm not sure I did everything properly (I'm just starting with OpenGL and GLSL), so any other suggestions are also appreciated.
Edit:
I made a simple geometry shader from examples:
// This is a very simple pass-through geometry shader
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 145) out;
in VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in[];
out GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform float uNormalsLength = 0.5;
void main()
{
int i;
// Loop over the input vertices
for (i = 0; i < gl_in.length(); i++)
{
vertex_out.UV = vertex_in[i].UV;
vertex_out.vs_worldpos = vertex_in[i].vs_worldpos;
vertex_out.vs_normal = vertex_in[i].vs_normal;
// Copy the input position to the output
gl_Position = gl_PositionIn[i];
EmitVertex();
gl_Position = gl_ModelViewProjectionMatrix * (gl_PositionIn[i] + (vec4(vertex_in[i].vs_normal, 0) * uNormalsLength));
gl_FrontColor = vec4(0.0, 0.0, 0.0, 1.0); //gl_FrontColorIn[i];
EmitVertex();
}
// End the primitive. This is not strictly necessary
// and is only here for illustrative purposes.
EndPrimitive();
}
but I don't know where gl_ModelViewProjectionMatrix comes from (it seems deprecated), and the result looks awful: it seems everything, including the normals, gets turned into strips. The picture is in glPolygonMode(GL_FRONT, GL_LINE) mode, and the textures also try to map onto those lines.

By the looks of it, you're doing it all in a single pass, and you actually emit 6 vertices per incoming triangle. This is not what you want.
Either do it in two passes, i.e. one pass for the mesh and another for the normals, or try to emit the original triangle plus a degenerate triangle for each normal. For simplicity I'd go for the two-pass version:
Inside your render loop:
render terrain
if and only if debug geometry is to be rendered
enable your debug normals shader
render the terrain mesh a second time, passing POINTS to the vertex shader
To make this work, you'll need a second program object, made up like in the blog post you previously linked to, consisting of a simple pass-through vertex shader, the following geometry shader, and a fragment shader for coloring the lines representing the normals.
The vertex and fragment shaders should be no problem. Assuming you have a smoothed mesh, i.e. you have actual, averaged vertex normals, you can simply pass in points and emit lines.
#version 330 core
// assuming you have vertex normals, you need to render a vertex
// only a single time. with any other prim type, you may render
// the same normal multiple times
layout (points) in;
// Geometry shaders can only output points, line strips or triangle
// strips by definition. you output a single line per vertex. therefore,
// the maximum number of vertices per line_strip is 2. This is effectively
// the same as rendering distinct line segments.
layout (line_strip, max_vertices = 2) out;
in vec3 vs_normal[];
uniform float normal_scale = 0.5; // don't forget: this is the default value!
/* if you're never going to change the normal_scale, consider simply putting a
constant there instead:
const float normal_scale = 0.5;
*/
void main()
{
// we simply transform and emit the incoming vertex - this is v0 of our
// line segment
vec4 v0 = gl_in[0].gl_Position;
gl_Position = gl_ModelViewProjectionMatrix * v0;
EmitVertex();
// we calculate v1 of our line segment
vec4 v1 = v0 + vec4(vs_normal[0] * normal_scale, 0);
gl_Position = gl_ModelViewProjectionMatrix * v1;
EmitVertex();
EndPrimitive();
}
Warning: Untested code!
This is probably as simple as it gets. Add a uniform to your fragment shader so you can color your normals as you like or simply export a constant color.
Note: This code still uses gl_ModelViewProjectionMatrix. If you're writing GL core code, please consider replacing legacy GL constructs, like the matrix stack, with your own stuff!
Note 2: Your geometry shader is not what is usually referred to as a pass-through shader. First, you do more processing on the incoming data than just assigning incoming values to outgoing values. Second, how can it be a pass-through shader if you generate geometry? Pass-through means you don't do anything other than pass incoming values to the next shader stage.

Related

Empty (white) framebuffer - shadow mapping

See the EDIT, since the first part of the problem is solved.
I am trying to replicate the shadow mapping demo from http://learnopengl.com/#!Advanced-Lighting/Shadows/Shadow-Mapping with my own framework, but interestingly I do not get any shadows. The first significant problem is that my depth map is not working correctly. I have debugged and double-checked each line without success. Maybe another pair of eyes will have more success.
See (top left, 5th row - the image is completely white):
I will not write about the second render pass for now, since it seems that the first one is already not working. By the way, the objects are centered at 0, 0, 0. The following code is used for the first render pass:
/// 1. render target is the depth map
glViewport(0, 0, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32);
m_frameBufferObject.bind(); // set the depth map as render target
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/// place the camera where the light is positioned and render the scene
math::Matrix4D l_lightViewMatrix = math::Matrix4D::lookAt(m_light_p->getPosition(), math::Vector3D(0, 0, 0), math::Vector3D(0, 1, 0));
const math::Matrix4D& l_orthographicLightMatrix_r = m_light_p->getShadowInformation().getProjectionMatrix();
math::Matrix4D lightSpaceMatrix = l_orthographicLightMatrix_r * l_lightViewMatrix;
m_depthMapShader_p->bind();
m_depthMapShader_p->setUniformMat4("lightSpaceMatrix", lightSpaceMatrix);
renderNodes();
m_depthMapShader_p->printShaderInfoLog();
m_depthMapShader_p->unbind();
m_frameBufferObject.unbind();
I have tested that my view matrix and projection matrix generation delivers exactly the same results as GLM (a math library for OpenGL). However, my orthographic matrix is defined by:
left = -10.0f
right = 10.0f
bottom = -10.0f
top = 10.0f
near = -1.0f
far = 7.5f
The initialization of the framebuffer object and the texture is as follows:
// - Create depth texture
glGenTextures(1, &m_shadowTextureBuffer_u32);
glBindTexture(GL_TEXTURE_2D, m_shadowTextureBuffer_u32);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
m_frameBufferObject.bind();
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_shadowTextureBuffer_u32, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
fprintf(stderr, "Error on building shadow framebuffer\n");
exit(EXIT_FAILURE);
}
m_frameBufferObject.unbind();
The fragment and vertex shaders look like this:
#version 430
// Fragment shader for rendering the depth values to a texture.
out vec4 gl_FragColor;
void main()
{
gl_FragColor = vec4 (gl_FragCoord.z);
}
#version 430
// Vertex shader for rendering the depth values to a texture.
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix;
uniform mat4 ml_matrix;
uniform mat4 lightSpaceMatrix;
void main()
{
gl_Position = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);
}
EDIT:
After some sleep, I found a little error in my renderer, and the shader now draws a "nice" depth map.
However, it looks like the texture mapping (depth comparison) is in the same coordinate system.
But the second rendering step is still not correct:
The vertex and fragment shaders for the second render pass look like this:
#version 430
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix = mat4(1.0);
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
uniform mat4 lightSpaceMatrix = mat4(1.0);
out VS_OUT
{
vec4 color;
vec2 texture_coordinates;
vec3 normal;
vec3 tangent;
vec3 binormal;
vec3 worldPos;
vec4 shadowProj;
flat int materialIdOut;
} vs_out;
void main()
{
vs_out.color = color;
vs_out.texture_coordinates = uv;
mat3 normalMatrix = transpose ( inverse ( mat3 ( ml_matrix )));
vs_out.normal = normalize ( normalMatrix * normalize ( normal ));
vs_out.tangent = normalize ( normalMatrix * normalize ( tangent ));
vs_out.binormal = normalize ( normalMatrix * normalize ( cross (normal , tangent )));
vs_out.worldPos = ( ml_matrix * vec4 ( position, 1)).xyz;
vs_out.materialIdOut = materialId;
vs_out.shadowProj = ( lightSpaceMatrix * ml_matrix * vec4 (position, 1.0) );
gl_Position = ( pr_matrix * vw_matrix * ml_matrix ) * vec4 (position, 1.0);
}
and
#version 430
#define MAX_NUM_TEXTURES 5
#define MAX_NUM_MATERIALS 12
struct SMaterial
{
vec3 m_ambient_v3;
vec3 m_diffuse_v3;
vec3 m_specular_v3;
float m_shininess_f32;
int m_textureIds[MAX_NUM_TEXTURES];
};
in VS_OUT
{
vec4 color;
vec2 texture_coordinates;
vec3 normal;
vec3 tangent;
vec3 binormal;
vec3 worldPos;
vec4 shadowProj;
flat int materialIdOut;
} fs_in;
uniform vec3 cameraPos;
uniform mat4 ml_matrix;
uniform mat4 vw_matrix;
uniform sampler2D texSlots[32];
uniform SMaterial material[MAX_NUM_MATERIALS];
uniform SLight light;
out vec4 gl_FragColor;
float shadowCalculation(vec4 fragPosLightSpace)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// Transform to [0,1] range
projCoords = projCoords * vec3(0.5) + vec3(0.5);
// Get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(texSlots[31], projCoords.xy).r;
// Get depth of current fragment from light's perspective
float currentDepth = projCoords.z;
// Check whether current frag pos is in shadow
float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
return shadow;
}
void main()
{
if ( (fs_in.materialIdOut >= 0) && (fs_in.materialIdOut < MAX_NUM_MATERIALS) )
{
int ambientTextureId = material[fs_in.materialIdOut].m_textureIds[0];
int diffuseTextureId = material[fs_in.materialIdOut].m_textureIds[1];
int specularTextureId = material[fs_in.materialIdOut].m_textureIds[2];
int alphaTextureId = material[fs_in.materialIdOut].m_textureIds[3];
int bumpTextureId = material[fs_in.materialIdOut].m_textureIds[4];
vec3 diffTexColor = vec3(0.6,0.6,0.6);
if ((diffuseTextureId >= 0) && (32 > diffuseTextureId))
{
diffTexColor = texture (texSlots[diffuseTextureId], fs_in.texture_coordinates).rgb;
}
// Calculate shadow
float shadow = 1.0 - shadowCalculation(fs_in.shadowProj);
gl_FragColor = vec4(diffTexColor, 1.0) * vec4(shadow, shadow, shadow, 1.0);
}
else
{
gl_FragColor = vec4(fs_in.normal,1.0);
}
}
In my experience a depth map pretty much always looks completely white, because a distance of more than 1 away from the light already makes that pixel white. If your whole scene is farther than 1 unit away, then the whole map is white.
To render the map like they show in the tutorial, you either need your scene to be really small, or you need to perform an operation on your depth map. I always like to check my maps by dividing their depth values by the camera's zFar distance. Try to find the best value at which you can see contrast.

OpenGL color transform

I'm using OpenGL to draw a large array of 2D points with their colors. Each point (vertex) also has its alpha channel defined in the MX.c array. I'd like to be able to increase or decrease the alpha value of the whole array (of every vertex displayed). Is there a clever way to do it using OpenGL functions? Here's my drawing method:
void PointsMX::drawMX()
{
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_UNSIGNED_BYTE, 0, MX.c);
glVertexPointer(2, GL_DOUBLE, 0, MX.p);
glPushMatrix();
glTranslated(position[X], position[Y], 0.0);
glScaled(scale, scale, 1.0);
glDrawArrays(GL_POINTS, 0, MX.size);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
As datenwolf points out in his comments, you can do this pretty simply using a shader, but not using the fixed-function pipeline (which is what you're using if you never call glUseProgram()).
If you're not using lighting, reproducing the fixed function shaders isn't very hard, and a little googling will help you get up to that point.
The key here is that you want to change something that is normally a vertex attribute (the alpha channel of the color) to a configurable value for the entire drawing operation. In shader terms this means overriding the vertex attribute with a uniform. A uniform is simply a value you pass into an OpenGL program which then has the same value for every vertex or fragment processed (depending on whether you put it into the vertex or fragment shader).
Here's an example of a very basic vertex shader:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
}
And a corresponding fragment shader
#version 330
in vec4 vColor;
out vec4 FragColor;
void main()
{
FragColor = vColor;
}
In order to accomplish what you're trying to do, you'd want to change the vertex shader to add an additional uniform representing your alpha override:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
uniform float AlphaOverride = -1.0;
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
if (AlphaOverride > 0.0) {
vColor.a = AlphaOverride;
}
}
If you fail to set the AlphaOverride uniform it will be -1, and will therefore be ignored by the vertex shader. But if you set it to a value between 0 and 1, then it will be applied to the alpha channel of your vertex.

OpenGL 3.3 deferred shading not working

I've set up an OpenGL environment with deferred shading following this tutorial, but I can't make the second shader output to my final buffer.
I can see that the first shader (the one that doesn't use lights) is working properly, because with gDEBugger I can see that the output buffers are correct, but the second shader really can't display anything. I've also tried to make the second shader output a single color for the whole scene just to see if it was displaying anything, but nothing is visible (the screen should be completely red, but it isn't).
The first-pass shader (the one I use to create the buffers for the GBuffer) is working, so I'm not adding its code or how I created and implemented my GBuffer, but if you need them I'll add them, just tell me.
I think the problem is where I tell OpenGL to output to framebuffer 0 (my screen).
This is how I enable OpenGL to write to framebuffer 0:
glEnable(GL_BLEND);
m_MotoreGrafico->glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
// Abilito la scrittura sul buffer finale
m_MotoreGrafico->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
m_gBuffer.BindForReading();
glClear(GL_COLOR_BUFFER_BIT);
// Imposto le matrici dello shader
SetUpOGLProjectionViewMatrix(1);
// Passo le texture del GBuffer allo shader
pActiveShader->setUniform1i(_T("gPositionMap"), m_gBuffer.GetPositionTexture());
pActiveShader->setUniform1i(_T("gColorMap"), m_gBuffer.GetDiffuseTexture());
pActiveShader->setUniform1i(_T("gNormalMap"), m_gBuffer.GetNormalTexture());
// Passo variabili necessarie allo shader
float dimensioneFinestra[2], posizioneCamera[3];
dimensioneFinestra[0] = m_nLarghezzaFinestra;
dimensioneFinestra[1] = m_nAltezzaFinestra;
m_MotoreGrafico->GetActiveCameraPosition(posizioneCamera);
pActiveShader->setUniform2f(_T("gScreenSize"), dimensioneFinestra);
pActiveShader->setUniform3f(_T("gCameraPos"), posizioneCamera);
pActiveShader->setUniform1i(_T("gUsaLuci"), 0);
// Disegno le luci
float coloreLuce[3], posizioneLuce[3], direzioneLuce[3], vUpLuce[3], vRightLuce[3], intensita;
for(int i = 0; i < GetDocument()->m_RTL.GetNLights(); i++)
{
CRTLuce* pRTLuce = GetDocument()->m_RTL.GetRTLightAt(i);
...
m_MotoreGrafico->glBindVertexArray(pRTLuce->GetRTLuce()->GetVBO()->getVBAIndex());
glDrawArrays(GL_TRIANGLES, 0, pRTLuce->GetRTLuce()->GetNVertPerShader());
}
The function m_gBuffer.BindForReading() looks like this (but I think it doesn't matter for my problem):
for (unsigned int i = 0 ; i < ARRAY_SIZE_IN_ELEMENTS(m_textures); i++)
{
m_pMotoreGrafico->glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i]);
}
So far my GBuffer is working (it creates the textures) and my first shader is also working (it's drawing the textures of my GBuffer).
The problem then is that I can't get OpenGL to draw to my screen again.
The first 4 textures are the ones created with the first-pass shader.
This is my back buffer (after the second-pass shader)
And this is my front buffer (after the second-pass shader)
This is my second-pass fragment shader code (it outputs only red)
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
Does anyone have an idea of what I'm doing wrong?
Second-pass vertex shader code:
#version 330
uniform struct Matrici
{
mat4 projectionMatrix;
mat4 modelMatrix;
mat4 viewMatrix;
} matrices;
layout (location = 0) in vec3 inPosition;
void main()
{
vec4 vEyeSpacePosVertex = matrices.viewMatrix * matrices.modelMatrix * vec4(inPosition, 1.0);
gl_Position = matrices.projectionMatrix * vEyeSpacePosVertex;
}
Second-pass fragment shader code:
#version 330
uniform struct MDLight
{
vec3 vColor;
vec3 vPosition;
vec3 vDirection;
float fAmbientIntensity;
float fStrength;
int bOn;
float fConeCosine;
float fAltezza;
float fLarghezza;
vec3 vUp;
vec3 vRight;
} gLuce;
uniform float gSpecularIntensity;
uniform float gSpecularPower;
uniform sampler2D gPositionMap;
uniform sampler2D gColorMap;
uniform sampler2D gNormalMap;
uniform vec3 gCameraPos;
uniform vec2 gScreenSize;
uniform int gLightType;
uniform int gUsaLuci;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / gScreenSize;
}
out vec4 outputColor;
void main()
{
vec2 TexCoord = CalcTexCoord();
vec4 Color = texture(gColorMap, TexCoord);
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}

How do I get textures to work in OpenGL?

I'm using the tutorials at http://arcsynthesis.org/gltut/ to learn OpenGL (it's required, I have to use it). Mostly I want to apply the textures from Tutorial 15 onto the objects in Tutorial 7 (world with UBO).
For now it seems like the textures only work when mipmaps are turned on. This comes with a downside: the only mipmap level used is the one with index zero, and that's the single-colored 1x1 pixel one. I tried setting the minimum mipmap level higher or turning off mipmaps entirely, but even that doesn't fix things, because then everything turns pitch black. Now I'll list the most important parts of my program.
EDIT: I guess I'll add more details...
The vertex shader has something like this:
#version 330
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color;
layout(location = 2) in vec3 normal;
//Added these later
layout(location = 5) in vec2 texCoord;
out vec2 colorCoord;
smooth out vec4 interpColor;
out vec3 vertexNormal;
out vec3 modelSpacePosition;
out vec3 cameraSpacePosition;
uniform mat4 worldToCameraMatrix;
uniform mat4 modelToWorldMatrix;
uniform mat3 normalModelToCameraMatrix;
uniform vec3 dirToLight;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
uniform vec4 baseColor;
uniform mat4 cameraToClipMatrix;
void main()
{
vertexNormal = normal;
vec3 normCamSpace = normalize(normalModelToCameraMatrix * vertexNormal);
cameraSpacePosition = normCamSpace;
float cosAngIncidence = dot(normCamSpace, dirToLight);
cosAngIncidence = clamp(cosAngIncidence, 0, 1);
modelSpacePosition.x = position.x;
modelSpacePosition.y = position.y;
modelSpacePosition.z = position.z;
vec4 temp = modelToWorldMatrix * position;
temp = worldToCameraMatrix * temp;
gl_Position = cameraToClipMatrix * temp;
interpColor = ((lightIntensity * cosAngIncidence) + (ambientIntensity)) * baseColor;
colorCoord= texCoord ;
}
The fragment shader looks like this:
#version 330
in vec3 vertexNormal;
in vec3 modelSpacePosition;
smooth in vec4 interpColor;
uniform vec3 modelSpaceLightPos;
uniform vec4 lightIntensity2;
uniform vec4 ambientIntensity2;
out vec4 outputColor;
//Added later
in vec2 colorCoord;
uniform sampler2D colorTexture;
void main()
{
vec3 lightDir2 = normalize(modelSpacePosition - modelSpaceLightPos);
float cosAngIncidence2 = dot(normalize(vertexNormal), lightDir2);
cosAngIncidence2 = clamp(cosAngIncidence2, 0, 1);
float light2DistanceSqr = dot(modelSpacePosition - modelSpaceLightPos, modelSpacePosition - modelSpaceLightPos);
//added
vec4 texture2 = texture(colorTexture, colorCoord);
outputColor = ((ambientIntensity2 + (interpColor*2))/4) +
((((interpColor) * lightIntensity2/200 * cosAngIncidence2) + (ambientIntensity2* interpColor ))
/( ( sqrt(light2DistanceSqr) + light2DistanceSqr)/200 ));
//No outputColor for texture testing
outputColor = texture2 ;
}
Those were both shaders. And here are the parts added to the .cpp:
#include <glimg/glimg.h>
#include "../framework/directories.h"
[...]
const int g_colorTexUnit = 0;
GLuint g_checkerTexture = 0;
And here's the loader for the texture:
void LoadCheckerTexture()
{
try
{
std::string filename(LOCAL_FILE_DIR);
filename += "checker.dds";
std::auto_ptr<glimg::ImageSet>
pImageSet(glimg::loaders::dds::LoadFromFile(filename.c_str()));
glGenTextures(1, &g_checkerTexture);
glBindTexture(GL_TEXTURE_2D, g_checkerTexture);
glimg::SingleImage image = pImageSet->GetImage(0, 0, 0);
glimg::Dimensions dims = image.GetDimensions();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, dims.width, dims.height, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, image.GetImageData());
glBindTexture(GL_TEXTURE_2D, 0);
}
catch(std::exception &e)
{
printf("%s\n", e.what());
throw;
}
}
Naturally I've got this in void init():
LoadCheckerTexture();
And then when rendering the object:
glActiveTexture(GL_TEXTURE0 + g_colorTexUnit);
glBindTexture(GL_TEXTURE_2D,g_checkerTexture);
g_pLeftMesh->Render();
glBindSampler(g_colorTexUnit, 0);
glBindTexture(GL_TEXTURE_2D, 0);
With all of this, I get pitch black for everything; however, when I change the outputColor equation to "texture + outputColor;", everything looks normal. I have no idea what I'm doing wrong here. A friend tried to help me; we removed some unnecessary stuff, but we got nothing running.
Ok guys, I've worked on this whole thing and managed to somehow get it running. First off, I had to add samplers:
GLuint g_samplers;
//Add Later
void CreateSamplers()
{
glGenSamplers(1, &g_samplers);
glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_S, GL_REPEAT);
glSamplerParameteri(g_samplers, GL_TEXTURE_WRAP_T, GL_REPEAT);
//Linear mipmap Nearest
glSamplerParameteri(g_samplers, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glSamplerParameteri(g_samplers, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
I also added this to the file loading code:
glimg::OpenGLPixelTransferParams xfer = glimg::GetUploadFormatType(pImageSet->GetFormat(), 0);
glimg::SingleImage image = pImageSet->GetImage(0, 0, 0);
glimg::Dimensions dims = image.GetDimensions();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dims.width, dims.height, 0,
xfer.format, xfer.type, image.GetImageData());
The xfer variable gets the format and type adjusted to match the DDS file.
The render code also got turned into this:
//Added necessary
glActiveTexture(GL_TEXTURE0 + g_colorTexUnit);
glBindTexture(GL_TEXTURE_2D,g_checkerTexture);
glBindSampler(g_colorTexUnit, g_samplers);
g_pLeftMesh->Render();
glBindSampler(g_colorTexUnit, 0);
glBindTexture(GL_TEXTURE_2D, 0);
And of course at the end of init() I needed to add the CreateSamplers thing:
//Added this later
LoadCheckerTexture();
CreateSamplers();
I'm sorry for all the trouble with this, but I guess OpenGL really is just this confusing, and it was just dumb luck that I got it right. I'm just posting this so that people know.
Your failure to get textures working may be caused by the following:
Have you added texture coordinates to your objects? (This is the most probable cause, because you are adding textures to a non-textured tutorial.) Add the texture coordinates to the VAO.
Did you add a texture unit uniform (sampler2D)? (It must be a uniform, otherwise texturing will not work properly.)
Is your texture loaded, bound, and enabled (GL_TEXTURE_2D)?
Is your active texture unit 0? If not, change the layout/multitexture coordinates or set the active texture unit to 0.
These two shaders are simple texturing shaders (texture unit 0) with no special things (like lighting, blending, bump mapping, ...):
tm_l2g is the transformation from local object space to world space (modelview)
tm_g2s is the transformation from world space to screen space (projection)
pos are vertex coordinates
txr are texture coordinates
col are colors
Do not forget to change the uniform names and layout locations to yours.
Vertex:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform mat4x4 tm_l2g;
uniform mat4x4 tm_g2s;
layout(location=0) in vec3 pos;
layout(location=1) in vec4 col;
layout(location=2) in vec2 txr;
out smooth vec4 pixel_col;
out smooth vec2 pixel_txr;
//------------------------------------------------------------------
void main(void)
{
vec4 p;
p.xyz=pos;
p.w=1.0;
p=tm_l2g*p;
p=tm_g2s*p;
gl_Position=p;
pixel_col=col;
pixel_txr=txr;
}
//------------------------------------------------------------------
Fragment:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
in smooth vec4 pixel_col;
in smooth vec2 pixel_txr;
uniform sampler2D txr_texture0;
out layout(location=0) vec4 frag_col;
//------------------------------------------------------------------
void main(void)
{
vec4 col;
col=texture(txr_texture0,pixel_txr.st);
frag_col=col*pixel_col;
}
//------------------------------------------------------------------
[edit1] CPU-side old-style OpenGL render code (initializations are not included, it's only the render code; they can be found here)
//------------------------------------------------------------------
// set modelview,projection,textures,bind GLSL programs...
GLfloat a=10.0,z=0.0;
glColor3f(1.0,1.0,1.0);
glBegin(GL_QUADS);
// textured quad
glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z);
glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z);
glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z);
glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z);
// reverse-order quad to be sure that at least one passes the CULL_FACE test
glTexCoord2f(1.0,0.0); glVertex3f(+a,-a,z);
glTexCoord2f(1.0,1.0); glVertex3f(+a,+a,z);
glTexCoord2f(0.0,1.0); glVertex3f(-a,+a,z);
glTexCoord2f(0.0,0.0); glVertex3f(-a,-a,z);
glEnd();
//------------------------------------------------------------------
[edit2] OK, here is the VAO/VBO render code:
//------------------------------------------------------------------------------
// enum of VBO locations (it is also your layout location) I use enums for simple in code changes
enum _vbo_enum
{
_vbo_pos=0, // glVertex
_vbo_col, // glColor
_vbo_tan, // glNormal
_vbo_unused0, // unused (at least I don't see anything at this location in your code)
_vbo_unused1, // unused (at least I don't see anything at this location in your code)
_vbo_txr, // glTexCoord
_vbos
};
//------------------------------------------------------------------------------
// 'global' names and size for the OpenGL mesh in VAO/VBO ... similar to texture names/handles
GLuint vao[1],vbo[_vbos],num_pnt=0;
//------------------------------------------------------------------------------
void VAO_init_cube() // call this once before using the VAO, but after OpenGL init!
{
//[1] first you need some model to render (mesh), here is a simple cube
// size,position of cube - change it that it is visible in your scene
const GLfloat a=1.0,x=0.0,y=0.0,z=0.0;
// cube points 3f x,y,z
GLfloat mesh_pos[]=
{
x-a,y-a,z-a,x-a,y+a,z-a,x+a,y+a,z-a,x+a,y-a,z-a,
x-a,y-a,z+a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y-a,z+a,
x-a,y-a,z-a,x-a,y-a,z+a,x+a,y-a,z+a,x+a,y-a,z-a,
x-a,y+a,z-a,x-a,y+a,z+a,x+a,y+a,z+a,x+a,y+a,z-a,
x-a,y-a,z-a,x-a,y+a,z-a,x-a,y+a,z+a,x-a,y-a,z+a,
x+a,y-a,z-a,x+a,y+a,z-a,x+a,y+a,z+a,x+a,y-a,z+a,
};
// cube colors 3f r,g,b
GLfloat mesh_col[]=
{
0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,1.0,0.0,0.0,
0.0,0.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,
0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,0.0,
0.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,
1.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0,
};
// cube normals 3f x,y,z
GLfloat mesh_tan[]=
{
-0.6,-0.6,-0.6,-0.6,+0.6,-0.6,+0.6,+0.6,-0.6,+0.6,-0.6,-0.6,
-0.6,-0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6,
-0.6,-0.6,-0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,-0.6,-0.6,
-0.6,+0.6,-0.6,-0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,+0.6,-0.6,
-0.6,-0.6,-0.6,-0.6,+0.6,-0.6,-0.6,+0.6,+0.6,-0.6,-0.6,+0.6,
+0.6,-0.6,-0.6,+0.6,+0.6,-0.6,+0.6,+0.6,+0.6,+0.6,-0.6,+0.6,
};
// cube texture coords 2f s,t
GLfloat mesh_txr[]=
{
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,
};
// init VAO/VBO
glGenVertexArrays(1,vao); // allocate 1 x VAO
glGenBuffers(_vbos,vbo); // allocate _vbos x VBO
// copy mesh to VAO/VBO ... after this you do not need the mesh anymore
GLint i,sz,n; // n = number of numbers per 1 entry
glBindVertexArray(vao[0]);
num_pnt=sizeof(mesh_pos)/(sizeof(GLfloat)*3); // num of all points in mesh
i=_vbo_pos; n=3; sz=sizeof(GLfloat)*n; // locations come from the _vbo_enum above
glBindBuffer(GL_ARRAY_BUFFER,vbo[i]);
glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_pos,GL_STATIC_DRAW);
glEnableVertexAttribArray(i);
glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0);
i=_vbo_col; n=3; sz=sizeof(GLfloat)*n;
glBindBuffer(GL_ARRAY_BUFFER,vbo[i]);
glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_col,GL_STATIC_DRAW);
glEnableVertexAttribArray(i);
glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0);
i=_vbo_tan; n=3; sz=sizeof(GLfloat)*n;
glBindBuffer(GL_ARRAY_BUFFER,vbo[i]);
glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_tan,GL_STATIC_DRAW);
glEnableVertexAttribArray(i);
glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0);
i=_vbo_txr; n=2; sz=sizeof(GLfloat)*n;
glBindBuffer(GL_ARRAY_BUFFER,vbo[i]);
glBufferData(GL_ARRAY_BUFFER,sz*num_pnt,mesh_txr,GL_STATIC_DRAW);
glEnableVertexAttribArray(i);
glVertexAttribPointer(i,n,GL_FLOAT,GL_FALSE,0,0);
glBindVertexArray(0);
}
//------------------------------------------------------------------------------
void VAO_draw() // call this to draw your mesh,... need to enable and bind textures,... before use
{
glDisable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glBindVertexArray(vao[0]);
glEnableVertexAttribArray(_vbo_pos);
glEnableVertexAttribArray(_vbo_col);
glEnableVertexAttribArray(_vbo_tan);
glDisableVertexAttribArray(_vbo_unused0);
glEnableVertexAttribArray(_vbo_txr);
glDrawArrays(GL_QUADS,0,num_pnt);
glDisableVertexAttribArray(_vbo_pos);
glDisableVertexAttribArray(_vbo_col);
glDisableVertexAttribArray(_vbo_tan);
glDisableVertexAttribArray(_vbo_unused0);
glDisableVertexAttribArray(_vbo_unused1);
glDisableVertexAttribArray(_vbo_txr);
glBindVertexArray(0);
}
//------------------------------------------------------------------------------
void VAO_exit() // clean up ... call this when you do not need VAO/VBO anymore
{
glDisableVertexAttribArray(_vbo_pos);
glDisableVertexAttribArray(_vbo_col);
glDisableVertexAttribArray(_vbo_tan);
glDisableVertexAttribArray(_vbo_unused0);
glDisableVertexAttribArray(_vbo_unused1);
glDisableVertexAttribArray(_vbo_txr);
glBindVertexArray(0);
glDeleteVertexArrays(1,vao);
glDeleteBuffers(_vbos,vbo);
}
//------------------------------------------------------------------------------
[edit3] If you are a Win32/64 user, you can try my IDE for GLSL.
It is very simple and easy to use, but it cannot change texture/attribute locations. Press [F1] for help, [F9] to run, and [F10] to return to normal OpenGL mode. The text editor is also a little buggy sometimes, but it is enough for my purposes.
GLSL IDE

Alternate gl_FragColor values from two textures in GLSL

I have two textures, cloud and hill, each 512 x 512 in size, and I intend to create a gl_FragColor output which obtains its pixel values from those textures. In this case, I want to obtain the 1st pixel in gl_FragColor from the 1st pixel in the 1st texture, the 2nd pixel in gl_FragColor from the 2nd pixel in the 2nd texture, the 3rd pixel in gl_FragColor from the 3rd pixel in the 1st texture, and so on. Here is my fragment shader code:
uniform sampler2D tex0;
uniform sampler2D tex1;
void main() {
vec4 cloud = texture2D(tex0, gl_TexCoord[0].st);
vec4 hill = texture2D(tex1, gl_TexCoord[0].st);
for (int i=0;i<512;i++) {
for (int j=0;j<512;j++) {
if ( j%2 == 0)
gl_FragColor = cloud;
else
gl_FragColor = hill;
}
}
}
Here's the texture unit setup:
t1 = loadTexture("pY.raw", 512, 512);
t2 = loadTexture("pZ.raw", 512, 512);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, t2);
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, t1);
glEnable(GL_TEXTURE_2D);
And here is the uniform setup:
glUseProgram(program);
GLuint t1Location = glGetUniformLocation(program, "tex0");
GLuint t2Location = glGetUniformLocation(program, "tex1");
glUniform1i(t1Location, 0);
glUniform1i(t2Location, 1);
The problem is, the output of the program is only the hill texture, and I don't know how to fix this. Any suggestions?
You don't need to do any iterations in your shader. The fragment shader is called once for every pixel of your object. Instead, use gl_TexCoord[0] to get the current texture coordinates. Your code should look something like this:
uniform sampler2D tex0;
uniform sampler2D tex1;
void main()
{
vec4 cloud = texture2D(tex0, gl_TexCoord[0].st);
vec4 hill = texture2D(tex1, gl_TexCoord[0].st);
if (mod(floor(gl_TexCoord[0].x * 512.0), 2.0) == 0.0) // use mod(): integer % needs GLSL 1.30+
gl_FragColor = cloud;
else
gl_FragColor = hill;
}
This one should work even with older OpenGL:
#ifdef GL_ES
precision highp float;
#endif
uniform sampler2D tex0;
uniform sampler2D tex1;
void main(void)
{
if((gl_FragCoord/32.0- vec4(ivec4(gl_FragCoord/32.0))).x<0.5)
gl_FragColor = texture2D(tex0, gl_FragCoord.xy/512.0);
else
gl_FragColor = texture2D(tex1, gl_FragCoord.xy/512.0);
}
You can try it out with WebGL at: http://www.iquilezles.org/apps/shadertoy/