I'm trying to use uniform buffers, but they don't work as expected. I have two uniform buffers: one for the lighting and the other for the material. The problem is that the colors aren't what they are supposed to be, and they change every time I move the camera. This problem didn't exist when I used normal uniforms. Here are pictures to show what I mean: when using uniform buffers and when using normal uniforms!
This is my fragment shader:
#version 400 // Fragment Shader
uniform layout(std140);
in vec3 EyePosition;
in vec3 EyeNormal;
in vec2 TexCoord;
out vec4 FragColor;
uniform sampler2D Texture;
uniform LightBlock
{
vec4 Position;
vec4 Intensity;
} Light;
uniform MaterialBlock
{
vec4 Ambient;
vec4 Diffuse;
} Material;
vec4 PointLight(in int i, in vec3 ECPosition, in vec3 ECNormal)
{
vec3 n = normalize(ECNormal);
vec3 s = normalize(Light.Position.xyz - ECPosition);
return Light.Intensity * (Material.Ambient + Material.Diffuse * max(dot(s, n), 0.0));
}
void main()
{
FragColor = texture(Texture, TexCoord);
FragColor *= PointLight(0, EyePosition, EyeNormal);
}
I'm not sure I have done everything right but here's how I create the uniform buffers:
glGenBuffers(1, &light_buffer);
glGenBuffers(1, &material_buffer);
glBindBuffer(GL_UNIFORM_BUFFER, light_buffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(LightBlock), nullptr, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, material_buffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(MaterialBlock), nullptr, GL_DYNAMIC_DRAW);
GLuint program = Shaders.GetProgram();
light_index = glGetUniformBlockIndex(program, "LightBlock");
material_index = glGetUniformBlockIndex(program, "MaterialBlock");
glUniformBlockBinding(program, light_index, 0);
glUniformBlockBinding(program, material_index, 1);
glBindBufferBase(GL_UNIFORM_BUFFER, 0, light_buffer);
glBindBufferBase(GL_UNIFORM_BUFFER, 1, material_buffer);
EDIT: Here's how I fill the buffers:
// Global structures
struct LightBlock
{
Vector4 Position; // Vector4 is a vector class I made
Vector4 Intensity;
};
struct MaterialBlock
{
Vector4 Ambient;
Vector4 Diffuse;
};
// This is called for every object rendered
LightBlock Light;
Light.Position = Vector3(0.0f, 5.0f, 5.0f) * Camera.GetCameraMatrix();
Light.Intensity = Vector4(1.0f);
glBindBuffer(GL_UNIFORM_BUFFER, light_buffer);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(LightBlock), &Light);
MaterialBlock Material;
Material.Diffuse = Vector4(1.0f);
Material.Ambient = Material.Diffuse * Vector4(0.3f);
glBindBuffer(GL_UNIFORM_BUFFER, material_buffer);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(MaterialBlock), &Material);
I had the same problem, but only with AMD (not NVIDIA). The funny thing was that the problem only happened when changing the view matrix.
Since the problem was repeatable and depended on changes to the view matrix, I was able to trace it to the root cause (through arduous trial and error). When changing the view in my application, I allocate and free some OpenGL resources dynamically depending on what is needed. In this process, there is a call to glDeleteBuffers() for buffer 0. If I add a conditional so that glDeleteBuffers is never called for buffer 0, the problem goes away.
According to the documentation, buffer 0 should be silently ignored by glDeleteBuffers. My guess is that there is a bug in the AMD drivers.
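The workaround itself is trivial; it boiled down to something like this (the handle name is just a placeholder from my cleanup code):
if (bufferId != 0) // glDeleteBuffers is supposed to ignore 0, but skipping the call avoids the driver bug
{
    glDeleteBuffers(1, &bufferId);
    bufferId = 0;
}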
Try to update the buffer using glMapBuffer/glUnmapBuffer.
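For example, a minimal sketch of filling the light buffer from the question that way (untested; memcpy needs <cstring>):
glBindBuffer(GL_UNIFORM_BUFFER, light_buffer);
void* ptr = glMapBuffer(GL_UNIFORM_BUFFER, GL_WRITE_ONLY);
if (ptr != nullptr)
{
    memcpy(ptr, &Light, sizeof(LightBlock));
    glUnmapBuffer(GL_UNIFORM_BUFFER);
}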
Related
I am trying to get a basic geometry shader to work, but I am completely failing. After checking numerous resources, I still cannot find a solution to my problem.
Here is my code for my vertex, geometry, and fragment shaders.
Vertex Shader:
#version 330 core
// Vertex Shader Inputs
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec3 Norm;
layout (location = 2) in vec3 Color;
// Vertex to Fragment Shader Outputs
out DATA {
vec3 vsPos;
vec3 vsNorm;
vec4 vsColor;
} data_out;
// Main.cpp Imports
uniform mat4 camMatrix; // viewProjection Matrix
uniform mat4 model;
void main()
{
vec3 vsPos = vec3(model * vec4(Pos, 1.0f));
vec3 vsNorm = mat3(transpose(inverse(model))) * Norm; // Normal vector correction
vec4 vsColor = vec4(Color, 1.0f);
gl_Position = camMatrix * vec4(vsPos, 1.0f);
}
Geometry Shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
out vec3 gsPos;
out vec3 gsNorm;
out vec3 gsColor;
in DATA {
vec3 vsPos;
vec3 vsNorm;
vec4 vsColor;
} data_in[];
uniform mat4 camMatrix;
void main()
{
for (int i=0; i<3; i++)
{
gsPos = data_in[i].vsPos;
gsNorm = data_in[i].vsNorm;
gsColor = data_in[i].vsColor;
gl_Position = camMatrix * vec4(data_in[i].vsPos, 1.0f);
EmitVertex();
}
EndPrimitive();
}
Fragment Shader:
#version 330 core
out vec4 FragColor;
// Fragment Shader Inputs
in vec3 gsPos;
in vec3 gsNorm;
in vec4 gsColor;
// Fragment Shader Uniforms
uniform sampler2D diffuse0;
uniform sampler2D specular0;
uniform vec4 lightColor;
uniform vec3 lightPos;
uniform vec3 camPos;
vec4 pointLight()
{
vec3 lightVec = (lightPos - vsPos);
// intensity of light with respect to distance
float dist = length(lightVec);
float a = 0.7;
float b = 0.4;
float c = 1.0;
float inten = 1.0f / (a * dist * dist + b * dist + c);
// ambient lighting
float ambient = 0.75f;
// diffuse lighting
vec3 fsNorm = normalize(gsNorm);
vec3 lightDirection = normalize(lightVec);
float diffuse = max(dot(fsNorm, lightDirection), 0.0f);
// specular lighting
float specular = 0.0f;
if (diffuse != 0.0f)
{
float specularLight = 0.50f;
vec3 viewDirection = normalize(gsNorm - gsPos);
vec3 halfwayVec = normalize(viewDirection + lightDirection);
float specAmount = pow(max(dot(fsNorm, halfwayVec), 0.0f), 32);
specular = specAmount * specularLight;
};
return inten * (gsColor * (diffuse + ambient) + gsColor * specular) * lightColor;
}
void main()
{// outputs final color
FragColor = pointLight();
}
My mesh generation function:
void genMesh()
{
VAO.Bind();
VBO VBO(vtx);
EBO EBO(idx);
VAO.LinkAttrib(VBO, 0, 3, GL_FLOAT, sizeof(Vertex), (void*)0);
VAO.LinkAttrib(VBO, 1, 3, GL_FLOAT, sizeof(Vertex), (void*)(3 * sizeof(float)));
VAO.LinkAttrib(VBO, 2, 4, GL_FLOAT, sizeof(Vertex), (void*)(6 * sizeof(float)));
VAO.Unbind();
VBO.Unbind();
EBO.Unbind();
};
My mesh draw function:
void Mesh::Draw(Shader& shader, Camera& camera)
{
shader.Activate();
VAO.Bind();
// Take care of the camera Matrix
glUniform3f(glGetUniformLocation(shader.ID, "camPos"),
camera.Position.x,
camera.Position.y,
camera.Position.z);
camera.Matrix(shader, "camMatrix");
// Draw the actual mesh
glDrawElements(GL_TRIANGLES, idx.size() * sizeof(GLuint), GL_UNSIGNED_INT, 0);
};
I call my mesh generation function outside of the main while loop, then I draw the mesh in my main while loop.
Debugging my program through RenderDoc gives me the error, "No vertex shader bound at draw!" Without the geometry shader (keeping everything else roughly the same), I do not get any errors in RenderDoc. I tried updating my graphics drivers, but I am just getting the same error. Please help me, I feel like I am losing my mind.
In the fragment shader, gsColor is declared as a vec4 variable, but in the geometry shader it is declared as a vec3 variable.
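A minimal sketch of the matching declarations (only the type of gsColor changes; the fragment shader can stay as it is):
// geometry shader outputs: types must match the fragment-shader inputs exactly
out vec3 gsPos;
out vec3 gsNorm;
out vec4 gsColor; // was vec3; the fragment shader declares "in vec4 gsColor;"
Because the geometry-shader outputs and fragment-shader inputs don't match, the program most likely fails to link, which would also explain why RenderDoc reports that no vertex shader is bound at the draw call.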
I'm trying to send colors to the shader, but the colors get swapped:
I send 0xFF00FFFF (magenta) but I get 0xFFFF00FF (yellow) in the shader.
From experimenting, I think something like this is happening:
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
vec4 position;
vec3 normal;
vec4 color;
} vs_out;
void main()
{
gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
vs_out.position = position;
vs_out.color = color;
vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
vec3 position;
vec3 normal;
vec4 color;
} fs_in;
void main()
{
out_color = fs_in.color;
//out_color = vec4(fs_in.color.y, 0, 0, 1);
//out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
Vec3 vertex;
Vec3 normal;
GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
Here are some examples:
I can't figure out what's wrong. Thanks in advance.
Your code reinterprets an int as 4 consecutive bytes in memory. The internal encoding of int (and of every other type) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments. That means 0xFF00FFFF sits in memory as the byte sequence FF FF 00 FF, which the attribute pointer reads as R=0xFF, G=0xFF, B=0x00, A=0xFF: yellow instead of magenta.
You could use an array like GLubyte color[4] to explicitly get a defined memory layout.
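For example, a minimal sketch based on the Vertex_Color struct from the question:
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // stored in memory exactly as R, G, B, A
};
// ...
verts.push_back({ vert, normal, { 0xFF, 0x00, 0xFF, 0xFF } }); // magenta, independent of endianness
// the existing glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, ...) call stays unchanged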
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10 and might be less efficient than the standard approach.
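An untested sketch of that variant; note that unpackUnorm4x8 extracts the least significant byte into the first component, so the packed value reads as 0xAABBGGRR (magenta would be 0xFFFF00FF):
// C++ side: pass the color as a single unsigned integer attribute
glVertexAttribIPointer(2, 1, GL_UNSIGNED_INT, sizeof(Vertex_Color),
    (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
// GLSL side (#version 410, or with ARB_shading_language_packing):
//   layout(location = 2) in uint packedColor;
//   vec4 color = unpackUnorm4x8(packedColor);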
I'm using OpenGL to draw a large array of 2D points with their colors. Each point (vertex) also has its alpha channel defined in the MX.c array. I'd like to be able to increase or decrease the alpha value of the whole array (of every vertex displayed). Is there a clever way to do it using OpenGL functions? Here's my drawing method:
void PointsMX::drawMX()
{
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_UNSIGNED_BYTE, 0, MX.c);
glVertexPointer(2, GL_DOUBLE, 0, MX.p);
glPushMatrix();
glTranslated(position[X], position[Y], 0.0);
glScaled(scale, scale, 1.0);
glDrawArrays(GL_POINTS, 0, MX.size);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
As datenwolf points out in his comments, you can do this pretty simply using a shader, but not using the fixed-function pipeline (which is what you're using if you never call glUseProgram()).
If you're not using lighting, reproducing the fixed-function shaders isn't very hard, and a little googling will help you get up to that point.
The key here is that you want to change something that is normally a vertex attribute (the alpha channel of the color) to a configurable value for the entire drawing operation. In shader terms this means overriding the vertex attribute with a uniform. A uniform is simply a value you pass into an OpenGL program which then has the same value for every vertex or fragment processed (depending on whether you put it into the vertex or fragment shader).
Here's an example of a very basic vertex shader:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
}
And a corresponding fragment shader
#version 330
in vec4 vColor;
out vec4 FragColor;
void main()
{
FragColor = vColor;
}
In order to accomplish what you're trying to do, you'd want to change the vertex shader to add an additional uniform representing your alpha override:
#version 330
uniform mat4 Projection = mat4(1);
uniform mat4 ModelView = mat4(1);
uniform float AlphaOverride = -1.0;
layout(location = 0) in vec3 Position;
layout(location = 3) in vec4 Color;
out vec4 vColor;
void main() {
gl_Position = Projection * ModelView * vec4(Position, 1);
vColor = Color;
if (AlphaOverride > 0.0) {
vColor.a = AlphaOverride;
}
}
If you fail to set the AlphaOverride uniform it will be -1, and it will therefore be ignored by the vertex shader. But if you set it to a value between 0 and 1, that value will be applied to the alpha channel of your vertices.
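On the application side, setting the override could look something like this (a sketch; program stands for whatever handle you pass to glUseProgram()):
glUseProgram(program);
GLint loc = glGetUniformLocation(program, "AlphaOverride");
if (loc != -1)
    glUniform1f(loc, 0.5f); // force the alpha of every vertex to 0.5 for this draw
// ... then draw as usual, e.g. glDrawArrays(GL_POINTS, 0, MX.size);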
I've set up an OpenGL environment with deferred shading following this tutorial, but I can't make the second shader output to my final buffer.
I can see that the first shader (the one that doesn't use lights) is working properly, because with gDEBugger I can see that the output buffers are correct, but the second shader can't display anything. I've also tried making the second shader output a single color for the whole scene just to see whether it was displaying anything, but nothing is visible (the screen should be completely red, but it isn't).
The first-pass shader (the one I use to fill the GBuffer) is working, so I'm not adding its code or how I created and implemented my GBuffer, but if you need them I'll add them, just tell me.
I think the problem is where I tell OpenGL to output to framebuffer 0 (my screen).
This is how I enable OpenGL to write to framebuffer 0:
glEnable(GL_BLEND);
m_MotoreGrafico->glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
// Abilito la scrittura sul buffer finale
m_MotoreGrafico->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
m_gBuffer.BindForReading();
glClear(GL_COLOR_BUFFER_BIT);
// Imposto le matrici dello shader
SetUpOGLProjectionViewMatrix(1);
// Passo le texture del GBuffer allo shader
pActiveShader->setUniform1i(_T("gPositionMap"), m_gBuffer.GetPositionTexture());
pActiveShader->setUniform1i(_T("gColorMap"), m_gBuffer.GetDiffuseTexture());
pActiveShader->setUniform1i(_T("gNormalMap"), m_gBuffer.GetNormalTexture());
// Passo variabili necessarie allo shader
float dimensioneFinestra[2], posizioneCamera[3];
dimensioneFinestra[0] = m_nLarghezzaFinestra;
dimensioneFinestra[1] = m_nAltezzaFinestra;
m_MotoreGrafico->GetActiveCameraPosition(posizioneCamera);
pActiveShader->setUniform2f(_T("gScreenSize"), dimensioneFinestra);
pActiveShader->setUniform3f(_T("gCameraPos"), posizioneCamera);
pActiveShader->setUniform1i(_T("gUsaLuci"), 0);
// Disegno le luci
float coloreLuce[3], posizioneLuce[3], direzioneLuce[3], vUpLuce[3], vRightLuce[3], intensita;
for(int i = 0; i < GetDocument()->m_RTL.GetNLights(); i++)
{
CRTLuce* pRTLuce = GetDocument()->m_RTL.GetRTLightAt(i);
...
m_MotoreGrafico->glBindVertexArray(pRTLuce->GetRTLuce()->GetVBO()->getVBAIndex());
glDrawArrays(GL_TRIANGLES, 0, pRTLuce->GetRTLuce()->GetNVertPerShader());
}
The function m_gBuffer.BindForReading() looks like this (but I don't think it matters for my problem):
for (unsigned int i = 0 ; i < ARRAY_SIZE_IN_ELEMENTS(m_textures); i++)
{
m_pMotoreGrafico->glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_textures[GBUFFER_TEXTURE_TYPE_POSITION + i]);
}
So far my GBuffer is working (it creates the textures) and my first shader is also working (it's drawing the textures of my GBuffer).
The problem then is that I can't get OpenGL to draw to my screen again.
The first 4 textures are the ones created with the first-pass shader.
This is my back buffer (after the second-pass shader)
And this is my front buffer (after the second-pass shader)
This is my second-pass fragment shader code (it outputs only red)
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
Does anyone have an idea of what I'm doing wrong?
Second-pass vertex shader code:
#version 330
uniform struct Matrici
{
mat4 projectionMatrix;
mat4 modelMatrix;
mat4 viewMatrix;
} matrices;
layout (location = 0) in vec3 inPosition;
void main()
{
vec4 vEyeSpacePosVertex = matrices.viewMatrix * matrices.modelMatrix * vec4(inPosition, 1.0);
gl_Position = matrices.projectionMatrix * vEyeSpacePosVertex;
}
Second-pass fragment shader code:
#version 330
uniform struct MDLight
{
vec3 vColor;
vec3 vPosition;
vec3 vDirection;
float fAmbientIntensity;
float fStrength;
int bOn;
float fConeCosine;
float fAltezza;
float fLarghezza;
vec3 vUp;
vec3 vRight;
} gLuce;
uniform float gSpecularIntensity;
uniform float gSpecularPower;
uniform sampler2D gPositionMap;
uniform sampler2D gColorMap;
uniform sampler2D gNormalMap;
uniform vec3 gCameraPos;
uniform vec2 gScreenSize;
uniform int gLightType;
uniform int gUsaLuci;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / gScreenSize;
}
out vec4 outputColor;
void main()
{
vec2 TexCoord = CalcTexCoord();
vec4 Color = texture(gColorMap, TexCoord);
outputColor = vec4(1.0, 0.0, 0.0, 1.0);
}
I have this vertex shader:
#version 330 core
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec2 VertexUV;
layout(location = 2) in vec3 VertexNormal;
out VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform mat4 proj_matrix;
uniform mat4 model_matrix;
void main(void)
{
gl_Normal = VertexNormal;
gl_Position = proj_matrix * vec4(VertexPosition, 1.0);
vertex_out.UV = VertexUV; //VertexPosition.xy;
vertex_out.vs_worldpos = gl_Position.xyz;
vertex_out.vs_normal = mat3(model_matrix) * gl_Normal;
}
and this fragment shader:
#version 330 core
in GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in;
// Values that stay constant for the whole mesh.
uniform sampler2D sampler0;
uniform sampler2D sampler1;
uniform sampler2D sampler2;
uniform sampler2D sampler3;
//uniform sampler2D alphamap0;
uniform sampler2D alphamap1;
uniform sampler2D alphamap2;
uniform sampler2D alphamap3;
uniform int tex_count;
uniform vec4 color_ambient = vec4(0.75, 0.75, 0.75, 1.0);
uniform vec4 color_diffuse = vec4(0.25, 0.25, 0.25, 1.0);
//uniform vec4 color_specular = vec4(1.0, 1.0, 1.0, 1.0);
uniform vec4 color_specular = vec4(0.1, 0.1, 0.1, 0.25);
uniform float shininess = 5.0f;
uniform vec3 light_position = vec3(12.0f, 32.0f, 560.0f);
void main(){
vec3 light_direction = normalize(light_position - vertex_in.vs_worldpos);
vec3 normal = normalize(vertex_in.vs_normal);
vec3 half_vector = normalize(light_direction + normalize(vertex_in.vs_worldpos));
float diffuse = max(0.0, dot(normal, light_direction));
float specular = pow(max(0.0, dot(vertex_in.vs_normal, half_vector)), shininess);
gl_FragColor = texture( sampler0, vertex_in.UV ) * color_ambient + diffuse * color_diffuse + specular * color_specular;
// http://www.opengl.org/wiki/Texture_Combiners
// GL_MODULATE = *
// GL_INTERPOLATE Blend tex0 and tex1 based on a blending factor = mix(texel0, texel1, BlendFactor)
// GL_INTERPOLATE Blend tex0 and tex1 based on alpha of tex0 = mix(texel0, texel1, texel0.a)
// GL_ADD = clamp(texel0 + texel1, 0.0, 1.0)
if (tex_count > 0){
vec4 temp = texture( sampler1, vertex_in.UV );
vec4 amap = texture( alphamap1, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 1){
vec4 temp = texture( sampler2, vertex_in.UV );
vec4 amap = texture( alphamap2, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
if (tex_count > 2){
vec4 temp = texture( sampler3, vertex_in.UV );
vec4 amap = texture( alphamap3, vertex_in.UV);
gl_FragColor = mix(gl_FragColor, temp, amap.a);
}
}
It takes an indexed GL_TRIANGLE_STRIP as input:
glBindBuffer(GL_ARRAY_BUFFER, tMt.vertex_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_POSITION, 3, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(0);
{ chunk tex position }
glBindBuffer(GL_ARRAY_BUFFER, chunkTexPositionBO);
glVertexAttribPointer(VERTEX_LAYOUT_TEX_UV, 2, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, tMt.normal_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_NORMAL, 3, GL_FLOAT, true, 0, pointer(0));
glEnableVertexAttribArray(2);
{ index buffer }
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, chunkIndexBO);
for i := 0 to tMt.texCount - 1 do begin
bt := tMt.texture_buf_id[cx, cy][i];
if bt = nil then
break;
glUniform1i(proj_tex_count_loc, i);
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, bt.id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if i > 0 then begin
// this time, use blending:
glActiveTexture(GL_TEXTURE4 + 1);
glBindTexture(GL_TEXTURE_2D, tMt.alphamaps[cx, cy][i - 1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
end;
end;
glDrawElements(GL_TRIANGLE_STRIP, length(chunkIndexArr), GL_UNSIGNED_SHORT, nil);
The code works as intended, except I'm not sure my normals are arranged properly: they were stored as bytes (converted to GLfloat as b / 0xFF), the xyz coordinates were reordered, and some probably need to be negated.
Can someone show me a geometry shader that displays the normals as lines, as shown at http://blogs.agi.com/insight3d/index.php/2008/10/23/geometry-shader-for-debugging-normals/ ? (That shader doesn't work for me at all, and it seems the out/in data gets lost between the vertex and fragment shaders.)
P.S. I'm not sure I did everything properly (I'm just starting with OpenGL and GLSL), so any suggestions are also appreciated.
Edit:
I made simple geometry shader by examples
// This is a very simple pass-through geometry shader
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 145) out;
in VS_GS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_in[];
out GS_FS_VERTEX
{
vec2 UV;
vec3 vs_worldpos;
vec3 vs_normal;
} vertex_out;
uniform float uNormalsLength = 0.5;
void main()
{
int i;
// Loop over the input vertices
for (i = 0; i < gl_in.length(); i++)
{
vertex_out.UV = vertex_in[i].UV;
vertex_out.vs_worldpos = vertex_in[i].vs_worldpos;
vertex_out.vs_normal = vertex_in[i].vs_normal;
// Copy the input position to the output
gl_Position = gl_PositionIn[i];
EmitVertex();
gl_Position = gl_ModelViewProjectionMatrix * (gl_PositionIn[i] + (vec4(vertex_in[i].vs_normal, 0) * uNormalsLength));
gl_FrontColor = vec4(0.0, 0.0, 0.0, 1.0); //gl_FrontColorIn[i];
EmitVertex();
}
// End the primitive. This is not strictly necessary
// and is only here for illustrative purposes.
EndPrimitive();
}
but I don't know where it is supposed to get gl_ModelViewProjectionMatrix from (it seems deprecated), and the result looks awful; it seems everything, including the normals, gets stripped. The picture is in glPolygonMode(GL_FRONT, GL_LINE) mode, and the textures are also being mapped onto those lines.
From the looks of it, you're doing it all in a single pass and actually emitting 6 vertices per incoming triangle. That is not what you want.
Either do it in two passes, i.e. one pass for the mesh and another for the normals, or try to emit the original triangle plus a degenerate triangle for each normal. For simplicity, I'd go with the two-pass version:
Inside your render loop:
  render the terrain
  if (and only if) the debug geometry is to be rendered:
    enable your debug-normals shader
    render the terrain mesh a second time, passing POINTS to the vertex shader (see the sketch below)
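In host code that boils down to something like this (a sketch; the program, VAO and count names are placeholders):
// pass 1: normal terrain rendering
glUseProgram(terrainProgram);
glBindVertexArray(terrainVAO);
glDrawElements(GL_TRIANGLE_STRIP, indexCount, GL_UNSIGNED_SHORT, nullptr);
// pass 2: debug normals - same vertex data, but drawn as points with the debug program
if (showDebugNormals)
{
    glUseProgram(debugNormalsProgram); // pass-through VS + the geometry shader below + flat-color FS
    glDrawArrays(GL_POINTS, 0, vertexCount);
}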
To make this work, you'll need a second program object, set up like in the blog post you linked to: a simple pass-through vertex shader, the following geometry shader, and a fragment shader for coloring the lines that represent the normals.
The vertex and fragment shaders should be no problem. Assuming you have a smoothed mesh, i.e. you have actual, averaged vertex normals, you can simply pass in points and emit lines.
#version 330 core
// assuming you have vertex normals, you need to render a vertex
// only a single time. with any other prim type, you may render
// the same normal multiple times
layout (points) in;
// Geometry shaders can only output points, line strips or triangle
// strips by definition. you output a single line per vertex. therefore,
// the maximum number of vertices per line_strip is 2. This is effectively
// the same as rendering distinct line segments.
layout (line_strip, max_vertices = 2) out;
in vec3 vs_normal[];
uniform float normal_scale = 0.5; // don't forget: this is the default value!
/* if you're never going to change the normal_scale, consider simply putting a
constant there instead:
const float normal_scale = 0.5;
*/
void main()
{
// we simply transform and emit the incoming vertex - this is v0 of our
// line segment
vec4 v0 = gl_in[0].gl_Position;
gl_Position = gl_ModelViewProjectionMatrix * v0;
EmitVertex();
// we calculate v1 of our line segment
vec4 v1 = v0 + vec4(vs_normal[0] * normal_scale, 0);
gl_Position = gl_ModelViewProjectionMatrix * v1;
EmitVertex();
EndPrimitive();
}
Warning: Untested code!
This is probably as simple as it gets. Add a uniform to your fragment shader so you can color your normals as you like or simply export a constant color.
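For example, a minimal fragment shader for the normal-drawing pass (an untested sketch; the uniform name is arbitrary):
#version 330 core
uniform vec4 line_color = vec4(1.0, 1.0, 0.0, 1.0); // default: yellow lines
out vec4 frag_color;
void main()
{
    frag_color = line_color;
}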
Note: This code still uses gl_ModelViewProjectionMatrix. If you're writing core-profile GL code, please consider replacing legacy GL constructs, like the matrix stack, with your own stuff!
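For instance, the geometry shader above could take its own matrix uniform (a sketch; mvp is a placeholder name, uploaded from the application with glUniformMatrix4fv):
uniform mat4 mvp; // model-view-projection matrix supplied by the application
// then, inside main(), replace both uses of gl_ModelViewProjectionMatrix:
gl_Position = mvp * v0; // first EmitVertex()
gl_Position = mvp * v1; // second EmitVertex()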
Note 2: Your geometry shader is not what is usually referred to as a pass-through shader. First, you do more processing on the incoming data than just assigning incoming values to outgoing values. Second, how could it be a pass-through shader if you generate geometry? Pass-through means you don't do anything other than pass incoming values to the next shader stage.