OpenGL line width geometry shader

I am trying to implement a geometry shader for line thickness using OpenGL 4.3.
I followed the accepted answer and other solutions on Stack Overflow, but the result is wrong, as the screenshot shows. Is there a proper way to get the line's normal in screen space? It looks correct in the first frame, but the moment I move the mouse the camera changes and the offset direction is no longer correct. The shader's camera matrix uniform is updated every frame in the render loop.
GLSL Geometry shader to replace glLineWidth
Vertex shader
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 projection_view_model;
void main()
{
    gl_Position = projection_view_model * vec4(aPos, 1.0);
}
Fragment shader
#version 330 core
//resources:
//https://stackoverflow.com/questions/6017176/gllinestipple-deprecated-in-opengl-3-1
out vec4 FragColor;
uniform vec4 uniform_fragment_color;
void main()
{
    FragColor = uniform_fragment_color;
}
Geometry shader
#version 330 core
layout (lines) in;
layout (triangle_strip, max_vertices = 4) out;
uniform float u_thickness;
uniform vec2 u_viewportSize;
in gl_PerVertex
{
    vec4 gl_Position;
    //float gl_PointSize;
    //float gl_ClipDistance[];
} gl_in[];
void main() {
    //https://stackoverflow.com/questions/54686818/glsl-geometry-shader-to-replace-gllinewidth
    vec4 p1 = gl_in[0].gl_Position;
    vec4 p2 = gl_in[1].gl_Position;
    vec2 dir = normalize((p2.xy - p1.xy) * u_viewportSize);
    vec2 offset = vec2(-dir.y, dir.x) * u_thickness * 100 / u_viewportSize;
    gl_Position = p1 + vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p1 - vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 + vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 - vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    EndPrimitive();
}

To get the direction of the line in normalized device space, the x and y components of the clip-space coordinates must be divided by the w component (perspective divide). Change

vec2 dir = normalize((p2.xy - p1.xy) * u_viewportSize);

to

vec2 dir = normalize((p2.xy / p2.w - p1.xy / p1.w) * u_viewportSize);
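For clarity, this is the geometry shader's main with that single line changed (everything else, including the original u_thickness * 100 scale factor, stays the same):

void main() {
    vec4 p1 = gl_in[0].gl_Position;
    vec4 p2 = gl_in[1].gl_Position;

    // perspective divide before measuring the line direction in window space
    vec2 dir = normalize((p2.xy / p2.w - p1.xy / p1.w) * u_viewportSize);
    vec2 offset = vec2(-dir.y, dir.x) * u_thickness * 100 / u_viewportSize;

    // multiplying the offset by w keeps it constant after the hardware's perspective divide
    gl_Position = p1 + vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p1 - vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 + vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 - vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    EndPrimitive();
}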

Related

Calculating Per face normals in Geometry Shader

I'm trying to calculate per-face normals in a geometry shader, using the following pipeline:
//VERTEX_SHADER
#version 330
layout(location = 0) in vec4 vertex;
out vec3 vert;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
void main()
{
vert = vertex.xyz;
gl_Position = projMatrix * mvMatrix * vertex;
}
//GEOMETRY_SHADER
#version 330
layout ( triangles ) in;
layout ( triangle_strip, max_vertices = 3 ) out;
out vec3 normal_out;
uniform mat3 normalMatrix;
void main()
{
vec3 A = gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz;
vec3 B = gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz;
normal_out = normalMatrix * normalize(cross(A,B));
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gl_Position = gl_in[1].gl_Position;
EmitVertex();
gl_Position = gl_in[2].gl_Position;
EmitVertex();
EndPrimitive();
}
//FRAG_SHADER
#version 330
in vec3 normal_out;
in vec3 vert;
out vec4 fColor;
uniform vec3 lightPos;
void main()
{
highp vec3 L = normalize(lightPos - vert);
highp float NL = max(dot(normal_out, L), 0.0);
highp vec3 color = vec3(1, 1, 0.0);
fColor = vec4(color*NL, 1.0);
}
However, I end up with very weird-looking faces that keep flickering (I included a snapshot below). It occurred to me that it might be because I'm using 8 vertices to represent one cell (cube) instead of 24 vertices, but I'm not quite sure if that is what is causing the problem.
Left: using the lighting weight NL. Right: without.
After every call to EmitVertex, the contents of all output variables are made undefined. Therefore, if you want to output the same value to multiple vertices, you must copy it to the output every time.
Also, note that each shader stage's outputs provide inputs only to the next stage. So if you have a GS, and you want to pass a value from the VS to the FS, you must have the GS explicitly pass that value through.
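As a concrete illustration, here is a minimal sketch of the geometry shader above with the per-face normal written before every emitted vertex; forwarding vert to the fragment shader would additionally need its own geometry-shader output (renamed on one side so the in/out names don't collide):

#version 330
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
out vec3 normal_out;
uniform mat3 normalMatrix;
void main()
{
    vec3 A = gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 B = gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 N = normalMatrix * normalize(cross(A, B));
    for (int i = 0; i < 3; ++i)
    {
        normal_out = N;                      // re-written for each emitted vertex
        gl_Position = gl_in[i].gl_Position;
        EmitVertex();
    }
    EndPrimitive();
}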

OpenGL Uniform Across Multiple Shaders

I have created an application in OpenGL that uses a Vertex Shader, Geometry Shader, and Fragment Shader.
I have a uniform variable, eyePositionWorld that I would like to use both in the Geometry Shader and the Fragment Shader.
(I am rendering the position of the vertices relative to eyePositionWorld as the color.)
Vertex Shader
#version 430
in vec4 vertexPositionModel;
in vec3 vertexColor;
in vec3 vertexNormalModel;
in mat4 modelMatrix;
uniform mat4 viewMatrix;//World To View
uniform mat4 projectionMatrix;//View to Projection
struct fData
{
vec3 fragColor;
vec3 fragPositionWorld;
vec3 fragNormalWorld;
};
out fData geomData;
void main()
{
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vertexPositionModel;
geomData.fragColor = vertexColor;
geomData.fragPositionWorld = (modelMatrix * vertexPositionModel).xyz;
geomData.fragNormalWorld = (modelMatrix * vec4(vertexNormalModel, 0.0)).xyz;
}
Geometry Shader
#version 430
layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices=3) out;
struct fData
{
vec3 fragColor;
vec3 fragPositionWorld;
vec3 fragNormalWorld;
};
uniform vec3 eyePositionWorldGeomShader;
in fData geomData[];
out fData fragData;
void main() {
gl_Position = gl_in[0].gl_Position;
fragData = geomData[0];
fragData.fragColor = gl_in[0].gl_Position.xyz - eyePositionWorldGeomShader;
EmitVertex();
gl_Position = gl_in[2].gl_Position;
fragData = geomData[2];
fragData.fragColor = gl_in[2].gl_Position.xyz - eyePositionWorldGeomShader;
EmitVertex();
gl_Position = gl_in[4].gl_Position;
fragData = geomData[4];
fragData.fragColor = gl_in[4].gl_Position.xyz - eyePositionWorldGeomShader;
EmitVertex();
EndPrimitive();
}
Fragment Shader
#version 430
struct fData
{
vec3 fragColor;
vec3 fragPositionWorld;
vec3 fragNormalWorld;
};
in fData fragData;
uniform vec4 ambientLight;
uniform vec3 lightPositionWorld;
uniform vec3 eyePositionWorld;
uniform bool isLighted;
out vec4 color;
void main()
{
if (!isLighted)
{
color = vec4(fragData.fragColor, 1.0);
}
else
{
vec3 lightVectorWorld = normalize(lightPositionWorld - fragData.fragPositionWorld);
float brightness = clamp(dot(lightVectorWorld, normalize(fragData.fragNormalWorld)), 0.0, 1.0);
vec4 diffuseLight = vec4(brightness, brightness, brightness, 1.0);
vec3 reflectedLightVectorWorld = reflect(-lightVectorWorld, fragData.fragNormalWorld);
vec3 eyeVectorWorld = normalize(eyePositionWorld - fragData.fragPositionWorld);
float specularity = pow(clamp(dot(reflectedLightVectorWorld, eyeVectorWorld), 0.0, 1.0), 40) * 0.5;
vec4 specularLight = vec4(specularity, specularity, specularity, 1.0);
//Maximum Distance of All Lights
float maxDist = 55.0;
float attenuation = clamp((maxDist - length(lightPositionWorld - fragData.fragPositionWorld)) / maxDist, 0.0, 1.0);
color = (ambientLight + (diffuseLight + specularLight) * attenuation) * vec4(fragData.fragColor, 1.0);
}
}
C++ Code (the m_eyePositionUL and m_eyePositionGeomShaderUL are both just loaded with glGetUniformLocation)
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
glUniform3fv(m_eyePositionGeomShaderUL, 1, &m_camera.getPosition()[0]);
How can I upload only one uniform to OpenGL and use it in both the Geometry Shader and the Fragment Shader?
It's a bit surprising, but OpenGL makes this easy: all you have to do is use the same uniform name in both shaders. Because all stages are linked into the same program object, a uniform declared with the same name in multiple stages refers to a single uniform location, so you only upload it once.
Replace uniform vec3 eyePositionWorldGeomShader; with uniform vec3 eyePositionWorld; in your Geometry Shader and keep the uniform name the same in the Fragment Shader.
Then just don't upload the other uniform, so your C++ code is simply
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
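A sketch of the geometry shader after that change (only the uniform name differs from the original above; the loop is just a compact way of emitting vertices 0, 2 and 4):

#version 430
layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices=3) out;
struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
uniform vec3 eyePositionWorld;   // same name as the fragment shader's uniform
in fData geomData[];
out fData fragData;
void main() {
    // vertices 0, 2 and 4 of the triangles_adjacency primitive
    for (int i = 0; i < 6; i += 2)
    {
        gl_Position = gl_in[i].gl_Position;
        fragData = geomData[i];
        fragData.fragColor = gl_in[i].gl_Position.xyz - eyePositionWorld;
        EmitVertex();
    }
    EndPrimitive();
}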

OpenGL Grass rendering

I have been researching different techniques for rendering grass. I've decided to go with geometry-shader-generated grass, mainly so I can generate the triangle fans on the fly when I render the blades as GL_POINTS, but I'm not seeing the performance I'd like. I'm getting maybe 20-50 fps with 100,000 blades of grass, and I have a decent GPU. I'm wondering if my approach is wrong, if I'm reaching the limitations of my GPU, if I'm doing something incorrectly, or if there is a faster way (ideally I want individual blades whose vertices I can manipulate). The texture I am using is 256x256.
My rendering steps are:
Creation of the VAO and VBO and storing locations and binding once:
float[] GrassLocations = new float[100000];
int vaoID = createVAO();
// bind VBO to VAO
storeDataInAttributeList(0, 3, GrassLocations, 0, 0);
I then render:
GL30.glBindVertexArray(VAO);
GL20.glEnableVertexAttribArray(0);
GL13.glActiveTexture(GL13.GL_TEXTURE0);
GL11.glBindTexture(GL11.GL_TEXTURE_2D, texture);
GL11.glDrawArrays(GL11.GL_POINTS, 0, 100000);
GL20.glDisableVertexAttribArray(0);
GL30.glBindVertexArray(0);
then My Vertex Shader:
#version 400
layout (location = 0) in vec3 VertexLocation;
uniform float time;
out vec3 offsets;
out vec3 Position;
out vec3 Normal;
out vec2 TexCoord;
out float visibility;
uniform mat4 transformationMatrix;
uniform mat4 viewMatrix;
uniform mat4 MVPmatrix;
uniform mat4 modelViewMatrix;
const float density = .007;
const float gradient = 1.5;
out float Time;
void main()
{
Time = time;
vec4 worldPosition = transformationMatrix * vec4(VertexLocation,1.0);
vec4 positionRelativeToCam = modelViewMatrix* vec4(VertexLocation,1.0);
Normal = vec3(0,1,0);
Position = vec3( worldPosition );
gl_Position = MVPmatrix* vec4(VertexLocation,1.0);
float distance = length(positionRelativeToCam.xyz);
visibility = exp(-pow((distance * density), gradient));
visibility = clamp(visibility,0.0,1.0);
offsets = offset;
}
I gutted the vertex shader and left only gl_Position, and that still wasn't the issue.
My Geometry Shader:
#version 400
layout( points ) in;
layout( triangle_strip, max_vertices = 10 ) out;
float Size2=1; // Half the width of the quad
in vec3 Position[];
in vec3 Normal[];
in vec3 offsets[];
out vec3 position;
out vec3 normal;
in float Time[];
out vec2 TexCoord;
out vec3 color;
const float width = 5;
void main()
{
position = Position[0];
normal = Normal[0];
color = offsets[0];
gl_Position = (vec4(-Size2*width,-Size2,0.0,0.0) + gl_in[0].gl_Position);
TexCoord = vec2(0.0,0.0);
EmitVertex();
gl_Position = (vec4(Size2*width,-Size2,0.0,0.0) + gl_in[0].gl_Position);
TexCoord = vec2(1.0,0.0);
EmitVertex();
gl_Position = (vec4(-Size2*width+(Time[0].x),10,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(0.0,.25);
EmitVertex();
gl_Position = (vec4(Size2*width+(Time[0].x),10,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(1.0,.25);
EmitVertex();
///////////////////////////////////////////////////
gl_Position = (vec4(-Size2*width+(Time[0].x)*2,15,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(0.0,.50);
EmitVertex();
gl_Position = (vec4(Size2*width+(Time[0].x)*2,15,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(1.0,.50);
EmitVertex();
///////////////////////////////////////////////////
gl_Position = (vec4(-Size2*width+(Time[0].x)*3,25,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(0.0,.75);
EmitVertex();
gl_Position = (vec4(Size2*width+(Time[0].x)*3,25,0.0,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(1.0,.75);
EmitVertex();
///////////////////////////////////////////////////
gl_Position = (vec4(-Size2*width,Size2*7,Time[0].x,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(0.0,1.0);
EmitVertex();
gl_Position = (vec4(Size2*width,Size2*7,Time[0].x,0.0) +
gl_in[0].gl_Position);
TexCoord = vec2(1.0,1.0);
EmitVertex();
}
and my fragment shader (this is in a deferred engine; I've also tried forward rendering and I don't think the performance hit is there):
#version 400
in vec2 TexCoord;
layout (binding=0) uniform sampler2D SpriteTex;
in vec3 color;
in vec3 normal;
in vec3 position;
layout( location = 0 ) out vec4 FragColor;
void main() {
vec4 texColor = texture(SpriteTex,TexCoord);
vec4 posColor = vec4(position.xyz,0);
gl_FragData[1] = posColor;
gl_FragData[2] = vec4(normal,1);
if(texColor.a<.5){
discard;
}
gl_FragData[0] = texColor;
}
What you want is a technique called instancing. The tutorial I've linked is fantastic for figuring out how to do it.
I would probably advise avoiding the geometry shader (geometry shaders generally don't scale well when their purpose is to amplify the vertex count) and instead defining a buffer containing all the vertices necessary to draw a single blade (or patch) of grass, then using instancing to redraw that object thousands of times.
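As a rough sketch of what the instanced vertex shader could look like (the attribute names here are illustrative, not taken from the question): the blade mesh sits in one VBO, a second VBO holds one position per blade with glVertexAttribDivisor(1, 1), and the draw call becomes glDrawArraysInstanced:

#version 400
layout (location = 0) in vec3 bladeVertex;    // one blade/patch mesh, reused by every instance
layout (location = 1) in vec3 instanceOffset; // per-blade position, attribute divisor = 1
uniform mat4 MVPmatrix;
void main()
{
    // place this instance of the blade at its own offset
    gl_Position = MVPmatrix * vec4(bladeVertex + instanceOffset, 1.0);
}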

GLSL geometry shader matrices

I have the following vertex shader:
#version 150 core
attribute vec4 vertex;
varying vec3 vert;
varying float zdepth;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
void main() {
vert = vertex.xyz;
zdepth = -(mvMatrix * vertex).z;
gl_Position = projMatrix * mvMatrix * vertex;
}
and geometry shader:
#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 4) out;
void main() {
vec4 p0 = gl_in[0].gl_Position;
vec4 p1 = gl_in[1].gl_Position;
vec4 p2 = gl_in[2].gl_Position;
vec4 p3 = gl_in[3].gl_Position;
vec4 v0 = normalize(p1-p0);
vec4 v1 = normalize(p2-p1);
vec4 v2 = normalize(p3-p2);
vec4 n11 = normalize(v1-v0);
vec4 n12 = -n11;
vec4 n21 = normalize(v2-v1);
vec4 n22 = -n21;
gl_Position = p1+n11*0.2;
EmitVertex();
gl_Position = p1+n12*0.2;
EmitVertex();
gl_Position = p2+n21*0.2;
EmitVertex();
gl_Position = p2+n22*0.2;
EmitVertex();
EndPrimitive();
}
The task of the geometry shader is to convert a line strip into a triangle strip.
This is what I get for a line-strip spiral:
I want the triangle strip's normal to always point toward the viewer, and I want an even thickness (of course it should get thinner further away).
I need to rotate n11, n12, n21, n22 so they are parallel to the view plane:
I would probably need to manipulate v0, v1, v2 with projMatrix and mvMatrix?
Thanks!
The projection matrix should not be applied in the vertex shader; I would do this all in view space and then transform the final result into clip space in the geometry shader. This avoids having to divide everything by W in the geometry shader.
You want to screen-align each of your triangles, which is very easy to do in a geometry shader (this is effectively billboarding). Pull the right/up vectors out of your ModelView matrix and then use those to calculate the offset in X and Y.
Geometry Shader Pseudo-code:
// Right = Column 0
vec3 right = vec3 (mvMatrix [0][0],
mvMatrix [1][0],
mvMatrix [2][0]);
// Up = Column 1
vec3 up = vec3 (mvMatrix [0][1],
mvMatrix [1][1],
mvMatrix [2][1]);
//
// Screen-align everything, and give a width of 0.4
//
gl_Position = projMatrix * ((p1+n11*0.2) - vec4 ((right + up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p1+n12*0.2) - vec4 ((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2+n21*0.2) + vec4 ((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2+n22*0.2) + vec4 ((right + up) * 0.2, 0.0));
EmitVertex ();
I don't get good results with this code. I guess it has something to do with the mvMatrix being applied in the vertex shader and then used again for the right and up vectors.
I came up with new code that works relatively well:
Vertex shader:
#version 150 core
attribute vec4 vertex;
varying vec3 vert;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;
void main() {
vert = vertex.xyz;
gl_Position = vertex;
}
Geometry shader:
#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 6) out;
void main() {
vec4 p0 = gl_in[0].gl_Position;
vec4 p1 = gl_in[1].gl_Position;
vec4 p2 = gl_in[2].gl_Position;
vec4 p3 = gl_in[3].gl_Position;
vec3 forward1 = normalize(camPos - p1.xyz);
vec3 forward2 = normalize(camPos - p2.xyz);
vec3 v0 = normalize(vec3(p1-p0));
vec3 v1 = normalize(vec3(p2-p1));
vec3 v2 = normalize(vec3(p3-p2));
vec3 v0p1 = normalize(v0-(dot(v0,forward1))*forward1);
vec3 v1p1 = normalize(v1-(dot(v1,forward1))*forward1);
vec3 v1p2 = normalize(v1-(dot(v1,forward2))*forward2);
vec3 v2p2 = normalize(v2-(dot(v2,forward2))*forward2);
vec3 n0p1 = normalize(cross(v0p1,forward1));
vec3 n1p1 = normalize(cross(v1p1,forward1));
vec3 n1p2 = normalize(cross(v1p2,forward2));
vec3 n2p2 = normalize(cross(v2p2,forward2));
vec3 n11 = normalize(n0p1+n1p1);
vec3 n12 = -n11;
//if (n11[0]<0){
//n11 = n12;
//n12 = -n11;
// }
vec3 n21 = normalize(n1p2+n2p2);
vec3 n22 = -n21;
//if (n21[0]<0){
// n21 = n22;
//n22 = -n21;
// }
gl_Position = projMatrix * mvMatrix * vec4(p1.xyz+n11*0.2,1.0);
EmitVertex();
gl_Position = projMatrix * mvMatrix * vec4(p1.xyz+n12*0.2,1.0);
EmitVertex();
// EndPrimitive();
gl_Position = projMatrix * mvMatrix * vec4(p2.xyz+n21*0.2,1.0);
EmitVertex();
gl_Position = projMatrix * mvMatrix * vec4(p2.xyz+n22*0.2,1.0);
EmitVertex ();
EndPrimitive();
// gl_Position = projMatrix * mvMatrix * p1;
// EmitVertex();
// gl_Position = projMatrix * mvMatrix * p2;
// EmitVertex ();
// EndPrimitive();
}
I work without the matrices applied in the vertex shader, and I basically just included the eye-to-point vector, so everything on screen appears turned toward the eye.

Fragment shader input interfering with texture access

I am using a vertex, geometry and fragment shader to render a scene with shadows:
Vertex Shader:
#version 400
layout(location=0) in vec3 position;
out vec4 vShadowCoord;
uniform mat4 modelViewProjectionMatrix;
uniform mat4 shadowMatrix;
void main(void)
{
vShadowCoord = shadowMatrix * vec4(position, 1.0);
gl_Position = modelViewProjectionMatrix * vec4(position, 1.0);
}
Geometry Shader:
#version 400
layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices = 3) out;
in vec4 vShadowCoord[];
out vec4 gShadowCoord;
uniform vec3 lightPosition;
void main()
{
gShadowCoord = vShadowCoord[0];
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gShadowCoord = vShadowCoord[2];
gl_Position = gl_in[2].gl_Position;
EmitVertex();
gShadowCoord = vShadowCoord[4];
gl_Position = gl_in[4].gl_Position;
EmitVertex();
EndPrimitive();
}
Fragment Shader:
#version 400
in vec4 shadowCoord;
out vec4 fColor;
uniform sampler2DShadow shadowMap;
void main(void)
{
float shadow = textureProj(shadowMap, shadowCoord);
fColor = (shadow > 0.0) ? vec4(1.0, 1.0, 1.0, 1.0) : vec4(0.1, 0.1, 0.1, 1.0);
}
This successfully renders my scene with shadows. The cubes in my scene are lit and in shadow where I would expect them to be. The problem occurs when I try to pass one of the two colors in from the geometry shader. When I do this, my conditional statement always evaluates to false.
Geometry shader:
#version 400
layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices = 3) out;
in vec4 vShadowCoord[];
out vec4 gShadowCoord; // Added
out vec4 gColorLit; // Added
uniform vec3 lightPosition;
void main()
{
gShadowCoord = vShadowCoord[0];
gColorLit = vec4(1.0, 1.0, 1.0, 1.0); // Added
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gShadowCoord = vShadowCoord[2];
gColorLit = vec4(1.0, 1.0, 1.0, 1.0); // Added
gl_Position = gl_in[2].gl_Position;
EmitVertex();
gShadowCoord = vShadowCoord[4];
gColorLit = vec4(1.0, 1.0, 1.0, 1.0); // Added
gl_Position = gl_in[4].gl_Position;
EmitVertex();
EndPrimitive();
}
Fragment Shader:
#version 400
in vec4 shadowCoord;
in vec4 gColorLit; // Added
out vec4 fColor;
uniform sampler2DShadow shadowMap;
void main(void)
{
float shadow = textureProj(shadowMap, shadowCoord);
// Changed
fColor = (shadow > 0.0) ? gColorLit : vec4(0.1, 0.1, 0.1, 1.0);
}
What could be causing this to happen?
This occurs on both Ubuntu 12.04 and Windows 7
Some of the code in my shaders might seem unnecessary, but that is because I have stripped as much as I could away while troubleshooting.
There's no reason for your second set of shaders to link successfully. Your geometry shader states that it's writing:
out vec4 gShadowCoord; // Added
But your fragment shader is expecting:
in vec4 shadowCoord;
You should have gotten a linker error.
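One way to make it link is to match the names on both sides, for example a fragment shader of this form (a sketch, reusing the geometry shader's output names):

#version 400
in vec4 gShadowCoord;   // matches the geometry shader output
in vec4 gColorLit;
out vec4 fColor;
uniform sampler2DShadow shadowMap;
void main(void)
{
    float shadow = textureProj(shadowMap, gShadowCoord);
    fColor = (shadow > 0.0) ? gColorLit : vec4(0.1, 0.1, 0.1, 1.0);
}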