I want to use a geometry shader to draw the triangles of a mesh, but I have run into a really odd issue.
The results are shown below (screenshots): the wrong output, then the right output.
The only difference between the wrong and the right version is where the 3D position vector is converted to a 4D position vector: the right one does it in the vertex shader, the wrong one in the geometry shader.
The code follows. Why does this happen?
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 normal;
uniform mat4 model;
uniform mat4 view;
uniform mat4 project;
out vec3 normal_;
out vec4 pos_;
out vec3 pos_bug_;
out mat4 mvp_;
void main()
{
    mvp_ = project * view * model;
    normal_ = normal;
    pos_ = vec4(pos, 1.0);
    pos_bug_ = pos;
}
#version 330 core
layout (triangles) in;
layout (line_strip, max_vertices = 12) out;
uniform float length = 0.4f; // note: 'length' is also the name of a built-in GLSL function
out vec4 color;
in mat4 mvp_[];
in vec3 normal_[];
in vec4 pos_[];
in vec3 pos_bug_[];
void GenNormal(int index) {
    color = vec4(1, 1, 0, 1);
    gl_Position = mvp_[0] * pos_[index];
    EmitVertex();
    // the normal offset here is added in clip space, with the normal left untransformed
    gl_Position = mvp_[0] * pos_[index] + vec4(normal_[index], 0.0) * length;
    EmitVertex();
    EndPrimitive();
}
void GenTriangle(int index0, int index1) {
    color = vec4(1, 1, 1, 1);
    gl_Position = mvp_[0] * pos_[index0]; // Right
    // gl_Position = mvp_[0] * vec4(pos_bug_[index0], 1.0); // Wrong
    EmitVertex();
    gl_Position = mvp_[0] * pos_[index1]; // Right
    // gl_Position = mvp_[0] * vec4(pos_bug_[index1], 1.0); // Wrong
    EmitVertex();
    EndPrimitive();
}
void main()
{
    GenNormal(0);
    GenNormal(1);
    GenNormal(2);
    GenTriangle(0, 1);
    GenTriangle(1, 2);
    GenTriangle(0, 2);
}
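As a general side note (not necessarily the cause of the bug above): a common pattern is to do the full transform in the vertex shader and let the geometry shader read the result through the built-in gl_in array, so neither the matrix nor a separate position varying has to be passed along. A minimal sketch, assuming the same uniform names as above:

// vertex shader emits clip-space positions...
gl_Position = project * view * model * vec4(pos, 1.0);
// ...and the geometry shader reads them back without extra varyings:
gl_Position = gl_in[index].gl_Position;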
The problem is that artifacts appear in the shadows at a great distance. I want to try a logarithmic depth buffer, but I don't understand where it should be applied and how... I use the point-light method for omnidirectional shadow maps.
Vertex shader:
#version 460 core
layout (location = 0) in vec3 aPos;
uniform mat4 model;
uniform float zCoef;
//out vec4 pos;
void main() {
    gl_Position = model * vec4(aPos, 1.0f);
    // gl_Position.z = log2(max(1e-6, gl_Position.w + 1.0)) * zCoef - 1.0;
    // gl_Position.z *= gl_Position.w;
}
Geometry shader:
#version 460 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 18) out;
uniform mat4 shadowMatrices[6];
uniform float zCoef;
out vec4 FragPos; // FragPos from GS (output per EmitVertex)
void main()
{
    for(int face = 0; face < 6; ++face)
    {
        gl_Layer = face; // built-in variable that selects which cubemap face to render to
        for(int i = 0; i < 3; ++i) // for each vertex of the triangle
        {
            FragPos = gl_in[i].gl_Position;
            gl_Position = shadowMatrices[face] * FragPos;
            // gl_Position.z = log2(max(1e-6, gl_Position.w + 1.0)) * zCoef - 1.0;
            // gl_Position.z *= gl_Position.w;
            EmitVertex();
        }
        EndPrimitive();
    }
}
Fragment shader:
#version 460 core
in vec4 FragPos;
uniform vec3 lightPos;
uniform float farPlane;
uniform float zCoef;
void main() {
    float lightDistance = length(FragPos.xyz - lightPos);
    lightDistance = lightDistance / farPlane;
    gl_FragDepth = lightDistance;
}
Guys, please help me; I also tried linearizing the depth, and that didn't work either.
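For what it's worth, since this pipeline writes gl_FragDepth manually from the light distance, one place a logarithmic remap could go is the fragment shader itself rather than gl_Position.z. A minimal sketch of that idea (an assumption to try, not a verified fix):

void main() {
    float lightDistance = length(FragPos.xyz - lightPos);
    // logarithmic remap of [0, farPlane] into [0, 1]: spends more depth
    // resolution near the light and less far away
    gl_FragDepth = log2(lightDistance + 1.0) / log2(farPlane + 1.0);
}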
I am trying to get my GLSL shader to calculate a directional light, but I have run into the problem that the direction seems to be dependent on the viewMatrix, while I want to specify it in world space.
My initial idea was to just multiply the world-space vector by the viewMatrix before setting the direction uniform:
Vector4f dir = new Vector4f(dirLight.direction, 1);
dir.mul(window.getCamera().viewMatrix);
but the light still changes depending on my viewMatrix, so I am obviously doing something wrong.
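As an aside on conventions: a direction is usually transformed with w = 0, since w = 1 makes the vector behave like a position and pick up the matrix's translation. A minimal GLSL sketch of that convention (the names here are hypothetical):

// lightDirWorld stands for the world-space direction uniform; the 0.0 in
// the w component keeps the view matrix's translation out of the result
vec3 lightDirView = normalize((viewMatrix * vec4(lightDirWorld, 0.0)).xyz);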
The relevant code of my shader:
//vertex shader
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 vertexNormal;
layout (location = 3) in vec4 jointWeights;
layout (location = 4) in ivec4 jointIndices;
out vec3 gmvVertexNormal;
out vec3 gmvVertexPos;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
struct Material
{
    vec3 color;
    int hasTexture;
    float reflectance;
};
uniform Material material;
void main()
{
    vec4 mvPos = modelViewMatrix * vec4(position, 1.0);
    gl_Position = vec4(position, 1.0);
    gmvVertexNormal = normalize(modelViewMatrix * vec4(vertexNormal, 0.0)).xyz;
    gmvVertexPos = position;
}
//geometry shader
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
out vec3 mvVertexNormal;
out vec3 mvVertexPos;
in vec3 gmvVertexNormal[3];
in vec3 gmvVertexPos[3];
vec3 calculateTriangleNormal(){
    vec3 tangent = gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 bitangent = gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 normal = cross(tangent, bitangent);
    return normalize(normal);
}
void main()
{
    vec4 mvPos = modelViewMatrix * vec4(gmvVertexPos[0], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();
    mvPos = modelViewMatrix * vec4(gmvVertexPos[1], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();
    mvPos = modelViewMatrix * vec4(gmvVertexPos[2], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();
    EndPrimitive();
}
//fragment shader
in vec3 mvVertexNormal;
in vec3 mvVertexPos;
struct DirectionalLight {
    vec3 color;
    vec3 direction;
    float intensity;
};
const int MAX_DIRECTIONALLIGHT = 10;
uniform int USED_DIRECTIONALLIGHTS;
uniform DirectionalLight directionalLight[MAX_DIRECTIONALLIGHT];
// calcLightColor is defined first so the call in calcDirectionalLight
// resolves without a separate prototype
vec4 calcLightColor(vec3 light_color, float light_intensity, vec3 position, vec3 to_light_dir, vec3 normal)
{
    vec4 diffuseColor = vec4(0, 0, 0, 0);
    vec4 specColor = vec4(0, 0, 0, 0);
    // Diffuse Light
    float diffuseFactor = max(dot(normal, to_light_dir), 0.0);
    diffuseColor = vec4(light_color, 1.0) * light_intensity * diffuseFactor;
    // Specular Light
    vec3 camera_direction = normalize(-position);
    vec3 from_light_dir = -to_light_dir;
    vec3 reflected_light = normalize(reflect(from_light_dir, normal));
    float specularFactor = max(dot(camera_direction, reflected_light), 0.0);
    specularFactor = pow(specularFactor, specularPower);
    specColor = light_intensity * specularFactor * material.reflectance * vec4(light_color, 1.0);
    return (diffuseColor + specColor);
}
vec4 calcDirectionalLight(DirectionalLight light, vec3 position, vec3 normal)
{
    return calcLightColor(light.color, light.intensity, position, normalize(light.direction), normal);
}
void main()
{
    vec4 totalLight = vec4(0);
    //directional Light
    for (int i = 0; i < USED_DIRECTIONALLIGHTS; i++) {
        totalLight += calcDirectionalLight(directionalLight[i], mvVertexPos, mvVertexNormal);
    }
    //...
    fragColor = vec4(ambientLight, 1.0) + totalLight;
}
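The excerpt above uses a few identifiers that it does not declare (specularPower, material, ambientLight, fragColor); presumably they are defined elsewhere in the full shader. For completeness, plausible declarations could look like the following; the names match the usage above, but the exact definitions are assumptions:

out vec4 fragColor;          // final color written in main()
uniform vec3 ambientLight;   // ambient term added in main()
uniform float specularPower; // exponent used in calcLightColor()
struct Material
{
    vec3 color;
    int hasTexture;
    float reflectance;
};
uniform Material material;   // material.reflectance is used in calcLightColor()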
I am kind of new to shaders, so I don't know what to do anymore.
To clarify the effect I get: the directional light, which should come from a single direction (in world space), comes from different directions depending on the view matrix.
I feel stupid now. I found the answer just after posting.
The geometry shader passes the triangle normal along directly instead of multiplying it by the modelViewMatrix. Since the vertex shader outputs the untransformed position, the normal computed in the geometry shader is still in model space and has to be rotated into view space.
So the answer is this:
mvVertexNormal = normalize(modelViewMatrix * vec4(calculateTriangleNormal(), 0.0)).xyz;
instead of this:
mvVertexNormal = calculateTriangleNormal();
I have created an application in OpenGL that uses a Vertex Shader, Geometry Shader, and Fragment Shader.
I have a uniform variable, eyePositionWorld, that I would like to use in both the Geometry Shader and the Fragment Shader.
(I am rendering the position of the vertices relative to eyePositionWorld as the color.)
Vertex Shader
#version 430
in vec4 vertexPositionModel;
in vec3 vertexColor;
in vec3 vertexNormalModel;
in mat4 modelMatrix;
uniform mat4 viewMatrix;       // world to view
uniform mat4 projectionMatrix; // view to projection
struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
out fData geomData;
void main()
{
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * vertexPositionModel;
    geomData.fragColor = vertexColor;
    geomData.fragPositionWorld = (modelMatrix * vertexPositionModel).xyz;
    geomData.fragNormalWorld = (modelMatrix * vec4(vertexNormalModel, 0.0)).xyz;
}
Geometry Shader
#version 430
layout(triangles_adjacency) in;
layout(triangle_strip, max_vertices=3) out;
struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
uniform vec3 eyePositionWorldGeomShader;
in fData geomData[];
out fData fragData;
void main() {
    gl_Position = gl_in[0].gl_Position;
    fragData = geomData[0];
    fragData.fragColor = gl_in[0].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();
    gl_Position = gl_in[2].gl_Position;
    fragData = geomData[2];
    fragData.fragColor = gl_in[2].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();
    gl_Position = gl_in[4].gl_Position;
    fragData = geomData[4];
    fragData.fragColor = gl_in[4].gl_Position.xyz - eyePositionWorldGeomShader;
    EmitVertex();
    EndPrimitive();
}
Fragment Shader
#version 430
struct fData
{
    vec3 fragColor;
    vec3 fragPositionWorld;
    vec3 fragNormalWorld;
};
in fData fragData;
uniform vec4 ambientLight;
uniform vec3 lightPositionWorld;
uniform vec3 eyePositionWorld;
uniform bool isLighted;
out vec4 color;
void main()
{
    if (!isLighted)
    {
        color = vec4(fragData.fragColor, 1.0);
    }
    else
    {
        vec3 lightVectorWorld = normalize(lightPositionWorld - fragData.fragPositionWorld);
        float brightness = clamp(dot(lightVectorWorld, normalize(fragData.fragNormalWorld)), 0.0, 1.0);
        vec4 diffuseLight = vec4(brightness, brightness, brightness, 1.0);
        vec3 reflectedLightVectorWorld = reflect(-lightVectorWorld, fragData.fragNormalWorld);
        vec3 eyeVectorWorld = normalize(eyePositionWorld - fragData.fragPositionWorld);
        float specularity = pow(clamp(dot(reflectedLightVectorWorld, eyeVectorWorld), 0.0, 1.0), 40.0) * 0.5;
        vec4 specularLight = vec4(specularity, specularity, specularity, 1.0);
        // Maximum distance of all lights
        float maxDist = 55.0;
        float attenuation = clamp((maxDist - length(lightPositionWorld - fragData.fragPositionWorld)) / maxDist, 0.0, 1.0);
        color = (ambientLight + (diffuseLight + specularLight) * attenuation) * vec4(fragData.fragColor, 1.0);
    }
}
C++ Code (the m_eyePositionUL and m_eyePositionGeomShaderUL are both just loaded with glGetUniformLocation)
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
glUniform3fv(m_eyePositionGeomShaderUL, 1, &m_camera.getPosition()[0]);
How can I upload just one uniform to OpenGL and use it in both the Geometry Shader and the Fragment Shader?
It's a bit surprising, but OpenGL makes this easy: all you have to do is use the same uniform name in both shaders, then upload the value once to that uniform location.
Replace uniform vec3 eyePositionWorldGeomShader; with uniform vec3 eyePositionWorld; in your Geometry Shader and keep the uniform name the same in the Fragment Shader.
Then just don't upload the other uniform, so your C++ code becomes simply:
glUniform3fv(m_eyePositionUL, 1, &m_camera.getPosition()[0]);
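This works because uniforms live in a single namespace per linked program, so both stages can declare the same uniform and one upload feeds both. A minimal sketch:

// declared identically in both the geometry and fragment shader sources:
uniform vec3 eyePositionWorld;
// after linking, glGetUniformLocation(program, "eyePositionWorld")
// returns one location that serves both stages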
I have the following vertex shader:
#version 150 core
in vec4 vertex;   // 'attribute' is not valid in 150 core; use 'in'
out vec3 vert;    // 'varying' is not valid in 150 core; use 'out'
out float zdepth;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
void main() {
    vert = vertex.xyz;
    zdepth = -(mvMatrix * vertex).z;
    gl_Position = projMatrix * mvMatrix * vertex;
}
and geometry shader:
#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 4) out;
void main() {
    vec4 p0 = gl_in[0].gl_Position;
    vec4 p1 = gl_in[1].gl_Position;
    vec4 p2 = gl_in[2].gl_Position;
    vec4 p3 = gl_in[3].gl_Position;
    vec4 v0 = normalize(p1 - p0);
    vec4 v1 = normalize(p2 - p1);
    vec4 v2 = normalize(p3 - p2);
    vec4 n11 = normalize(v1 - v0);
    vec4 n12 = -n11;
    vec4 n21 = normalize(v2 - v1);
    vec4 n22 = -n21;
    gl_Position = p1 + n11 * 0.2;
    EmitVertex();
    gl_Position = p1 + n12 * 0.2;
    EmitVertex();
    gl_Position = p2 + n21 * 0.2;
    EmitVertex();
    gl_Position = p2 + n22 * 0.2;
    EmitVertex();
    EndPrimitive();
}
The task of the geometry shader is to convert a line strip into a triangle strip.
This is what I get for a line-strip spiral:
I want the triangle strip's normal to always point toward the viewer, with an even thickness; of course it has to get thinner farther away.
I need to rotate n11, n12, n21, n22 so they are parallel to the view plane:
I would probably need to manipulate v0, v1, v2 with projMatrix and mvMatrix?
Thanks!
The projection matrix should not be applied in the vertex shader; I would do this all in view space and then transform the final result into clip space in the geometry shader. This avoids having to divide everything by W in the geometry shader.
You want to screen-align each of your triangles, which is very easy to do in a geometry shader (this is effectively billboarding). Pull the right/up vectors out of your ModelView matrix and then use those to calculate the offsets in X and Y.
Geometry Shader Pseudo-code:
// Right = first row of the ModelView matrix (GLSL indexing is m[column][row])
vec3 right = vec3 (mvMatrix [0][0],
                   mvMatrix [1][0],
                   mvMatrix [2][0]);
// Up = second row
vec3 up = vec3 (mvMatrix [0][1],
                mvMatrix [1][1],
                mvMatrix [2][1]);
//
// Screen-align everything, and give a total width of 0.4
//
gl_Position = projMatrix * ((p1 + n11 * 0.2) - vec4 ((right + up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p1 + n12 * 0.2) - vec4 ((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2 + n21 * 0.2) + vec4 ((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2 + n22 * 0.2) + vec4 ((right + up) * 0.2, 0.0));
EmitVertex ();
I don't get good results with this code; I guess it has something to do with the mvMatrix being applied in the vertex shader and then used again for the right and up vectors.
I came up with new code that works relatively well:
Vertex shader:
#version 150 core
in vec4 vertex;
out vec3 vert;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;
void main() {
    vert = vertex.xyz;
    gl_Position = vertex; // pass the untransformed vertex through; the GS does all transforms
}
Geometry shader:
#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 6) out;
void main() {
    vec4 p0 = gl_in[0].gl_Position;
    vec4 p1 = gl_in[1].gl_Position;
    vec4 p2 = gl_in[2].gl_Position;
    vec4 p3 = gl_in[3].gl_Position;
    vec3 forward1 = normalize(camPos - p1.xyz);
    vec3 forward2 = normalize(camPos - p2.xyz);
    vec3 v0 = normalize(vec3(p1 - p0));
    vec3 v1 = normalize(vec3(p2 - p1));
    vec3 v2 = normalize(vec3(p3 - p2));
    // project the segment directions onto the plane perpendicular to the view direction
    vec3 v0p1 = normalize(v0 - dot(v0, forward1) * forward1);
    vec3 v1p1 = normalize(v1 - dot(v1, forward1) * forward1);
    vec3 v1p2 = normalize(v1 - dot(v1, forward2) * forward2);
    vec3 v2p2 = normalize(v2 - dot(v2, forward2) * forward2);
    vec3 n0p1 = normalize(cross(v0p1, forward1));
    vec3 n1p1 = normalize(cross(v1p1, forward1));
    vec3 n1p2 = normalize(cross(v1p2, forward2));
    vec3 n2p2 = normalize(cross(v2p2, forward2));
    vec3 n11 = normalize(n0p1 + n1p1);
    vec3 n12 = -n11;
    vec3 n21 = normalize(n1p2 + n2p2);
    vec3 n22 = -n21;
    gl_Position = projMatrix * mvMatrix * vec4(p1.xyz + n11 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p1.xyz + n12 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p2.xyz + n21 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p2.xyz + n22 * 0.2, 1.0);
    EmitVertex();
    EndPrimitive();
}
I work without the matrices applied in the vertex shader, and I basically just included the eye-to-point vector, so everything on screen ends up turned toward the eye.
I am looking for some help debugging my GLSL Phong shading code. Here is my vertex shader:
layout(std140) uniform Matrices {
    mat4 model[1024];
};
layout(location = 0) in vec4 vertexCoord;
layout(location = 2) in vec3 vertexNormal;
uniform mat4 view;       // from lookAt()
uniform mat4 projection; // perspective projection
out vec3 Position;
out vec3 Normal;
out vec4 lightPosEye;
void main() {
    mat4 modelView = view * model[gl_InstanceID];
    mat3 normalMatrix = mat3(transpose(inverse(modelView)));
    //mat3 normalMatrix = mat3(modelView);
    vec4 lightPos = vec4(350, 350, 350, 1);
    lightPosEye = modelView * lightPos;
    Position = vec3(modelView * vertexCoord);
    Normal = normalize(normalMatrix * vertexNormal);
    gl_Position = projection * vec4(Position, 1.0);
}
and here is my fragment shader:
in vec3 Position;
in vec3 Normal;
in vec4 lightPosEye;
layout(location = 0) out vec4 FragColor;
vec3 ads() {
    vec3 Ka = vec3(0, 0.5, 0);
    vec3 Kd = vec3(0, 0.5, 0);
    vec3 Ks = vec3(0, 0.1, 0);
    vec3 intensity = vec3(0.3, 0.5, 0.0);
    float shininess = 0.1;
    vec3 n = normalize(Normal);
    vec3 s = normalize(vec3(lightPosEye) - Position);
    vec3 v = normalize(vec3(-Position));
    vec3 r = reflect(-s, n);
    return intensity * (Ka + Kd * max(dot(s, n), 0.0) + Ks * pow(max(dot(r, v), 0.0), shininess));
}
void main() {
    FragColor = vec4(ads(), 1);
}
Here is a screenshot of the result (I am also rendering the normals with another geometry shader for debugging purposes):
The artifacts on the cube are wrong, and the "shadows" on the circle also move a bit when I move the camera around (change the view matrix).
Any obvious errors in my GLSL code?
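One thing that stands out (an observation to check, not a guaranteed fix): lightPosEye is computed with modelView, so the light position, which is meant to be a fixed world-space point, gets each instance's model transform applied to it. Transforming it by the view matrix alone would keep the light stationary in the world; a one-line sketch against the vertex shader above:

// the light is a world-space point, so only the view transform applies;
// modelView would additionally bake in this instance's model matrix
lightPosEye = view * lightPos;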