I have the following vertex shader:

#version 150 core
in vec4 vertex;
out vec3 vert;
out float zdepth;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;

void main() {
    vert = vertex.xyz;
    zdepth = -(mvMatrix * vertex).z;
    gl_Position = projMatrix * mvMatrix * vertex;
}
and this geometry shader:

#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 4) out;

void main() {
    vec4 p0 = gl_in[0].gl_Position;
    vec4 p1 = gl_in[1].gl_Position;
    vec4 p2 = gl_in[2].gl_Position;
    vec4 p3 = gl_in[3].gl_Position;

    vec4 v0 = normalize(p1 - p0);
    vec4 v1 = normalize(p2 - p1);
    vec4 v2 = normalize(p3 - p2);

    vec4 n11 = normalize(v1 - v0);
    vec4 n12 = -n11;
    vec4 n21 = normalize(v2 - v1);
    vec4 n22 = -n21;

    gl_Position = p1 + n11 * 0.2;
    EmitVertex();
    gl_Position = p1 + n12 * 0.2;
    EmitVertex();
    gl_Position = p2 + n21 * 0.2;
    EmitVertex();
    gl_Position = p2 + n22 * 0.2;
    EmitVertex();
    EndPrimitive();
}
The task of the geometry shader is to convert a line strip into a triangle strip. This is what I get for a line-strip spiral:
I want the triangle strip's normal to always point toward the viewer, and I want an even thickness that, of course, gets thinner farther away. I need to rotate n11, n12, n21, n22 so that they are parallel to the view plane. I would probably need to manipulate v0, v1, v2 with projMatrix and mvMatrix?
Thanks!
The projection matrix should not be applied in the vertex shader. I would do all of this in view-space and only transform the final result into clip-space in the geometry shader; that avoids having to divide everything by W in the geometry shader.
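A vertex shader along those lines might look like this (a minimal sketch, assuming the same vertex and mvMatrix names as above):

#version 150 core
in vec4 vertex;
out vec3 vert;
uniform mat4 mvMatrix;

void main() {
    vert = vertex.xyz;
    // View-space position only; projMatrix is applied in the geometry shader
    gl_Position = mvMatrix * vertex;
}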
You want to screen-align each of your triangles, which is very easy to do in a geometry shader (this is effectively billboarding). Pull the right/up vectors out of your ModelView matrix and then use those to calculate the offset in X and Y.
Geometry shader pseudo-code:

// Camera right vector = first row of the ModelView matrix
// (GLSL matrices are column-major: mvMatrix[col][row])
vec3 right = vec3(mvMatrix[0][0], mvMatrix[1][0], mvMatrix[2][0]);

// Camera up vector = second row
vec3 up = vec3(mvMatrix[0][1], mvMatrix[1][1], mvMatrix[2][1]);

//
// Screen-align everything, and give a width of 0.4
//
gl_Position = projMatrix * ((p1 + n11 * 0.2) - vec4((right + up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p1 + n12 * 0.2) - vec4((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2 + n21 * 0.2) + vec4((right - up) * 0.2, 0.0));
EmitVertex();
gl_Position = projMatrix * ((p2 + n22 * 0.2) + vec4((right + up) * 0.2, 0.0));
EmitVertex();
I don't get good results with this code. I guess it has something to do with the mvMatrix being applied in the vertex shader and then used again for the right and up vectors.
I came up with new code that works relatively well:
Vertex shader:

#version 150 core
in vec4 vertex;
out vec3 vert;
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;

void main() {
    vert = vertex.xyz;
    // Pass through untransformed; all matrices are applied in the geometry shader
    gl_Position = vertex;
}
Geometry shader:

#version 150 core
uniform mat4 projMatrix;
uniform mat4 mvMatrix;
uniform vec3 camPos;
layout(lines_adjacency) in;
layout(triangle_strip, max_vertices = 6) out;

void main() {
    vec4 p0 = gl_in[0].gl_Position;
    vec4 p1 = gl_in[1].gl_Position;
    vec4 p2 = gl_in[2].gl_Position;
    vec4 p3 = gl_in[3].gl_Position;

    // Eye vectors from each inner point to the camera
    vec3 forward1 = normalize(camPos - p1.xyz);
    vec3 forward2 = normalize(camPos - p2.xyz);

    // Directions of the previous, current, and next segment
    vec3 v0 = normalize(vec3(p1 - p0));
    vec3 v1 = normalize(vec3(p2 - p1));
    vec3 v2 = normalize(vec3(p3 - p2));

    // Project the segment directions onto the planes perpendicular to the eye vectors
    vec3 v0p1 = normalize(v0 - dot(v0, forward1) * forward1);
    vec3 v1p1 = normalize(v1 - dot(v1, forward1) * forward1);
    vec3 v1p2 = normalize(v1 - dot(v1, forward2) * forward2);
    vec3 v2p2 = normalize(v2 - dot(v2, forward2) * forward2);

    // Side vectors: perpendicular to both the projected direction and the eye vector
    vec3 n0p1 = normalize(cross(v0p1, forward1));
    vec3 n1p1 = normalize(cross(v1p1, forward1));
    vec3 n1p2 = normalize(cross(v1p2, forward2));
    vec3 n2p2 = normalize(cross(v2p2, forward2));

    // Average the side vectors of adjacent segments at each joint
    vec3 n11 = normalize(n0p1 + n1p1);
    vec3 n12 = -n11;
    vec3 n21 = normalize(n1p2 + n2p2);
    vec3 n22 = -n21;

    gl_Position = projMatrix * mvMatrix * vec4(p1.xyz + n11 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p1.xyz + n12 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p2.xyz + n21 * 0.2, 1.0);
    EmitVertex();
    gl_Position = projMatrix * mvMatrix * vec4(p2.xyz + n22 * 0.2, 1.0);
    EmitVertex();
    EndPrimitive();
}
I work with no matrices applied in the vertex shader, and I basically just bring in the eye-to-point vector, so everything on screen is turned toward the eye.
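The core of the trick, reduced to a single joint, is this (a sketch; P stands for a joint position and v for an adjacent segment direction, both in model space):

vec3 f  = normalize(camPos - P);         // eye vector at the joint
vec3 vp = normalize(v - dot(v, f) * f);  // segment direction projected onto the view plane
vec3 n  = normalize(cross(vp, f));       // side vector: lies in the view plane

Offsetting the joint by ±n * 0.2 (the half-width used above) before applying mvMatrix and projMatrix gives a strip that always faces the viewer, and the perspective projection takes care of making it thinner with distance.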
I am trying to implement a geometry shader for line thickness using OpenGL 4.3.
I followed the accepted answer and other solutions given on Stack Overflow, but the result is wrong, as the screenshot shows. Is there a proper way to get a screen-aligned normal? It seems correct in the first frame, but the moment I move the mouse the camera changes and the offset direction is no longer correct. The shader's camera matrix is updated every frame in the render loop. The answer I followed is:
GLSL Geometry shader to replace glLineWidth
Vertex shader

#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 projection_view_model;

void main()
{
    gl_Position = projection_view_model * vec4(aPos, 1.0);
}
Fragment shader

#version 330 core
// resources:
// https://stackoverflow.com/questions/6017176/gllinestipple-deprecated-in-opengl-3-1
out vec4 FragColor;
uniform vec4 uniform_fragment_color;

void main()
{
    FragColor = uniform_fragment_color;
}
Geometry shader

#version 330 core
layout (lines) in;
layout (triangle_strip, max_vertices = 4) out;

uniform float u_thickness;
uniform vec2 u_viewportSize;

in gl_PerVertex
{
    vec4 gl_Position;
    //float gl_PointSize;
    //float gl_ClipDistance[];
} gl_in[];

void main() {
    // https://stackoverflow.com/questions/54686818/glsl-geometry-shader-to-replace-gllinewidth
    vec4 p1 = gl_in[0].gl_Position;
    vec4 p2 = gl_in[1].gl_Position;

    vec2 dir = normalize((p2.xy - p1.xy) * u_viewportSize);
    vec2 offset = vec2(-dir.y, dir.x) * u_thickness * 100 / u_viewportSize;

    gl_Position = p1 + vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p1 - vec4(offset.xy * p1.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 + vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    gl_Position = p2 - vec4(offset.xy * p2.w, 0.0, 0.0);
    EmitVertex();
    EndPrimitive();
}
To get the direction of the line in normalized device space, the x and y components of the clip-space coordinates must be divided by the w component (perspective divide). Change

vec2 dir = normalize((p2.xy - p1.xy) * u_viewportSize);

to

vec2 dir = normalize((p2.xy / p2.w - p1.xy / p1.w) * u_viewportSize);
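In context, the corrected direction/offset section of the geometry shader above would then read (a sketch, using the same names):

// Perspective divide first, then scale by the viewport size so the
// perpendicular is computed with the correct aspect ratio
vec2 ndc1 = p1.xy / p1.w;
vec2 ndc2 = p2.xy / p2.w;
vec2 dir = normalize((ndc2 - ndc1) * u_viewportSize);
vec2 offset = vec2(-dir.y, dir.x) * u_thickness * 100 / u_viewportSize;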
I want to use a geometry shader to draw the triangles of a mesh, but I am encountering a really odd issue.
Results as follows: screenshots of the wrong output and the right output.
The only difference between the wrong and the right version in the code is where the 3D position vector is converted to a 4D position vector: the right one does it in the vertex shader, the wrong one in the geometry shader.
Code as follows. Why does this happen?
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 normal;

uniform mat4 model;
uniform mat4 view;
uniform mat4 project;

out vec3 normal_;
out vec4 pos_;
out vec3 pos_bug_;
out mat4 mvp_;

void main()
{
    mvp_ = project * view * model;
    normal_ = normal;
    pos_ = vec4(pos, 1.0);
    pos_bug_ = pos;
}
#version 330 core
layout (triangles) in;
layout (line_strip, max_vertices = 12) out;

uniform float length = 0.4f;

out vec4 color;

in mat4 mvp_[];
in vec3 normal_[];
in vec4 pos_[];
in vec3 pos_bug_[];

void GenNormal(int index) {
    color = vec4(1, 1, 0, 1);
    gl_Position = mvp_[0] * pos_[index];
    EmitVertex();
    gl_Position = mvp_[0] * pos_[index] + vec4(normal_[index], 0.0) * length;
    EmitVertex();
    EndPrimitive();
}

void GenTriangle(int index0, int index1) {
    color = vec4(1, 1, 1, 1);
    gl_Position = mvp_[0] * pos_[index0]; // Right
    // gl_Position = mvp_[0] * vec4(pos_bug_[index0], 1.0); // Wrong
    EmitVertex();
    gl_Position = mvp_[0] * pos_[index1]; // Right
    // gl_Position = mvp_[0] * vec4(pos_bug_[index1], 1.0); // Wrong
    EmitVertex();
    EndPrimitive();
}

void main()
{
    GenNormal(0);
    GenNormal(1);
    GenNormal(2);
    GenTriangle(0, 1);
    GenTriangle(1, 2);
    GenTriangle(0, 2);
}
I am trying to get my GLSL shader to calculate a directional light, but I run into the problem that the direction seems to depend on the viewMatrix, while I want to specify it in world space.
My initial idea was to just multiply the world-space vector with the viewMatrix before setting the direction uniform:

Vector4f dir = new Vector4f(dirLight.direction, 1);
dir.mul(window.getCamera().viewMatrix);

but the light still changes depending on my viewMatrix, so I am obviously doing something wrong. (Note, too, that a direction should be transformed with w = 0; with w = 1 the matrix's translation is applied to it as well.)
The relevant code of my shaders:
// vertex shader
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 vertexNormal;
layout (location = 3) in vec4 jointWeights;
layout (location = 4) in ivec4 jointIndices;

out vec3 gmvVertexNormal;
out vec3 gmvVertexPos;

uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;

struct Material
{
    vec3 color;
    int hasTexture;
    float reflectance;
};

uniform Material material;

void main()
{
    vec4 mvPos = modelViewMatrix * vec4(position, 1.0);
    gl_Position = vec4(position, 1.0);
    gmvVertexNormal = normalize(modelViewMatrix * vec4(vertexNormal, 0.0)).xyz;
    gmvVertexPos = position;
}
// geometry shader
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;

uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;

out vec3 mvVertexNormal;
out vec3 mvVertexPos;

in vec3 gmvVertexNormal[3];
in vec3 gmvVertexPos[3];

vec3 calculateTriangleNormal(){
    vec3 tangent = gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 bitangent = gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz;
    vec3 normal = cross(tangent, bitangent);
    return normalize(normal);
}

void main()
{
    vec4 mvPos = modelViewMatrix * vec4(gmvVertexPos[0], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();

    mvPos = modelViewMatrix * vec4(gmvVertexPos[1], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();

    mvPos = modelViewMatrix * vec4(gmvVertexPos[2], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = calculateTriangleNormal();
    mvVertexPos = mvPos.xyz;
    EmitVertex();

    EndPrimitive();
}
// fragment shader
in vec3 mvVertexNormal;
in vec3 mvVertexPos;

struct DirectionalLight {
    vec3 color;
    vec3 direction;
    float intensity;
};

const int MAX_DIRECTIONALLIGHT = 10;
uniform int USED_DIRECTIONALLIGHTS;
uniform DirectionalLight directionalLight[MAX_DIRECTIONALLIGHT];

vec4 calcDirectionalLight(DirectionalLight light, vec3 position, vec3 normal)
{
    return calcLightColor(light.color, light.intensity, position, normalize(light.direction), normal);
}

vec4 calcLightColor(vec3 light_color, float light_intensity, vec3 position, vec3 to_light_dir, vec3 normal)
{
    vec4 diffuseColor = vec4(0, 0, 0, 0);
    vec4 specColor = vec4(0, 0, 0, 0);

    // Diffuse light
    float diffuseFactor = max(dot(normal, to_light_dir), 0.0);
    diffuseColor = vec4(light_color, 1.0) * light_intensity * diffuseFactor;

    // Specular light
    vec3 camera_direction = normalize(-position);
    vec3 from_light_dir = -to_light_dir;
    vec3 reflected_light = normalize(reflect(from_light_dir, normal));
    float specularFactor = max(dot(camera_direction, reflected_light), 0.0);
    specularFactor = pow(specularFactor, specularPower);
    specColor = light_intensity * specularFactor * material.reflectance * vec4(light_color, 1.0);

    return (diffuseColor + specColor);
}

void main()
{
    vec4 totalLight = vec4(0);

    // directional light
    for (int i = 0; i < USED_DIRECTIONALLIGHTS; i++) {
        totalLight += calcDirectionalLight(directionalLight[i], mvVertexPos, mvVertexNormal);
    }
    //...
    fragColor = vec4(ambientLight, 1.0) + totalLight;
}
I am kind of new to shaders, so I don't know what to do anymore.
To specify the effect I get: the directional light that should only come from one direction (in world space) comes from different directions depending on the viewMatrix.
I feel stupid now; I found the answer just after posting.
The geometry shader passes the vertexNormal on directly instead of multiplying it with the modelViewMatrix. So the answer is this:

mvVertexNormal = normalize(modelViewMatrix * vec4(calculateTriangleNormal(), 0.0)).xyz;

instead of this:

mvVertexNormal = calculateTriangleNormal();
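In context, the geometry shader's emit sequence can then be collapsed to something like this (a sketch; the normal only needs to be computed once per triangle):

vec3 mvNormal = normalize(modelViewMatrix * vec4(calculateTriangleNormal(), 0.0)).xyz;
for (int i = 0; i < 3; i++) {
    vec4 mvPos = modelViewMatrix * vec4(gmvVertexPos[i], 1.0);
    gl_Position = projectionMatrix * mvPos;
    mvVertexNormal = mvNormal;
    mvVertexPos = mvPos.xyz;
    EmitVertex();
}
EndPrimitive();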
I have very strange behaviour with specular (Phong light model) lighting: it seems to appear on both sides of all objects. Does anyone know what the issue could be?
The actual calculation seems to be all right, as I can see the light change its position as the object rotates.
#version 330

in vec4 CameraPos0;
in vec3 Pos0;
in vec4 Colour0;
in vec3 Normal0;

out vec4 FragColor;

// Ambient light parameters
uniform vec3 gAmbientLightIntensity;

// Directional light parameters
uniform vec3 gDirectionalLightIntensity;
uniform vec3 gDirectionalLightDirection;

// Specular light parameters
uniform vec3 gSpecularLightIntensity;
uniform vec3 gLightSourcePosition;
uniform vec3 gCameraPosition;

// Material constants
uniform float gKa;
uniform float gKd;
uniform float gKs;

void main()
{
    // Calculate the ambient light intensity at the vertex
    // Ia = Ka * ambientLightIntensity
    vec4 ambientLightIntensity = gKa * vec4(gAmbientLightIntensity, 1.0);

    // Set up the light direction and normalise it
    vec3 lightDirection = normalize(-gDirectionalLightDirection);
    //lightDirection = normalize(gDirectionalLightDirection);

    // Id = Kd * lightIntensity * N.L
    // Calculate N.L
    float diffuseFactor = dot(Normal0, lightDirection);
    diffuseFactor = clamp(diffuseFactor, 0.0, 1.0);

    // N.L * light source colour * intensity
    vec4 diffuseLightIntensity = gKd * vec4(gDirectionalLightIntensity, 1.0f) * diffuseFactor;

    // Phong specular light
    vec3 L = normalize(gLightSourcePosition - Pos0);
    vec3 V = normalize(-Pos0);
    vec3 R = normalize(2 * Normal0 * dot(Normal0, L) - L);
    float specularFactor = pow(dot(R, V), 0.1f);
    vec4 specularLightIntensity = gKs * vec4(gSpecularLightIntensity, 1.0f) * specularFactor;
    specularLightIntensity = clamp(specularLightIntensity, 0.0, 1.0);

    // Final vertex colour is the product of the vertex colour
    // and the total light intensity at the vertex
    vec4 lightedFragColor = Colour0 * (ambientLightIntensity + diffuseLightIntensity + specularLightIntensity);
    FragColor = lightedFragColor;
}
Vertex Shader

#version 330

layout (location = 0) in vec3 Position;
layout (location = 1) in vec3 Normal;
layout (location = 2) in vec4 Colour;

out vec3 Pos0;
out vec4 Colour0;
out vec3 Normal0;
out vec4 CameraPos0;

uniform mat4 gModelToWorldTransform;
uniform mat4 gWorldToViewTransform;
uniform mat4 gProjectionTransform;

void main()
{
    vec4 vertexPositionInModelSpace = vec4(Position, 1);
    vec4 vertexInWorldSpace = gModelToWorldTransform * vertexPositionInModelSpace;
    vec4 vertexInViewSpace = gWorldToViewTransform * vertexInWorldSpace;
    vec4 vertexInHomogeneousClipSpace = gProjectionTransform * vertexInViewSpace;
    gl_Position = vertexInHomogeneousClipSpace;

    vec3 normalInWorldSpace = (gModelToWorldTransform * vec4(Normal, 0.0)).xyz;
    normalInWorldSpace = normalize(normalInWorldSpace);
    Normal0 = normalInWorldSpace;

    CameraPos0 = vertexInViewSpace;
    Pos0 = vertexInWorldSpace.xyz;
    Colour0 = Colour;
}
You need to clamp the dot product result before raising it to a power: on the back side dot(R, V) is negative, and pow of a negative base can return a positive number instead of being cut off at zero.

float specularFactor = pow(clamp(dot(R, V), 0.0, 1.0), 0.1f);

Edit:
Also, V should be a vector pointing from the fragment to the camera position, not the negated vertex position. The shader already has the world-space camera position as a uniform; note that CameraPos0, despite its name, holds the vertex position in view space, so it is not the right value to use here:

vec3 V = normalize(gCameraPosition - Pos0);
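Putting both fixes together, the Phong block of the fragment shader would become (a sketch under those assumptions):

vec3 L = normalize(gLightSourcePosition - Pos0);
vec3 V = normalize(gCameraPosition - Pos0); // from the fragment toward the camera, in world space
vec3 R = normalize(2 * Normal0 * dot(Normal0, L) - L);
float specularFactor = pow(clamp(dot(R, V), 0.0, 1.0), 0.1f);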
I am looking for some help debugging my GLSL Phong shading code. Here is my vertex shader:
layout(std140) uniform Matrices {
    mat4 model[1024];
};

layout(location = 0) in vec4 vertexCoord;
layout(location = 2) in vec3 vertexNormal;

uniform mat4 view;       // from lookAt()
uniform mat4 projection; // perspective projection

out vec3 Position;
out vec3 Normal;
out vec4 lightPosEye;

void main() {
    mat4 modelView = view * model[gl_InstanceID];
    mat3 normalMatrix = mat3(transpose(inverse(modelView)));
    //mat3 normalMatrix = mat3(modelView);

    vec4 lightPos = vec4(350, 350, 350, 1);
    lightPosEye = modelView * lightPos;

    Position = vec3(modelView * vertexCoord);
    Normal = normalize(normalMatrix * vertexNormal);
    gl_Position = projection * vec4(Position, 1.0);
}
and here is my fragment shader:

in vec3 Position;
in vec3 Normal;
in vec4 lightPosEye;

layout(location = 0) out vec4 FragColor;

vec3 ads() {
    vec3 Ka = vec3(0, 0.5, 0);
    vec3 Kd = vec3(0, 0.5, 0);
    vec3 Ks = vec3(0, 0.1, 0);
    vec3 intensity = vec3(0.3, 0.5, 0.0);
    float shininess = 0.1;

    vec3 n = normalize(Normal);
    vec3 s = normalize(vec3(lightPosEye) - Position);
    vec3 v = normalize(vec3(-Position));
    vec3 r = reflect(-s, n);

    return intensity * (Ka + Kd * max(dot(s, n), 0.0) + Ks * pow(max(dot(r, v), 0.0), shininess));
}

void main() {
    FragColor = vec4(ads(), 1);
}
Here is a screenshot of the result (I am also rendering the normals with another geometry shader for debugging purposes):
The artifacts on the cube are wrong, and the "shadows" on the circle also move a bit when I move the camera around (change the view matrix).
Are there any obvious errors in my GLSL code?