Per-vertex lighting problems in OpenGL 4 - C++

I have a model made of cubes with only 8 vertices each, and I am having issues with per-vertex directional lighting. Instead of the entire model being lit as a whole, each cube seems to be lit separately, like this.
This is my vertex shader:
layout(location = 0)in vec3 vp;
layout(location = 1)in vec3 color;
layout(location = 2)in vec3 normal;
out vec3 fColor;
uniform mat4 model;
uniform mat3 nm;
uniform mat3 partNM;
uniform mat4 modelPart;
uniform mat4 view;
uniform mat4 projection;
void main () {
    gl_Position = modelPart * vec4(vp, 1.0f);
    gl_Position = model * gl_Position;
    gl_Position = view * gl_Position;
    gl_Position = projection * gl_Position;
    mat3 normalMatrix = partNM * nm;
    vec3 normalDirection = normalize(normalMatrix * normal);
    vec3 lightDirection = normalize(vec3(-1.0, 1.0, -1.0));
    float diffuseReflection = clamp(dot(normalDirection, lightDirection), 0.0, 1.0);
    fColor = color + diffuseReflection;
}
and my fragment shader:
in vec3 fColor;
out vec4 frag_colour;
void main () {
    frag_colour = vec4(fColor.xyz, 1.0);
}
This is the function I use to set the normal matrix:
void Shader::setNormalMatrix(string name, glm::mat4 matrix) {
    glm::mat3 nm = glm::transpose(glm::inverse(glm::mat3(matrix)));
    unsigned int location = glGetUniformLocation(program, name.c_str());
    glUniformMatrix3fv(location, 1, GL_FALSE, &nm[0][0]);
}
and the function which generates the vertices and normals for my cubes:
std::vector<float> Cube::createCube(float size, float x, float y, float z, float r, float g, float b) {
    VertexType points[8];

    // four corners of the z = (z*size)+size face
    points[0].x = (x*size)+0.0f;  points[0].y = (y*size)+0.0f;  points[0].z = (z*size)+size;
    points[0].nx =  0.577350f;    points[0].ny =  0.577350f;    points[0].nz = -0.577350f;
    points[0].r = r;  points[0].g = g;  points[0].b = b;

    points[1].x = (x*size)+size;  points[1].y = (y*size)+0.0f;  points[1].z = (z*size)+size;
    points[1].nx = -0.577350f;    points[1].ny =  0.577350f;    points[1].nz = -0.577350f;
    points[1].r = r;  points[1].g = g;  points[1].b = b;

    points[2].x = (x*size)+size;  points[2].y = (y*size)+size;  points[2].z = (z*size)+size;
    points[2].nx = -0.577350f;    points[2].ny = -0.577350f;    points[2].nz = -0.577350f;
    points[2].r = r;  points[2].g = g;  points[2].b = b;

    points[3].x = (x*size)+0.0f;  points[3].y = (y*size)+size;  points[3].z = (z*size)+size;
    points[3].nx =  0.577350f;    points[3].ny = -0.577350f;    points[3].nz = -0.577350f;
    points[3].r = r;  points[3].g = g;  points[3].b = b;

    // four corners of the z = (z*size)+0.0f face
    points[4].x = (x*size)+0.0f;  points[4].y = (y*size)+0.0f;  points[4].z = (z*size)+0.0f;
    points[4].nx =  0.577350f;    points[4].ny =  0.577350f;    points[4].nz =  0.577350f;
    points[4].r = r;  points[4].g = g;  points[4].b = b;

    points[5].x = (x*size)+size;  points[5].y = (y*size)+0.0f;  points[5].z = (z*size)+0.0f;
    points[5].nx = -0.577350f;    points[5].ny =  0.577350f;    points[5].nz =  0.577350f;
    points[5].r = r;  points[5].g = g;  points[5].b = b;

    points[6].x = (x*size)+size;  points[6].y = (y*size)+size;  points[6].z = (z*size)+0.0f;
    points[6].nx = -0.577350f;    points[6].ny = -0.577350f;    points[6].nz =  0.577350f;
    points[6].r = r;  points[6].g = g;  points[6].b = b;

    points[7].x = (x*size)+0.0f;  points[7].y = (y*size)+size;  points[7].z = (z*size)+0.0f;
    points[7].nx =  0.577350f;    points[7].ny = -0.577350f;    points[7].nz =  0.577350f;
    points[7].r = r;  points[7].g = g;  points[7].b = b;

    // interleave as x, y, z, r, g, b, nx, ny, nz
    std::vector<float> rPoint;
    for (VertexType p : points) {
        rPoint.push_back(p.x);
        rPoint.push_back(p.y);
        rPoint.push_back(p.z);
        rPoint.push_back(p.r);
        rPoint.push_back(p.g);
        rPoint.push_back(p.b);
        rPoint.push_back(p.nx);
        rPoint.push_back(p.ny);
        rPoint.push_back(p.nz);
    }
    return rPoint;
}
The models are divided up into parts, which is why I have two normal matrices and two model matrices: one pair for the model as a whole, and one for an individual piece of the model. Is there a problem with my code, or do I need to use per-fragment lighting to fix this bug?

Your problem is the topology of your mesh. At the corner of a cube, 3 faces meet, and each of those faces has a different normal. This creates a discontinuity in the normals. To put it in simpler terms: you must use 3 vertices per corner, one for each face, with each vertex's normal pointing in that face's direction.
And while you're at it, you could remove the cube faces that are never visible anyway.
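For example, a minimal sketch of that idea in C++, reusing the interleaved x, y, z, r, g, b, nx, ny, nz layout from the question (appendFace is a hypothetical helper, not part of the original code):
#include <vector>
// Emit one cube face as four dedicated vertices that all share the face
// normal, instead of reusing the eight shared corner vertices. Six faces
// emitted this way give 24 vertices per cube, i.e. 3 per corner.
void appendFace(std::vector<float> &out,
                const float corners[4][3],     // this face's corner positions
                float nx, float ny, float nz,  // this face's outward normal
                float r, float g, float b)
{
    for (int i = 0; i < 4; ++i) {
        out.insert(out.end(), { corners[i][0], corners[i][1], corners[i][2],
                                r, g, b, nx, ny, nz });
    }
}
Called once per face, e.g. with the normal (0, 0, 1) for the +Z face, every corner position then appears three times, once per adjacent face, and each copy carries the normal of its own face.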

This happens because you are rendering each cube as a separate model; the shader thus runs once per model, in your case once per cube. To solve this, you need to render your entire model (your robot) as one model, with one set of vertices, rather than as a set of cubes.
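A minimal sketch of that, assuming the createCube interface shown above ('cubes' is a hypothetical list of cube descriptors): concatenate the vertices of all cubes into one buffer and draw the whole robot with a single call.
std::vector<float> robot;
for (const auto &c : cubes) {  // hypothetical list of cube positions/colors
    std::vector<float> v = Cube::createCube(c.size, c.x, c.y, c.z, c.r, c.g, c.b);
    robot.insert(robot.end(), v.begin(), v.end());
}
// upload once and render the whole model with one draw call
glBufferData(GL_ARRAY_BUFFER, robot.size() * sizeof(float), robot.data(), GL_STATIC_DRAW);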

Related

Incorrect Bone Transforms Assimp

I'm having trouble creating skeletal animation; my model has incorrect transformations:
void Animation(glm::mat4 a[])
{
    float Factor = fmod(glfwGetTime(), 1.0);
    for (int b = 0; b < BoneList.size(); b++)
    {
        Bone *BoneT = &BoneList[b];
        aiMatrix4x4 temp = inverse;
        while (BoneT)
        {
            aiVector3D sc = BoneT->ScaleFrame[0] + (Factor * (BoneT->ScaleFrame[1] - BoneT->ScaleFrame[0]));
            aiMatrix4x4 S = aiMatrix4x4();
            S[0][0] = sc.x;
            S[1][1] = sc.y;
            S[2][2] = sc.z;
            aiVector3D tr = BoneT->LocFrame[0] + (Factor * (BoneT->LocFrame[1] - BoneT->LocFrame[0]));
            aiMatrix4x4 T = aiMatrix4x4();
            T[0][3] = tr.x;
            T[1][3] = tr.y;
            T[2][3] = tr.z;
            aiQuaternion R;
            aiQuaternion::Interpolate(R, BoneT->RotFrame[0], BoneT->RotFrame[1], Factor);
            R = R.Normalize();
            temp *= BoneT->NodeTransform * (T * aiMatrix4x4(R.GetMatrix()) * S);
            BoneT = BoneT->BoneParent;
        }
        temp *= BoneList[b].offset;
        temp.Transpose();
        ai_to_glm(temp, a[b]);
    }
}
I'm creating a temp aiMatrix4x4 to preserve Assimp's matrix multiplication order, then I convert the aiMatrix4x4 to glm::mat4 using this function:
void ai_to_glm(const aiMatrix4x4 &from, glm::mat4 &to)
{
    to[0][0] = from[0][0]; to[0][1] = from[0][1]; to[0][2] = from[0][2]; to[0][3] = from[0][3];
    to[1][0] = from[1][0]; to[1][1] = from[1][1]; to[1][2] = from[1][2]; to[1][3] = from[1][3];
    to[2][0] = from[2][0]; to[2][1] = from[2][1]; to[2][2] = from[2][2]; to[2][3] = from[2][3];
    to[3][0] = from[3][0]; to[3][1] = from[3][1]; to[3][2] = from[3][2]; to[3][3] = from[3][3];
}
However, the end frame of the model looks like this:
I noticed that if I remove the translation matrix from the function, the model looks closer to what it was supposed to be.
The skinning is done in the shader:
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 Tpos;
layout (location = 2) in ivec4 Bones;
layout (location = 3) in vec4 Weight;
uniform mat4 view;
uniform mat4 proj;
uniform mat4 LocRot;
uniform mat4 Test[8];
out vec3 vertexColor;
out vec2 TextCoord;
void main()
{
    mat4 BoneTransform = Test[Bones.x] * Weight.x;
    BoneTransform += Test[Bones.y] * Weight.y;
    BoneTransform += Test[Bones.z] * Weight.z;
    BoneTransform += Test[Bones.w] * Weight.w;
    gl_Position = proj * view * LocRot * BoneTransform * vec4(aPos, 1.0);
    vertexColor = vec3(1, 1, 1);
    TextCoord = Tpos;
}
and the uniform is accessed:
for (int i = 0; i < 4; i++)
{
    glUniformMatrix4fv(glGetUniformLocation(ActiveShader->ID, "Test[0]") + i, 1, GL_FALSE, glm::value_ptr(AnimMatrix[i]));
}
What I am aware of:
- the inverse matrix is an identity matrix, which does nothing to this model right now
- some weight sums aren't equal to 1.0, but I think that's not the problem
- changing the matrix multiplication order doesn't solve it
The model is created and exported in Blender.
Link to the model: https://send.firefox.com/download/fe0b85d3f4581630/#6S0Vr9EIjgLNN03rerMW0w
My bet is that the ai_to_glm function is at fault here, but I am not sure.
Edit: I noticed that the rotations are flipped as well, as shown in the images; however, multiplying by inverseai (the inverted root bone transformation) does nothing.
Update: I transposed the Assimp matrix before conversion and that fixed most problems, but the offsets and parent inheritance are still bugged.

Before any suspicion: I had no idea that I already had an account on Stack Overflow, so I am answering this question from my real account.
To fix this, multiple things were required:
- Iterating the bones and assigning children/parents was using unstable pointers and got corrupted; fixing that solved the major problem.
- I used codingadventures's answer to the question Matrix calculations for gpu skinning.
- My ai_to_glm was wrong; after replacing it with
glm::mat4 ai_to_glm(aiMatrix4x4* from)
{
glm::mat4 to = glm::mat4(1.0f);
to[0][0] = (GLfloat)from->a1; to[0][1] = (GLfloat)from->b1; to[0][2] = (GLfloat)from->c1; to[0][3] = (GLfloat)from->d1;
to[1][0] = (GLfloat)from->a2; to[1][1] = (GLfloat)from->b2; to[1][2] = (GLfloat)from->c2; to[1][3] = (GLfloat)from->d2;
to[2][0] = (GLfloat)from->a3; to[2][1] = (GLfloat)from->b3; to[2][2] = (GLfloat)from->c3; to[2][3] = (GLfloat)from->d3;
to[3][0] = (GLfloat)from->a4; to[3][1] = (GLfloat)from->b4; to[3][2] = (GLfloat)from->c4; to[3][3] = (GLfloat)from->d4;
return to;
};
After doing that, it was fixed.
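For reference, the reason a transpose shows up here: Assimp stores matrices row-major while GLM is column-major. A minimal equivalent sketch, assuming glm/gtc/type_ptr.hpp is available:
#include <assimp/matrix4x4.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
// glm::make_mat4 reads 16 contiguous floats column-major; applied to Assimp's
// row-major storage that yields the transpose, so one glm::transpose recovers
// the original matrix.
glm::mat4 ai_to_glm(const aiMatrix4x4 &from)
{
    return glm::transpose(glm::make_mat4(&from.a1));
}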

Shader Flipping Faces

I'm trying to construct a render engine using OpenGL and C++, but I can't seem to get past this problem. The same model is rendered 5 different times using different shaders; in 4 out of the 5 shaders the backface culling works properly. In the tessellation shader, however, it does not: any outward faces are invisible, so you can see straight through to the rear ones. Does anyone know why this shader flips the faces?
Vertex Shader
void main()
{
    worldVertexPosition_cs = (transformationMatrix * vec4(position_vs, 1.0)).xyz;
    worldTextureCoords_cs = textureCoords_vs;
    worldNormal_cs = mat3(transpose(inverse(transformationMatrix))) * normal_vs;
}
Control Shader
float getTessLevel(float distance0, float distance1)
{
    float avgDistance = (distance0 + distance1) / 2.0;
    avgDistance = (100 - avgDistance) / 20;
    if (avgDistance < 1) {
        avgDistance = 1;
    }
    return avgDistance;
}

void main()
{
    worldTextureCoords_es[gl_InvocationID] = worldTextureCoords_cs[gl_InvocationID];
    worldNormal_es[gl_InvocationID] = worldNormal_cs[gl_InvocationID];
    worldVertexPosition_es[gl_InvocationID] = worldVertexPosition_cs[gl_InvocationID];
    float eyeToVertexDistance0 = distance(eyePos, worldVertexPosition_es[0]);
    float eyeToVertexDistance1 = distance(eyePos, worldVertexPosition_es[1]);
    float eyeToVertexDistance2 = distance(eyePos, worldVertexPosition_es[2]);
    gl_TessLevelOuter[0] = getTessLevel(eyeToVertexDistance1, eyeToVertexDistance2);
    gl_TessLevelOuter[1] = getTessLevel(eyeToVertexDistance2, eyeToVertexDistance0);
    gl_TessLevelOuter[2] = getTessLevel(eyeToVertexDistance0, eyeToVertexDistance1);
    gl_TessLevelInner[0] = gl_TessLevelOuter[2];
}
Evaluation Shader
vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2)
{
    return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2;
}

vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2)
{
    return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2;
}

void main()
{
    worldTextureCoords_fs = interpolate2D(worldTextureCoords_es[0], worldTextureCoords_es[1], worldTextureCoords_es[2]);
    worldNormal_fs = interpolate3D(worldNormal_es[0], worldNormal_es[1], worldNormal_es[2]);
    worldNormal_fs = normalize(worldNormal_fs);
    worldVertexPosition_fs = interpolate3D(worldVertexPosition_es[0], worldVertexPosition_es[1], worldVertexPosition_es[2]);
    float displacement = texture(texture_displacement0, worldTextureCoords_fs.xy).x;
    worldVertexPosition_fs += worldNormal_fs * (displacement / 1.0f);
    gl_Position = projectionMatrix * viewMatrix * vec4(worldVertexPosition_fs.xyz, 1.0);
}
Fragment Shader
void main()
{
    vec3 unitNormal = normalize(worldNormal_fs);
    vec3 unitLightVector = normalize(lightPosition - worldVertexPosition_fs);
    float dotResult = dot(unitNormal, unitLightVector);
    float brightness = max(dotResult, blackPoint);
    vec3 diffuse = brightness * lightColor;
    FragColor = vec4(diffuse, 1.0) * texture(texture_diffuse0, worldTextureCoords_fs);
    FragColor.rgb = pow(FragColor.rgb, vec3(1.0 / gamma));
}
In the tessellation evaluation shader you have to define the winding order of the generated triangles.
This is done via the cw and ccw input layout qualifiers; the default is ccw.
Either generate clockwise primitives:
layout(triangles, cw) in;
Or generate counterclockwise primitives:
layout(triangles, ccw) in;
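If changing the shader is undesirable, an alternative sketch (assuming the winding really is reversed only in this tessellation pass) is to flip the front-face convention on the host side for just that draw call; drawTessellatedModel is a hypothetical stand-in for however this pass is issued:
glFrontFace(GL_CW);      // treat clockwise triangles as front-facing for this pass
drawTessellatedModel();  // hypothetical draw call for the tessellated model
glFrontFace(GL_CCW);     // restore the default for the other four shaders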

OpenGL - strange SSAO artifact

I followed the tutorial at Learn OpenGL to implement screen-space ambient occlusion. Things are mostly looking okay, aside from a strange artifact at the top and bottom of the window.
The problem is more obvious when moving the camera, when it appears as if top parts of the image are imprinted on the bottom and vice versa, as shown in this video.
The artifact worsens when standing close to a wall and looking up and down, so perhaps the Znear value is contributing? The scale of my scene does seem small compared to other demos; Znear and Zfar are 0.01f and 1000, and the width of the shown hallway is around 1.2f.
I've read into the common SSAO artifacts and haven't found anything resembling this.
#version 330 core
in vec2 TexCoords;
layout (location = 0) out vec3 FragColor;
uniform sampler2D MyTexture0; // Position
uniform sampler2D MyTexture1; // Normal
uniform sampler2D MyTexture2; // TexNoise
const int samples = 64;
const float radius = 0.25;
const float bias = 0.025;
uniform mat4 projectionMatrix;
uniform float screenWidth;
uniform float screenHeight;
void main()
{
//tile noise texture over screen based on screen dimensions divided by noise size
vec2 noiseScale = vec2(screenWidth/4.0, screenHeight/4.0);
vec3 sample_sphere[64];
sample_sphere[0] = vec3(0.04977, -0.04471, 0.04996);
sample_sphere[1] = vec3(0.01457, 0.01653, 0.00224);
sample_sphere[2] = vec3(-0.04065, -0.01937, 0.03193);
sample_sphere[3] = vec3(0.01378, -0.09158, 0.04092);
sample_sphere[4] = vec3(0.05599, 0.05979, 0.05766);
sample_sphere[5] = vec3(0.09227, 0.04428, 0.01545);
sample_sphere[6] = vec3(-0.00204, -0.0544, 0.06674);
sample_sphere[7] = vec3(-0.00033, -0.00019, 0.00037);
sample_sphere[8] = vec3(0.05004, -0.04665, 0.02538);
sample_sphere[9] = vec3(0.03813, 0.0314, 0.03287);
sample_sphere[10] = vec3(-0.03188, 0.02046, 0.02251);
sample_sphere[11] = vec3(0.0557, -0.03697, 0.05449);
sample_sphere[12] = vec3(0.05737, -0.02254, 0.07554);
sample_sphere[13] = vec3(-0.01609, -0.00377, 0.05547);
sample_sphere[14] = vec3(-0.02503, -0.02483, 0.02495);
sample_sphere[15] = vec3(-0.03369, 0.02139, 0.0254);
sample_sphere[16] = vec3(-0.01753, 0.01439, 0.00535);
sample_sphere[17] = vec3(0.07336, 0.11205, 0.01101);
sample_sphere[18] = vec3(-0.04406, -0.09028, 0.08368);
sample_sphere[19] = vec3(-0.08328, -0.00168, 0.08499);
sample_sphere[20] = vec3(-0.01041, -0.03287, 0.01927);
sample_sphere[21] = vec3(0.00321, -0.00488, 0.00416);
sample_sphere[22] = vec3(-0.00738, -0.06583, 0.0674);
sample_sphere[23] = vec3(0.09414, -0.008, 0.14335);
sample_sphere[24] = vec3(0.07683, 0.12697, 0.107);
sample_sphere[25] = vec3(0.00039, 0.00045, 0.0003);
sample_sphere[26] = vec3(-0.10479, 0.06544, 0.10174);
sample_sphere[27] = vec3(-0.00445, -0.11964, 0.1619);
sample_sphere[28] = vec3(-0.07455, 0.03445, 0.22414);
sample_sphere[29] = vec3(-0.00276, 0.00308, 0.00292);
sample_sphere[30] = vec3(-0.10851, 0.14234, 0.16644);
sample_sphere[31] = vec3(0.04688, 0.10364, 0.05958);
sample_sphere[32] = vec3(0.13457, -0.02251, 0.13051);
sample_sphere[33] = vec3(-0.16449, -0.15564, 0.12454);
sample_sphere[34] = vec3(-0.18767, -0.20883, 0.05777);
sample_sphere[35] = vec3(-0.04372, 0.08693, 0.0748);
sample_sphere[36] = vec3(-0.00256, -0.002, 0.00407);
sample_sphere[37] = vec3(-0.0967, -0.18226, 0.29949);
sample_sphere[38] = vec3(-0.22577, 0.31606, 0.08916);
sample_sphere[39] = vec3(-0.02751, 0.28719, 0.31718);
sample_sphere[40] = vec3(0.20722, -0.27084, 0.11013);
sample_sphere[41] = vec3(0.0549, 0.10434, 0.32311);
sample_sphere[42] = vec3(-0.13086, 0.11929, 0.28022);
sample_sphere[43] = vec3(0.15404, -0.06537, 0.22984);
sample_sphere[44] = vec3(0.05294, -0.22787, 0.14848);
sample_sphere[45] = vec3(-0.18731, -0.04022, 0.01593);
sample_sphere[46] = vec3(0.14184, 0.04716, 0.13485);
sample_sphere[47] = vec3(-0.04427, 0.05562, 0.05586);
sample_sphere[48] = vec3(-0.02358, -0.08097, 0.21913);
sample_sphere[49] = vec3(-0.14215, 0.19807, 0.00519);
sample_sphere[50] = vec3(0.15865, 0.23046, 0.04372);
sample_sphere[51] = vec3(0.03004, 0.38183, 0.16383);
sample_sphere[52] = vec3(0.08301, -0.30966, 0.06741);
sample_sphere[53] = vec3(0.22695, -0.23535, 0.19367);
sample_sphere[54] = vec3(0.38129, 0.33204, 0.52949);
sample_sphere[55] = vec3(-0.55627, 0.29472, 0.3011);
sample_sphere[56] = vec3(0.42449, 0.00565, 0.11758);
sample_sphere[57] = vec3(0.3665, 0.00359, 0.0857);
sample_sphere[58] = vec3(0.32902, 0.0309, 0.1785);
sample_sphere[59] = vec3(-0.08294, 0.51285, 0.05656);
sample_sphere[60] = vec3(0.86736, -0.00273, 0.10014);
sample_sphere[61] = vec3(0.45574, -0.77201, 0.00384);
sample_sphere[62] = vec3(0.41729, -0.15485, 0.46251);
sample_sphere[63] = vec3 (-0.44272, -0.67928, 0.1865);
// get input for SSAO algorithm
vec3 fragPos = texture(MyTexture0, TexCoords).xyz;
vec3 normal = normalize(texture(MyTexture1, TexCoords).rgb);
vec3 randomVec = normalize(texture(MyTexture2, TexCoords * noiseScale).xyz);
// create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for (int i = 0; i < samples; ++i)
{
    // get sample position
    vec3 sample = TBN * sample_sphere[i]; // from tangent to view-space
    sample = fragPos + sample * radius;
    // project sample position (to sample texture) (to get position on screen/texture)
    vec4 offset = vec4(sample, 1.0);
    offset = projectionMatrix * offset; // from view to clip-space
    offset.xyz /= offset.w; // perspective divide
    offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
    // get sample depth
    float sampleDepth = texture(MyTexture0, offset.xy).z;
    // range check & accumulate
    float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
    occlusion += (sampleDepth >= sample.z + bias ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / samples);
FragColor = vec3(occlusion);
}
As Rabbid76 suggested, the artifacts were caused by sampling outside of the screen borders. I added a check to prevent this, and things are looking much better.
vec4 clipSpacePos = projectionMatrix * vec4(sample, 1.0); // from view to clip-space
vec3 ndcSpacePos = clipSpacePos.xyz / clipSpacePos.w;     // perspective divide
vec2 windowSpacePos = ((ndcSpacePos.xy + 1.0) / 2.0) * vec2(screenWidth, screenHeight);
if ((windowSpacePos.x > 0) && (windowSpacePos.x < screenWidth) &&
    (windowSpacePos.y > 0) && (windowSpacePos.y < screenHeight))
{
    // THEN APPLY AMBIENT OCCLUSION
}
It hasn't entirely fixed the issue, though, as areas close to the window's edge now appear lighter than they should because fewer samples are tested there. Perhaps somebody can suggest an approach that moves the sample area to an appropriate location?
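One thing worth checking (an assumption, since the texture setup isn't shown): if the position/normal G-buffer textures use the default GL_REPEAT wrap mode, samples that fall off the top of the screen wrap around and fetch from the bottom and vice versa, which matches the mirrored imprint described above. Clamping those textures stops the wrap-around without discarding edge samples; positionTexture is a hypothetical handle for whatever is bound as MyTexture0:
glBindTexture(GL_TEXTURE_2D, positionTexture); // hypothetical G-buffer handle
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
With clamping, out-of-bounds fetches return the nearest edge texel instead of the opposite side of the screen.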

OpenGL Phong Shading

I'd like to render an object so that I can see the inside of the box. I used Phong shading.
When I draw the object with glPolygonMode(GL_FRONT_AND_BACK, GL_LINE), the image looks like this:
But when I use glPolygonMode(GL_FRONT_AND_BACK, GL_FILL), the image looks like this:
I'd like to shade only the rectangular part, so that I can see the objects inside the box. This is the fragment shader code, and I think it works well, but I don't know why I can't see the inside.
#version 400
struct LIGHT {
    vec4 position; // assume point or direction in EC in this example shader
    vec4 ambient_color, diffuse_color, specular_color;
    vec4 light_attenuation_factors; // compute this effect only if .w != 0.0f
    vec3 spot_direction;
    float spot_exponent;
    float spot_cutoff_angle;
    bool light_on;
};

struct MATERIAL {
    vec4 ambient_color;
    vec4 diffuse_color;
    vec4 specular_color;
    vec4 emissive_color;
    float specular_exponent;
};

uniform vec4 u_global_ambient_color;
#define NUMBER_OF_LIGHTS_SUPPORTED 4
uniform LIGHT u_light[NUMBER_OF_LIGHTS_SUPPORTED];
uniform MATERIAL u_material;

const float zero_f = 0.0f;
const float one_f = 1.0f;

in vec3 v_position_EC;
in vec3 v_normal_EC;
layout (location = 0) out vec4 final_color;

vec4 lighting_equation(in vec3 P_EC, in vec3 N_EC) {
    vec4 color_sum;
    float local_scale_factor, tmp_float;
    vec3 L_EC;
    color_sum = u_material.emissive_color + u_global_ambient_color * u_material.ambient_color;
    for (int i = 0; i < NUMBER_OF_LIGHTS_SUPPORTED; i++) {
        if (!u_light[i].light_on) continue;
        local_scale_factor = one_f;
        if (u_light[i].position.w != zero_f) { // point light source
            L_EC = u_light[i].position.xyz - P_EC.xyz;
            if (u_light[i].light_attenuation_factors.w != zero_f) {
                vec4 tmp_vec4;
                tmp_vec4.x = one_f;
                tmp_vec4.z = dot(L_EC, L_EC);
                tmp_vec4.y = sqrt(tmp_vec4.z);
                tmp_vec4.w = zero_f;
                local_scale_factor = one_f / dot(tmp_vec4, u_light[i].light_attenuation_factors);
            }
            L_EC = normalize(L_EC);
            if (u_light[i].spot_cutoff_angle < 180.0f) { // [0.0f, 90.0f] or 180.0f
                float spot_cutoff_angle = clamp(u_light[i].spot_cutoff_angle, zero_f, 90.0f);
                vec3 spot_dir = normalize(u_light[i].spot_direction);
                tmp_float = dot(-L_EC, spot_dir);
                if (tmp_float >= cos(radians(spot_cutoff_angle))) {
                    tmp_float = pow(tmp_float, u_light[i].spot_exponent);
                }
                else
                    tmp_float = zero_f;
                local_scale_factor *= tmp_float;
            }
        }
        else { // directional light source
            L_EC = normalize(u_light[i].position.xyz);
        }
        if (local_scale_factor > zero_f) {
            vec4 local_color_sum = u_light[i].ambient_color * u_material.ambient_color;
            tmp_float = dot(N_EC, L_EC);
            if (tmp_float > zero_f) {
                local_color_sum += u_light[i].diffuse_color * u_material.diffuse_color * tmp_float;
                vec3 H_EC = normalize(L_EC - normalize(P_EC));
                tmp_float = dot(N_EC, H_EC);
                if (tmp_float > zero_f) {
                    local_color_sum += u_light[i].specular_color
                        * u_material.specular_color * pow(tmp_float, u_material.specular_exponent);
                }
            }
            color_sum += local_scale_factor * local_color_sum;
        }
    }
    return color_sum;
}

void main(void) {
    final_color = lighting_equation(v_position_EC, normalize(v_normal_EC)); // for normal rendering
}

How to generate an OBJ mesh file if I used GLSL

I want to generate an OBJ file from a program that uses a GLSL geometry shader to generate the mesh. I can already get the vertex information from the code, but how can I extract the triangle information produced by the .geom.glsl file and export it into an OBJ file?
Also, is there any helper function to do so? If not, how should I write the code to get the point and triangle information from the geom.glsl file?
Here is the attached geom.glsl:
#version 400 core
#extension GL_EXT_geometry_shader4 : enable
layout(lines, invocations = 1) in;
layout(triangle_strip, max_vertices = 100) out;
uniform mat4 matLightView;
uniform mat4 matViewProjection;
uniform vec3 lightPos;
uniform vec3 camPos;
uniform int isExplicit;
in vec4 VertPosition[];
in vec4 VertColor[];
in vec3 VertNormal[];
in vec3 VertTexture[];
in float VertLengthTotal[];
in float VertLengthFromBeginning[];
out vec3 GeomNormal;
out vec2 GeomTexCoords;
out float GeomDiffuse;
out float GeomThickness;
out vec4 texCoordA;
out vec4 texCoordB;
const float PI2 = 2 * 3.141592654;
void main()
{
    // for(int i=0; i<gl_VerticesIn-1; ++i)
    for (int i = 0; i < gl_in.length() - 1; ++i)
    {
        // Reading data
        vec4 posS = VertPosition[i];
        vec4 posT = VertPosition[i+1];
        vec3 vS = VertColor[i].xyz;
        vec3 vT = VertColor[i+1].xyz;
        vec3 tS = VertTexture[i].xyz;
        vec3 tT = VertTexture[i+1].xyz;
        float thickS = VertColor[i].w;
        float thickT = VertColor[i+1].w;

        // Computing
        vec3 v11 = normalize(vS);
        vec3 v12 = normalize(cross(vS, tS));
        vec3 v21 = normalize(vT);
        vec3 v22 = normalize(cross(vT, tT));
        float rS = max(0.0001, thickS);
        float rT = max(0.0001, thickT);
        int pS = 10;
        int pT = 10;
        int forMax = 16;

        // Light position
        vec4 lPos = normalize(vec4(-lightPos.x, -lightPos.y, -lightPos.z, 1));
        vec3 L = normalize(lPos.xyz);

        for (int k = 0; k <= forMax; ++k)
        {
            float angle = k * (PI2 / forMax);
            vec3 newPS = posS.xyz + (v11 * sin(angle) + v12 * cos(angle)) * rS;
            vec3 newPT = posT.xyz + (v21 * sin(angle) + v22 * cos(angle)) * rT;
            float scale = 1.0f;
            float texX = float(k) / float(forMax);
            float edgeLength = length(posS - posT);
            float sTexY = (VertLengthFromBeginning[i] * scale);
            float tTexY = (VertLengthFromBeginning[i+1] * scale);

            // Source vertex
            vec3 N = normalize(posS.xyz - newPS);
            texCoordB = matLightView * vec4(newPS, 1);
            GeomNormal = N;
            GeomThickness = rS;
            GeomDiffuse = rS < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
            GeomTexCoords = vec2(texX, sTexY);
            gl_Position = matViewProjection * vec4(newPS, 1);
            EmitVertex();

            // Target vertex
            N = normalize(posT.xyz - newPT);
            texCoordB = matLightView * vec4(newPT, 1);
            GeomNormal = N;
            GeomThickness = rT;
            GeomDiffuse = rT < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
            GeomTexCoords = vec2(texX, tTexY);
            gl_Position = matViewProjection * vec4(newPT, 1);
            EmitVertex();
        }
    }
    EndPrimitive();
}
And the vert.glsl:
#version 400 core
#define VERT_POSITION 0
#define VERT_NORMAL 1
#define VERT_COLOR 2
#define VERT_TEXTURE 3
layout(location = VERT_POSITION) in vec4 Position;
layout(location = VERT_NORMAL) in vec4 Normal;
layout(location = VERT_COLOR) in vec4 Color;
layout(location = VERT_TEXTURE) in vec4 Texture;
out vec4 VertPosition;
out vec3 VertNormal;
out vec3 VertTexture;
out vec4 VertColor;
out float VertLengthFromBeginning;
out float VertLengthTotal;
uniform mat4 matModel;
void main()
{
    VertPosition = matModel * Position;
    VertNormal = Normal.xyz;             // direction
    VertColor = Color;                   // V from PTF, VertColor.w = thickness
    VertTexture = Texture.xyz;           // tangent
    VertLengthFromBeginning = Normal.w;  // global texture coordinates
    VertLengthTotal = Texture.w;         // total length of chain
}
Many thanks!