How to generate an OBJ mesh file from code that uses GLSL - C++

I want to generate an OBJ file from a program that uses GLSL shaders to generate a mesh. I can already get the vertex information out of the code, but how can I extract the triangle information produced by the .geom.glsl file and export it into an OBJ file?
Also, is there a helper function to do so? If not, how should I write the code to get the point and triangle information out of the geom.glsl file?
Here is the attached geom.glsl:
#version 400 core
#extension GL_EXT_geometry_shader4 : enable

layout(lines, invocations = 1) in;
layout(triangle_strip, max_vertices = 100) out;

uniform mat4 matLightView;
uniform mat4 matViewProjection;
uniform vec3 lightPos;
uniform vec3 camPos;
uniform int isExplicit;

in vec4 VertPosition[];
in vec4 VertColor[];
in vec3 VertNormal[];
in vec3 VertTexture[];
in float VertLengthTotal[];
in float VertLengthFromBeginning[];

out vec3 GeomNormal;
out vec2 GeomTexCoords;
out float GeomDiffuse;
out float GeomThickness;
out vec4 texCoordA;
out vec4 texCoordB;

const float PI2 = 2 * 3.141592654;

void main()
{
    // for(int i=0; i<gl_VerticesIn-1; ++i)
    for (int i = 0; i < gl_in.length() - 1; ++i)
    {
        // Reading data
        vec4 posS = VertPosition[i];
        vec4 posT = VertPosition[i+1];

        vec3 vS = VertColor[i].xyz;
        vec3 vT = VertColor[i+1].xyz;

        vec3 tS = VertTexture[i].xyz;
        vec3 tT = VertTexture[i+1].xyz;

        float thickS = VertColor[i].w;
        float thickT = VertColor[i+1].w;

        // Computing
        vec3 v11 = normalize(vS);
        vec3 v12 = normalize(cross(vS, tS));
        vec3 v21 = normalize(vT);
        vec3 v22 = normalize(cross(vT, tT));

        float rS = max(0.0001, thickS);
        float rT = max(0.0001, thickT);

        int pS = 10;
        int pT = 10;
        int forMax = 16;

        // Light position
        vec4 lPos = normalize(vec4(-lightPos.x, -lightPos.y, -lightPos.z, 1));
        vec3 L = normalize(lPos.xyz);

        for (int k = 0; k <= forMax; ++k)
        {
            float angle = k * (PI2 / forMax);

            vec3 newPS = posS.xyz + (v11 * sin(angle) + v12 * cos(angle)) * rS;
            vec3 newPT = posT.xyz + (v21 * sin(angle) + v22 * cos(angle)) * rT;

            float scale = 1.0f;
            float texX = float(k) / float(forMax);
            float edgeLength = length(posS - posT);

            float sTexY = (VertLengthFromBeginning[i] * scale);
            float tTexY = (VertLengthFromBeginning[i+1] * scale);

            // Source vertex
            vec3 N = normalize(posS.xyz - newPS);
            texCoordB = matLightView * vec4(newPS, 1);
            GeomNormal = N;
            GeomThickness = rS;
            GeomDiffuse = rS < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
            GeomTexCoords = vec2(texX, sTexY);
            gl_Position = matViewProjection * vec4(newPS, 1);
            EmitVertex();

            // Target vertex
            N = normalize(posT.xyz - newPT);
            texCoordB = matLightView * vec4(newPT, 1);
            GeomNormal = N;
            GeomThickness = rT;
            GeomDiffuse = rT < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
            GeomTexCoords = vec2(texX, tTexY);
            gl_Position = matViewProjection * vec4(newPT, 1);
            EmitVertex();
        }
    }
    EndPrimitive();
}
And the vert.glsl:
#version 400 core

#define VERT_POSITION 0
#define VERT_NORMAL   1
#define VERT_COLOR    2
#define VERT_TEXTURE  3

layout(location = VERT_POSITION) in vec4 Position;
layout(location = VERT_NORMAL)   in vec4 Normal;
layout(location = VERT_COLOR)    in vec4 Color;
layout(location = VERT_TEXTURE)  in vec4 Texture;

out vec4 VertPosition;
out vec3 VertNormal;
out vec3 VertTexture;
out vec4 VertColor;
out float VertLengthFromBeginning;
out float VertLengthTotal;

uniform mat4 matModel;

void main()
{
    VertPosition = matModel * Position;
    VertNormal   = Normal.xyz;           // Direction
    VertColor    = Color;                // V from PTF, VertColor.w = thick
    VertTexture  = Texture.xyz;          // Tangent
    VertLengthFromBeginning = Normal.w;  // Global texture coordinates
    VertLengthTotal         = Texture.w; // Total length of the chain
}
Thanks a lot!!
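For reference, here is one possible approach (a sketch under assumptions, not from the original thread): the triangles emitted by a geometry shader never exist as a file, only on the GPU, but they can be captured with transform feedback. Add an explicit output such as out vec3 GeomPosition; to the geometry shader and assign it (for example GeomPosition = newPS;) before each EmitVertex(), then capture that varying into a buffer and write OBJ v/f lines on the CPU. Captured triangle strips arrive as independent triangles, so every 3 consecutive vertices form one face. The names prog, inputVao, and lineVertexCount below are hypothetical placeholders for your linked program, input VAO, and the number of line vertices you draw.

// Sketch only: capture the geometry shader's triangles with transform
// feedback and write them to an OBJ file. Assumes a GLEW-style loader and
// that the geometry shader was extended with "out vec3 GeomPosition;",
// assigned before every EmitVertex().
#include <GL/glew.h>
#include <cstdio>
#include <vector>

void exportToObj(GLuint prog, GLuint inputVao, GLsizei lineVertexCount, const char* path)
{
    // Ask the linker to capture our extra output; this requires a relink.
    const char* varyings[] = { "GeomPosition" };
    glTransformFeedbackVaryings(prog, 1, varyings, GL_INTERLEAVED_ATTRIBS);
    glLinkProgram(prog);

    // Worst case: (max_vertices - 2) = 98 triangles per input line,
    // 3 vertices per triangle, 3 floats per vertex.
    GLsizeiptr maxFloats = (lineVertexCount / 2) * 98 * 3 * 3;
    GLuint buf;
    glGenBuffers(1, &buf);
    glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, buf);
    glBufferData(GL_TRANSFORM_FEEDBACK_BUFFER, maxFloats * sizeof(float), nullptr, GL_STATIC_READ);
    glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, buf);

    // Draw once with rasterization off, counting the emitted triangles.
    GLuint query;
    glGenQueries(1, &query);
    glUseProgram(prog);
    glBindVertexArray(inputVao);
    glEnable(GL_RASTERIZER_DISCARD);
    glBeginQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN, query);
    glBeginTransformFeedback(GL_TRIANGLES);   // strips are captured as triangles
    glDrawArrays(GL_LINES, 0, lineVertexCount);
    glEndTransformFeedback();
    glEndQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN);
    glDisable(GL_RASTERIZER_DISCARD);

    GLuint triCount = 0;
    glGetQueryObjectuiv(query, GL_QUERY_RESULT, &triCount);

    // Read the vertices back and emit OBJ text:
    // one "v x y z" line per vertex, one "f i j k" line per triangle.
    std::vector<float> v(triCount * 9);
    glGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, v.size() * sizeof(float), v.data());

    FILE* f = std::fopen(path, "w");
    for (GLuint i = 0; i < triCount * 3; ++i)
        std::fprintf(f, "v %f %f %f\n", v[i*3], v[i*3+1], v[i*3+2]);
    for (GLuint i = 0; i < triCount; ++i)      // OBJ indices are 1-based
        std::fprintf(f, "f %u %u %u\n", i*3 + 1, i*3 + 2, i*3 + 3);
    std::fclose(f);
}

Since the export presumably runs once, the synchronous glGetBufferSubData read-back is fine here; de-duplicating shared vertices before writing the f lines is an optional refinement.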

Related

Why is my OpenGL compute shader so slow?

I have been building an OpenGL compute shader that implements ray tracing. Currently it just computes the pixel color by casting a ray against an array of triangles.
#version 430 core

struct Triangle {
    vec3 vertex1;
    vec3 vertex2;
    vec3 vertex3;
    vec3 color1;
    vec3 color2;
    vec3 color3;
    vec3 normal1;
    vec3 normal2;
    vec3 normal3;
    vec3 edge1;
    vec3 edge2;
};

layout (std430, binding = 0) readonly buffer TriangleBuffer {
    int numTriangles;
    Triangle triangles[];
};

layout (std430, binding = 1, column_major) buffer CameraBuffer {
    vec3 cameraPosition;
    mat4 view;
    mat4 projection;
    mat4 inverseViewProjection;
};

layout (rgba8, binding = 2) writeonly uniform image2D outputImage;

layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

vec3 getBarycentricCoords(int triangleIndex, vec3 closestIntersectionPoint) {
    vec3 v0 = triangles[triangleIndex].vertex2 - triangles[triangleIndex].vertex1;
    vec3 v1 = triangles[triangleIndex].vertex3 - triangles[triangleIndex].vertex1;
    vec3 v2 = closestIntersectionPoint - triangles[triangleIndex].vertex1;

    float d00 = dot(v0, v0);
    float d01 = dot(v0, v1);
    float d11 = dot(v1, v1);
    float d20 = dot(v2, v0);
    float d21 = dot(v2, v1);

    float denom = d00 * d11 - d01 * d01;
    float b1 = (d11 * d20 - d01 * d21) / denom;
    float b2 = (d00 * d21 - d01 * d20) / denom;
    float b0 = 1.0f - b1 - b2;
    return vec3(b0, b1, b2);
}

vec3 getTriangleColor(int triangleIndex, vec3 closestIntersectionPoint) {
    vec3 barycentric = getBarycentricCoords(triangleIndex, closestIntersectionPoint);
    vec3 triangleColor = barycentric.x * triangles[triangleIndex].color1
                       + barycentric.y * triangles[triangleIndex].color2
                       + barycentric.z * triangles[triangleIndex].color3;
    return triangleColor;
}

bool rayTriangleIntersection(vec3 rayOrigin, vec3 rayDirection, int triangleIndex, out vec3 intersectionPoint) {
    vec3 h = cross(rayDirection, triangles[triangleIndex].edge2);
    float a = dot(triangles[triangleIndex].edge1, h);

    if (a > -0.00001 && a < 0.00001) {
        return false;
    }

    float f = 1.0 / a;
    vec3 s = rayOrigin - triangles[triangleIndex].vertex1;
    float u = f * dot(s, h);

    if (u < 0.0 || u > 1.0) {
        return false;
    }

    vec3 q = cross(s, triangles[triangleIndex].edge1);
    float v = f * dot(rayDirection, q);

    if (v < 0.0 || u + v > 1.0) {
        return false;
    }

    float t = f * dot(triangles[triangleIndex].edge2, q);

    if (t > 0.00001) {
        intersectionPoint = rayOrigin + rayDirection * t;
        return true;
    }

    return false;
}

vec3 unProject(vec3 win, mat4 model, mat4 proj, vec4 viewport) {
    vec4 tmp = vec4(win, 1);
    tmp.x = (tmp.x - viewport[0]) / viewport[2];
    tmp.y = (tmp.y - viewport[1]) / viewport[3];
    tmp.x = tmp.x * 2 - 1;
    tmp.y = tmp.y * 2 - 1;

    vec4 obj = inverseViewProjection * tmp;
    obj /= obj.w;

    return obj.xyz;
}

void main() {
    ivec2 pixelCoord = ivec2(gl_GlobalInvocationID.xy);

    vec4 viewport = vec4(0, 0, vec2(imageSize(outputImage)).xy);
    vec3 near = vec3(pixelCoord.x, pixelCoord.y, -1);
    vec3 far = vec3(pixelCoord.x, pixelCoord.y, 0.9518f);

    vec3 rayOrigin = unProject(near, view, projection, viewport);
    vec3 rayWorldFar = unProject(far, view, projection, viewport);
    vec3 rayDirection = normalize(rayWorldFar - rayOrigin);

    vec3 intersectionPoint;
    vec3 closestIntersectionPoint = vec3(0, 0, 0);
    float closestIntersectionDistance = 999999999.0f;
    vec3 finalColor = vec3(0, 0, 0);
    bool intersectionFound = false;

    for (int triangleIndex = 0; triangleIndex < numTriangles; triangleIndex++) {
        if (rayTriangleIntersection(rayOrigin, rayDirection, triangleIndex, intersectionPoint)) {
            float intersectionDistance = distance(intersectionPoint, rayOrigin);
            if (intersectionDistance < closestIntersectionDistance) {
                closestIntersectionDistance = intersectionDistance;
                closestIntersectionPoint = intersectionPoint;
                finalColor = getTriangleColor(triangleIndex, closestIntersectionPoint);
                intersectionFound = true;
            }
        }
    }

    if (intersectionFound) {
        imageStore(outputImage, pixelCoord, vec4(finalColor, 1.0f));
    }
    else {
        imageStore(outputImage, pixelCoord, vec4(0));
    }
}
However, when running the shader I only get 30 fps, and that is with only 20 triangles, so there is a significant bottleneck somewhere in the code.
What optimizations can I make to increase the performance of the code? Why is there a bottleneck?
I managed to more than double my framerate by making the following modifications:
Change the layout to a higher local size
For this I queried GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS:
GLint glMaxComputeWorkGroupInvocations = 0;
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, &glMaxComputeWorkGroupInvocations);
LIGHTING_SHADER_LOCAL_SIZE_Y = LIGHTING_SHADER_LOCAL_SIZE_X = sqrt(glMaxComputeWorkGroupInvocations);
and updated the layout sizes:
layout (local_size_x = ${LIGHTING_SHADER_LOCAL_SIZE_X}, local_size_y = ${LIGHTING_SHADER_LOCAL_SIZE_Y}, local_size_z = 1) in;
Get pixelCoord based on group_id and local_id
ivec3 groupId = ivec3(gl_WorkGroupID);
ivec3 localId = ivec3(gl_LocalInvocationID);
ivec3 globalId = ivec3(gl_GlobalInvocationID);
ivec3 coords = groupId * ivec3(gl_WorkGroupSize) + localId;
ivec2 pixelCoord = ivec2(coords.xy);
Update glDispatchCompute
glDispatchCompute(windowWidth / LIGHTING_SHADER_LOCAL_SIZE_X, windowHeight / LIGHTING_SHADER_LOCAL_SIZE_Y, 1);
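One caveat worth adding (my note, not part of the original answer): the integer divisions in this glDispatchCompute call round down, so if the window size is not a multiple of the local size, the rightmost and bottommost pixels get no work group. A common fix is to round the group counts up and bounds-check pixelCoord against imageSize(outputImage) inside the shader:

// Round the group counts up so partial tiles at the edges are still dispatched.
GLuint groupsX = (windowWidth  + LIGHTING_SHADER_LOCAL_SIZE_X - 1) / LIGHTING_SHADER_LOCAL_SIZE_X;
GLuint groupsY = (windowHeight + LIGHTING_SHADER_LOCAL_SIZE_Y - 1) / LIGHTING_SHADER_LOCAL_SIZE_Y;
glDispatchCompute(groupsX, groupsY, 1);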

Problem with Shader "The shader uses varying --- but previous shader does not write to it"

Does anyone know why I keep getting the error that says:
The ♦ shader uses varying _I;DATA;g_mapCoord, but previous shader does not write to it.
The ♦ shader uses varying _I;DATA;worldPosition, but previous shader does not write to it.
Take a look at my shaders here.
Vertex
#version 430

layout (location = 0) in vec2 position0;

out DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} Out;

uniform vec3 u_cameraPosition;
uniform mat4 u_localMatrix;
uniform mat4 u_worldMatrix;
uniform float u_scaleY;
uniform int u_lod;
uniform vec2 u_index;
uniform float u_gap;
uniform vec2 u_location;
uniform sampler2D s_heightmap;
uniform int u_lodMorphArea[8];

float morphLatitude(vec2 position)
{
    // not important code
    return 0;
}

float morphLongitude(vec2 position)
{
    // not important code
    return 0;
}

vec2 morph(vec2 localPosition, int morph_area){
    // not important code
    return vec2(0);
}

void main()
{
    vec2 localPosition = (u_localMatrix * vec4(position0.x, 0, position0.y, 1)).xz;

    if (u_lod > 0) {
        localPosition += morph(localPosition, u_lodMorphArea[u_lod-1]); // Translate position by morphing vector
    }

    float height = texture(s_heightmap, localPosition).r;

    Out.v_mapCoord = localPosition;
    vec4 _worldPosition = u_worldMatrix * vec4(localPosition.x, height, localPosition.y, 1);
    Out.worldPosition = _worldPosition.xyz;

    gl_Position = u_worldMatrix * vec4(localPosition.x, height, localPosition.y, 1);
}
Fragment
#version 430

layout (location = 0) out vec4 outputColor;

in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;

const vec3 lightDirection = vec3(-0.2, -1.0, -0.2);
const float intensity = 1.2;

uniform sampler2D s_textureNormal;
uniform sampler2D s_textureWater;
uniform sampler2D s_textureLand;

float diffuse(vec3 direction, vec3 normal, float intensity)
{
    return max(0.01, dot(normal, -direction) * intensity);
}

void main()
{
    vec3 normal = texture(s_textureNormal, In.g_mapCoord).rgb;
    float diff = diffuse(lightDirection, normal, intensity);
    outputColor = vec4(1, 0, 0, 1);
}
Geom
#version 430

layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;

in vec2 te_mapCoord[];
out vec2 g_mapCoord;

uniform mat4 u_viewProjection;

void main() {
    for (int i = 0; i < gl_in.length(); ++i)
    {
        vec4 position = gl_in[i].gl_Position;
        gl_Position = u_viewProjection * position;
        g_mapCoord = te_mapCoord[i];
        EmitVertex();
    }
    EndPrimitive();
}
TCS
#version 430

layout(vertices = 16) out;

in DATA {
    vec2 v_mapCoord;
    vec3 worldPosition;
} In[];

out vec2 tc_mapCoord[];

const int AB = 2;
const int BC = 3;
const int CD = 0;
const int DA = 1;

uniform int u_tessellationFactor;
uniform float u_tessellationSlope;
uniform float u_tessellationShift;
uniform vec3 u_cameraPosition;

// Calculate tessellation levels
float lodFactor(float dist)
{
    float tessellationLevel = max(0.0, u_tessellationFactor / pow(dist, u_tessellationSlope) + u_tessellationShift);
    return tessellationLevel;
}

void main()
{
    if (gl_InvocationID == 0){
        // Calculate mid points of the edges of the quad
        vec3 abMid = vec3(gl_in[0].gl_Position + gl_in[3].gl_Position)/2.0;   // Bottom left, bottom right
        vec3 bcMid = vec3(gl_in[3].gl_Position + gl_in[15].gl_Position)/2.0;  // Bottom right, top right
        vec3 cdMid = vec3(gl_in[15].gl_Position + gl_in[12].gl_Position)/2.0; // Top right, top left
        vec3 daMid = vec3(gl_in[12].gl_Position + gl_in[0].gl_Position)/2.0;  // Top left, bottom left

        // Calculate distance between camera and mid points of the edges of the quad
        float distanceAB = distance(abMid, u_cameraPosition);
        float distanceBC = distance(bcMid, u_cameraPosition);
        float distanceCD = distance(cdMid, u_cameraPosition);
        float distanceDA = distance(daMid, u_cameraPosition);

        // Tessellation levels used by the tessellation primitive generator (define how much
        // tessellation to apply to the patch). Value between 1 and gl_MaxTessGenLevel,
        // depending on lodFactor.
        gl_TessLevelOuter[AB] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceAB));
        gl_TessLevelOuter[BC] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceBC));
        gl_TessLevelOuter[CD] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceCD));
        gl_TessLevelOuter[DA] = mix(1, gl_MaxTessGenLevel, lodFactor(distanceDA));

        gl_TessLevelInner[0] = (gl_TessLevelOuter[BC] + gl_TessLevelOuter[DA]) / 4;
        gl_TessLevelInner[1] = (gl_TessLevelOuter[AB] + gl_TessLevelOuter[CD]) / 4;
    }

    tc_mapCoord[gl_InvocationID] = In[gl_InvocationID].v_mapCoord; // Just pass to the next stage
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
TES
#version 430

layout(quads, fractional_odd_spacing, cw) in;

in vec2 tc_mapCoord[];
out vec2 te_mapCoord;

uniform sampler2D s_heightmap;
uniform float u_scaleY;

void main(){
    float u = gl_TessCoord.x;
    float v = gl_TessCoord.y;

    // Compute new position for each tessellated vertex within the patch.
    // gl_in with index 12, 0, 3, 15 are the corners of the patch.
    vec4 position = ((1 - u) * (1 - v) * gl_in[12].gl_Position
                   + u * (1 - v) * gl_in[0].gl_Position
                   + u * v * gl_in[3].gl_Position
                   + (1 - u) * v * gl_in[15].gl_Position);
    vec2 mapCoord = ((1 - u) * (1 - v) * tc_mapCoord[12]
                   + u * (1 - v) * tc_mapCoord[0]
                   + u * v * tc_mapCoord[3]
                   + (1 - u) * v * tc_mapCoord[15]);

    float height = texture(s_heightmap, mapCoord).r;
    height *= u_scaleY;
    position.y = height;

    te_mapCoord = mapCoord;
    gl_Position = position;
}
Can anyone help me find what is causing that error message?
When you introduce a geometry shader, you need to pass the varyings for the fragment shader from the geometry shader, not from the vertex shader.
You can see how your geometry shader's output declaration:
out vec2 g_mapCoord;
is incompatible with your fragment shader expecting this:
in DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} In;
Related question and subsequent answers here.
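To make that concrete, here is a minimal sketch of a geometry shader whose output matches the fragment shader's input block. It assumes worldPosition is also routed through the tessellation stages; the te_worldPosition array below is hypothetical and would have to be added to the posted TCS/TES as well:

#version 430

layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;

in vec2 te_mapCoord[];
in vec3 te_worldPosition[];   // hypothetical: must be forwarded by the TES

// Must match "in DATA { ... } In;" in the fragment shader
// (same block name and members; only the instance name may differ).
out DATA {
    vec2 g_mapCoord;
    vec3 worldPosition;
} Out;

uniform mat4 u_viewProjection;

void main() {
    for (int i = 0; i < gl_in.length(); ++i)
    {
        gl_Position = u_viewProjection * gl_in[i].gl_Position;
        Out.g_mapCoord = te_mapCoord[i];
        Out.worldPosition = te_worldPosition[i];
        EmitVertex();
    }
    EndPrimitive();
}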

Volumetric Light not rendering volume correctly

I've been following this tutorial (https://www.programmersought.com/article/68075912719/) to get volumetric light, but I am not getting the correct output.
The shadow volume is rendered incorrectly and I am not sure what I am doing wrong. My vertex and fragment shaders look exactly like the tutorial's, but I still don't get the correct output.
Here is the vertex shader code
#version 450 core

layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;

uniform mat4 model;
uniform mat4 vp;
uniform float zFar;
uniform float fov;
uniform float aspectRatio;

out vec2 TexCoord;
out vec2 farPlanePos;

void main(void){
    gl_Position = vec4(position, 1.0);
    TexCoord = vec2(texCoord.x, 1 - texCoord.y);

    float t = tan(fov / 2);
    farPlanePos.x = (TexCoord.x * 2 - 1) * zFar * t * aspectRatio;
    farPlanePos.y = (TexCoord.y * 2 - 1) * zFar * t;
}
And here is the fragment shader code
#version 450 core

in vec2 TexCoord;

uniform vec3 cameraPos;
uniform vec3 lightPos;
uniform vec3 lightColor;
uniform mat4 invViewMatrix;
uniform mat4 invProjectionMatrix;
uniform float ambientStrength;
uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D gAlbedoSpec;
uniform sampler2D gDepth;
uniform sampler2D shadowMapTexture;

in vec2 farPlanePos;

uniform float zFar;
uniform float zNear;

float g = 0.0f;

uniform mat4 lightViewMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;

vec3 yellow_light = vec3(1, 198.0/255.0, 107.0/255.0);

out vec4 finalColor;

// use linear z depth
vec3 ComputeWorldPos(float depth){
    vec4 pos = vec4(vec3(farPlanePos.x, farPlanePos.y, -zFar) * depth, 1.0f);
    vec4 ret = invViewMatrix * pos;
    return ret.xyz / ret.w;
}

bool IsInShadow(vec4 worldPos){
    float fShadow = 0.0;
    vec4 lightPos = (lightViewMatrix * (worldPos));
    float fDistance = -lightPos.z / zFar;

    lightPos = projectionMatrix * lightPos;
    vec2 uv = lightPos.xy / lightPos.w * 0.5 + vec2(0.5f, 0.5f);
    uv.x = clamp(uv.x, 0.0f, 1.0f);
    uv.y = clamp(uv.y, 0.0f, 1.0f);

    float offset = 0.5f / zFar;
    float distanceMap = texture2D(shadowMapTexture, uv).r;
    return fDistance - offset > distanceMap;
}

void main(void){
    float depth = texture2D(gDepth, TexCoord).w;
    vec3 total_light;

    // volume light
    {
        float I = 0.0f;
        float d = depth * zFar;
        int virtual_plane_num = 100;
        int begin = int(virtual_plane_num * zNear / (d - zNear));
        int end = int(virtual_plane_num * (zFar - d) / (d - zNear));

        for (int j = begin; j <= virtual_plane_num + begin; j++)
        {
            float z = 1.0f * j / (begin + virtual_plane_num + end);
            vec3 pos = ComputeWorldPos(z);

            if (z < depth && !IsInShadow(vec4(pos, 1.0f)))
            {
                //vec3 lightDis = pos - lightPos;
                //vec3 viewDis = pos - cameraPos;
                //float lightDis2 = lightDis.x * lightDis.x + lightDis.y * lightDis.y + lightDis.z * lightDis.z;
                vec3 lightDir = normalize(pos - lightPos);
                vec3 viewDir = normalize(pos - cameraPos);

                float cosTheta = dot(lightDir, normalize(-lightPos));
                float hg = 1.0f/(4.0f*3.14f) * (1.0f - g*g) / pow(1.0f + g*g - 2.0f*g*dot(lightDir, -viewDir), 1.5f);
                if (cosTheta > 0.9){
                    I += clamp(10 * hg / virtual_plane_num, 0.0f, 1.0f);
                }
            }
        }

        I = clamp(I, 0.0f, 1.0f);
        total_light += I * yellow_light;
    }

    vec3 normal = texture2D(gNormal, TexCoord).xyz * 2 - 1; //result.xyz * 2 - 1;
    vec3 worldPos = ComputeWorldPos(depth);

    // parallel lights
    /*
    {
        vec3 ViewDir = normalize(cameraPos - worldPos);
        vec3 lightDir = normalize(vec3(0.5, 1, 0.2));
        vec3 halfDir = normalize(lightDir + ViewDir);

        float diffuse = 0.3 * clamp(dot(normal, lightDir), 0, 1);
        vec3 reflectDir = normalize(reflect(-lightDir, normal));
        float specular = 0.3 * pow(clamp(dot(reflectDir, halfDir), 0, 1), 50.0);

        vec3 color = (diffuse + specular) * vec3(1, 1, 1);
        total_light += color;
    }
    */

    vec3 color = vec3(texture2D(gAlbedoSpec, TexCoord));
    float ambient = 0.1;
    finalColor = vec4(total_light + ambient * color, 1);
}
So you can see the vertex and fragment shader code is exactly like the blog's, but the output is still different.
Unfortunately the blog doesn't say how to contact the author, otherwise I would have asked them directly. So the next best option is Stack Overflow, which is why I am asking here.
OK, after 2 days I was able to fix the issue. I think the IsInShadow function is incorrect: it always divides by zFar (rather than doing a true perspective divide) and also doesn't multiply by the projection matrix before computing the current depth.
So I replaced the code with the LearnOpenGL shadow calculation, as below.
bool IsInShadow(vec4 worldPos){
    vec4 lightPos = (lightViewMatrix * (worldPos));
    //float fDistance = -lightPos.z / zFar;

    lightPos = projectionMatrix * lightPos;
    vec3 projCoords = lightPos.xyz / lightPos.w;
    projCoords = projCoords * 0.5 + 0.5f;
    //uv.x = clamp(uv.x, 0.0f, 1.0f);
    //uv.y = clamp(uv.y, 0.0f, 1.0f);

    float offset = 0.5f / zFar;
    float distanceMap = texture2D(shadowMapTexture, projCoords.xy).r;
    return projCoords.z - offset > distanceMap;
}
And now the code works!!

OpenGL 0(84): error C7623: implicit narrowing of type from "vec4" to "float"

I am working on shadows for a Minecraft shader, and I'm stuck trying to resolve the following error.
21:51:27.725
[Shaders] Error compiling fragment shader: /shaders/composite.fsh
21:51:27.726
[Shaders] Shader info log: /shaders/composite.fsh
0(84) : error C7623: implicit narrowing of type from "vec4" to "float"
21:51:27.727
[Shaders] Error linking program: 10 (composite)
I know the error is caused by incompatible types, but I'm still not sure how to solve it, so any help is appreciated.
#version 120

const int shadowMapResolution = 2048;
const float shadowDistance = 128;
const float shadowMapBias = 0.85;
const int noiseTextureResolution = 256;

#define SHADOWMAP_BIAS 0.85

uniform sampler2D colortex0;
uniform sampler2D shadowtex0;
uniform sampler2D shadowcolor0;
uniform sampler2D depthtex1;
uniform sampler2D noisetex;

uniform vec3 cameraPosition;

uniform mat4 gbufferModelViewInverse;
uniform mat4 gbufferModelView;
uniform mat4 shadowProjection;
uniform mat4 gbufferProjection;
uniform mat4 gbufferProjectionInverse;
uniform mat4 shadowModelView;

uniform float viewWidth;
uniform float viewHeight;

varying vec4 texcoord;

float depth = 0.5;

vec4 getCameraPosition(in vec2 coord)
{
    float getdepth = depth;
    vec4 positionNdcSpace = vec4(coord.s * 2.0 - 1.0, coord.t * 2.0 - 1.0, 2.0 * getdepth - 1.0, 1.0);
    vec4 positionCameraSpace = gbufferProjectionInverse * positionNdcSpace;
    return positionCameraSpace / positionCameraSpace.w;
}

vec4 getWorldSpacePosition(in vec2 coord)
{
    vec4 cameraPos = getCameraPosition(coord);
    vec4 worldPos = gbufferModelViewInverse * cameraPos;
    worldPos.xyz += cameraPosition;
    return worldPos;
}

vec3 getShadowSpacePosition(in vec2 coord)
{
    vec4 worldSpacePos = getWorldSpacePosition(coord);
    worldSpacePos.xyz -= cameraPosition;
    vec4 shadowSpacePos = shadowModelView * worldSpacePos;
    shadowSpacePos = shadowProjection * shadowSpacePos;
    return shadowSpacePos.xyz * 0.5 + 0.5;
}

mat2 getRotationMatrix(in vec2 coord)
{
    float rotationAmount = texture2D(
        noisetex,
        coord * vec2(
            viewWidth / noiseTextureResolution,
            viewHeight / noiseTextureResolution
        )
    ).r;

    return mat2(
        cos(rotationAmount), -sin(rotationAmount),
        sin(rotationAmount),  cos(rotationAmount)
    );
}

vec3 getShadows(in vec2 coord)
{
    vec3 shadowCoord = getShadowSpacePosition(coord);
    mat2 rotationMatrix = getRotationMatrix(coord);
    vec3 shadowCol = vec3(0.0);

    for (int i = 0; i < 32; i++)
    {
        vec2 offset = vec2(32 / shadowMapResolution);
        offset = rotationMatrix * offset;
        float shadowMapSample = texture2D(shadowtex0, shadowCoord.st + offset);
        float visibility = step(shadowCoord.z - shadowMapSample, 0.001);
        vec3 dayCol = vec3(1.0);
        vec3 colorSample = texture2D(shadowcolor0, shadowCoord.st + offset).rgb;
        shadowCol += mix(colorSample, dayCol, visibility);
    }

    return vec3(shadowCol) / 32;
}

vec3 calculateLighting(in vec3 color)
{
    vec3 sunLight = getShadows(texcoord.st);
    vec3 ambientLight = vec3(0.5, 0.7, 1.0) * 0.5;
    return color * (sunLight + ambientLight);
}

void main()
{
    depth = texture2D(depthtex1, texcoord.st).r;
    vec3 color = texture2D(colortex0, texcoord.st).rbg;
    color = calculateLighting(color);

    gl_FragData[0] = vec4(color, 1.0);
    gl_FragData[1] = vec4(depth);
}
The problem is that texture2D returns a vec4, but you are treating it as a float. Read the red component instead:
float shadowMapSample = texture2D(shadowtex0, shadowCoord.st + offset).r;

Screen Space Reflections Artifacts

When I implemented SSR, I ran into artifacts. The code and screenshots are below.
Fragment SSR shader:
#version 330 core

uniform sampler2D normalMap;             // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform sampler2D positionMap;           // in view space
uniform mat4 projection;

uniform vec3 skyColor = vec3(0.1, 0, 0.5);

in vec2 texCoord;

layout (location = 0) out vec4 fragColor;

const int binarySearchCount = 10;
const int rayMarchCount = 30;
const float step = 0.05;
const float LLimiter = 0.2;
const float minRayStep = 0.2;

vec3 getPosition(in vec2 texCoord) {
    return texture(positionMap, texCoord).xyz;
}

vec2 binarySearch(inout vec3 dir, inout vec3 hitCoord, inout float dDepth) {
    float depth;
    vec4 projectedCoord;

    for (int i = 0; i < binarySearchCount; i++) {
        projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;

        depth = getPosition(projectedCoord.xy).z;
        dDepth = hitCoord.z - depth;

        dir *= 0.5;
        if (dDepth > 0.0)
            hitCoord += dir;
        else
            hitCoord -= dir;
    }

    projectedCoord = projection * vec4(hitCoord, 1.0);
    projectedCoord.xy /= projectedCoord.w;
    projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;

    return vec2(projectedCoord.xy);
}

vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
    dir *= step;

    for (int i = 0; i < rayMarchCount; i++) {
        hitCoord += dir;

        vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
        projectedCoord.xy /= projectedCoord.w;
        projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;

        float depth = getPosition(projectedCoord.xy).z;
        dDepth = hitCoord.z - depth;

        if ((dir.z - dDepth) < 1.2 && dDepth <= 0.0) return binarySearch(dir, hitCoord, dDepth);
    }

    return vec2(-1.0);
}

void main() {
    float reflectionStrength = texture(reflectionStrengthMap, texCoord).r;

    if (reflectionStrength == 0) {
        fragColor = texture(colorMap, texCoord);
        return;
    }

    vec3 normal = texture(normalMap, texCoord).xyz;
    vec3 viewPos = getPosition(texCoord);

    // Reflection vector
    vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));

    // Ray cast
    vec3 hitPos = viewPos;
    float dDepth;
    vec2 coords = rayCast(reflected * max(-viewPos.z, minRayStep), hitPos, dDepth);

    float L = length(getPosition(coords) - viewPos);
    L = clamp(L * LLimiter, 0, 1);
    float error = 1 - L;

    vec3 color = texture(colorMap, coords.xy).rgb * error;

    if (coords.xy != vec2(-1.0)) {
        fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength);
        return;
    }

    fragColor = mix(texture(colorMap, texCoord), vec4(skyColor, 1.0), reflectionStrength);
}
Result without blackout (without * error):
Result with blackout:
Note: the blue is filled in specifically to make the artifacts visible.
And one more question: what is the best way to add fresnel without harming the scene?
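On the fresnel question, one low-cost option (my suggestion, not from the post) is Schlick's approximation, used only to scale reflectionStrength so reflections fade at head-on viewing angles and nothing else in the scene changes:

// Schlick's approximation: reflectivity rises toward grazing angles.
// F0 (base reflectivity) of about 0.04 is typical for dielectrics.
float fresnelSchlick(vec3 viewPos, vec3 normal, float F0) {
    // In view space the camera sits at the origin, so -viewPos points
    // from the fragment toward the camera.
    float cosTheta = clamp(dot(normalize(normal), normalize(-viewPos)), 0.0, 1.0);
    return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}

// Usage in main(), replacing the plain reflectionStrength blend:
// float f = fresnelSchlick(viewPos, normal, 0.04);
// fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength * f);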