While implementing SSR I ran into artifacts. The code and screenshots are below.
SSR fragment shader:
#version 330 core
uniform sampler2D normalMap; // in view space
uniform sampler2D colorMap;
uniform sampler2D reflectionStrengthMap;
uniform sampler2D positionMap; // in view space
uniform mat4 projection;
uniform vec3 skyColor = vec3(0.1, 0, 0.5);
in vec2 texCoord;
layout (location = 0) out vec4 fragColor;
const int binarySearchCount = 10; // binary-search refinement iterations
const int rayMarchCount = 30; // linear ray-march iterations
const float rayStep = 0.05; // march step length (renamed from `step`, which shadows the GLSL built-in)
const float LLimiter = 0.2; // distance-based reflection fade
const float minRayStep = 0.2;
vec3 getPosition(in vec2 texCoord) {
return texture(positionMap, texCoord).xyz;
}
// Refine the hit point by halving the step direction around the intersection
vec2 binarySearch(inout vec3 dir, inout vec3 hitCoord, inout float dDepth) {
float depth;
vec4 projectedCoord;
for(int i = 0; i < binarySearchCount; i++) {
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
dir *= 0.5;
if(dDepth > 0.0)
hitCoord += dir;
else
hitCoord -= dir;
}
projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
return vec2(projectedCoord.xy);
}
// March along the reflected ray in view space until it crosses the stored depth
vec2 rayCast(vec3 dir, inout vec3 hitCoord, out float dDepth) {
dir *= rayStep;
for (int i = 0; i < rayMarchCount; i++) {
hitCoord += dir;
vec4 projectedCoord = projection * vec4(hitCoord, 1.0);
projectedCoord.xy /= projectedCoord.w;
projectedCoord.xy = projectedCoord.xy * 0.5 + 0.5;
float depth = getPosition(projectedCoord.xy).z;
dDepth = hitCoord.z - depth;
if((dir.z - dDepth) < 1.2 && dDepth <= 0.0) return binarySearch(dir, hitCoord, dDepth);
}
return vec2(-1.0);
}
void main() {
float reflectionStrength = texture(reflectionStrengthMap, texCoord).r;
if (reflectionStrength == 0.0) {
fragColor = texture(colorMap, texCoord);
return;
}
vec3 normal = texture(normalMap, texCoord).xyz;
vec3 viewPos = getPosition(texCoord);
// Reflection vector
vec3 reflected = normalize(reflect(normalize(viewPos), normalize(normal)));
// Ray cast
vec3 hitPos = viewPos;
float dDepth;
vec2 coords = rayCast(reflected * max(-viewPos.z, minRayStep), hitPos, dDepth);
// Miss: fall back to the sky color *before* sampling with the (-1, -1) sentinel
if (coords == vec2(-1.0)) {
fragColor = mix(texture(colorMap, texCoord), vec4(skyColor, 1.0), reflectionStrength);
return;
}
// Fade ("blackout") the reflection with distance from the surface
float L = length(getPosition(coords) - viewPos);
L = clamp(L * LLimiter, 0.0, 1.0);
float error = 1.0 - L;
vec3 color = texture(colorMap, coords).rgb * error;
fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0), reflectionStrength);
}
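As an aside, the projection / perspective-divide / bias sequence appears three times in the shader above; factoring it into a small helper (the name viewToScreen is mine) keeps the copies consistent — a sketch:
// Project a view-space position to [0, 1] texture space.
vec2 viewToScreen(vec3 pos) {
    vec4 projected = projection * vec4(pos, 1.0);
    projected.xy /= projected.w;      // perspective divide
    return projected.xy * 0.5 + 0.5;  // NDC to texture coordinates
}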
Result without blackout (without * error):
Result with blackout:
Note: the blue sky color is filled in deliberately so the artifacts stand out.
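A common way to soften staircase artifacts like these (a standard trick, not something in the code above) is to jitter the ray start per pixel, so the fixed step size shows up as noise instead of bands — a minimal sketch:
// Cheap screen-space hash; any per-pixel noise source works here.
float hash(vec2 p) {
    return fract(sin(dot(p, vec2(12.9898, 78.233))) * 43758.5453);
}

// In main(), offset the ray start by a random fraction of one step:
// vec3 hitPos = viewPos + reflected * rayStep * hash(texCoord);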
And one more question: what is the best way to add fresnel without harming the scene?
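For fresnel, the usual choice is Schlick's approximation, scaling the reflection contribution by the view angle. A minimal sketch, reusing the view-space normal and viewPos already present in main() (the F0 value of 0.04 is a typical dielectric assumption, not something from the original code):
// Schlick's approximation of the Fresnel factor.
float fresnelSchlick(float cosTheta, float F0) {
    return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}

// In main(), attenuate the blend factor rather than the scene color, so
// non-reflective parts of the image are left untouched:
// float cosTheta = max(dot(normalize(normal), normalize(-viewPos)), 0.0);
// float fresnel = fresnelSchlick(cosTheta, 0.04);
// fragColor = mix(texture(colorMap, texCoord), vec4(color, 1.0),
//                 reflectionStrength * fresnel);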
Most of the shader code follows the LearnOpenGL tutorial. I have made sure that the g-buffer and noise data passed into the shader are correct. It looks like some kind of dislocation, but I really can't figure out why it happens.
Misplaced SSAO:
#version 450 core
out float OUT_FragColor;
in vec2 TexCoords;
uniform sampler2D g_position;
uniform sampler2D g_normal;
uniform sampler2D noise_texture;
struct CameraInfo
{
vec4 position;
mat4 view;
mat4 projection;
};
layout(std140, binding = 0) uniform Camera
{
CameraInfo camera;
};
float radius = 0.5;
float bias = 0.025;
uniform int noise_tex_size;
// Note: sample_array is used below but its declaration is missing from the
// posted code; a LearnOpenGL-style kernel declaration would be:
uniform vec3 sample_array[64];
void main()
{
const vec2 noise_scale = vec2(1280.0/noise_tex_size, 720.0/noise_tex_size);
vec3 frag_pos = texture(g_position, TexCoords).xyz;
vec3 normal = normalize(texture(g_normal, TexCoords).xyz);
vec3 random_vec = normalize(texture(noise_texture, TexCoords * noise_scale).xyz);
vec3 tangent = normalize(random_vec - normal * dot(random_vec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
float occlusion = 0.f;
for(int i = 0; i < sample_array.length(); ++i)
{
vec3 sample_pos = TBN * sample_array[i].xyz;
sample_pos = frag_pos + sample_pos * radius;
vec4 offset = vec4(sample_pos, 1.0);
offset = camera.projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
float sample_depth = texture(g_position, offset.xy).z;
float range_check = smoothstep(0.f, 1.f, radius / abs(frag_pos.z - sample_depth));
occlusion += (sample_depth >= sample_pos.z + bias ? 1.0 : 0.0) * range_check; // range_check fades out samples whose stored depth is far outside the kernel radius
}
occlusion = 1.f - (occlusion / sample_array.length());
OUT_FragColor = occlusion;
}
The g_position and g_normal are transformed into view space (not model space):
FragPos = (camera.view * vec4(WorldPos, 1.0)).xyz;
mat4 normal_matrix = camera.view * mat4(transpose(inverse(mat3(model))));
FragNormal = mat3(normal_matrix) * normal;
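One thing worth double-checking for the dislocation (an assumption on my part, not a confirmed diagnosis) is the normal matrix. The form below is the standard construction from the combined view-model transform; it is only equivalent to the version above when the view matrix contains no scaling, so it is the safer variant. A sketch, assuming the same camera block and model uniform:
// Build the normal matrix from the full view-model transform so any
// non-uniform scale is handled in one place.
mat3 normal_matrix = transpose(inverse(mat3(camera.view * model)));
FragNormal = normalize(normal_matrix * normal);
FragPos = (camera.view * vec4(WorldPos, 1.0)).xyz; // unchanged: view space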
I want to generate an OBJ file from code that uses a GLSL geometry shader to generate a mesh. I can already get the vertex information out of the code, but how can I extract the triangle information from the .geom.glsl file and export it into an OBJ file?
Also, is there any helper function to do so? If not, how should I write the code to get the point and triangle information from the geom.glsl file?
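There is no built-in helper for this. One workable route (my suggestion, not something in your code) is transform feedback: register the outputs you care about with glTransformFeedbackVaryings before linking, capture them into a buffer while drawing, and read the buffer back. Because transform feedback decomposes strips into individual primitives, every three captured vertices form one triangle, so with OBJ's 1-based indexing triangle i becomes the face line f 3i+1 3i+2 3i+3. On the shader side this only needs a dedicated output — a sketch (the name CapturedPos is hypothetical):
// Geometry-shader output to capture with transform feedback; the host
// registers "CapturedPos" via glTransformFeedbackVaryings before linking.
out vec3 CapturedPos;

// Inside main(), set it before each EmitVertex():
//     CapturedPos = newPS; // source ring vertex (use newPT for the target)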
Here is the attached geom.glsl:
#version 400 core
#extension GL_EXT_geometry_shader4 : enable
layout(lines, invocations = 1) in;
layout(triangle_strip, max_vertices = 100) out;
uniform mat4 matLightView;
uniform mat4 matViewProjection;
uniform vec3 lightPos;
uniform vec3 camPos;
uniform int isExplicit;
in vec4 VertPosition[];
in vec4 VertColor[];
in vec3 VertNormal[];
in vec3 VertTexture[];
in float VertLengthTotal[];
in float VertLengthFromBeginning[];
out vec3 GeomNormal;
out vec2 GeomTexCoords;
out float GeomDiffuse;
out float GeomThickness;
out vec4 texCoordA;
out vec4 texCoordB;
const float PI2 = 2 * 3.141592654;
void main()
{
// for(int i=0; i<gl_VerticesIn-1; ++i)
for (int i = 0; i < gl_in.length() - 1; ++i)
{
//Reading Data
vec4 posS = VertPosition[i];
vec4 posT = VertPosition[i+1];
vec3 vS = VertColor[i].xyz;
vec3 vT = VertColor[i+1].xyz;
vec3 tS = VertTexture[i].xyz;
vec3 tT = VertTexture[i+1].xyz;
float thickS = VertColor[i].w;
float thickT = VertColor[i+1].w;
//Computing
vec3 v11 = normalize(vS);
vec3 v12 = normalize(cross(vS, tS));
vec3 v21 = normalize(vT);
vec3 v22 = normalize(cross(vT, tT));
float rS = max(0.0001, thickS);
float rT = max(0.0001, thickT);
int pS = 10;
int pT = 10;
int forMax = 16;
//Light Pos
vec4 lPos = normalize(vec4(-lightPos.x, -lightPos.y, -lightPos.z, 1));
vec3 L = normalize(lPos.xyz);
for(int k=0; k<=forMax; ++k)
{
float angle = k * (PI2 / forMax);
vec3 newPS = posS.xyz + (v11 * sin(angle) + v12 * cos(angle)) * rS;
vec3 newPT = posT.xyz + (v21 * sin(angle) + v22 * cos(angle)) * rT;
float scale = 1.0f;
float texX = float(k) / float(forMax);
float edgeLength = length(posS - posT);
float sTexY = (VertLengthFromBeginning[i] * scale);
float tTexY = (VertLengthFromBeginning[i+1] * scale);
//Source Vertex
vec3 N = normalize(posS.xyz - newPS);
texCoordB = matLightView * vec4(newPS, 1);
GeomNormal = N;
GeomThickness = rS;
GeomDiffuse = rS < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
GeomTexCoords = vec2(texX, sTexY);
gl_Position = matViewProjection * vec4(newPS, 1);
EmitVertex();
//Target Vertex
N = normalize(posT.xyz - newPT);
texCoordB = matLightView * vec4(newPT, 1);
GeomNormal = N;
GeomThickness = rT;
GeomDiffuse = rT < 0.0005 ? 0.0f : max(dot(N, L), 0.0);
GeomTexCoords = vec2(texX, tTexY);
gl_Position = matViewProjection * vec4(newPT, 1);
EmitVertex();
}
}
EndPrimitive();
}
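One detail that matters for the export: EndPrimitive() sits outside both loops, so everything is emitted as a single triangle strip and consecutive segments get stitched together by extra triangles. If the captured triangles look wrong, try closing one strip per segment — a sketch (my suggestion, not a confirmed fix):
for (int i = 0; i < gl_in.length() - 1; ++i)
{
    for (int k = 0; k <= forMax; ++k)
    {
        // ... emit the source and target ring vertices as before ...
    }
    EndPrimitive(); // moved here: one strip per segment
}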
And the vert.glsl:
#version 400 core
#define VERT_POSITION 0
#define VERT_NORMAL 1
#define VERT_COLOR 2
#define VERT_TEXTURE 3
layout(location = VERT_POSITION) in vec4 Position;
layout(location = VERT_NORMAL) in vec4 Normal;
layout(location = VERT_COLOR) in vec4 Color;
layout(location = VERT_TEXTURE) in vec4 Texture;
out vec4 VertPosition;
out vec3 VertNormal;
out vec3 VertTexture;
out vec4 VertColor;
out float VertLengthFromBeginning;
out float VertLengthTotal;
uniform mat4 matModel;
void main()
{
VertPosition = matModel * Position;
VertNormal = Normal.xyz; // Direction
VertColor = Color; // V from PTF, VertColor.w = thick
VertTexture = Texture.xyz; // Tangent
VertLengthFromBeginning = Normal.w; // Global Texture Coordinates
VertLengthTotal = Texture.w; // total length of chain
}
Thanks a lot!
I need to flip my textures upside-down in shaders before applying the perspective transformation. I modified vertTexCoord in vert.glsl, but I don't know where to use it in swap.glsl. Something like
gl_FragColor = texture2D(texture, vertTexCoord);
does not work, because I also need the texture to be modified in perspective.
vert.glsl:
#define PROCESSING_COLOR_SHADER
uniform mat4 transform;
uniform mat4 texMatrix;
attribute vec4 vertex;
attribute vec4 color;
attribute vec2 texCoord;
varying vec4 vertColor;
varying vec4 vertTexCoord;
void main() {
gl_Position = transform * vertex;
vertColor = color;
vertTexCoord = texMatrix * vec4(texCoord, 1.0, 1.0);
}
swap.glsl:
#ifdef GL_ES
precision highp float;
#endif
// General parameters
uniform sampler2D from;
uniform sampler2D to;
uniform float progress;
uniform vec2 resolution;
uniform float reflection;
uniform float perspective;
uniform float depth;
varying vec4 vertColor;
varying vec4 vertTexCoord;
const vec4 black = vec4(0.0, 0.0, 0.0, 1.0);
const vec2 boundMin = vec2(0.0, 0.0);
const vec2 boundMax = vec2(1.0, 1.0);
bool inBounds (vec2 p) {
return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax));
}
vec2 project (vec2 p) {
return p * vec2(1.0, -1.2) + vec2(0.0, -0.02);
}
vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) {
vec4 c = black;
pfr = project(pfr);
if (inBounds(pfr)) {
c += mix(black, texture2D(from, pfr), reflection * mix(1.0, 0.0, pfr.y));
}
pto = project(pto);
if (inBounds(pto)) {
c += mix(black, texture2D(to, pto), reflection * mix(1.0, 0.0, pto.y));
}
return c;
}
void main() {
vec2 p = gl_FragCoord.xy / resolution.xy;
vec2 pfr, pto = vec2(-1.);
float size = mix(1.0, depth, progress);
float persp = perspective * progress;
pfr = (p + vec2(-0.0, -0.5)) * vec2(size/(1.0-perspective*progress), size/(1.0-size*persp*p.x)) + vec2(0.0, 0.5);
size = mix(1.0, depth, 1.-progress);
persp = perspective * (1.-progress);
pto = (p + vec2(-1.0, -0.5)) * vec2(size/(1.0-perspective*(1.0-progress)), size/(1.0-size*persp*(0.5-p.x))) + vec2(1.0, 0.5);
bool fromOver = progress < 0.5;
if (fromOver) {
if (inBounds(pfr)) {
gl_FragColor = texture2D(from, pfr);
}
else if (inBounds(pto)) {
gl_FragColor = texture2D(to, pto);
}
else {
gl_FragColor = bgColor(p, pfr, pto);
}
}
else {
if (inBounds(pto)) {
gl_FragColor = texture2D(to, pto);
}
else if (inBounds(pfr)) {
gl_FragColor = texture2D(from, pfr);
}
else {
gl_FragColor = bgColor(p, pfr, pto);
}
}
}
You sample the texture at (u, v). If you want to flip the Y axis, just sample at (u, 1.0 - v) instead.
So your updated main will look like:
void main() {
gl_Position = transform * vertex;
vertColor = color;
vec2 newTCoord = texCoord;
newTCoord.y = 1.0 - newTCoord.y; // flip vertically
vertTexCoord = texMatrix * vec4(newTCoord, 1.0, 1.0); // keep texMatrix so the perspective mapping still applies
}
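Note, however, that swap.glsl never reads vertTexCoord: it derives its coordinate from gl_FragCoord. So the flip can just as well be applied there, before any of the perspective math — a minimal sketch of the change at the top of its main():
void main() {
    vec2 p = gl_FragCoord.xy / resolution.xy;
    p.y = 1.0 - p.y; // flip once here; pfr and pto then inherit the flip
    // ... rest of main() unchanged ...
}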
I've written an LWJGL application that reads .obj files and displays them (using display lists).
On my Nvidia graphics card everything runs fine, but on an AMD graphics card I can't see the objects.
How I pass data to the shaders:
glUseProgram(shaderEngine.obj);
glUniform1i(glGetUniformLocation(shaderEngine.obj, "inOrangeJuice"), inOrangeJuice ? 1 : 0);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.standard, "projectionMatrix"), camera.projectionMatrix);
shaderEngine.loadMatrix(glGetUniformLocation(shaderEngine.obj, "viewMatrix"), camera.viewMatrix);
The model matrix is loaded with:
shaderEngine.createModelMatrix(new Vector3f(x, y, z), new Vector3f(rx, ry, rz), new Vector3f(1, 1, 1));
shaderEngine.loadModelMatrix(shaderEngine.obj);
Fragment Shader:
#version 130
uniform sampler2D tex;
uniform vec2 texCoord[4];
float textureSize;
float texelSize;
uniform int inOrangeJuice;
bool pointInTriangle(vec3 P, vec3 A, vec3 B, vec3 C)
{
vec3 u = B - A;
vec3 v = C - A;
vec3 w = P - A;
vec3 vCrossW = cross(v, w);
vec3 vCrossU = cross(v, u);
if(dot(vCrossW, vCrossU) < 0)
{
return false;
}
vec3 uCrossW = cross(u, w);
vec3 uCrossV = cross(u, v);
if(dot(uCrossW, uCrossV) < 0)
{
return false;
}
float denom = length(uCrossV);
float r = length(vCrossW);
float t = length(uCrossW);
return (r + t <= 1);
}
vec4 texture2DBilinear(sampler2D textureSampler, vec2 uv)
{
vec4 tl = texture2D(textureSampler, uv);
vec4 tr = texture2D(textureSampler, uv + vec2(texelSize, 0));
vec4 bl = texture2D(textureSampler, uv + vec2(0, texelSize));
vec4 br = texture2D(textureSampler, uv + vec2(texelSize , texelSize));
vec2 f = fract( uv.xy * textureSize );
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
void main()
{
ivec2 textureSize2d = textureSize(tex,0);
textureSize = float(textureSize2d.x);
texelSize = 1.0 / textureSize;
//texture coordinate:
vec2 texCoord = (gl_TexCoord[0].st);
bool inOJ = false;
if(inOrangeJuice == 1)
{
float depth = gl_FragCoord.z / gl_FragCoord.w;//works only with perspective projection
depth = depth / 6;
if(depth > 1)
{
depth = 1;
}
inOJ = true;
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color * (1.0 - depth) + vec4(1.0, 0.5, 0.0, 1.0) * depth;
}
if(inOJ == false)
{
gl_FragColor = texture2DBilinear(tex, texCoord) * gl_Color;
}
//Nothing is shown, inOrangeJuice should be 0
//gl_FragColor = vec4(inOrangeJuice,0,0,1);
//Always works:
//gl_FragColor = texture2D(tex, texCoord) * gl_Color;
}
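Two things worth checking here (assumptions, since the post does not include compile logs). First, projectionMatrix is looked up on shaderEngine.standard while every other uniform uses shaderEngine.obj; if that is unintentional, the projection matrix never reaches the obj program. Second, the global float textureSize shadows the built-in textureSize() function that main() calls; AMD's compiler is typically stricter about this than Nvidia's, so check the shader info logs and try renaming — a sketch:
// Renamed globals so the built-in textureSize() function is visible again.
float texSize;
float texelSize;

void main()
{
    ivec2 textureSize2d = textureSize(tex, 0); // built-in call now resolves
    texSize = float(textureSize2d.x);
    texelSize = 1.0 / texSize;
    // ... rest unchanged, with textureSize replaced by texSize throughout,
    // including the fract(uv.xy * texSize) in texture2DBilinear()
}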
How can I get this shader to have a smooth edge on the spot light instead of a hard one? In addition, the shader has to cope with a variable value of GL_SPOT_CUTOFF. Note that not all the lights are spot lights -- GL_LIGHT0 is a point light.
varying vec3 N;
varying vec3 v;
#define MAX_LIGHTS 2
void main (void)
{
vec4 finalColour = vec4(0.0); // must be initialized before the += below
float spotEffect;
for (int i=0; i<MAX_LIGHTS; i++)
{
vec3 L = normalize(gl_LightSource[i].position.xyz - v);
vec3 E = normalize(-v);
vec3 R = normalize(-reflect(L,N));
spotEffect = dot(normalize(gl_LightSource[i].spotDirection), normalize(-L));
if (spotEffect > gl_LightSource[i].spotCosCutoff) {
vec4 Iamb = gl_FrontLightProduct[i].ambient;
vec4 Idiff = gl_FrontLightProduct[i].diffuse * max(dot(N,L), 0.0);
Idiff = clamp(Idiff, 0.0, 1.0);
vec4 Ispec = gl_FrontLightProduct[i].specular
* pow(max(dot(R,E),0.0),0.3*gl_FrontMaterial.shininess);
Ispec = clamp(Ispec, 0.0, 1.0);
finalColour += Iamb + Idiff + Ispec;
}
}
gl_FragColor = gl_FrontLightModelProduct.sceneColor + finalColour;
}
The scene looks like this:
This shader from http://www.ozone3d.net/tutorials/glsl_lighting_phong_p3.php produces the soft spotlight edges you are after.
[Pixel_Shader]
varying vec3 normal, lightDir, eyeVec;
const float cos_outer_cone_angle = 0.8; // 36 degrees
void main (void)
{
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) +
(gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
vec3 L = normalize(lightDir);
vec3 D = normalize(gl_LightSource[0].spotDirection);
float cos_cur_angle = dot(-L, D);
float cos_inner_cone_angle = gl_LightSource[0].spotCosCutoff;
float cos_inner_minus_outer_angle =
cos_inner_cone_angle - cos_outer_cone_angle;
// No dynamic branching needed: precompute the falloff (call it `spot`)
float spot = clamp((cos_cur_angle - cos_outer_cone_angle) /
cos_inner_minus_outer_angle, 0.0, 1.0);
vec3 N = normalize(normal);
float lambertTerm = max( dot(N,L), 0.0);
if(lambertTerm > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm * spot;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular * spot;
}
gl_FragColor = final_color;
}
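As a follow-up, the clamped linear ramp above can also be written with smoothstep, which eases the falloff at both cone edges instead of ramping linearly:
// Hermite falloff between the outer and inner cone cosines
// (cos_outer_cone_angle < cos_inner_cone_angle, as smoothstep requires).
float spot = smoothstep(cos_outer_cone_angle, cos_inner_cone_angle, cos_cur_angle);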