World to screen space for a Mode7 effect (fragment shader) - OpenGL

I have a fragment shader that transforms the view into something resembling Mode7.
I want to know the screen-space x,y coordinates for a given world position.
As the transformation happens in the fragment shader, I can't simply invert a matrix. This is the fragment shader code:
uniform float Fov; //1.4
uniform float Horizon; //0.6
uniform float Scaling; //0.8
// assumed declarations, not shown in the original snippet:
uniform float screenRatio;
uniform sampler2D ColorTexture;
varying vec2 uv;
void main() {
vec2 pos = uv.xy - vec2(0.5, Horizon);
vec3 p = vec3(pos.x, pos.y, pos.y + Fov);
vec2 s = vec2(p.x/p.z, p.y/p.z) * Scaling;
s.x += 0.5;
s.y += screenRatio;
gl_FragColor = texture2D(ColorTexture, s);
}
It transforms pixels in a pseudo-3D way.
What I want to do is get a screen-space coordinate for a given world position (in normal code, not shaders).
How do I reverse the order of operations above?
This is what I have right now:
(GAME_WIDTH and GAME_HEIGHT are constants and hold pixel values, e.g. 320x240)
vec2 WorldToScreenspace(float x, float y) {
// normalize coordinates 0..1, as x,y are in pixels
x = x/GAME_WIDTH - 0.5;
y = y/GAME_HEIGHT - Horizon;
// as z depends on a y value I have yet to calculate, how can I calc it?
float z = ??;
// invert: vec2 s = vec2(p.x/p.z, p.y/p.z) * Scaling;
float sx = x*z / Scaling;
float sy = y*z / Scaling;
// invert: pos = uv.xy - vec2(0.5, Horizon);
sx += 0.5;
sy += screenRatio;
// convert back to screen space
return new vec2(sx * GAME_WIDTH, sy * GAME_HEIGHT);
}
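For reference, z can be recovered by running the shader's steps in reverse. A sketch in the same C-style pseudocode as above (it assumes Scaling is non-zero and that 1 - b below is non-zero, i.e. the point does not sit exactly on the horizon line):
vec2 WorldToScreenspace(float x, float y) {
    // world position, normalized to the same 0..1 space the shader samples from
    float wx = x / GAME_WIDTH;
    float wy = y / GAME_HEIGHT;
    // undo the final offsets, then the scaling
    float a = (wx - 0.5) / Scaling;
    float b = (wy - screenRatio) / Scaling;
    // forward: b = pos.y / (pos.y + Fov)  =>  pos.y = b * Fov / (1 - b)
    float posY = b * Fov / (1.0 - b);
    float z = posY + Fov;   // the z that was missing
    float posX = a * z;     // forward: a = pos.x / z
    // undo the first step: pos = uv.xy - vec2(0.5, Horizon)
    float u = posX + 0.5;
    float v = posY + Horizon;
    // back to screen pixels
    return vec2(u * GAME_WIDTH, v * GAME_HEIGHT);
}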

Related

Fish-eye warping about mouse position - fragment shader

I'm trying to create a fish-eye effect, but only in a small radius around the mouse position. I've been able to modify this code to work around the mouse position (demo), but I can't figure out where the zooming is coming from. I'm expecting the output to warp the image similarly to this (ignore the color inversion for the sake of this question).
Relevant code:
// Check if within given radius of the mouse
vec2 diff = myUV - u_mouse - 0.5;
float distance = dot(diff, diff); // square of distance, saves a square-root
// Add fish-eye
if(distance <= u_radius_squared) {
vec2 xy = 2.0 * (myUV - u_mouse) - 1.0;
float d = length(xy * maxFactor);
float z = sqrt(1.0 - d * d);
float r = atan(d, z) / PI;
float phi = atan(xy.y, xy.x);
myUV.x = d * r * cos(phi) + 0.5 + u_mouse.x;
myUV.y = d * r * sin(phi) + 0.5 + u_mouse.y;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor.rgb = tex;
This is my first shader, so other improvements besides fixing this issue are also welcome.
Compute the vector from the current fragment to the mouse and the length of the vector:
vec2 diff = myUV - u_mouse;
float distance = length(diff);
The new texture coordinate is the sum of the mouse position and the scaled direction vector:
myUV = u_mouse + normalize(diff) * u_radius * f(distance/u_radius);
For instance, with f(x) = 1.0 - cos(x * PI / 2.0), which satisfies f(0) = 0 (the point under the mouse stays put) and f(1) = 1 (so the warp is continuous at the circle's edge):
uniform float u_radius;
uniform vec2 u_mouse;
void main()
{
vec2 diff = myUV - u_mouse;
float distance = length(diff);
if (distance <= u_radius)
{
float scale = (1.0 - cos(distance/u_radius * PI * 0.5));
myUV = u_mouse + normalize(diff) * u_radius * scale;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor = vec4(tex, 1.0);
}
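Any other easing with f(0) = 0 and f(1) = 1 keeps those properties. As a variant of the scale line above (just an illustration, not part of the original answer), the built-in smoothstep also qualifies:
// inside the if-block, replacing the cosine-based scale:
float scale = smoothstep(0.0, 1.0, distance / u_radius); // smoothstep(0,1,0) = 0, smoothstep(0,1,1) = 1
myUV = u_mouse + normalize(diff) * u_radius * scale;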

Dome Image Projection

I'm trying to create a GLSL fragment shader that projects an image onto a dome. The input would be a sampler2D texture, an elevation and an azimuth.
The result should look like the following GIFs.
Elevation between 0 and 90 degrees (in this GIF it is between -90 and 90).
Azimuth between 0 and 360 degrees.
Right now my code looks like this:
#ifdef GL_ES
precision mediump float;
#endif
uniform float u_time;
uniform vec2 u_resolution;
uniform sampler2D u_texture_0;
uniform sampler2D u_texture_1;
// INPUT
const float azimuth=0.;// clockwise 360 degree
const float altitude=90.;// 0-90 degree -> 90 = center
const float scale=1.;
// CALC
const float PI=3.14159265359;
const float azimuthRad=azimuth*PI/180.;
const float altitudeNormalization=sin((1.-(altitude/90.)));
float box(in vec2 _st,in vec2 _size){
_size=vec2(.5)-_size*.5;
vec2 uv=smoothstep(_size,_size+vec2(.001),_st);
uv*=smoothstep(_size,_size+vec2(.001),vec2(1.)-_st);
return uv.x*uv.y;
}
mat2 rotate(float angle){
return mat2(cos(angle),-sin(angle),sin(angle),cos(angle));
}
void main(){
vec2 st=gl_FragCoord.xy/u_resolution;
vec4 color = texture2D(u_texture_1,st); // set background grid
vec2 vPos=st;
float aperture=180.;
float apertureHalf=.5*aperture*(PI/180.);
float maxFactor=sin(apertureHalf);
// to unit sphere -> -1 - 1
vPos=vec2(2.*vPos-1.);
float l=length(vPos);
if(l<=1.){
float x=maxFactor*vPos.x;
float y=maxFactor*vPos.y;
float n=length(vec2(x,y));
float z=sqrt(1.-n*n);
float r=atan(n,z)/PI;
float phi=atan(y,x);
float u=r*cos(phi)+.5;
float v=r*sin(phi)+.5;
vec2 uv=vec2(u,v);
// translate
vec2 translate=vec2(sin(azimuthRad),cos(azimuthRad));
uv+=translate*altitudeNormalization;
// rotate
uv-=.5;
uv=rotate(PI-azimuthRad)*uv;
uv+=.5;
// scale
float size=.5*scale;
float box=box(uv,vec2(.5*size));
uv.x*=-1.;
uv.y*=-1.;
if(box>=.1){
vec3 b=vec3(box);
// gl_FragColor=vec4(b,1.);
//uv *= box;
color += texture2D(u_texture_0,uv);
}
gl_FragColor= color;
}
}
As you can see, two things are wrong: the texture is only displayed partially (I know that I kind of cut it out, which is certainly wrong), and the distortion is also wrong. Any help would be appreciated.
The issue is that you use scaled uv coordinates for the box test:
float size=.5*scale;
float box=box(uv,vec2(.5*size));
You have to consider this scale when you do the texture lookup. Furthermore, you wrongly add 0.5 to the uv coordinates:
float u=r*cos(phi)+.5;
float v=r*sin(phi)+.5;
Set up the uv coordinates in range [-1.0, 1.0]:
vec2 uv = vec2(r*cos(phi), r*sin(phi));
Translate, rotate and scale it (e.g. const float scale = 8.0;):
// translate
vec2 translate = vec2(sin(azimuthRad), cos(azimuthRad));
uv += translate * altitudeNormalization;
// rotate
uv = rotate(PI-azimuthRad)*uv;
// scale
uv = uv * scale;
Transform the uv coordinate from range [-1.0, 1.0] to [0.0, 1.0] and do a correct box test:
uv = uv * 0.5 + 0.5;
vec2 boxtest = step(0.0, uv) * step(uv, vec2(1.0)); // 1.0 per component where 0.0 <= uv <= 1.0
if (boxtest.x * boxtest.y > 0.0)
color += texture2D(u_texture_0, uv);
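Equivalently, the same in-range test can be written with the boolean vector built-ins (just an alternative formulation):
if (all(greaterThanEqual(uv, vec2(0.0))) && all(lessThanEqual(uv, vec2(1.0))))
    color += texture2D(u_texture_0, uv);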
Fragment shader main:
void main(){
vec2 st = gl_FragCoord.xy/u_resolution;
vec4 color = texture2D(u_texture_1,st); // set background grid
float aperture=180.;
float apertureHalf=.5*aperture*(PI/180.);
float maxFactor=sin(apertureHalf);
// to unit sphere -> -1 - 1
vec2 vPos = st * 2.0 - 1.0;
float l=length(vPos);
if(l<=1.){
float x = maxFactor*vPos.x;
float y = maxFactor*vPos.y;
float n = length(vec2(x,y));
float z = sqrt(1.-n*n);
float r = atan(n,z)/PI;
float phi = atan(y,x);
vec2 uv = vec2(r*cos(phi), r*sin(phi));
// translate
vec2 translate = vec2(sin(azimuthRad), cos(azimuthRad));
uv += translate * altitudeNormalization;
// rotate
uv = rotate(PI-azimuthRad)*uv;
// scale
uv = uv * scale;
uv = uv * 0.5 + 0.5;
vec2 boxtest = step(0.0, uv) * step(uv, vec2(1.0));
if (boxtest.x * boxtest.y > 0.0)
color += texture2D(u_texture_0, uv);
}
gl_FragColor = color;
}

I need help converting this 2D sky shader to 3D

I found this shader function on GitHub and managed to get it working in GameMaker Studio 2, my current programming suite of choice. However, this is a 2D effect that doesn't take into account the camera's up vector or FOV. Is there any way that can be added? I'm only intermediate skill level when it comes to shaders, so I'm not sure exactly what route to take, whether it would even be considered worth it at this point, or if I should start with a different example.
uniform vec3 u_sunPosition;
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
varying vec3 v_vPosition;
#define PI 3.141592
#define iSteps 16
#define jSteps 8
vec2 rsi(vec3 r0, vec3 rd, float sr) {
// ray-sphere intersection that assumes
// the sphere is centered at the origin.
// No intersection when result.x > result.y
float a = dot(rd, rd);
float b = 2.0 * dot(rd, r0);
float c = dot(r0, r0) - (sr * sr);
float d = (b*b) - 4.0*a*c;
if (d < 0.0) return vec2(1e5,-1e5);
return vec2(
(-b - sqrt(d))/(2.0*a),
(-b + sqrt(d))/(2.0*a)
);
}
vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g) {
// Normalize the sun and view directions.
pSun = normalize(pSun);
r = normalize(r);
// Calculate the step size of the primary ray.
vec2 p = rsi(r0, r, rAtmos);
if (p.x > p.y) return vec3(0,0,0);
p.y = min(p.y, rsi(r0, r, rPlanet).x);
float iStepSize = (p.y - p.x) / float(iSteps);
// Initialize the primary ray time.
float iTime = 0.0;
// Initialize accumulators for Rayleigh and Mie scattering.
vec3 totalRlh = vec3(0,0,0);
vec3 totalMie = vec3(0,0,0);
// Initialize optical depth accumulators for the primary ray.
float iOdRlh = 0.0;
float iOdMie = 0.0;
// Calculate the Rayleigh and Mie phases.
float mu = dot(r, pSun);
float mumu = mu * mu;
float gg = g * g;
float pRlh = 3.0 / (16.0 * PI) * (1.0 + mumu);
float pp = 1.0 + gg - 2.0 * mu * g;
float pMie = 3.0 / (8.0 * PI) * ((1.0 - gg) * (mumu + 1.0)) / (sign(pp)*pow(abs(pp), 1.5) * (2.0 + gg));
// Sample the primary ray.
for (int i = 0; i < iSteps; i++) {
// Calculate the primary ray sample position.
vec3 iPos = r0 + r * (iTime + iStepSize * 0.5);
// Calculate the height of the sample.
float iHeight = length(iPos) - rPlanet;
// Calculate the optical depth of the Rayleigh and Mie scattering for this step.
float odStepRlh = exp(-iHeight / shRlh) * iStepSize;
float odStepMie = exp(-iHeight / shMie) * iStepSize;
// Accumulate optical depth.
iOdRlh += odStepRlh;
iOdMie += odStepMie;
// Calculate the step size of the secondary ray.
float jStepSize = rsi(iPos, pSun, rAtmos).y / float(jSteps);
// Initialize the secondary ray time.
float jTime = 0.0;
// Initialize optical depth accumulators for the secondary ray.
float jOdRlh = 0.0;
float jOdMie = 0.0;
// Sample the secondary ray.
for (int j = 0; j < jSteps; j++) {
// Calculate the secondary ray sample position.
vec3 jPos = iPos + pSun * (jTime + jStepSize * 0.5);
// Calculate the height of the sample.
float jHeight = length(jPos) - rPlanet;
// Accumulate the optical depth.
jOdRlh += exp(-jHeight / shRlh) * jStepSize;
jOdMie += exp(-jHeight / shMie) * jStepSize;
// Increment the secondary ray time.
jTime += jStepSize;
}
// Calculate attenuation.
vec3 attn = exp(-(kMie * (iOdMie + jOdMie) + kRlh * (iOdRlh + jOdRlh)));
// Accumulate scattering.
totalRlh += odStepRlh * attn;
totalMie += odStepMie * attn;
// Increment the primary ray time.
iTime += iStepSize;
}
// Calculate and return the final color.
return iSun * (pRlh * kRlh * totalRlh + pMie * kMie * totalMie);
}
vec3 ACESFilm( vec3 x )
{
float tA = 2.51;
float tB = 0.03;
float tC = 2.43;
float tD = 0.59;
float tE = 0.14;
return clamp((x*(tA*x+tB))/(x*(tC*x+tD)+tE),0.0,1.0);
}
void main() {
vec3 color = atmosphere(
normalize( v_vPosition ), // normalized ray direction
vec3(0,6372e3,0), // ray origin
u_sunPosition, // position of the sun
22.0, // intensity of the sun
6371e3, // radius of the planet in meters
6471e3, // radius of the atmosphere in meters
vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient
21e-6, // Mie scattering coefficient
8e3, // Rayleigh scale height
1.2e3, // Mie scale height
0.758 // Mie preferred scattering direction
);
// Apply exposure.
color = ACESFilm( color );
gl_FragColor = vec4(color, 1.0);
}
However, this is a 2D effect that doesn't take into account the camera's up vector or FOV.
If you want to draw a sky in 3D, then you have to draw it on the back plane of normalized device space. Normalized device space is a cube whose left, bottom, near corner is (-1, -1, -1) and whose right, top, far corner is (1, 1, 1).
The back plane is the quad with:
bottom left: -1, -1, 1
bottom right: 1, -1, 1
top right: 1, 1, 1
top left: -1, 1, 1
Render this quad. Note that the vertex coordinates do not have to be transformed by any matrix, because they already are normalized device space coordinates. But you have to transform the ray which is used for the sky (the direction which is passed to atmosphere).
This ray has to be a direction in world space, from the camera position to the sky. From the vertex coordinates of the quad you get a ray in normalized device space, which you then have to transform to world space. The inverse projection matrix (MATRIX_PROJECTION) transforms from normalized device space to view space, and the inverse view matrix (MATRIX_VIEW) transforms from view space to world space. Use these matrices in the vertex shader:
attribute vec3 in_Position;
varying vec3 v_world_ray;
void main()
{
gl_Position = vec4(in_Position, 1.0);
vec3 proj_ray = vec3(inverse(gm_Matrices[MATRIX_PROJECTION]) * vec4(in_Position.xyz, 1.0));
v_world_ray = vec3(inverse(gm_Matrices[MATRIX_VIEW]) * vec4(proj_ray.xyz, 0.0));
}
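Note that inverse() is only available from GLSL 1.40 / GLSL ES 3.00 onward. If it is not available in the GLSL version you target, a common workaround is to compute the inverse matrices on the CPU and pass them in as uniforms (a sketch; the uniform names are made up):
attribute vec3 in_Position;
varying vec3 v_world_ray;
uniform mat4 u_inv_projection; // inverse projection matrix, computed on the CPU
uniform mat4 u_inv_view;       // inverse view matrix, computed on the CPU
void main()
{
    gl_Position = vec4(in_Position, 1.0);
    vec3 proj_ray = vec3(u_inv_projection * vec4(in_Position.xyz, 1.0));
    v_world_ray = vec3(u_inv_view * vec4(proj_ray.xyz, 0.0));
}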
In the fragment shader you have to rotate the ray by 90° around the x axis, but that is just caused by the way the ray is interpreted by function atmosphere:
varying vec3 v_world_ray;
// [...]
void main() {
vec3 world_ray = vec3(v_world_ray.x, v_world_ray.z, -v_world_ray.y);
vec3 color = atmosphere(
normalize( world_ray.xyz ), // normalized ray direction
vec3(0,6372e3,0), // ray origin
u_sunPosition, // position of the sun
22.0, // intensity of the sun
6371e3, // radius of the planet in meters
6471e3, // radius of the atmosphere in meters
vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient
21e-6, // Mie scattering coefficient
8e3, // Rayleigh scale height
1.2e3, // Mie scale height
0.758 // Mie preferred scattering direction
);
// Apply exposure.
color = ACESFilm( color );
gl_FragColor = vec4(color.rgb, 1.0);
}

SSAO implementation in Babylon JS and GLSL, using view ray for depth comparison

I'm trying to create my own SSAO shader in forward rendering (not in post processing) with GLSL. I'm encountering some issues, but I really can't figure out what's wrong with my code.
It is created with Babylon JS engine as a BABYLON.ShaderMaterial and set in a BABYLON.RenderTargetTexture, and it is mainly inspired by this renowned SSAO tutorial: http://john-chapman-graphics.blogspot.fr/2013/01/ssao-tutorial.html
For performance reasons, I have to do all the calculations without projecting and unprojecting in screen space; I'd rather use the view ray method described in the tutorial above.
Before explaining the whole thing, please note that Babylon JS uses a left-handed coordinate system, which may have quite an impact on my code.
Here are my classic steps:
First, I calculate my four camera far plane corner positions in my JS code. They can be treated as constants, since they are calculated in view space.
// Calculating 4 corners manually in view space
var tan = Math.tan;
var atan = Math.atan;
var ratio = SSAOSize.x / SSAOSize.y;
var far = scene.activeCamera.maxZ;
var fovy = scene.activeCamera.fov;
var fovx = 2 * atan(tan(fovy/2) * ratio);
var xFarPlane = far * tan(fovx/2);
var yFarPlane = far * tan(fovy/2);
var topLeft = new BABYLON.Vector3(-xFarPlane, yFarPlane, far);
var topRight = new BABYLON.Vector3( xFarPlane, yFarPlane, far);
var bottomRight = new BABYLON.Vector3( xFarPlane, -yFarPlane, far);
var bottomLeft = new BABYLON.Vector3(-xFarPlane, -yFarPlane, far);
var farCornersVec = [topLeft, topRight, bottomRight, bottomLeft];
var farCorners = [];
for (var i = 0; i < 4; i++) {
var vecTemp = farCornersVec[i];
farCorners.push(vecTemp.x, vecTemp.y, vecTemp.z);
}
These corner positions are sent to the vertex shader; that is why the vector coordinates are serialized in the farCorners[] array.
In my vertex shader, position.x and position.y signs let the shader know which corner to use at each pass.
These corners are then interpolated in my fragment shader for calculating a view ray, i.e. a vector from the camera to the far plane (its .z component is, therefore, equal to the far plane distance to camera).
The fragment shader follows the instructions of John Chapman's tutorial (see commented code below).
I get my depth buffer as a BABYLON.RenderTargetTexture with the DepthRenderer.getDepthMap() method. A depth texture lookup actually returns (according to Babylon JS's depth shaders):
(gl_FragCoord.z / gl_FragCoord.w) / far, with:
gl_FragCoord.z: the non-linear depth
gl_FragCoord.w = 1/Wc, where Wc is the w component of the clip-space vertex position (i.e. gl_Position.w in the vertex shader)
far: the positive distance from camera to the far plane.
The kernel samples are arranged in a hemisphere with random floats in [0,1], most being distributed close to origin with a linear interpolation.
As I don't have a normal texture, I calculate them from the current depth buffer value with getNormalFromDepthValue():
vec3 getNormalFromDepthValue(float depth) {
vec2 offsetX = vec2(texelSize.x, 0.0);
vec2 offsetY = vec2(0.0, texelSize.y);
// texelSize = size of a texel = (1/SSAOSize.x, 1/SSAOSize.y)
float depthOffsetX = getDepth(depthTexture, vUV + offsetX); // Horizontal neighbour
float depthOffsetY = getDepth(depthTexture, vUV + offsetY); // Vertical neighbour
vec3 pX = vec3(offsetX, depthOffsetX - depth);
vec3 pY = vec3(offsetY, depthOffsetY - depth);
vec3 normal = cross(pY, pX);
normal.z = -normal.z; // We want normal.z positive
return normalize(normal); // [-1,1]
}
Finally, my getDepth() function allows me to get the depth value at current UV in 32-bit float:
float getDepth(sampler2D tex, vec2 texcoord) {
return unpack(texture2D(tex, texcoord));
// unpack() retrieves the depth value from the 4 components of the vector given by texture2D()
}
Here are my vertex and fragment shader codes (without function declarations):
// ---------------------------- Vertex Shader ----------------------------
precision highp float;
uniform float fov;
uniform float far;
uniform vec3 farCorners[4];
attribute vec3 position; // 3D position of each vertex (4) of the quad in object space
attribute vec2 uv; // UV of each vertex (4) of the quad
varying vec3 vPosition;
varying vec2 vUV;
varying vec3 vCornerPositionVS;
void main(void) {
vPosition = position;
vUV = uv;
// Map current vertex with associated frustum corner position in view space:
// 0: top left, 1: top right, 2: bottom right, 3: bottom left
// This frustum corner position will be interpolated so that the pixel shader always has a ray from camera->far-clip plane.
vCornerPositionVS = vec3(0.0);
if (positionVS.x > 0.0) {
if (positionVS.y <= 0.0) { // top left
vCornerPositionVS = farCorners[0];
}
else if (positionVS.y > 0.0) { // top right
vCornerPositionVS = farCorners[1];
}
}
else if (positionVS.x <= 0.0) {
if (positionVS.y > 0.0) { // bottom right
vCornerPositionVS = farCorners[2];
}
else if (positionVS.y <= 0.0) { // bottom left
vCornerPositionVS = farCorners[3];
}
}
gl_Position = vec4(position * 2.0, 1.0); // 2D position of each vertex
}
// ---------------------------- Fragment Shader ----------------------------
precision highp float;
uniform mat4 projection; // Projection matrix
uniform float radius; // Scaling factor for sample position, by default = 1.7
uniform float depthBias; // 1e-5
uniform vec2 noiseScale; // (SSAOSize.x / noiseSize, SSAOSize.y / noiseSize), with noiseSize = 4
varying vec3 vCornerPositionVS; // vCornerPositionVS is the interpolated position calculated from the 4 far corners
void main() {
// Get linear depth in [0,1] with texture2D(depthBufferTexture, vUV)
float fragDepth = getDepth(depthBufferTexture, vUV);
float occlusion = 0.0;
if (fragDepth < 1.0) {
// Retrieve fragment's view space normal
vec3 normal = getNormalFromDepthValue(fragDepth); // in [-1,1]
// Random rotation: rvec.xyz are the components of the generated random vector
vec3 rvec = texture2D(randomSampler, vUV * noiseScale).rgb * 2.0 - 1.0; // [-1,1]
rvec.z = 0.0; // Random rotation around Z axis
// Get view ray, from camera to far plane, scaled by 1/far so that viewRayVS.z == 1.0
vec3 viewRayVS = vCornerPositionVS / far;
// Current fragment's view space position
vec3 fragPositionVS = viewRayVS * fragDepth;
// Creation of TBN matrix
vec3 tangent = normalize(rvec - normal * dot(rvec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 tbn = mat3(tangent, bitangent, normal);
for (int i = 0; i < NB_SAMPLES; i++) {
// Get sample kernel position, from tangent space to view space
vec3 samplePosition = tbn * kernelSamples[i];
// Add VS kernel offset sample to fragment's VS position
samplePosition = samplePosition * radius + fragPositionVS;
// Project sample position from view space to screen space:
vec4 offset = vec4(samplePosition, 1.0);
offset = projection * offset; // To clip space
offset.xy /= offset.w; // Perspective division
offset.xy = offset.xy * 0.5 + 0.5; // [-1,1] -> [0,1]
// Get current sample depth:
float sampleDepth = getDepth(depthTexture, offset.xy);
float rangeCheck = abs(fragDepth - sampleDepth) < radius ? 1.0 : 0.0;
// Reminder: fragDepth == fragPosition.z
// Range check and accumulate if fragment contributes to occlusion:
occlusion += (samplePosition.z - sampleDepth >= depthBias ? 1.0 : 0.0) * rangeCheck;
}
}
// Inversion
float ambientOcclusion = 1.0 - (occlusion / float(NB_SAMPLES));
ambientOcclusion = pow(ambientOcclusion, power);
gl_FragColor = vec4(vec3(ambientOcclusion), 1.0);
}
A horizontal and vertical Gaussian shader blur clears the noise generated by the random texture afterwards.
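For reference, a single-pass 4x4 box blur sized to the noise tile (noiseSize = 4) is a common alternative to a separable Gaussian for this step; a minimal sketch (aoTexture and the pass setup are illustrative, not taken from the project above):
uniform sampler2D aoTexture; // the SSAO render target from the previous pass (name illustrative)
uniform vec2 texelSize;      // (1/SSAOSize.x, 1/SSAOSize.y)
varying vec2 vUV;
void main() {
    float result = 0.0;
    for (int x = -2; x < 2; ++x) {
        for (int y = -2; y < 2; ++y) {
            vec2 offset = vec2(float(x), float(y)) * texelSize;
            result += texture2D(aoTexture, vUV + offset).r;
        }
    }
    gl_FragColor = vec4(vec3(result / 16.0), 1.0);
}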
My parameters are:
NB_SAMPLES = 16
radius = 1.7
depthBias = 1e-5
power = 1.0
Here is the result:
The result has artifacts on its edges, and the close shadows are not very strong... Does anyone see something wrong or weird in my code?
Thanks a lot!
fragPositionVS is a position in view space coordinates and radius is a length in view space. You use them to calculate the samplePosition:
samplePosition = samplePosition * radius + fragPositionVS;
But in the line rangeCheck = abs(fragDepth - sampleDepth) < radius ? 1.0 : 0.0;, you compare the difference of fragDepth and sampleDepth with radius. That makes no sense, since fragDepth and sampleDepth are values from the depth buffer in the range [0, 1], while radius is a length in view space.
In the line occlusion += (samplePosition.z - sampleDepth >= depthBias ? 1.0 : 0.0) * rangeCheck;, you calculate the difference of samplePosition.z and sampleDepth. While samplePosition.z is a view space coordinate between -near and -far, sampleDepth is a depth in the range [0, 1]. Calculating a difference between these two values doesn't make any sense either.
I suggest always using view space Z coordinates when you want to calculate or compare distances.
If you have a depth value, the Z coordinate in view space can be calculated by converting the depth value to a normalized device coordinate and then converting that to a view space coordinate:
float DepthToZ( in float depth )
{
float near = .... ; // distance to near plane (absolute value)
float far = .... ; // distance to far plane (absolute value)
float z_ndc = 2.0 * depth - 1.0;
float z_eye = 2.0 * near * far / (far + near - z_ndc * (far - near));
return -z_eye;
}
The depth is a value in the range [0, 1] that maps the range from the near plane distance to the far plane distance (in view space), but not linearly (for a perspective projection).
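For example, with near = 1.0 and far = 100.0, DepthToZ(0.5) = -2 * 1 * 100 / (100 + 1) ≈ -1.98, so the first half of the depth range only covers roughly the first two units in front of the camera.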
For this reason, the code line vec3 fragPositionVS = (vCornerPositionVS / far) * fragDepth; will not calculate a correct fragment position, but you can do it like this:
vec3 fragPositionVS = vCornerPositionVS * abs( DepthToZ(fragDepth) / far );
Note, in view space the z axis comes out of the viewport. If the corner positions are set up in view space, then the Z coordinate has to be the negative distance to the far plane:
var topLeft = new BABYLON.Vector3(-xFarPlane, yFarPlane, -far);
var topRight = new BABYLON.Vector3( xFarPlane, yFarPlane, -far);
var bottomRight = new BABYLON.Vector3( xFarPlane, -yFarPlane, -far);
var bottomLeft = new BABYLON.Vector3(-xFarPlane, -yFarPlane, -far);
In the vertex shader, the assignment of the corner positions is mixed up. The lower left position of the viewport is (-1, -1) and the top right position is (1, 1) (in normalized device coordinates). Adapt the code like this:
JavaScript:
var farCornersVec = [bottomLeft, bottomRight, topLeft, topRight];
Vertex shader:
// bottomLeft=0*2+0*1, bottomRight=0*2+1*1, topLeft=1*2+0*1, topRight=1*2+1*1;
int i = (positionVS.y > 0.0 ? 2 : 0) + (positionVS.x > 0.0 ? 1 : 0);
vCornerPositionVS = farCorners[i];
Note, if you could add an additional vertex attribute for the corner position, then it would be simplified.
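A sketch of that variant (the attribute name is made up; the JavaScript side would have to supply the matching far-plane corner for each of the 4 vertices):
attribute vec3 position;          // quad vertex, as before
attribute vec3 cornerPositionVS;  // far-plane corner belonging to this vertex, filled from JS
varying vec3 vCornerPositionVS;
void main(void) {
    vCornerPositionVS = cornerPositionVS; // no branching needed
    gl_Position = vec4(position * 2.0, 1.0);
}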
The calculation of the fragment position can be simplified if the aspect ratio, the field of view angle and the normalized device coordinates of the fragment (fragment position in range [-1, 1]) are known: at a view space depth of -1 the frustum's half-height is tan(fov/2) and its half-width is aspect * tan(fov/2), so scaling by |Z| moves the point to the correct depth along its view ray:
vec2 ndc_xy = vUV * 2.0 - 1.0;
float tanFov_2 = tan(radians(fov / 2.0));
float aspect = vp_size_x / vp_size_y;
float fragZ = DepthToZ(fragDepth);
vec3 fragPos = vec3(ndc_xy.x * aspect * tanFov_2, ndc_xy.y * tanFov_2, -1.0) * abs(fragZ);
If the perspective projection matrix is known, this can be calculated easily:
vec2 ndc_xy = vUV.xy * 2.0 - 1.0;
vec4 viewH = inverse( projection ) * vec4( ndc_xy, fragDepth * 2.0 - 1.0, 1.0 );
vec3 fragPosition = viewH.xyz / viewH.w;
If the perspective projection is symmetric (the field of view is not displaced and the Z axis of the view space is in the center of the viewport), this can be simplified:
vec2 ndc_xy = vUV.xy * 2.0 - 1.0;
vec3 fragPosition = vec3( ndc_xy.x / projection[0][0], ndc_xy.y / projection[1][1], -1.0 ) * abs(DepthToZ(fragDepth));
See also:
How to recover view space position given view space depth value and ndc xy
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
I suggest writing the fragment shader somewhat like this:
float fragDepth = getDepth(depthBufferTexture, vUV);
float ambientOcclusion = 1.0;
if (fragDepth > 0.0)
{
vec3 normal = getNormalFromDepthValue(fragDepth); // in [-1,1]
vec3 rvec = texture2D(randomSampler, vUV * noiseScale).rgb * 2.0 - 1.0;
rvec.z = 0.0;
vec3 tangent = normalize(rvec - normal * dot(rvec, normal));
mat3 tbn = mat3(tangent, cross(normal, tangent), normal);
vec2 ndc_xy = vUV.xy * 2.0 - 1.0;
vec3 fragPositionVS = vec3( ndc_xy.x / projection[0][0], ndc_xy.y / projection[1][1], -1.0 ) * abs( DepthToZ(fragDepth) );
// vec3 fragPositionVS = vCornerPositionVS * abs( DepthToZ(fragDepth) / far );
float occlusion = 0.0;
for (int i = 0; i < NB_SAMPLES; i++)
{
vec3 samplePosition = fragPositionVS + radius * tbn * kernelSamples[i];
// Project sample position from view space to screen space:
vec4 offset = projection * vec4(samplePosition, 1.0);
offset.xy /= offset.w; // Perspective division -> [-1,1]
offset.xy = offset.xy * 0.5 + 0.5; // [-1,1] -> [0,1]
// Get current sample depth
float sampleZ = DepthToZ( getDepth(depthTexture, offset.xy) );
// Range check and accumulate if fragment contributes to occlusion:
float rangeCheck = step( abs(fragPositionVS.z - sampleZ), radius );
occlusion += step( samplePosition.z - sampleZ, -depthBias ) * rangeCheck;
}
// Inversion
ambientOcclusion = 1.0 - (occlusion / float(NB_SAMPLES));
ambientOcclusion = pow(ambientOcclusion, power);
}
gl_FragColor = vec4(vec3(ambientOcclusion), 1.0);
See the WebGL example, which demonstrates the full algorithm (unfortunately the full code would exceed the 30,000 character limit to which an answer is restricted):
JSFiddle or GitHub
Extension to the answer
The depth as it would be stored in the depth buffer is calculated like this:
(see OpenGL ES write depth data to color)
float ndc_depth = vPosPrj.z / vPosPrj.w; // vPosPrj = clip space position (gl_Position), passed as a varying
float depth = ndc_depth * 0.5 + 0.5;
This value is already calculated in the fragment shader and is contained in gl_FragCoord.z. See the Khronos Group reference page for gl_FragCoord which says:
The z component is the depth value that would be used for the fragment's depth if no shader contained any writes to gl_FragDepth.
If the depth has to be stored in a RGBA8 buffer, the depth has to be encoded to the 4 bytes of the buffer to avoid a loss of accuracy, and has to be decoded when read from the buffer:
encode
vec3 PackDepth( in float depth )
{
float depthVal = depth * (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
vec4 encode = fract( depthVal * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
return encode.xyz - encode.yzw / 256.0 + 1.0/512.0;
}
decode
float UnpackDepth( in vec3 pack )
{
float depth = dot( pack, 1.0 / vec3(1.0, 256.0, 256.0*256.0) );
return depth * (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
}
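A sketch of how the two functions pair up across the passes (assuming a depth pass that renders into an RGBA8 target; the texture and varying names follow the code above):
// Depth pass: encode the fragment depth into the RGB channels of the RGBA8 target.
gl_FragColor = vec4(PackDepth(gl_FragCoord.z), 1.0);
// SSAO pass: decode it again and convert it to a view space Z coordinate.
float fragDepth = UnpackDepth(texture2D(depthTexture, vUV).rgb);
float fragZ = DepthToZ(fragDepth);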
See also the answers to the following questions:
How do I convert between float and vec4,vec3,vec2?
OpenGL ES write depth data to color
How do you pack one 32bit int Into 4, 8bit ints in glsl / webgl?

Perlin Noise block grid

I encountered a problem trying to compute Perlin noise using an OpenGL fragment shader.
The result is blocky and not continuous at all.
I'm trying to use this kind of implementation. I can't figure out the problem; here is my fragment shader code:
#version 330 core
out vec3 color;
in vec4 p;
in vec2 uv;
// random value for the x gradient coordinate
float randx(vec2 co){
return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
// random value for the y gradient coordinate
float randy(vec2 co){
return fract(cos(dot(co.xy ,vec2(4.9898,78.233))) * 68758.5453);
}
// smooth interpolation function
float smoothInter(float x){
return 6*x*x*x*x*x -15*x*x*x*x + 10*x*x*x;
}
float grid_dim = 10.0f;
void main() {
// Get column and row of the bottom-left
// point of the square in which the point lies in the grid
int col = int(uv.x * grid_dim);
int row = int(uv.y * grid_dim);
// Get the 4 corner coordinates of the square,
// divided by grid_dim to have values between [0,1]
vec2 bl = vec2(col, row) / 10.0f;
vec2 br = vec2(col+1, row) / 10.0f;
vec2 tl = vec2(col, row+1) / 10.0f;
vec2 tr = vec2(col+1, row+1) / 10.0f;
// Get vectors that goes from the corner to the point
vec2 a = normalize(uv - bl);
vec2 b = normalize(uv - br);
vec2 c = normalize(uv - tl);
vec2 d = normalize(uv - tr);
// Compute the dot products
float q = dot(vec2(randx(tl),randy(tl)), c);
float r = dot(vec2(randx(tr),randy(tr)), d);
float s = dot(vec2(randx(bl),randy(bl)), a);
float t = dot(vec2(randx(br),randy(br)), b);
// interpolate using mix and our smooth interpolation function
float st = mix(s, t, smoothInter(uv.x));
float qr = mix(q, r, smoothInter(uv.x));
float noise = mix(st, qr, smoothInter(uv.y));
// Output the color
color = vec3(noise, noise, noise);
}
In the last few rows, you are calling smoothInter() on the global x and y coordinates, when you need to call it on the local coordinates.
float st = mix(s, t, smoothInter( (uv.x - col) * grid_dim ));
float qr = mix(q, r, smoothInter( (uv.x - col) * grid_dim ));
float noise = mix(st, qr, smoothInter( (uv.y - row) * grid_dim ));
Multiplying by grid_dim here because your grid cells are not unit width. smoothInter() should take values between 0 and 1 and this transform ensures that.
I also had to remove the normalize() calls, and instead "normalise" the result into the range [0,1]. This was tricky, I assume because of your method of generating the random gradient vectors at the grid vertices. As it stands, your code seems to output values between -2500 and +2500 approximately. Once I scaled this to the right range I had some undesirable regularity appearing. I again put this down to the choice of prng.
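For comparison, a common way to sidestep the rescaling issue (a variant, not what was done above) is to remap the gradients to [-1, 1] and dot them with the un-normalized local offsets in cell units; the dot-product-and-blend part of main() would then look roughly like this (it reuses col, row, the corner variables bl, br, tl, tr and the rand functions from the question):
// local position inside the current cell, in [0, 1]
vec2 local = uv * grid_dim - vec2(col, row);
// gradients remapped from [0,1] to [-1,1] and normalized to unit length
vec2 gbl = normalize(vec2(randx(bl), randy(bl)) * 2.0 - 1.0);
vec2 gbr = normalize(vec2(randx(br), randy(br)) * 2.0 - 1.0);
vec2 gtl = normalize(vec2(randx(tl), randy(tl)) * 2.0 - 1.0);
vec2 gtr = normalize(vec2(randx(tr), randy(tr)) * 2.0 - 1.0);
// dot each gradient with the offset from its own corner (corners in cell units)
float s = dot(gbl, local - vec2(0.0, 0.0));
float t = dot(gbr, local - vec2(1.0, 0.0));
float q = dot(gtl, local - vec2(0.0, 1.0));
float r = dot(gtr, local - vec2(1.0, 1.0));
// blend with the local coordinates, as in the fix above
float st = mix(s, t, smoothInter(local.x));
float qr = mix(q, r, smoothInter(local.x));
float noise = mix(st, qr, smoothInter(local.y));
// 2D Perlin noise with unit gradients stays within roughly [-0.71, 0.71]; remap for display
color = vec3(clamp(noise * 0.5 + 0.5, 0.0, 1.0));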