Dome Image Projection - GLSL

I'm trying to create a GLSL fragment shader which projects an image onto a dome. The input would be a sampler2D texture, an elevation, and an azimuth.
The result should look like the following GIFs.
Elevation between 0 and 90 degrees (in this GIF it's between -90 and 90):
Azimuth between 0 and 360 degrees:
Right now my code looks like this:
#ifdef GL_ES
precision mediump float;
#endif

uniform float u_time;
uniform vec2 u_resolution;
uniform sampler2D u_texture_0;
uniform sampler2D u_texture_1;

// INPUT
const float azimuth = 0.;   // clockwise, 360 degrees
const float altitude = 90.; // 0-90 degrees -> 90 = center
const float scale = 1.;

// CALC
const float PI = 3.14159265359;
const float azimuthRad = azimuth * PI / 180.;
const float altitudeNormalization = sin((1. - (altitude / 90.)));

float box(in vec2 _st, in vec2 _size){
    _size = vec2(.5) - _size * .5;
    vec2 uv = smoothstep(_size, _size + vec2(.001), _st);
    uv *= smoothstep(_size, _size + vec2(.001), vec2(1.) - _st);
    return uv.x * uv.y;
}

mat2 rotate(float angle){
    return mat2(cos(angle), -sin(angle), sin(angle), cos(angle));
}

void main(){
    vec2 st = gl_FragCoord.xy / u_resolution;
    vec4 color = texture2D(u_texture_1, st); // set background grid
    vec2 vPos = st;

    float aperture = 180.;
    float apertureHalf = .5 * aperture * (PI / 180.);
    float maxFactor = sin(apertureHalf);

    // to unit sphere -> -1 - 1
    vPos = vec2(2. * vPos - 1.);
    float l = length(vPos);
    if(l <= 1.){
        float x = maxFactor * vPos.x;
        float y = maxFactor * vPos.y;
        float n = length(vec2(x, y));
        float z = sqrt(1. - n * n);
        float r = atan(n, z) / PI;
        float phi = atan(y, x);
        float u = r * cos(phi) + .5;
        float v = r * sin(phi) + .5;
        vec2 uv = vec2(u, v);

        // translate
        vec2 translate = vec2(sin(azimuthRad), cos(azimuthRad));
        uv += translate * altitudeNormalization;

        // rotate
        uv -= .5;
        uv = rotate(PI - azimuthRad) * uv;
        uv += .5;

        // scale
        float size = .5 * scale;
        float box = box(uv, vec2(.5 * size));
        uv.x *= -1.;
        uv.y *= -1.;
        if(box >= .1){
            vec3 b = vec3(box);
            // gl_FragColor = vec4(b, 1.);
            //uv *= box;
            color += texture2D(u_texture_0, uv);
        }
        gl_FragColor = color;
    }
}
As you can see, two things are wrong: the texture is only displayed partially (I know that I kind of cut it out, which is surely wrong), and the distortion is also wrong. Any help would be appreciated.

The issue is that you use scaled uv coordinates for the box test:
float size = .5 * scale;
float box = box(uv, vec2(.5 * size));
You have to take this scale into account when you do the texture lookup. Furthermore, you wrongly add 0.5 to the uv coordinates:
float u = r * cos(phi) + .5;
float v = r * sin(phi) + .5;
Set up the uv coordinates in range [-1.0, 1.0]:
vec2 uv = vec2(r*cos(phi), r*sin(phi));
Translate, rotate and scale it (e.g. const float scale = 8.0;):
// translate
vec2 translate = vec2(sin(azimuthRad), cos(azimuthRad));
uv += translate * altitudeNormalization;
// rotate
uv = rotate(PI-azimuthRad)*uv;
// scale
uv = uv * scale;
Transform the uv coordinate from range [-1.0, 1.0] to [0.0, 1.0] and do a correct box test:
uv = uv * 0.5 + 0.5;
vec2 boxtest = step(0.0, uv) * step(uv, vec2(1.0));
if (boxtest.x * boxtest.y > 0.0)
    color += texture2D(u_texture_0, uv);
Fragment shader main:
void main(){
    vec2 st = gl_FragCoord.xy / u_resolution;
    vec4 color = texture2D(u_texture_1, st); // set background grid

    float aperture = 180.;
    float apertureHalf = .5 * aperture * (PI / 180.);
    float maxFactor = sin(apertureHalf);

    // to unit sphere -> -1 - 1
    vec2 vPos = st * 2.0 - 1.0;
    float l = length(vPos);
    if (l <= 1.){
        float x = maxFactor * vPos.x;
        float y = maxFactor * vPos.y;
        float n = length(vec2(x, y));
        float z = sqrt(1. - n * n);
        float r = atan(n, z) / PI;
        float phi = atan(y, x);
        vec2 uv = vec2(r * cos(phi), r * sin(phi));

        // translate
        vec2 translate = vec2(sin(azimuthRad), cos(azimuthRad));
        uv += translate * altitudeNormalization;

        // rotate
        uv = rotate(PI - azimuthRad) * uv;

        // scale
        uv = uv * scale;

        uv = uv * 0.5 + 0.5;
        vec2 boxtest = step(0.0, uv) * step(uv, vec2(1.0));
        if (boxtest.x * boxtest.y > 0.0)
            color += texture2D(u_texture_0, uv);
    }
    gl_FragColor = color;
}
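If you later want to drive the projection interactively instead of recompiling, the compile-time constants can be turned into uniforms, with the derived values moved into main (a minimal sketch; the uniform names u_azimuth and u_altitude are hypothetical, both in degrees):
uniform float u_azimuth;  // hypothetical: clockwise azimuth in degrees
uniform float u_altitude; // hypothetical: altitude in degrees, 90 = center

void main(){
    // derived values can no longer be global consts once they depend on uniforms
    float azimuthRad = u_azimuth * PI / 180.;
    float altitudeNormalization = sin(1. - u_altitude / 90.);
    // ... same projection code as above ...
}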

Related

Circle in GLSL is being drawn in the wrong location

So, I have a circle in GLSL that is supposed to be drawn around the mouse. The resulting circle is drawn in the wrong location.
I'm drawing the circle by taking the step of the distance between st and the vec2 of the mouse uniform.
I have no idea why this is happening.
#ifdef GL_ES
precision mediump float;
#endif

uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;

void main() {
    vec2 st = gl_FragCoord.xy / u_resolution.xy;
    st.x *= u_resolution.x / u_resolution.y;
    float pct = 0.0;
    vec2 brightness = vec2(0.0);
    pct = step(distance(st, vec2(u_mouse / 100.0)), 0.5);
    vec3 color = vec3(0.);
    color = vec3(pct);
    brightness = vec2(1.0);
    gl_FragColor = vec4(color, brightness);
}
Here is an example using Shadertoy that can be trivially adapted to your OpenGL/GLSL code.
The code comes from a basic 2D Shadertoy tutorial on how to draw a circle around the centre of the screen, by coloring a pixel based on whether it lies within a given cartesian distance (i.e. its radius) from the centre. It is then modified to draw the circle around the mouse pointer instead:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 m = (iMouse.xy / iResolution.xy);
    vec2 uv = (fragCoord.xy / iResolution.xy);
    uv.x *= iResolution.x / iResolution.y;
    m.x *= iResolution.x / iResolution.y;

    float radius = 0.25;
    vec3 pixel;
    if( (uv.x - m.x) * (uv.x - m.x) + (uv.y - m.y) * (uv.y - m.y) < radius * radius ) {
        pixel = vec3(0.3, 0.3, 1.0);
    } else {
        pixel = vec3(1.0, 0.3, 0.2);
    }
    fragColor = vec4(pixel, 1.0);
}
Is the code duplicated by accident? I would also check that you are passing in the values you expect for the mouse and resolution uniforms, and take into account whether your window is fullscreen or not.
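A likely culprit is the u_mouse / 100.0 term: st is normalized by the resolution, so the mouse position has to be normalized the same way, and given the same aspect correction, for the two to be comparable. A minimal sketch, assuming u_mouse is in pixels like gl_FragCoord:
vec2 st = gl_FragCoord.xy / u_resolution.xy;
vec2 mouse = u_mouse / u_resolution.xy; // normalize the mouse exactly like st
st.x *= u_resolution.x / u_resolution.y;
mouse.x *= u_resolution.x / u_resolution.y; // same aspect correction on both
float pct = step(distance(st, mouse), 0.5); // circle now follows the pointer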

How to draw a smooth circle with a border in glsl?

I want to draw a smooth circle in GLSL but with a border of variable width, in a separate colour. Potentially, the interior of the circle could be transparent.
My original non-smooth shader:
#version 330

layout(location = 0) out vec4 frag_colour;

in vec4 color;

uniform float radius;
uniform vec2 position;
uniform vec4 borderColor;
uniform float borderThickness;

void main()
{
    float distanceX = abs(gl_FragCoord.x - position.x);
    float distanceY = abs(gl_FragCoord.y - position.y);

    if (sqrt(distanceX * distanceX + distanceY * distanceY) > radius)
        discard;
    else if (sqrt(distanceX * distanceX + distanceY * distanceY) <= radius &&
             sqrt(distanceX * distanceX + distanceY * distanceY) >= radius - borderThickness)
        frag_colour = borderColor;
    else
        frag_colour = color;
}
This works, but is not smooth. I can draw smooth circles:
#version 330

layout(location = 0) out vec4 frag_colour;

in vec4 color;

uniform float radius;
uniform vec2 position;
uniform vec4 borderColor;
uniform float borderThickness;

void main()
{
    vec2 uv = gl_FragCoord.xy - position;
    float d = sqrt(dot(uv, uv));
    float t = 1.0 - smoothstep(radius - borderThickness, radius, d);
    frag_colour = vec4(color.rgb, color.a * t);
}
But I can't work out how to add my border to the above.
You have to compute the absolute value of the difference between the radius and the distance, and interpolate between 0.0 and borderThickness:
float t = 1.0 - smoothstep(0.0, borderThickness, abs(radius-d));
If you want to fill the circle, then you need 2 gradients: one for the transition between the inner circle and the border, and a 2nd one for the alpha channel on the outline. mix the colors by the former and set the alpha channel by the latter:
float t1 = 1.0 - smoothstep(radius-borderThickness, radius, d);
float t2 = 1.0 - smoothstep(radius, radius+borderThickness, d);
frag_colour = vec4(mix(color.rgb, baseColor.rgb, t1), t2);
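Put together, the whole fragment shader might look like this (a sketch based on the snippets above; as the mix order implies, color is taken to carry the border colour and a new uniform baseColor the interior fill, so with the original uniforms you would swap the names):
#version 330

layout(location = 0) out vec4 frag_colour;

in vec4 color; // border colour (outer ring), per the mix order above

uniform float radius;
uniform vec2 position;
uniform vec4 baseColor; // hypothetical: interior fill colour
uniform float borderThickness;

void main()
{
    vec2 uv = gl_FragCoord.xy - position;
    float d = sqrt(dot(uv, uv));

    // t1: 1.0 in the interior, fading to 0.0 across the border band
    float t1 = 1.0 - smoothstep(radius - borderThickness, radius, d);
    // t2: 1.0 inside the circle, fading to 0.0 just outside (smooth outer edge)
    float t2 = 1.0 - smoothstep(radius, radius + borderThickness, d);

    frag_colour = vec4(mix(color.rgb, baseColor.rgb, t1), t2);
}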

GLSL Water shader normals morph as light passes over?

I decided to follow the classic guide for writing a basic GLSL water shader using the sum of sines method. I attempted to implement it inside of Processing 5, where I made a field of vertices in a PShape to make a mesh to mess with. I then overwrote the default shaders with my own vertex and fragment shaders, and I dropped in a directional light so I can actually see the normals. I made sure the directional light was movable as well, so I could see if the normals work from all angles.
I got the waves to form height correctly and I had some form of normals working, but the normals are interacting really strangely. When my light passes across the center axis of my water plane, the normals seem to morph between the different waves and change based on the light angle. The GIF I captured was too large to post inline, so I'm sure seeing it will explain better than my words:
https://imgur.com/PCznL7U
You should maximize the link to see the whole picture. Note how as the light pans from left to right, the normals of the waves seem to morph between two sets? This is especially apparent as it crosses center. It's like the normals are inconsistent based on the direction the object is being lit from.
The sphere in the middle is a normal sphere using the standard Processing shader. I left it there as reference to see the waves as well as confirm where my lighting was and that it was working fine.
Any ideas what I did wrong? I know I did some math incorrectly somewhere.
EDIT: It was recommended that I add the (lengthy) source code, which I should have done from the start.
Vertex Shader:
#define PROCESSING_LIGHT_SHADER
#define MAXWAVES 6

const float pi = 3.14159;

uniform mat4 transform;
uniform mat4 modelview;
uniform mat3 normalMatrix;
uniform float time; //Time since shader started

attribute vec4 position; //Position of the vertex from Processing
attribute vec4 color;    //Color of the vertex from Processing
attribute vec3 normal;   //Normal of the vertex from Processing
attribute vec4 ambient;
attribute vec4 specular;
attribute vec4 emissive;
attribute float shininess;

varying vec4 vertColor;     //Color passed on to fragment shader
varying vec4 backVertColor; //Color passed on to fragment shader

uniform float waveLength[MAXWAVES]; //Length of wave
uniform float speed[MAXWAVES];      //Cycle speed of wave
uniform float amplitude[MAXWAVES];  //Wave cycle height
uniform float xDirection[MAXWAVES];
uniform float yDirection[MAXWAVES]; //Flow vector of wave

uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightAmbient[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec3 lightFalloff[8];
uniform vec2 lightSpot[8];

varying vec3 Normal;
varying vec3 FragPos;
varying vec3 Vec;
varying vec3 lightDir;

//Some constants that the Processing shader used
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);

float falloffFactor(vec3 lightPos, vec3 vertPos, vec3 coeff) {
    vec3 lpv = lightPos - vertPos;
    vec3 dist = vec3(one_float);
    dist.z = dot(lpv, lpv);
    dist.y = sqrt(dist.z);
    return one_float / dot(dist, coeff);
}

float spotFactor(vec3 lightPos, vec3 vertPos, vec3 lightNorm, float minCos, float spotExp) {
    vec3 lpv = normalize(lightPos - vertPos);
    vec3 nln = -one_float * lightNorm;
    float spotCos = dot(nln, lpv);
    return spotCos <= minCos ? zero_float : pow(spotCos, spotExp);
}

float lambertFactor(vec3 lightDir, vec3 vecNormal) {
    return max(zero_float, dot(lightDir, vecNormal));
}

float blinnPhongFactor(vec3 lightDir, vec3 vertPos, vec3 vecNormal, float shine) {
    vec3 np = normalize(vertPos);
    vec3 ldp = normalize(lightDir - np);
    return pow(max(zero_float, dot(ldp, vecNormal)), shine);
}

//Returns the height of a vertex given a single wave param
float WaveHeight(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    return amplitude[waveNumber] * sin(theta * frequency + time * phase);
}

//Returns height of a vertex given all the active waves
// and its current x/y value
float WaveSum(float x, float y)
{
    float height = 0.0;
    for (int i = 0; i < MAXWAVES; i++)
    {
        height += WaveHeight(i, x, y);
    }
    return height;
}

float getDy(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.y * frequency;
    return A * cos(theta * frequency + time * phase);
}

float getDx(int waveNumber, float x, float y) {
    vec2 direction = (vec2(xDirection[waveNumber], yDirection[waveNumber]));
    float frequency = 2.0 * pi / waveLength[waveNumber];
    float phase = speed[waveNumber] * frequency;
    float theta = dot(direction, vec2(x, y));
    float A = amplitude[waveNumber] * direction.x * frequency;
    return A * cos(theta * frequency + time * phase);
}

//Returns the normal vector for each vertex
vec3 getNormal(float x, float y) {
    float dx = 0.0;
    float dy = 0.0;
    //Sum for each wave
    for (int i = 0; i < MAXWAVES; i++) {
        dx += getDx(i, x, y);
        dy += getDy(i, x, y);
    }
    vec3 n = vec3(-dx, -dy, 1.0);
    return normalize(n);
}

void main() {
    vec4 pos = position; //Grab the position from Processing bc it's read only
    pos.z = WaveSum(pos.x, pos.y);

    gl_Position = transform * pos; //Get clipping matrix for view

    vec3 ecVertex = vec3(modelview * pos);

    // Normal vector in eye coordinates
    vec3 Normal = getNormal(pos.x, pos.y);
    vec3 ecNormal = normalize(normalMatrix * Normal);
    vec3 ecNormalInv = ecNormal * -one_float;

    // Light calculations
    vec3 totalAmbient = vec3(0, 0, 0);
    vec3 totalFrontDiffuse = vec3(0, 0, 0);
    vec3 totalFrontSpecular = vec3(0, 0, 0);
    vec3 totalBackDiffuse = vec3(0, 0, 0);
    vec3 totalBackSpecular = vec3(0, 0, 0);

    for (int i = 0; i < 8; i++) {
        if (lightCount == i) break;

        vec3 lightPos = lightPosition[i].xyz;
        bool isDir = lightPosition[i].w < one_float;
        float spotCos = lightSpot[i].x;
        float spotExp = lightSpot[i].y;

        vec3 lightDir;
        float falloff;
        float spotf;

        if (isDir) {
            falloff = one_float;
            lightDir = -one_float * lightNormal[i];
        } else {
            falloff = falloffFactor(lightPos, ecVertex, lightFalloff[i]);
            lightDir = normalize(lightPos - ecVertex);
        }

        spotf = spotExp > zero_float ? spotFactor(lightPos, ecVertex, lightNormal[i],
                                                  spotCos, spotExp)
                                     : one_float;

        if (any(greaterThan(lightAmbient[i], zero_vec3))) {
            totalAmbient += lightAmbient[i] * falloff;
        }
        if (any(greaterThan(lightDiffuse[i], zero_vec3))) {
            totalFrontDiffuse += lightDiffuse[i] * falloff * spotf *
                                 lambertFactor(lightDir, ecNormal);
            totalBackDiffuse += lightDiffuse[i] * falloff * spotf *
                                lambertFactor(lightDir, ecNormalInv);
        }
        if (any(greaterThan(lightSpecular[i], zero_vec3))) {
            totalFrontSpecular += lightSpecular[i] * falloff * spotf *
                                  blinnPhongFactor(lightDir, ecVertex, ecNormal, shininess);
            totalBackSpecular += lightSpecular[i] * falloff * spotf *
                                 blinnPhongFactor(lightDir, ecVertex, ecNormalInv, shininess);
        }
    }

    // Calculating final color as result of all lights (plus emissive term).
    // Transparency is determined exclusively by the diffuse component.
    vertColor = vec4(totalFrontDiffuse, 1) * color;
    backVertColor = vec4(totalBackDiffuse, 1) * color;
}
Fragment Shader:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

varying vec4 vertColor;     //Color from vertex shader
varying vec4 backVertColor; //Color from vertex shader

void main() {
    gl_FragColor = gl_FrontFacing ? vertColor : backVertColor;
}
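For reference, here is the math the wave functions implement, written out (this just restates what WaveHeight, getDx, getDy and getNormal compute):

$$H(x,y) = \sum_i A_i \sin\!\big(f_i\,(\mathbf{D}_i \cdot (x,y)) + \varphi_i t\big), \qquad f_i = \frac{2\pi}{\lambda_i}, \quad \varphi_i = s_i f_i$$

$$\frac{\partial H}{\partial x} = \sum_i A_i D_{i,x} f_i \cos\!\big(f_i\,(\mathbf{D}_i \cdot (x,y)) + \varphi_i t\big), \qquad \mathbf{N} = \operatorname{normalize}\!\left(-\frac{\partial H}{\partial x},\, -\frac{\partial H}{\partial y},\, 1\right)$$

The derivatives in the code match the height function, which suggests the morphing artefact comes from how the normal is used in the lighting rather than from the derivative math itself.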

Move pixel with texture direction and texture velocity / GLSL

I wrote a little program to demonstrate my problem simply. I'm trying to displace the pixels of a picture using a pair of textures, where the x component of one encodes the direction and the other represents the velocity. The final objective is to use data from the CPU, where a Navier-Stokes fluid is computed, to move the pixels in GLSL. The CPU code uses the Processing Java library.
I'm trying to understand what is buggy in my code, but I don't understand how the pixel translation works.
First, on the CPU, I transform my direction into a color value from 0 to 255; then, on the GPU, I transform this back into a direction vector, multiply it by the velocity, and scale it into the unit range, but that doesn't work. Sorry if my explanation is not really understandable; my English is not really fluent.
link to the sketch
Processing:
PImage tex_velocity, tex_direction;
PShader warping;
PImage img;
int grid_w, grid_h;

void setup() {
  size(600, 375, P2D);
  // img = loadImage("pirate_small.jpg");
  img = loadImage("puros_girl_small.jpg");
  grid_w = 60;
  grid_h = 37;
  tex_velocity = createImage(grid_w, grid_h, RGB);
  tex_direction = createImage(grid_w, grid_h, RGB);
  warping = loadShader("shader/warp/rope_warp_frag.glsl");
  noise_img(tex_velocity, 20, .1, .1);   // max translate for the pixel
  noise_img(tex_direction, 360, .1, .1); // degree direction
}

void draw() {
  println(frameRate);
  if (frameCount % 30 == 0) {
    noise_img(tex_velocity, 20, .1, .1);   // max translate for the pixel
    noise_img(tex_direction, 360, .1, .1); // degree direction
  }

  warping.set("mode", 0);
  warping.set("texture", img);
  warping.set("roof_component_colour", g.colorModeX);
  warping.set("wh_ratio", 1f / grid_w, 1f / grid_h);
  warping.set("vel_texture", tex_velocity);
  warping.set("dir_texture", tex_direction);

  shader(warping);
  image(img, 0, 0);
  resetShader();

  image(tex_velocity, 5, 5);
  image(tex_direction, grid_w + 15, 5);
}

float x_offset, y_offset;
void noise_img(PImage dst, int max, float ratio_x, float ratio_y) {
  noiseSeed((int)random(10000));
  for (int x = 0; x < dst.width; x++) {
    x_offset += ratio_x;
    for (int y = 0; y < dst.height; y++) {
      y_offset += ratio_y;
      float v = map(noise(x_offset, y_offset), 0, 1, 0, max);
      v = (int)map(v, 0, max, 0, g.colorModeX);
      int c = color(v, v, v, g.colorModeA);
      dst.set(x, y, c);
    }
  }
}
GLSL
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795

varying vec4 vertTexCoord;

uniform sampler2D texture;
uniform int mode;
uniform float roof_component_colour;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_ratio;

float map(float value, float start1, float stop1, float start2, float stop2) {
    float result = start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1));
    return result;
}

vec2 cartesian_coord(float angle) {
    float x = cos(angle);
    float y = sin(angle);
    return vec2(x, y);
}

vec2 translate(float fdir, float fvel) {
    float angle_in_radian = map(fdir, 0, roof_component_colour, -PI, PI);
    vec2 dir_cart = cartesian_coord(angle_in_radian);
    return dir_cart * fvel;
}

void main() {
    vec2 ratio = gl_FragCoord.xy * wh_ratio;
    vec4 vel = texture2D(vel_texture, ratio);
    vec4 dir = texture2D(dir_texture, ratio);

    // rendering picture
    if (mode == 0) {
        float direction = dir.x;
        float velocity = vel.x;
        vec2 translation = translate(direction, velocity);

        // not bad, but totally wrong
        // vec2 coord_dest = vertTexCoord.st + translation;
        vec2 coord_dest = vertTexCoord.st * ratio + translation;

        vec4 tex_colour = texture2D(texture, coord_dest);
        gl_FragColor = tex_colour;
    }

    // velocity
    if (mode == 1) {
        gl_FragColor = texture2D(vel_texture, vertTexCoord.st);
    }

    // direction force field
    if (mode == 2) {
        gl_FragColor = texture2D(dir_texture, vertTexCoord.st);
    }
}
The texture format is GL_RGBA8, which means each color channel is stored in a byte, an integral data type in the range from 0 to 255.
But when you read texels from the texture sampler, you will get a floating-point value in the range from 0.0 to 1.0 (see glTexImage2D, GL_RGBA).
In the fragment shader you have to map the color channel (in [0, 1]) that you read from the texture sampler to the range from -PI to PI. For this you can use the GLSL function mix, which does a linear interpolation between 2 values:
vec2 translate(float fdir, float fvel) // fdir, fvel in [0.0, 1.0]
{
    float angle = mix(-PI, PI, fdir);
    return vec2(cos(angle), sin(angle)) * fvel;
}
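As an aside, mix is a plain linear interpolation, and the interpolation factor is the third argument, so the mapping above works out to:

$$\operatorname{mix}(x, y, a) = x\,(1 - a) + y\,a \quad\Rightarrow\quad \operatorname{mix}(-\pi, \pi, f_{\text{dir}}) = -\pi + 2\pi\,f_{\text{dir}}$$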
The texture coordinates are in range [0, 1]. You have to transform the translation to texture coordinates. For this you have to know the size of your image texture:
vec2 wh_ratio; // 1f/grid_w, 1f/grid_h
vec2 imageTexSize; // size of "texture"
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
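On the Processing side, the extra uniform could be supplied next to the existing warping.set calls (a sketch; imageTexSize is the uniform name suggested above, set with PShader's two-float overload):
// assuming img is the PImage bound as "texture"
warping.set("imageTexSize", (float)img.width, (float)img.height);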
Thanks for the help; now I know the texture coordinates in GLSL are in [0, 1] :) but it doesn't work as expected. I use the rendering size of the picture that must be warped, so in my understanding vec2 imageTexSize is img.width and img.height, passed from Processing as imageTexSize:
uniform vec2 imageTexSize;
.../...
vec2 scale = imageTexSize * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / scale;
the result is the top image.
And when I try this code:
vec2 ratio = gl_FragCoord.xy * wh_ratio;
vec2 coord_dest = vertTexCoord.st + translation / ratio;
the result is the middle image.
And when I try this one:
vec2 coord_dest = vertTexCoord.st + translation / wh_ratio;
the result is the bottom image.
Sorry, I post a single image because I cannot post more than one pic with my beginner reputation :)
I fixed the display bug for the full-window display, but now it's the y coordinate that is reversed for the translation. That's weird, because the velocity and direction textures are not reversed in y; the reversed-y effect is in the interpretation. It happens in all 3 modes. I tried to reverse coord_dest.y like this:
float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
but that changes nothing.
I tried float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t); but that makes something really strange, so that doesn't work either.
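As an editorial aside, mix is an interpolation, not a flip; if the intent is simply to mirror a [0, 1] texture coordinate in y, the usual idiom is (a sketch, not part of the original code):
// flip the y axis of a [0, 1] texture coordinate
coord_dest.y = 1.0 - coord_dest.y;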
Here is the full GLSL code:
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

#define PROCESSING_TEXTURE_SHADER
#define PI 3.1415926535897932384626433832795

varying vec4 vertTexCoord;

uniform sampler2D texture;
uniform int mode;
uniform sampler2D vel_texture;
uniform sampler2D dir_texture;
uniform vec2 wh_grid_ratio;
uniform vec2 wh_renderer_ratio;

vec2 cartesian_coord(float angle) {
    float x = cos(angle);
    float y = sin(angle);
    return vec2(x, y);
}

vec2 translate(float fdir, float fvel) {
    //float angle = mix(PI, -PI, fdir);
    float angle = mix(fdir, PI, -PI);
    return cartesian_coord(angle) * fvel;
}

void main() {
    vec2 ratio = gl_FragCoord.xy * wh_renderer_ratio;
    vec4 vel = texture2D(vel_texture, ratio);
    vec4 dir = texture2D(dir_texture, ratio);

    float direction = dir.x;
    float velocity = vel.x;
    vec2 translation = translate(direction, velocity);

    // mode 0   perfect
    // mode 1   interesting
    // mode 2   bizarre, but fun
    // mode 500 warp image direction
    // mode 501 warp image velocity

    // perfect
    if (mode == 0) {
        vec2 scale = gl_FragCoord.xy * wh_renderer_ratio;
        vec2 coord_dest = vertTexCoord.st + translation / scale;
        float coord_dest_y = mix(coord_dest.y, vertTexCoord.t, 0);
        // float coord_dest_y = mix(coord_dest.y, 0, vertTexCoord.t);
        gl_FragColor = texture2D(texture, vec2(coord_dest.x, coord_dest_y));
        // gl_FragColor = texture2D(texture, coord_dest);
    }

    // interesting
    if (mode == 1) {
        vec2 scale = gl_FragCoord.xy * wh_grid_ratio;
        vec2 coord_dest = vertTexCoord.st + translation / scale;
        gl_FragColor = texture2D(texture, coord_dest);
    }

    // bizarre
    if (mode == 2) {
        vec2 coord_dest = vertTexCoord.st + translation / wh_grid_ratio;
        gl_FragColor = texture2D(texture, coord_dest);
    }

    // velocity
    if (mode == 500) {
        vec4 tex_colour = texture2D(vel_texture, vertTexCoord.st);
        gl_FragColor = tex_colour;
    }

    // direction force field
    if (mode == 501) {
        vec4 tex_colour = texture2D(dir_texture, vertTexCoord.st);
        gl_FragColor = tex_colour;
    }
}
And the resulting picture is here, showing the y cursor error in the final warping.

OpenGL height based fog

I am reading Inigo Quilez's fog article and I just can't understand a few things when he talks about fog based on height.
He has a shader function for height-based fog, but I have problems understanding how to make it work.
He uses this function to apply fog:
vec3 applyFog( in vec3 rgb,       // original color of the pixel
               in float distance ) // camera to point distance
{
    float fogAmount = 1.0 - exp( -distance*b );
    vec3 fogColor = vec3(0.5, 0.6, 0.7);
    return mix( rgb, fogColor, fogAmount );
}
Then he has another one to calculate fog based on height:
vec3 applyFog( in vec3 rgb,       // original color of the pixel
               in float distance, // camera to point distance
               in vec3 rayOri,    // camera position
               in vec3 rayDir )   // camera to point vector
{
    float fogAmount = c * exp(-rayOri.y*b) * (1.0 - exp(-distance*rayDir.y*b)) / rayDir.y;
    vec3 fogColor = vec3(0.5, 0.6, 0.7);
    return mix( rgb, fogColor, fogAmount );
}
I can understand how the shader works, but I don't know how to use it with mine. For now I am just trying to learn how the whole fog world in GLSL works, but it looks like there is just a lot to learn. :D
#version 400 core

in vec3 Position;
in vec3 Normal;
//in vec4 positionToCamera;
//in float visibility;

uniform vec3 color;
uniform vec3 CameraPosition;
uniform float near;
uniform float far;
uniform vec3 fogColor;
uniform bool enableBlending;
uniform float c;
uniform float b;
uniform int fogType;

vec3 applyFogDepth(vec3 rgb,       // original color of the pixel
                   float distance, // camera to point distance
                   vec3 rayOri,    // camera position
                   vec3 rayDir)    // camera to point vector
{
    //float cc = 1.0;
    //float bb = 1.1;
    float fogAmount = c * exp(-rayOri.y * b) * (1.0 - exp(-distance * rayDir.y * b)) / rayDir.y;
    return mix(rgb, fogColor, fogAmount);
}

// Fog with Sun factor
vec3 applyFogSun(vec3 rgb,       // original color of the pixel
                 float distance, // camera to point distance
                 vec3 rayDir,    // camera to point vector
                 vec3 sunDir)    // sun light direction
{
    float fogAmount = 1.0 - exp(-distance * b);
    float sunAmount = max(dot(rayDir, sunDir), 0.0);
    vec3 fog = mix(fogColor,            // bluish
                   vec3(1.0, 0.9, 0.7), // yellowish
                   pow(sunAmount, 8.0));
    return mix(rgb, fog, fogAmount);
}

//Exponential fog
vec3 applyFog(vec3 rgb,       // original color of the pixel
              float distance) // camera to point distance
{
    float fogAmount = 1.0 - exp(-distance * b);
    return mix(rgb, fogColor, fogAmount);
    //return rgb*( exp(-distance*b)) + fogColor*(1.0 - exp(-distance*b));
}

float LinearizeDepth(float depth)
{
    float z = depth * 2.0 - 1.0; // Back to NDC
    return (2.0 * near * far) / (far + near - z * (far - near));
}

out vec4 gl_FragColor;

void main(void) {
    vec3 fog = vec3(0.0);

    //-5.0f, 900.0f, 400.0f camera coord
    vec3 lightPosition = vec3(0.0, 1200.0, -6000.0);
    vec3 lightDirection = normalize(lightPosition - Position);
    vec3 direction = normalize(CameraPosition - Position);
    float depth = LinearizeDepth(gl_FragCoord.z) / far;

    switch (fogType) {
    case 0:
        fog = applyFog(color, depth);
        break;
    case 1:
        fog = applyFogSun(color, depth, direction, lightDirection);
        break;
    case 2:
        //fog = mix(applyFog(color, depth), applyFogDepth(color, depth, CameraPosition, CameraPosition - Position), 0.5);
        fog = applyFogDepth(color, depth, CameraPosition, CameraPosition - Position);
        break;
    }

    //calculate light
    float diff = max(dot(Normal, lightDirection), 0.0);
    vec3 diffuse = diff * color;

    float fogAmount = 1.0 - exp(-depth * b);
    vec3 finalColor = vec3(0.0);
    if (enableBlending)
        finalColor = mix(diffuse, fog, fogAmount);
    else
        finalColor = fog;

    gl_FragColor = vec4(finalColor, 1.0);
    //gl_FragColor = vec4(vec3(LinearizeDepth(visibility) / far), 1.0f);
}
The first image shows the result of the first fog function, and the second image shows the second function.
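For what it's worth, the height-based variant expects a true world-space camera-to-point distance and a normalized ray direction, not a normalized depth value. A minimal sketch of how it might be called from this shader, assuming Position and CameraPosition are in the same world space:
float dist = length(Position - CameraPosition);   // true camera-to-point distance
vec3 rayDir = (Position - CameraPosition) / dist; // unit vector from camera to point
fog = applyFogDepth(color, dist, CameraPosition, rayDir);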