Blobs shader GLSL

I want to create a background similar to these images using a shader:
These are just blurred, colored blobs distributed across the whole page:
Here's my current progress: https://codesandbox.io/s/lucid-bas-wvlzl9?file=/src/components/Background/Background.tsx
Vertex shader:
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
Fragment shader:
precision highp float;
uniform float uTime;
uniform float uAmplitude;
uniform float uFrequency;
varying vec2 vUv;
uniform vec2 uResolution;
vec4 Sphere(vec2 position, float radius)
{
// float dist = radius / distance(vUv, position);
// float strength = 0.01 / distance(vUv, position);
float strength = 0.1 / distance(vec2(vUv.x, (vUv.y - 0.5) * 8. + 0.5), vec2(0.));
return vec4(strength * strength);
}
void main()
{
vec2 uv = vUv;
vec4 pixel = vec4(0.0, 0.0, 0.0, 0.0);
vec2 positions[4];
positions[0] = vec2(.5, .5);
// positions[1] = vec2(sin(uTime * 3.0) * 0.5, (cos(uTime * 1.3) * 0.6) + vUv.y);
// positions[2] = vec2(sin(uTime * 2.1) * 0.1, (cos(uTime * 1.9) * 0.8) + vUv.y);
// positions[3] = vec2(sin(uTime * 1.1) * 1.1, (cos(uTime * 2.6) * 0.7) + vUv.y);
for (int i = 0; i < 2; i++) // note: the loop reads positions[1], which is never initialized above
pixel += Sphere(positions[i], 0.22);
pixel = pixel * pixel;
gl_FragColor = pixel;
}

For each blob, you can multiply its color by a noise function and then by a 2D Gaussian centered at a random point, and then add all the blobs together. I only added the blobs from the adjacent cells to make it scrollable; the bounds of the for loops might need to be increased for bigger blobs.
Here is my code:
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
const float blobSize = 0.125;
const float cellSize = .75;
const float noiseScale = .375;
const float background = .125;
const float blobsLuminosity = .75;
const float blobsSaturation = .5;
vec2 random2(vec2 st){
st = vec2( dot(st,vec2(127.1,311.7)),
dot(st,vec2(269.5,183.3)) );
return -1.0 + 2.0*fract(sin(st)*43758.5453123);
}
// Gradient Noise by Inigo Quilez - iq/2013
// https://www.shadertoy.com/view/XdXGW8
float noise(vec2 st) {
vec2 i = floor(st);
vec2 f = fract(st);
vec2 u = f*f*(3.0-2.0*f);
return mix( mix( dot( random2(i + vec2(0.0,0.0) ), f - vec2(0.0,0.0) ),
dot( random2(i + vec2(1.0,0.0) ), f - vec2(1.0,0.0) ), u.x),
mix( dot( random2(i + vec2(0.0,1.0) ), f - vec2(0.0,1.0) ),
dot( random2(i + vec2(1.0,1.0) ), f - vec2(1.0,1.0) ), u.x), u.y)*.5+.5;
}
float gaussFunction(vec2 st, vec2 p, float r) {
return exp(-dot(st-p, st-p)/2./r/r);
}
// Function from Iñigo Quilez
// https://www.shadertoy.com/view/MsS3Wc
vec3 hsb2rgb( in vec3 c ){
vec3 rgb = clamp(abs(mod(c.x*6.0+vec3(0.0,4.0,2.0),
6.0)-3.0)-1.0,
0.0,
1.0 );
rgb = rgb*rgb*(3.0-2.0*rgb);
return c.z * mix( vec3(1.0), rgb, c.y);
}
vec3 hash32(vec2 p)
{
vec3 p3 = fract(vec3(p.xyx) * vec3(.1031, .1030, .0973));
p3 += dot(p3, p3.yxz+33.33);
return fract((p3.xxy+p3.yzz)*p3.zyx);
}
vec3 blobs(vec2 st){
vec2 i = floor(st/cellSize);
vec3 c = vec3(0.);
for(int x = -1; x <= 1; x++)
for(int y = -1; y <= 1; y++){
vec3 h = hash32(i+vec2(x, y));
c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * smoothstep(0., 1., noise(noiseScale*st/cellSize / blobSize));
//c += hsb2rgb(vec3(h.z, blobsSaturation, blobsLuminosity)) * gaussFunction(st/cellSize, i + vec2(x, y) + h.xy, blobSize) * noise(noiseScale*st/cellSize / blobSize);
}
return c + vec3(background);
}
float map(float x, float a, float b, float c, float d){
return (x-a)/(b-a)*(d-c)+c;
}
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(0.0);
color = vec3(blobs(st - u_mouse/u_resolution.xy*4.));
gl_FragColor = vec4(color,1.0);
}
Made in this shader editor.

Related

Compute Normals After Vertex Deformation?

I am coding a vertex and a fragment shader, trying to distort the surface of some water and then compute Blinn-Phong lighting on the surface. I am able to successfully compute the deformed vertex positions with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation, I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0, oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0, 0.0, 0.0, 0.0, 1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
mat4 R = rotationMatrix(vec3(0,0,1), deg);
return R * LV;
}
float random1(vec3 p) {
return fract(sin(dot(p, vec3(127.1, 311.7, 191.999)))*43758.5453);
}
vec3 random2( vec3 p ) {
return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)), dot(p, vec3(269.5, 183.3, 657.3)), dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
fs_Col = vs_Col;
fs_UVs = vs_UV;
fs_Anim = vs_Anim;
fs_T2O = vs_T2O;
mat3 invTranspose = mat3(u_ModelInvTr);
fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
vec4 modelposition = u_Model * vs_Pos;
if (vs_Anim != 0) { // if we want to animate this surface
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
if (water) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
} else if (lava) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
}
}
fs_dimVal = random1(modelposition.xyz/100.f);
fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
fs_Pos = modelposition;
gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is
// used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
return fract(sin(dot(p,vec3(127.1, 311.7, 191.999)))
*43758.5453);
}
float random1b(vec3 p) {
return fract(sin(dot(p,vec3(169.1, 355.7, 195.999)))
*95751.5453);
}
float mySmoothStep(float a, float b, float t) {
t = smoothstep(0, 1, t);
return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
vec3 pFract = fract(p);
float llb = random1(floor(p) + vec3(0,0,0));
float lrb = random1(floor(p) + vec3(1,0,0));
float ulb = random1(floor(p) + vec3(0,1,0));
float urb = random1(floor(p) + vec3(1,1,0));
float llf = random1(floor(p) + vec3(0,0,1));
float lrf = random1(floor(p) + vec3(1,0,1));
float ulf = random1(floor(p) + vec3(0,1,1));
float urf = random1(floor(p) + vec3(1,1,1));
float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
float amp = 0.5;
float freq = 4.0;
float sum = 0.0;
for(int i = 0; i < 8; i++) {
sum += cubicTriMix(p * freq) * amp;
amp *= 0.5;
freq *= 2.0;
}
return sum;
}
void main()
{
vec4 diffuseColor = texture(textureSampler, fs_UVs);
bool apply_lambert = true;
float specularIntensity = 0;
if (fs_Anim != 0) {
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
if (lava) {
// slowly gyrate texture and lighten and darken with random dimVal from vert shader
vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
diffuseColor = texture(textureSampler, movingUVs);
vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
apply_lambert = false;
} else if (water) {
// blend between 3 different points in texture to create a wavy subtle change over time
vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
diffuseColor = texture(textureSampler, fs_UVs);
vec4 altColor = texture(textureSampler, offsetUVs);
altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);
diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
offsetUVs -= 0.25f/16.f;
vec4 newColor = texture(textureSampler, offsetUVs);
diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
diffuseColor.a = 0.7;
// ----------------------------------------------------
// Blinn-Phong Shading
// ----------------------------------------------------
vec4 lightDir = normalize(fs_LightVec - fs_Pos);
vec4 viewDir = normalize(fs_CamPos - fs_Pos);
vec4 halfVec = normalize(lightDir + viewDir);
float shininess = 400.f;
float specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0); // note: this re-declares specularIntensity, shadowing the one declared at the top of main
}
}
// Calculate the diffuse term for Lambert shading
float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
// Avoid negative lighting values
diffuseTerm = clamp(diffuseTerm, 0, 1);
float ambientTerm = 0.3;
float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier
//to simulate ambient lighting. This ensures that faces that are not
//lit by our point light are not completely black.
vec3 col = diffuseColor.rgb;
// Compute final shaded color
if (apply_lambert) {
col = col * lightIntensity + col * specularIntensity;
}
// & Check the rare, special case where we draw face between two diff transparent blocks as opaque
if (fs_T2O != 0) {
out_Col = vec4(col, 1.f);
} else {
out_Col = vec4(col, diffuseColor.a);
}
// distance fog!
vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply because of the wrong normals. I think this can be done with some sort of tangent and cross-product solution, but how can I know the tangent of the surface given that we only know the vertex position?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
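One way to approach the tangent/cross-product idea is to re-evaluate the displacement at two nearby points and rebuild the normal from the resulting tangent vectors. Below is a minimal sketch of that finite-difference approach, not taken from the code above: displace() is a placeholder for whatever noise-based offset the vertex shader applies, and it has to be a pure function of the undeformed position.
vec3 displace(vec3 p) {
    // ... the same noise-based offset used to move modelposition goes here ...
    return p;
}

vec3 deformedNormal(vec3 p, vec3 n) {
    // Build an arbitrary tangent frame around the original normal.
    vec3 t = normalize(abs(n.y) < 0.99 ? cross(n, vec3(0.0, 1.0, 0.0))
                                       : cross(n, vec3(1.0, 0.0, 0.0)));
    vec3 b = cross(n, t);
    float eps = 0.01;                 // step size; tune to the mesh scale
    vec3 p0 = displace(p);            // deformed position
    vec3 pT = displace(p + eps * t);  // deformed neighbour along the tangent
    vec3 pB = displace(p + eps * b);  // deformed neighbour along the bitangent
    // The new normal is perpendicular to both deformed tangent vectors;
    // flip the cross-product order if it ends up pointing inward.
    return normalize(cross(pT - p0, pB - p0));
}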

Why shadows are not rendered when PBR directional light is applied?

I've got a problem with rendering hard shadows in a PBR pipeline.
I believe there is something wrong with PBR calculations because with a Blinn-Phong lighting model everything looks fine.
These are the lighting calculations - basic PBR:
struct DirectionalLight
{
vec3 direction;
};
layout(std140, binding = 2) uniform Scene
{
DirectionalLight directionalLight;
vec3 viewPosition;
} u_scene;
layout(std140, binding = 4) uniform Material
{
vec4 baseColor;
float roughness;
float metalness;
} u_material;
const float PI = 3.14159265359;
const float epsilon = 0.00001;
int lightCount = 1;
vec3 CalculateDirectionalLight(vec3 N, vec3 V, float NdotV, vec3 F0)
{
vec3 result = vec3(0.0); // the accumulator must be initialized before +=
for(int i = 0; i < lightCount; ++i) {
vec3 L = normalize(-u_scene.directionalLight.direction);
float NdotL = max(0.0f, dot(N, L));
vec3 H = normalize(V + L);
float NdotH = max(0.0f, dot(N, H));
vec3 F = FresnelSchlickRoughness(max(0.0f, dot(H, V)), F0, u_material.roughness);
float D = NDFGGX(NdotH, u_material.roughness);
float G = GeometrySmith(NdotL, NdotV, u_material.roughness);
vec3 kd = (1.0f - F) * (1.0f - u_material.metalness);
vec3 diffuse = kd * u_material.baseColor.rgb;
vec3 nominator = F * G * D;
float denominator = max(epsilon, 4.0f * NdotV * NdotL);
vec3 specular = nominator / denominator;
specular = clamp(specular, vec3(0.0f), vec3(10.0f));
result += (diffuse + specular) /* u_material.radiance */ * NdotL;
}
return result;
}
float NDFGGX(float NdotH, float roughness)
{
float alpha = roughness * roughness;
float alphaSq = alpha * alpha;
float denom = (NdotH * NdotH) * (alphaSq - 1.0) + 1.0;
return alphaSq / (PI * denom * denom);
}
float GeometrySchlickGGX(float Ndot, float k)
{
float nom = Ndot;
float denom = Ndot * (1.0 - k) + k;
return nom / denom;
}
float GeometrySmith(float NdotL, float NdotV, float roughness)
{
float r = (roughness + 1.0f);
float k = (r * r) / 8.0f;
float ggx2 = GeometrySchlickGGX(NdotV, k);
float ggx1 = GeometrySchlickGGX(NdotL, k);
return ggx1 * ggx2;
}
vec3 FresnelSchlick(float cosTheta, vec3 F0)
{
return F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
}
vec3 FresnelSchlickRoughness(float cosTheta, vec3 F0, float roughness)
{
return F0 + (max(vec3(1.0 - roughness), F0) - F0) * pow(1.0 - cosTheta, 5.0);
}
Shadow functions:
layout(binding = 2) uniform sampler2D u_shadowMap;
float ShadowFade = 1.0;
float GetShadowBias()
{
const float MINIMUM_SHADOW_BIAS = 0.002;
float bias = max(MINIMUM_SHADOW_BIAS * (1.0 - dot(normalize(v_normal), -normalize(u_scene.directionalLight.direction))), MINIMUM_SHADOW_BIAS);
return bias;
}
float HardShadows_DirectionalLight(vec4 fragPosLightSpace)
{
vec3 shadowCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
float bias = GetShadowBias();
float shadowMapDepth = texture(u_shadowMap, vec2(shadowCoords.xy * 0.5 + 0.5)).r;
return step(shadowCoords.z, shadowMapDepth + bias) * ShadowFade;
}
and the main function:
void main()
{
vec3 F0 = vec3(0.04f);
F0 = mix(F0, u_material.baseColor.rgb, u_material.metalness);
vec3 N = normalize(v_normal);
vec3 V = normalize(u_scene.viewPosition - v_position);
float NdotV = max(0.0f, dot(N, V));
//v_positionFromLight is calculated in a vertex shader like this:
//v_positionFromLight = u_lightViewProjection * vec4(v_position, 1.0f);
//where v_position is modelMatrix * a_position;
//where a_position is a input position of a vertex
float shadow = HardShadows_DirectionalLight(v_positionFromLight);
vec3 ambient = u_material.baseColor.rgb * 0.3f;
vec3 lightContribution = ambient + CalculateDirectionalLight(N, V, NdotV, F0) * shadow;
f_color = vec4(lightContribution, 1.0);
}
and this is what the scene looks like - there should be visible shadows, but there aren't:
I've tested two things.
First - the Blinn-Phong lighting model - shadows render just fine.
Second - outputting the shadow calculations without the PBR lighting, like this:
void main()
{
float shadow = HardShadows_DirectionalLight(v_positionFromLight);
vec3 ambient = u_material.baseColor.rgb * 0.3f;
f_color = vec4(ambient * shadow, 1.0f);
}
and it also works (apart from the shadows not being placed in quite the right spot, but that is another topic):
Why does this PBR model not work with shadows?
How can I fix it?

How to adjust Color Properly in a Noise Pattern?

I am trying to write a noise pattern that resembles wood in GLSL. Here is my current code:
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
float random (in vec2 st) {
return fract(sin(dot(st.xy,
vec2(12.9898,78.233))) * 43758.545312);
}
float noise(vec2 st) {
vec2 i = floor(st);
vec2 f = fract(st);
vec2 u = f*f*(3.0-2.0*f);
return mix( mix( random(i + vec2(0.0,0.0) ),
random(i + vec2(1.0,0.0) ), u.x),
mix( random(i + vec2(0.0,1.0) ),
random(i + vec2(1.0,1.0) ), u.x), u.y);
}
mat2 rotate2d(float angle)
{
return mat2(cos(angle), -sin(angle),
sin(angle), cos(angle));
}
float lines(in vec2 pos, float b){
float scale = 10.0;
pos *= scale;
return smoothstep(0.0, 0.5+b * 0.5, abs((sin(pos.x*3.1415)+b*2.0))*0.5);
}
vec3 c1 = vec3(0.134,0.109,0.705);
vec3 c2 = vec3(0.000,0.411,0.665);
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
vec2 pos = st.yx*vec2(11.0, 3.0);
vec3 color = vec3(1.0);
float pattern = pos.x;
color = mix(c1, c2, pattern);
// Add noise
pos = rotate2d(noise(pos)) * pos;
// Draw lines
pattern = lines(pos, 0.7);
gl_FragColor = vec4(pattern, color);
}
In its current state it makes my pattern look reddish-pink with some weird gradient showing up at the bottom. I want to make the pink areas brown while my pattern stays black. Can someone show how to adjust my color variables to do this?
The color channels are red, green, blue. Hence, red is the first component:
vec3 c1 = vec3(0.705,0.109,0.134);
vec3 c2 = vec3(0.665,0.411,0.000);
You need to mix the colors after pattern is set:
void main() {
// [...]
// Draw lines
pattern = lines(pos, 0.7);
color = mix(c1, c2, pattern);
gl_FragColor = vec4(color, 1.0);
}
See the result when you use a dark brown and a light brown:
vec3 c1 = vec3(50.0, 40.0, 30.0) / 255.0;
vec3 c2 = vec3(200.0, 150.0, 100.0) / 255.0;
#ifdef GL_ES
precision mediump float;
#endif
uniform vec2 u_resolution;
float random (in vec2 st) {
return fract(sin(dot(st.xy, vec2(12.9898,78.233))) * 43758.545312);
}
float noise(vec2 st) {
vec2 i = floor(st);
vec2 f = fract(st);
vec2 u = f*f*(3.0-2.0*f);
return mix( mix( random(i + vec2(0.0,0.0) ),
random(i + vec2(1.0,0.0) ), u.x),
mix( random(i + vec2(0.0,1.0) ),
random(i + vec2(1.0,1.0) ), u.x), u.y);
}
mat2 rotate2d(float angle)
{
return mat2(cos(angle), -sin(angle), sin(angle), cos(angle));
}
float lines(in vec2 pos, float b){
float scale = 10.0;
pos *= scale;
return smoothstep(0.0, 0.5+b * 0.5, abs((sin(pos.x*3.1415)+b*2.0))*0.5);
}
vec3 c1 = vec3(50.0, 40.0, 30.0) / 255.0;
vec3 c2 = vec3(200.0, 150.0, 100.0) / 255.0;
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
vec2 pos = st.yx*vec2(11.0, 3.0);
// Add noise
pos = rotate2d(noise(pos)) * pos;
// Draw lines
float pattern = lines(pos, 0.7);
vec3 color = mix(c1, c2, pattern);
gl_FragColor = vec4(color, 1.0);
}

GLSL Atmospheric Scattering Not Scaling With Transformations

I am trying to implement atmospheric scattering in GLSL version 4.10. I am adapting the shaders from this Shadertoy shader: https://www.shadertoy.com/view/lslXDr. The atmosphere in my program is created from a scaled version of the planet sphere.
I have the actual scattering equations working, but the inner radius of the atmosphere does not line up with the outer radius of the sphere for most camera positions. I know this is from the radius of the atmosphere being bigger than the planet sphere, but I cannot seem to get it to scale right.
My problem is best illustrated here. The model is scaled up in these pictures. As can be seen, the atmosphere inner radius does not match the radius of the planet (the dark blue sphere).
Here the model is scaled and translated. The atmosphere is off center from the camera and the inner atmosphere is still not lined up with the planet.
Here is the vertex shader, which is essentially a pass-through shader:
#version 410
in vec4 vPosition;
in vec3 vNormal;
out vec3 fPosition;
out mat3 m;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
fPosition = vec3(vPosition);
m = mat3(model);
gl_Position = projection*view*model*vPosition;
}
And the fragment shader:
#version 410
uniform float time;
uniform vec3 camPosition;
uniform float fInnerRadius;
uniform float fOuterRadius;
in vec3 fPosition;
in mat3 m;
out vec4 FragColor;
const float PI = 3.14159265359;
const float degToRad = PI / 180.0;
const float MAX = 10000.0;
float K_R = 0.166;
const float K_M = 0.0025;
const float E = 14.3;
const vec3 C_R = vec3(0.3, 0.7, 1.0);
const float G_M = -0.85;
float SCALE_H = 4.0 / (fOuterRadius - fInnerRadius);
float SCALE_L = 1.0 / (fOuterRadius - fInnerRadius);
const int numOutScatter = 10;
const float fNumOutScatter = 10.0;
const int numInScatter = 10;
const float fNumInScatter = 10.0;
vec3 rayDirection(vec3 camPosition) {
vec3 ray = m*fPosition - camPosition;
float far = length(ray);
return ray /= far;
}
vec2 rayIntersection(vec3 p, vec3 dir, float radius ) {
float b = dot( p, dir );
float c = dot( p, p ) - radius * radius;
float d = b * b - c;
if ( d < 0.0 ) {
return vec2( MAX, -MAX );
}
d = sqrt( d );
float near = -b - d;
float far = -b + d;
return vec2(near, far);
}
// Mie phase function
// g : ( -0.75, -0.999 )
// F = ( 3 * (1 - g^2) / (2 * (2 + g^2)) ) * ( (1 + c^2) / (1 + g^2 - 2*g*c)^(3/2) )
float miePhase( float g, float c, float cc ) {
float gg = g * g;
float a = ( 1.0 - gg ) * ( 1.0 + cc );
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt( b );
b *= 2.0 + gg;
return 1.5 * a / b;
}
// Rayleigh phase function
// g : 0
// F = 3/4 * ( 1 + c^2 )
float rayleighPhase( float cc ) {
return 0.75 * ( 1.0 + cc );
}
float density(vec3 p) {
return exp(-(length(p) - fInnerRadius) * SCALE_H);
}
float optic(vec3 p, vec3 q) {
vec3 step = (q - p) / fNumOutScatter;
vec3 v = p + step * 0.5;
float sum = 0.0;
for(int i = 0; i < numOutScatter; i++) {
sum += density(v);
v += step;
}
sum *= length(step)*SCALE_L;
return sum;
}
vec3 inScatter(vec3 o, vec3 dir, vec2 e, vec3 l) {
float len = (e.y - e.x) / fNumInScatter;
vec3 step = dir * len;
vec3 p = o + dir * e.x;
vec3 v = p + dir * (len * 0.5);
vec3 sum = vec3(0.0);
for(int i = 0; i < numInScatter; i++) {
vec2 f = rayIntersection(v, l, fOuterRadius);
vec3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u))*(PI * 4.0);
sum += density(v)* exp(-n * ( K_R * C_R + K_M ));
v += step;
}
sum *= len * SCALE_L;
float c = dot(dir, -l);
float cc = c * c;
return sum * ( K_R * C_R * rayleighPhase( cc ) + K_M * miePhase( G_M, c, cc ) ) * E;
}
void main (void)
{
vec3 dir = rayDirection(vec3(camPosition.x, 0.0, camPosition.z));
vec3 eye = vec3(camPosition.x, 0.0, camPosition.z);
vec3 l = normalize(vec3(0.0, 0.0, 1.0));
vec2 e = rayIntersection(eye, dir, fOuterRadius);
if ( e.x > e.y ) {
discard;
}
vec2 f = rayIntersection(eye, dir, fInnerRadius);
e.y = min(e.y, f.x);
vec3 I = inScatter(eye, dir, e, l);
FragColor = vec4(I, 1.0);
}
If needed, here is the code that draws the atmosphere. The code that draws the planet has essentially the same transformations, sans the scaleFactor.
void drawAtmosphere()
{
glUseProgram(atmosphereShader);
v = getViewMatrix();
vec3 Position = getCameraPosition();
float scaleFactor = 1.25;
m = multiplymat4(translate(0.0, 0.0, -10), scale(fScale*scaleFactor));
float fOuter = (fScale*scaleFactor);
float fInner = fScale;
glUniform1f(glGetUniformLocation(atmosphereShader, "fInnerRadius"), fInner);
glUniform1f(glGetUniformLocation(atmosphereShader, "fOuterRadius"), fOuter);
glUniform3f(glGetUniformLocation(atmosphereShader, "camPosition"), Position.x, Position.y, Position.z);
glUniform1f(glGetUniformLocation(atmosphereShader, "time"), glfwGetTime());
initMVP(atmosphereShader, m, v);
glBindVertexArray (atmosphereVAO);
glDrawArrays( GL_TRIANGLES, 0, planet.vertexNumber);
glBindVertexArray(0);
}
Any help, or anything that can point me in the right direction is appreciated.
Found the problem: it was caused by incorrect calculation of the camera position and by not taking the model space of the object into account. I uploaded a stripped-down version of the code here.
Hopefully this will help anyone trying to implement Sean O'Neil's atmosphere code.
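For anyone hitting the same issue, here is a minimal sketch of how main() could be rewritten along those lines. It assumes an extra uniform invModel = inverse(model) uploaded by the application (it is not part of the shaders above), and does the ray intersections with the camera expressed in the same space as the sphere and its radii, instead of zeroing components of the world-space camera position.
uniform mat4 invModel; // assumed extra uniform: inverse of the atmosphere's model matrix

void main(void)
{
    // Work in the atmosphere's model space so the camera, the ray, fPosition,
    // and the two radii are all measured in the same space (fInnerRadius and
    // fOuterRadius must be expressed in this space as well).
    vec3 eye = vec3(invModel * vec4(camPosition, 1.0));
    vec3 dir = normalize(fPosition - eye);
    vec3 l = normalize(vec3(0.0, 0.0, 1.0));
    vec2 e = rayIntersection(eye, dir, fOuterRadius);
    if (e.x > e.y) {
        discard;
    }
    vec2 f = rayIntersection(eye, dir, fInnerRadius);
    e.y = min(e.y, f.x);
    FragColor = vec4(inScatter(eye, dir, e, l), 1.0);
}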

Shader for a spotlight

How can I get this shader to have a smooth edge on the spot light instead of a hard one? In addition, the shader has to cope with a variable value of GL_SPOT_CUTOFF. Note that not all the lights are spot lights -- GL_LIGHT0 is a point light.
varying vec3 N;
varying vec3 v;
#define MAX_LIGHTS 2
void main (void)
{
vec4 finalColour = vec4(0.0); // the accumulator must be initialized before +=
float spotEffect;
for (int i=0; i<MAX_LIGHTS; i++)
{
vec3 L = normalize(gl_LightSource[i].position.xyz - v);
vec3 E = normalize(-v);
vec3 R = normalize(-reflect(L,N));
spotEffect = dot(normalize(gl_LightSource[i].spotDirection),
normalize(-L));
if (spotEffect > gl_LightSource[i].spotCosCutoff) {
vec4 Iamb = gl_FrontLightProduct[i].ambient;
vec4 Idiff = gl_FrontLightProduct[i].diffuse * max(dot(N,L), 0.0);
Idiff = clamp(Idiff, 0.0, 1.0);
vec4 Ispec = gl_FrontLightProduct[i].specular
* pow(max(dot(R,E),0.0),0.3*gl_FrontMaterial.shininess);
Ispec = clamp(Ispec, 0.0, 1.0);
finalColour += Iamb + Idiff + Ispec;
}
}
gl_FragColor = gl_FrontLightModelProduct.sceneColor + finalColour;
}
The scene looks like this:
This shader from http://www.ozone3d.net/tutorials/glsl_lighting_phong_p3.php produces the soft edges to the spotlight you are after.
[Pixel_Shader]
varying vec3 normal, lightDir, eyeVec;
const float cos_outer_cone_angle = 0.8; // 36 degrees
void main (void)
{
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) +
(gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
vec3 L = normalize(lightDir);
vec3 D = normalize(gl_LightSource[0].spotDirection);
float cos_cur_angle = dot(-L, D);
float cos_inner_cone_angle = gl_LightSource[0].spotCosCutoff;
float cos_inner_minus_outer_angle =
cos_inner_cone_angle - cos_outer_cone_angle;
//****************************************************
// Don't need dynamic branching at all, precompute
// falloff(i will call it spot)
float spot = 0.0;
spot = clamp((cos_cur_angle - cos_outer_cone_angle) /
cos_inner_minus_outer_angle, 0.0, 1.0);
//****************************************************
vec3 N = normalize(normal);
float lambertTerm = max( dot(N,L), 0.0);
if(lambertTerm > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm * spot;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular * spot;
}
gl_FragColor = final_color;
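// Note (a sketch, not from the linked tutorial): to cope with a variable
// GL_SPOT_CUTOFF instead of the hard-coded cos_outer_cone_angle above, the
// falloff band can be derived from the per-light cutoff itself, e.g.:
float cosOuter = gl_LightSource[0].spotCosCutoff;
float cosInner = min(cosOuter + 0.05, 1.0); // width of the soft edge; an arbitrary choice
float spotVar = clamp((cos_cur_angle - cosOuter) / max(cosInner - cosOuter, 1e-4), 0.0, 1.0);
// For a point light such as GL_LIGHT0 (cutoff of 180 degrees, so spotCosCutoff
// is -1), spotVar evaluates to 1.0 almost everywhere, so the same multiplier
// effectively disables the cutoff for point lights.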