Drawing more than one of my glowing lights makes them share the light, can't split them - GLSL

I am trying to create two independent glowing lights, but when I add the second one the light stretches between the two:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = (fragCoord - .5*iResolution.xy) / iResolution.y;

    vec3 col = vec3(0.);

    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(uv.x, uv.y+0.5);
    vec2 glowPos2 = vec2(uv.x+0.5, uv.y+0.0);

    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos2);

    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}

uv is the position of the fragment currently being processed, so the position of a light source must not depend on uv. For example:
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
The distance between two points is the length of the vector from one point to the other. That vector is calculated by subtracting one point from the other, not by adding them:
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
The result for glowCol1 and glowCol2 can become negative. Thus, one light source would negatively affect the other. You must clamp the result in the range [0, 1]:
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
Complete and working shader:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = uv * 2.0 - 1.0;
    uv.x *= iResolution.x / iResolution.y;

    vec3 col = vec3(0.);

    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(0.5, 0.5);
    vec2 glowPos2 = vec2(-0.5, -0.5);

    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
    glowCol1 = clamp(glowCol1, 0.0, 1.0);
    glowCol2 = clamp(glowCol2, 0.0, 1.0);

    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}
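If you later want more than two lights, the same per-light term can be factored into a small helper and accumulated in a loop. A minimal sketch of that idea (the light positions, count and color below are placeholders, not part of the original code):
float glow(vec2 uv, vec2 lightPos, float radius, float speed)
{
    // same pulsating radius as above, clamped so lights don't cancel each other
    float r = radius * (cos(speed * iTime) + 6.) / 7.;
    return clamp(r - length(uv - lightPos), 0.0, 1.0);
}
// inside mainImage, after uv has been computed:
// vec2 lights[3] = vec2[3](vec2(0.5, 0.5), vec2(-0.5, -0.5), vec2(0.0, 0.4));
// for (int i = 0; i < 3; i++)
//     col += vec3(1.0, 1.0, 0.0) * glow(uv, lights[i], 0.5, 1.0);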

Related

Fish-eye warping about mouse position - fragment shader

I'm trying to create a fish-eye effect, but only in a small radius around the mouse position. I've been able to modify this code to work around the mouse position (demo), but I can't figure out where the zooming is coming from. I'm expecting the output to warp the image similarly to this (ignore the color inversion for the sake of this question):
Relevant code:
// Check if within given radius of the mouse
vec2 diff = myUV - u_mouse - 0.5;
float distance = dot(diff, diff); // square of distance, saves a square-root

// Add fish-eye
if (distance <= u_radius_squared) {
    vec2 xy = 2.0 * (myUV - u_mouse) - 1.0;
    float d = length(xy * maxFactor);
    float z = sqrt(1.0 - d * d);
    float r = atan(d, z) / PI;
    float phi = atan(xy.y, xy.x);
    myUV.x = d * r * cos(phi) + 0.5 + u_mouse.x;
    myUV.y = d * r * sin(phi) + 0.5 + u_mouse.y;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor.rgb = tex;
This is my first shader, so other improvements besides fixing this issue are also welcome.
Compute the vector from the current fragment to the mouse and the length of the vector:
vec2 diff = myUV - u_mouse;
float distance = length(diff);
The new texture coordinate is the sum of the mouse position and the scaled direction vector:
myUV = u_mouse + normalize(diff) * u_radius * f(distance/u_radius);
For instance:
uniform float u_radius;
uniform vec2 u_mouse;

// PI, tMap and the texture coordinate myUV are assumed to be declared/computed
// as in the question's shader.
void main()
{
    vec2 diff = myUV - u_mouse;
    float distance = length(diff);
    if (distance <= u_radius)
    {
        float scale = (1.0 - cos(distance/u_radius * PI * 0.5));
        myUV = u_mouse + normalize(diff) * u_radius * scale;
    }
    vec3 tex = texture2D(tMap, myUV).rgb;
    gl_FragColor = vec4(tex, 1.0);
}
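The shaping function f is a free choice. As an illustration only (not from the original answer), a smoothstep-based falloff keeps the same behaviour at the boundary of the radius but gives a different curve inside it:
// illustrative alternative falloff, same boundary value of 1.0 at distance == u_radius
float scale = smoothstep(0.0, 1.0, distance / u_radius);
myUV = u_mouse + normalize(diff) * u_radius * scale;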

How to feather the edges of the chroma mask

This is the shader I am using to do chroma keying. The shader works well, but I need to feather the edges of the chroma mask.
How can I do that?
#version 430 core
uniform sampler2D u_tex;
vec4 keyRGBA = vec4(86.0 / 255.0 , 194.0 / 255.0, 46.0 / 255.0 , 1.0); // key color as rgba
vec2 keyCC; // the CC part of YCC color model of key color
uniform vec2 rangeSpill = vec2(0.1, .52); // the smoothstep range for spill detection
uniform vec2 range = vec2(0.05, 0.21); // the smoothstep range for chroma detection
in vec2 texCoord;
out vec4 FragColor;
vec2 RGBToCC(vec4 rgba) {
float Y = 0.299 * rgba.r + 0.587 * rgba.g + 0.114 * rgba.b;
return vec2((rgba.b - Y) * 0.565, (rgba.r - Y) * 0.713);
}
vec2 RGBAToCC (float r, float g, float b) {
float y = 0.299 * r + 0.587 * g + 0.114 * b;
return vec2((b - y) * 0.565, (r - y) * 0.713);
}
vec3 RGBToYCC( vec3 col )
{
float y = 0.299 * col.r + 0.587 * col.g + 0.114 * col.b;
return vec3( y ,(col.b - y) * 0.565, (col.r - y) * 0.713);
}
vec3 YCCToRGB( vec3 col )
{
float R = col.x + (col.z - 128) * 1.40200;
float G = col.x + (col.y - 128) * -0.34414 + (col.z - 128) * -0.71414;
float B = col.x + (col.y - 128) * 1.77200;
return vec3( R , G , B);
}
vec3 hueShift( vec3 color, float hueAdjust ){
vec3 kRGBToYPrime = vec3 (0.299, 0.587, 0.114);
vec3 kRGBToI = vec3 (0.596, -0.275, -0.321);
vec3 kRGBToQ = vec3 (0.212, -0.523, 0.311);
vec3 kYIQToR = vec3 (1.0, 0.956, 0.621);
vec3 kYIQToG = vec3 (1.0, -0.272, -0.647);
vec3 kYIQToB = vec3 (1.0, -1.107, 1.704);
float YPrime = dot (color, kRGBToYPrime);
float I = dot (color, kRGBToI);
float Q = dot (color, kRGBToQ);
float hue = atan (Q, I);
float chroma = sqrt (I * I + Q * Q);
hue += hueAdjust;
Q = chroma * sin (hue);
I = chroma * cos (hue);
vec3 yIQ = vec3 (YPrime, I, Q);
return vec3( dot (yIQ, kYIQToR), dot (yIQ, kYIQToG), dot (yIQ, kYIQToB) );
}
float GetYComponent( vec3 color){
vec3 kRGBToYPrime = vec3 (0.299, 0.587, 0.114);
vec3 kRGBToI = vec3 (0.596, -0.275, -0.321);
vec3 kRGBToQ = vec3 (0.212, -0.523, 0.311);
vec3 kYIQToR = vec3 (1.0, 0.956, 0.621);
vec3 kYIQToG = vec3 (1.0, -0.272, -0.647);
vec3 kYIQToB = vec3 (1.0, -1.107, 1.704);
float YPrime = dot (color, kRGBToYPrime);
return YPrime;
}
void main() {
vec4 src1Color = texture2D(u_tex, texCoord);
keyCC = RGBAToCC( keyRGBA.r , keyRGBA.g , keyRGBA.b );
vec2 CC = RGBToCC(src1Color);
float mask = sqrt(pow(keyCC.x - CC.x, 2.0) + pow(keyCC.y - CC.y, 2.0));
mask = smoothstep(rangeSpill.x + 0.5, rangeSpill.y, mask);
if (mask > 0.0 && mask < .8)
{
src1Color = vec4( hueShift(src1Color.rgb , 1.8 ) , src1Color.a ); // spill remover
}
// Now the spill is removed do the chroma
vec2 CC2 = RGBToCC(src1Color);
float mask2 = sqrt(pow(keyCC.x - CC2.x, 2.0) + pow(keyCC.y - CC2.y, 2.0));
mask2 = smoothstep(range.x, range.y, mask2);
if (mask2 == 0.0) { discard; }
else if (mask2 == 1.0)
{
FragColor = vec4(src1Color.rgb , mask2);
}
else
{
vec4 col = max(src1Color - (1.0 - mask2) * keyRGBA, 0.0);
FragColor = vec4(hueShift(col.rgb , 0.3 ) , col.a); // do color correction
}
}
This is the base image
This is the result after chroma keying.
Also, there is not much information available about chroma keying, so if someone could also give some information about adding more detail to the shader, that would help.
Effectively, you need to extrude the areas where the chroma key matched. While you could just sample in a pattern (instead of a single point) in a single render pass, that's not very efficient.
Instead, write the mask to a 1-bit (or as many bits as you would like for transparency) mask texture first. Then run a simple 1D shader in the X and Y directions over that mask to extrude the already excluded areas by a fixed amount. You need a temporary texture for ping-ponging either way, and splitting the X and Y dimensions requires far fewer samples in total.
For example, take the minimum opacity within a 5 px range, or apply a Gaussian blur with a scale/clamp so that already fully transparent pixels stay transparent.
Ultimately, combine your final mask with the source image as usual.
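A minimal sketch of such a 1D pass, assuming the mask from the keying pass is bound as u_mask, u_texOffset holds the inverse texture size, and u_horizontal selects the direction (all of these names are made up for illustration). Run it once horizontally and once vertically, ping-ponging between two render targets:
#version 430 core
uniform sampler2D u_mask;   // single-channel mask written by the keying pass
uniform vec2 u_texOffset;   // 1.0 / texture size
uniform bool u_horizontal;  // true for the X pass, false for the Y pass
in vec2 texCoord;
out vec4 FragColor;
void main() {
    vec2 dir = u_horizontal ? vec2(u_texOffset.x, 0.0) : vec2(0.0, u_texOffset.y);
    float m = 1.0;
    // take the minimum opacity within +/- 5 px, extruding the keyed-out area
    for (int i = -5; i <= 5; ++i)
        m = min(m, texture(u_mask, texCoord + float(i) * dir).r);
    FragColor = vec4(m);
}
Afterwards, sample the feathered mask in the final composite instead of discarding fragments outright.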

Implementing a gooey effect with a shader (Processing 3)

I'm trying to replicate a web design trick known as "gooey effect" (see it live here).
It's a technique applying SVG filters on moving ellipses in order to get a blob-like motion. The process is rather simple:
apply a Gaussian blur
increase the contrast of the alpha channel only
The combination of the two creates a blob effect
The last step (increasing the alpha channel contrast) is usually done through a "color matrix filter".
A color matrix is composed of 5 columns (RGBA + offset) and 4 rows.
The values in the first four columns are multiplied with the source red, green, blue, and alpha values respectively. The fifth column value is added (offset).
In CSS, increasing the alpha channel contrast is as simple as calling a SVG filter and specifying the contrast value (here 18):
<feColorMatrix in="blur" mode="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo" />
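With those values the RGB rows are the identity and the alpha row evaluates to alphaOut = 18 * alphaIn - 7, so after clamping, any blurred alpha below roughly 0.39 becomes fully transparent and anything above roughly 0.44 becomes fully opaque; that hard cut is what turns the blurred halo back into a crisp blob outline.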
In Processing though, it seems to be a bit more complicated. I believe (I may be wrong) the only way to apply a color matrix filter is to create one in a shader. After a few tries I came up with these (very basic) vertex and fragment shaders for color rendering:
colorvert.glsl
uniform mat4 transform;
attribute vec4 position;
attribute vec4 color;
varying vec4 vertColor;
uniform vec4 o=vec4(0, 0, 0, -9);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 60.0);
void main() {
gl_Position = transform * position;
vertColor = (color * colorMatrix) + o ;
}
colorfrag.glsl
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor;
void main() {
gl_FragColor = vertColor;
}
PROBLEM:
The color matrix is partially working: changing the RGB values does affect the colors, but changing the alpha values (last row) doesn't!
When trying to combine the shader with a Gaussian filter, the drawn ellipse stays blurry even after I set the alpha channel contrast to 60 (like in the codepen example):
PShader colmat;
void setup() {
size(200, 200, P2D);
colmat = loadShader("colorfrag.glsl", "colorvert.glsl");
}
void draw() {
background(100);
shader(colmat);
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
filter(BLUR,6);
}
The same thing happens when I implement the color matrix within #cansik's Gaussian blur shader (from the PostFX library). I can see the colors changing but not the alpha contrast:
blurFrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
uniform vec4 o=vec4(0, 0, 0, 0);
uniform lowp mat4 colorMatrix = mat4(1, 0.0, 0.0, 0.0,
0.0, 1, 0.0, 0.0,
0.0, 0.0, 1, 0.0,
0, 0.0, 0.0, 60.0); //Alpha contrast set to 60
varying vec2 center;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficent Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
gl_FragColor = (avgValue / coefficientSum ) * colorMatrix;
}
Setting glBlendFunc and enabling glEnable(GL_BLEND) in the main .pde file didn't fix the issue either.
sketch.pde
import ch.bildspur.postfx.builder.*;
import ch.bildspur.postfx.pass.*;
import ch.bildspur.postfx.*;
import processing.opengl.*;
import com.jogamp.opengl.*;
PostFX fx;
void setup() {
size(200, 200, P2D);
fx = new PostFX(this);
}
void draw() {
background(100);
GL gl = ((PJOGL)beginPGL()).gl.getGL();
gl.glEnable(GL.GL_BLEND);
gl.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE);
gl.glDisable(GL.GL_DEPTH_TEST);
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
fx.render().blur(80, 14).compose();
}
Questions:
Why does the alpha channel contrast not work? How can I make it work?
Is there something wrong with the way I implemented the color matrix ?
Do you know a better way to implement that gooey effect ?
Any help would be much appreciated!
Thank you
#noahbuddy from the Processing Forum found a solution to the problem, so I'm posting it here.
To preserve transparency, with or without shaders, use an offscreen buffer (PGraphics). For example, saving a PNG image with a transparent background.
I removed the contrast matrix from #cansik's blur shader and instead put it into a separate filter.
blurfrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficent Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
gl_FragColor = avgValue / coefficientSum;
}
colfrag.glsl
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
varying vec4 vertTexCoord;
uniform vec4 o = vec4(0, 0, 0, -7.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 18.0);
void main() {
vec4 pix = texture2D(texture, vertTexCoord.st);
vec4 color = (pix * colorMatrix) + o;
gl_FragColor = color;
}
sketch.pde
PShader contrast, blurry;
PGraphics buf;
void setup() {
size(200, 200, P2D);
buf = createGraphics(width, height, P2D);
contrast = loadShader("colfrag.glsl");
blurry = loadShader("blurFrag.glsl");
// Don't forget to set these
blurry.set("sigma", 4.5);
blurry.set("blurSize", 9);
}
void draw() {
background(100);
buf.beginDraw();
// Reset transparency
// Note, the color used here will affect your edges
// even with zero for alpha
buf.background(100, 0); // set to match main background
buf.noStroke();
buf.fill(255, 30, 30);
buf.ellipse(width/2, height/2, 40, 40);
buf.ellipse(mouseX, mouseY, 40, 40);
blurry.set("horizontalPass", 1);
buf.filter(blurry);
blurry.set("horizontalPass", 0);
buf.filter(blurry);
buf.endDraw();
shader(contrast);
image(buf, 0,0, width,height);
}
Personally I think the sweet spot lies somewhere:
between 8 and 11 for the alpha contrast
between -7 and -9 for the alpha offset
uniform vec4 o = vec4(0, 0, 0, -9.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
1.0, 1.0, 1.0, 11.0);
between 10 and 15 for "sigma"
between 30 and 40 for "blurSize"
blurry.set("sigma", 14.5);
blurry.set("blurSize", 35);
I've coded 2D metaballs before using signed distance functions and marching squares, but I find this solution to be the most efficient one. Performance-wise, I can display up to 4500 balls at 60 fps on an 800x600 canvas (tested on an entry-level 2012 iMac desktop with Python Mode).
Unfortunately I'm not able to debug the exact issue, but I have a couple of ideas that hopefully might help you make some progress:
For a simpler/cheaper effect you can use the dilate filter
You can find other metaballs shaders on shadertoy and tweak the code a bit so you can run it in Processing
For example https://www.shadertoy.com/view/MlcGWn becomes:
// https://www.shadertoy.com/view/MlcGWn
uniform float iTime;
uniform vec2 iResolution;
vec3 Sphere(vec2 uv, vec2 position, float radius)
{
float dist = radius / distance(uv, position);
return vec3(dist * dist);
}
void main()
{
vec2 uv = 2.0 * vec2(gl_FragCoord.xy - 0.5 * iResolution.xy) / iResolution.y;
vec3 pixel = vec3(0.0, 0.0, 0.0);
vec2 positions[4];
positions[0] = vec2(sin(iTime * 1.4) * 1.3, cos(iTime * 2.3) * 0.4);
positions[1] = vec2(sin(iTime * 3.0) * 0.5, cos(iTime * 1.3) * 0.6);
positions[2] = vec2(sin(iTime * 2.1) * 0.1, cos(iTime * 1.9) * 0.8);
positions[3] = vec2(sin(iTime * 1.1) * 1.1, cos(iTime * 2.6) * 0.7);
for (int i = 0; i < 4; i++)
pixel += Sphere(uv, positions[i], 0.22);
pixel = step(1.0, pixel) * pixel;
gl_FragColor = vec4(pixel, 1.0);
}
and in Processing:
PShader shader;
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
shader.set("iTime", millis() * 0.001);
shader(shader);
rect(0,0,width,height);
}
or https://www.shadertoy.com/view/ldtSRX
// https://www.shadertoy.com/view/ldtSRX
uniform vec2 iResolution;
uniform vec2 iMouse;
uniform float iTime;
struct Metaball{
vec2 pos;
float r;
vec3 col;
};
vec4 calcball( Metaball ball, vec2 uv)
{
float dst = ball.r / (pow(abs(uv.x - ball.pos.x), 2.) + pow(abs(uv.y - ball.pos.y), 2.));
return vec4(ball.col * dst, dst);
}
vec3 doballs( vec2 uv )
{
Metaball mouse;
mouse.pos = iMouse.xy / iResolution.yy;
mouse.r = .015;
mouse.col = vec3(.5);
Metaball mb1, mb2, mb3, mb4;
mb1.pos = vec2(1.3, .55+.2*sin(iTime*.5)); mb1.r = .05; mb1.col = vec3(0., 1., 0.);
mb2.pos = vec2(.6, .45); mb2.r = .02; mb2.col = vec3(0., .5, 1.);
mb3.pos = vec2(.85, .65); mb3.r = .035; mb3.col = vec3(1., .2, 0.);
mb4.pos = vec2(1.+.5*sin(iTime), .2); mb4.r = .02; mb4.col = vec3(1., 1., 0.);
vec4 ball1 = calcball(mb1, uv);
vec4 ball2 = calcball(mb2, uv);
vec4 ball3 = calcball(mb3, uv);
vec4 ball4 = calcball(mb4, uv);
vec4 subball1 = calcball(mouse, uv);
float res = ball1.a + ball2.a + ball3.a + ball4.a;
res -= subball1.a;
float threshold = res >= 1.5 ? 1. : 0.;
vec3 color = (ball1.rgb + ball2.rgb + ball3.rgb + ball4.rgb - subball1.rgb) / res;
color *= threshold;
color = clamp(color, 0., 1.);
return color;
}
#define ANTIALIAS 1
void main()
{
vec2 uv = gl_FragCoord.xy / iResolution.yy;
vec3 color = doballs(uv);
#ifdef ANTIALIAS
float uvs = .75 / iResolution.y;
color *= .5;
color += doballs(vec2(uv.x + uvs, uv.y))*.125;
color += doballs(vec2(uv.x - uvs, uv.y))*.125;
color += doballs(vec2(uv.x, uv.y + uvs))*.125;
color += doballs(vec2(uv.x, uv.y - uvs))*.125;
#if ANTIALIAS == 2
color *= .5;
color += doballs(vec2(uv.x + uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y - uvs*.85))*.125;
color += doballs(vec2(uv.x + uvs*.85, uv.y - uvs*.85))*.125;
#endif
#endif
gl_FragColor = vec4(color, 1.);
}
and in Processing:
PShader shader;
PVector mouse = new PVector();
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
mouse.set(mouseX,mouseY);
shader.set("iMouse", mouse);
shader.set("iTime", millis() * 0.001);
shader(shader);
rect(0,0,width,height);
}

WebGL Normal calculations from position texture

I am trying to create a procedural water puddle in WebGL with "water ripples" by vertex displacement.
The problem I'm having is that I get a noise I can't explain.
Below is the first pass vertex shader, where I calculate the vertex positions that I later render to a texture that I then use in the second pass.
void main() {
float damping = 0.5;
vNormal = normal;
// wave radius
float timemod = 0.55;
float ttime = mod(time , timemod);
float frequency = 2.0*PI/waveWidth;
float phase = frequency * 0.21;
vec4 v = vec4(position,1.0);
// Loop through array of start positions
for(int i = 0; i < 200; i++){
float cCenterX = ripplePos[i].x;
float cCenterY = ripplePos[i].y;
vec2 center = vec2(cCenterX, cCenterY) ;
if(center.x == 0.0 && center.y == 0.0)
center = normalize(center);
// wave width
float tolerance = 0.005;
radius = sqrt(pow( uv.x - center.x , 2.0) + pow( uv.y -center.y, 2.0));
// Creating a ripple
float w_height = (tolerance - (min(tolerance,pow(ripplePos[i].z-radius*10.0,2.0)) )) * (1.0-ripplePos[i].z/timemod) *5.82;
// -2.07 in the end to keep plane at right height. Trial and error solution
v.z += waveHeight*(1.0+w_height/tolerance) / 2.0 - 2.07;
vNormal = normal+v.z;
}
vPosition = v.xyz;
gl_Position = projectionMatrix * modelViewMatrix * v;
}
And the first pass fragment shader that writes to the texture:
void main()
{
vec3 p = normalize(vPosition);
p.x = (p.x+1.0)*0.5;
p.y = (p.y+1.0)*0.5;
gl_FragColor = vec4( normalize(p), 1.0);
}
The second vertex shader is a standard passthrough.
The second pass fragment shader is where I try to calculate the normals to be used for light calculations.
void main() {
float w = 1.0 / 200.0;
float h = 1.0 / 200.0;
// Nearest neighbours
vec3 p0 = texture2D(rttTexture, vUV).xyz;
vec3 p1 = texture2D(rttTexture, vUV + vec2(-w, 0)).xyz;
vec3 p2 = texture2D(rttTexture, vUV + vec2( w, 0)).xyz;
vec3 p3 = texture2D(rttTexture, vUV + vec2( 0, h)).xyz;
vec3 p4 = texture2D(rttTexture, vUV + vec2( 0, -h)).xyz;
vec3 nVec1 = p2 - p0;
vec3 nVec2 = p3 - p0;
vec3 vNormal = cross(nVec1, nVec2);
vec3 N = normalize(vNormal);
float theZ = texture2D(rttTexture, vUV).r;
//gl_FragColor = vec4(1.,.0,1.,1.);
//gl_FragColor = texture2D(tDiffuse, vUV);
gl_FragColor = vec4(vec3(N), 1.0);
}
The result is this:
The image displays the normal map, and the noise I'm referring to is the inconsistency of the blue.
Here is a live demonstration:
http://oskarhavsvik.se/jonasgerling_water_ripple/waterRTT-clean.html
I appreciate any tips and pointers, not only fixes for this problem but for the code in general; I'm here to learn.
After a brief look it seems like your problem is in storing x/y positions.
gl_FragColor = vec4(vec3(p0*0.5+0.5), 1.0);
You don't need to store them anyway, because the texel position implicitly gives the x/y value. Just change your normal points to something like this...
vec3 p2 = vec3(1, 0, texture2D(rttTexture, vUV + vec2(w, 0)).z);
Rather than 1, 0 you will want to use a scale appropriate to the size of your displayed quad relative to the wave height. Anyway, the result now looks like this.
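Applied to all four neighbours, that idea could look roughly like this (a sketch only; scale is the placeholder you would tune as described above):
float scale = 1.0; // relates texel spacing to the stored wave height
float z0 = texture2D(rttTexture, vUV).z;
vec3 p1 = vec3(-scale,    0.0, texture2D(rttTexture, vUV + vec2(-w,  0)).z - z0);
vec3 p2 = vec3( scale,    0.0, texture2D(rttTexture, vUV + vec2( w,  0)).z - z0);
vec3 p3 = vec3(   0.0,  scale, texture2D(rttTexture, vUV + vec2( 0,  h)).z - z0);
vec3 p4 = vec3(   0.0, -scale, texture2D(rttTexture, vUV + vec2( 0, -h)).z - z0);
// central differences across the texel grid give the surface tangents
vec3 N = normalize(cross(p2 - p1, p3 - p4));
gl_FragColor = vec4(N * 0.5 + 0.5, 1.0);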
The height/z seems to be scaled by distance from the centre, so I went looking for a normalize() and removed it...
vec3 p = vPosition;
gl_FragColor = vec4(p*0.5+0.5, 1.0);
The normals now look like this...

Physically based shader not producing desired results

Over the past ~2-3 weeks, I've been learning about Physically Based Shading and I just cannot wrap my head around some of the problems I'm having.
Fragment Shader
#version 430
#define PI 3.14159265358979323846
// Inputs
in vec3 inputNormal;
vec3 fNormal;
// Material
float reflectance = 1.0; // 0 to 1
float roughness = 0.5;
vec3 specularColor = vec3(1.0, 1.0, 1.0); // f0
// Values
vec3 lightVector = vec3(1, 1, 1); // Light (l)
vec3 eyeVector = vec3(2.75, 1.25, 1.25); // Camera (v)
vec3 halfVector = normalize(lightVector + eyeVector); // L + V / |L + V|
out vec4 fColor; // Output Color
// Specular Functions
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
float alpha = roughness * roughness;
float alpha2 = alpha * alpha;
float NoH = dot(fNormal, h);
float finalTerm = ((NoH * NoH) * (alpha2 - 1.0) + 1.0);
return vec3(alpha2 / (PI * (finalTerm * finalTerm)));
}
vec3 Gsub(vec3 v) // Sub Function of G
{
float k = ((roughness + 1.0) * (roughness + 1.0)) / 8;
return vec3(dot(fNormal, v) / ((dot(fNormal, v)) * (1.0 - k) + k));
}
vec3 G(vec3 l, vec3 v, vec3 h) // Geometric Attenuation Term - Schlick Modified (k = a/2)
{
return Gsub(l) * Gsub(v);
}
vec3 F(vec3 v, vec3 h) // Fresnel - Schlick Modified (Spherical Gaussian Approximation)
{
vec3 f0 = specularColor; // right?
return f0 + (1.0 - f0) * pow(2, (-5.55473 * (dot(v, h)) - 6.98316) * (dot(v, h)));
}
vec3 specular()
{
return (D(halfVector) * F(eyeVector, halfVector) * G(lightVector, eyeVector, halfVector)) / 4 * ((dot(fNormal, lightVector)) * (dot(fNormal, eyeVector)));
}
vec3 diffuse()
{
float NoL = dot(fNormal, lightVector);
vec3 result = vec3(reflectance / PI);
return result * NoL;
}
void main()
{
fNormal = normalize(inputNormal);
fColor = vec4(diffuse() + specular(), 1.0);
//fColor = vec4(D(halfVector), 1.0);
}
So far I have been able to fix up some things and now I get a better result.
However it now seems clear that the highlight is way too big; this originates from the normal distribution function (Specular D).
Your coding of GGX/Trowbridge-Reitz is wrong:
vec3 NxH = fNormal * h;
The star * means a component-wise (term-by-term) product where you want a dot product.
Also
float alphaTerm = (alpha * alpha - 1.0) + 1.0;
is not correct, since the formula multiplies (n·m)² by (alpha * alpha - 1.0) before adding 1.0. Your formula is simply equal to alpha*alpha!
Try:
// Specular
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
    float alpha = roughness * roughness;
    float NxH = dot(fNormal, h);
    float alpha2 = alpha * alpha;
    float t = ((NxH * NxH) * (alpha2 - 1.0) + 1.0);
    return vec3(alpha2 / (PI * t * t)); // returned as vec3 to match the declared return type
}
In many other places you use * instead of dot; you need to correct all of these. Also check your formulas, many seem incorrect.
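For reference, here is a sketch of how the remaining terms might look with dot products, building on the corrected D above and on the fNormal, roughness and specularColor globals from the question. It follows the common Schlick-GGX / Schlick-Fresnel forms and clamps the dot products, which the original code does not do, so treat it as an illustration rather than a drop-in fix:
float GsubTerm(vec3 n, vec3 v, float k) // Schlick-GGX visibility for one direction
{
    float NoV = max(dot(n, v), 0.0);
    return NoV / (NoV * (1.0 - k) + k);
}
float G(vec3 n, vec3 l, vec3 v) // geometric attenuation, k = (roughness + 1)^2 / 8
{
    float k = (roughness + 1.0) * (roughness + 1.0) / 8.0;
    return GsubTerm(n, l, k) * GsubTerm(n, v, k);
}
vec3 F(vec3 v, vec3 h) // Schlick Fresnel with f0 = specularColor
{
    float VoH = max(dot(v, h), 0.0);
    return specularColor + (1.0 - specularColor) * pow(1.0 - VoH, 5.0);
}
vec3 specularBRDF(vec3 n, vec3 l, vec3 v, vec3 h)
{
    float NoL = max(dot(n, l), 0.0);
    float NoV = max(dot(n, v), 0.0);
    // note the parentheses: the whole 4 * NoL * NoV term belongs in the denominator
    return D(h) * F(v, h) * G(n, l, v) / max(4.0 * NoL * NoV, 1e-4);
}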