I am trying to create two independent glowing lights, but when I add the second one, the light stretches between the two.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
vec2 uv = fragCoord/iResolution.xy;
uv =(fragCoord-.5*iResolution.xy)/iResolution.y;
vec3 col = vec3(0.);
float radius = 0.5;
float glowSpeed = 1.;
vec2 glowPos = vec2(uv.x, uv.y+0.5);
vec2 glowPos2 = vec2(uv.x+0.5, uv.y+0.0);
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos2);
col += vec3(glowCol1, glowCol1, 0.);
col += vec3(glowCol2, glowCol2, 0.);
// Output to screen
fragColor = vec4(col, 1);
}
uv is the coordinate of the fragment currently being processed, so the position of a light source must not depend on uv. e.g.:
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
The distance between 2 points is the length of the vector from one point to the other. A vector between 2 points is calculated by subtracting one point from the other, not by adding them:
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
The result for glowCol1 and glowCol2 can become negative. Thus, one light source would negatively affect the other. You must clamp the result in the range [0, 1]:
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
Complete and working shader:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Normalized pixel coordinates (from 0 to 1)
vec2 uv = fragCoord/iResolution.xy;
uv = uv * 2.0 - 1.0;
uv.x *= iResolution.x / iResolution.y;
vec3 col = vec3(0.);
float radius = 0.5;
float glowSpeed = 1.;
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
col += vec3(glowCol1, glowCol1, 0.);
col += vec3(glowCol2, glowCol2, 0.);
// Output to screen
fragColor = vec4(col, 1);
}
I'm trying to replicate a web design trick known as "gooey effect" (see it live here).
It's a technique applying SVG filters on moving ellipses in order to get a blob-like motion. The process is rather simple:
apply a gaussian blur
increase the contrast of the alpha channel only
The combination of the two creates a blob effect
The last step (increasing the alpha channel contrast) is usually done through a "color matrix filter".
A color matrix is composed of 5 columns (RGBA + offset) and 4 rows.
The values in the first four columns are multiplied with the source red, green, blue, and alpha values respectively. The fifth column value is added (offset).
In CSS, increasing the alpha channel contrast is as simple as calling a SVG filter and specifying the contrast value (here 18):
<feColorMatrix in="blur" mode="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo" />
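Written out, that matrix leaves RGB unchanged and maps the source alpha a to 18 * a - 7: alpha below roughly 0.39 collapses to fully transparent and alpha above roughly 0.44 saturates to fully opaque, which is what turns the soft Gaussian falloff into a sharp blob outline. A rough GLSL equivalent of that single step (my own sketch, not code from the question) would be:
// Per-pixel effect of the feColorMatrix above: RGB passes through,
// alpha gets a steep contrast ramp (18 * a - 7), clamped to [0, 1].
vec4 gooContrast(vec4 src) {
    float a = clamp(18.0 * src.a - 7.0, 0.0, 1.0);
    return vec4(src.rgb, a);
}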
In Processing though, it seems to be a bit more complicated. I believe (I may be wrong) the only way to apply a color matrix filter is to create one in a shader. After a few tries I came up with these (very basic) vertex and fragment shaders for color rendering:
colorvert.glsl
uniform mat4 transform;
attribute vec4 position;
attribute vec4 color;
varying vec4 vertColor;
uniform vec4 o=vec4(0, 0, 0, -9);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 60.0);
void main() {
gl_Position = transform * position;
vertColor = (color * colorMatrix) + o ;
}
colorfrag.glsl
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor;
void main() {
gl_FragColor = vertColor;
}
PROBLEM:
The color matrix is partially working: changing the RGB values does affect the colors, but changing the alpha values (last row) doesn't!
When trying to combine the shader with a Gaussian filter, the drawn ellipse stays blurry even after I set the alpha channel contrast to 60 (like in the codepen example):
PShader colmat;
void setup() {
size(200, 200, P2D);
colmat = loadShader("colorfrag.glsl", "colorvert.glsl");
}
void draw() {
background(100);
shader(colmat);
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
filter(BLUR,6);
}
The same thing happens when I implement the color matrix within #cansik's Gaussian blur shader (from the PostFX library). I can see the colors changing but not the alpha contrast:
blurFrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
uniform vec4 o=vec4(0, 0, 0, 0);
uniform lowp mat4 colorMatrix = mat4(1, 0.0, 0.0, 0.0,
0.0, 1, 0.0, 0.0,
0.0, 0.0, 1, 0.0,
0, 0.0, 0.0, 60.0); //Alpha contrast set to 60
varying vec2 center;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficient Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
gl_FragColor = (avgValue / coefficientSum ) * colorMatrix;
}
Setting glBlendFunc and enabling glEnable(GL_BLEND) in the main .pde file didn't fix the issue either.
sketch.pde
import ch.bildspur.postfx.builder.*;
import ch.bildspur.postfx.pass.*;
import ch.bildspur.postfx.*;
import processing.opengl.*;
import com.jogamp.opengl.*;
PostFX fx;
void setup() {
size(200, 200, P2D);
fx = new PostFX(this);
}
void draw() {
background(100);
GL gl = ((PJOGL)beginPGL()).gl.getGL();
gl.glEnable(GL.GL_BLEND);
gl.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE);
gl.glDisable(GL.GL_DEPTH_TEST);
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
fx.render().blur(80, 14).compose();
}
Questions:
Why does the alpha channel contrast not work? How can I make it work?
Is there something wrong with the way I implemented the color matrix?
Do you know a better way to implement that gooey effect?
Any help would be much appreciated!
Thank you
#noahbuddy from the Processing Forum found a solution to the problem, so I'm posting it here.
To preserve transparency, with or without shaders, use an offscreen
buffer (PGraphics). For example, saving a PNG image with transparent
background.
I removed the contrast matrix from #cansik's blur shader and instead put it into a separate filter.
blurFrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficient Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
gl_FragColor = avgValue / coefficientSum;
}
colfrag.glsl
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
varying vec4 vertTexCoord;
uniform vec4 o = vec4(0, 0, 0, -7.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 18.0);
void main() {
vec4 pix = texture2D(texture, vertTexCoord.st);
vec4 color = (pix * colorMatrix) + o;
gl_FragColor = color;
}
sketch.pde
PShader contrast, blurry;
PGraphics buf;
void setup() {
size(200, 200, P2D);
buf = createGraphics(width, height, P2D);
contrast = loadShader("colfrag.glsl");
blurry = loadShader("blurFrag.glsl");
// Don't forget to set these
blurry.set("sigma", 4.5);
blurry.set("blurSize", 9);
}
void draw() {
background(100);
buf.beginDraw();
// Reset transparency
// Note, the color used here will affect your edges
// even with zero for alpha
buf.background(100, 0); // set to match main background
buf.noStroke();
buf.fill(255, 30, 30);
buf.ellipse(width/2, height/2, 40, 40);
buf.ellipse(mouseX, mouseY, 40, 40);
blurry.set("horizontalPass", 1);
buf.filter(blurry);
blurry.set("horizontalPass", 0);
buf.filter(blurry);
buf.endDraw();
shader(contrast);
image(buf, 0,0, width,height);
}
Personally I think the sweet spot lies somewhere:
between 8 and 11 for the alpha contrast
between -7 and -9 for the alpha offset
uniform vec4 o = vec4(0, 0, 0, -9.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
1.0, 1.0, 1.0, 11.0);
between 10 and 15 for "sigma"
between 30 and 40 for "blurSize"
blurry.set("sigma", 14.5);
blurry.set("blurSize", 35);
I've coded 2D metaballs before using signed distance functions and the marching squares algorithm, but I find this solution to be the most efficient one. Performance-wise I can display up to 4500 balls at 60 fps on an 800x600 canvas (tested on an entry-level 2012 iMac desktop with Python Mode).
Unfortunately I'm not able to debug the exact issue, but I have a couple of ideas that hopefully might help you make some progress:
For a simpler/cheaper effect you can use the dilate filter (see the short sketch after this list)
You can find other metaballs shaders on Shadertoy and tweak the code a bit so you can run them in Processing
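To illustrate the first idea, here is a minimal sketch of my own (not from the original answer) that blurs the shapes and then runs Processing's built-in DILATE filter a few times to tighten the blurred edges back into a blob-like silhouette:
void setup() {
  size(200, 200, P2D);
  noStroke();
}
void draw() {
  background(0);
  fill(255);
  ellipse(width/2, height/2, 40, 40);
  ellipse(mouseX, mouseY, 40, 40);
  filter(BLUR, 4);            // soften the shapes first
  for (int i = 0; i < 4; i++) {
    filter(DILATE);           // DILATE takes no level parameter
  }
}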
For example, the Shadertoy metaballs shader https://www.shadertoy.com/view/MlcGWn becomes:
// https://www.shadertoy.com/view/MlcGWn
uniform float iTime;
uniform vec2 iResolution;
vec3 Sphere(vec2 uv, vec2 position, float radius)
{
float dist = radius / distance(uv, position);
return vec3(dist * dist);
}
void main()
{
vec2 uv = 2.0 * vec2(gl_FragCoord.xy - 0.5 * iResolution.xy) / iResolution.y;
vec3 pixel = vec3(0.0, 0.0, 0.0);
vec2 positions[4];
positions[0] = vec2(sin(iTime * 1.4) * 1.3, cos(iTime * 2.3) * 0.4);
positions[1] = vec2(sin(iTime * 3.0) * 0.5, cos(iTime * 1.3) * 0.6);
positions[2] = vec2(sin(iTime * 2.1) * 0.1, cos(iTime * 1.9) * 0.8);
positions[3] = vec2(sin(iTime * 1.1) * 1.1, cos(iTime * 2.6) * 0.7);
for (int i = 0; i < 4; i++)
pixel += Sphere(uv, positions[i], 0.22);
pixel = step(1.0, pixel) * pixel;
gl_FragColor = vec4(pixel, 1.0);
}
and in Processing:
PShader shader;
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
shader.set("iTime", millis() * 0.001);
shader(shader);
rect(0,0,width,height);
}
or https://www.shadertoy.com/view/ldtSRX
// https://www.shadertoy.com/view/ldtSRX
uniform vec2 iResolution;
uniform vec2 iMouse;
uniform float iTime;
struct Metaball{
vec2 pos;
float r;
vec3 col;
};
vec4 calcball( Metaball ball, vec2 uv)
{
float dst = ball.r / (pow(abs(uv.x - ball.pos.x), 2.) + pow(abs(uv.y - ball.pos.y), 2.));
return vec4(ball.col * dst, dst);
}
vec3 doballs( vec2 uv )
{
Metaball mouse;
mouse.pos = iMouse.xy / iResolution.yy;
mouse.r = .015;
mouse.col = vec3(.5);
Metaball mb1, mb2, mb3, mb4;
mb1.pos = vec2(1.3, .55+.2*sin(iTime*.5)); mb1.r = .05; mb1.col = vec3(0., 1., 0.);
mb2.pos = vec2(.6, .45); mb2.r = .02; mb2.col = vec3(0., .5, 1.);
mb3.pos = vec2(.85, .65); mb3.r = .035; mb3.col = vec3(1., .2, 0.);
mb4.pos = vec2(1.+.5*sin(iTime), .2); mb4.r = .02; mb4.col = vec3(1., 1., 0.);
vec4 ball1 = calcball(mb1, uv);
vec4 ball2 = calcball(mb2, uv);
vec4 ball3 = calcball(mb3, uv);
vec4 ball4 = calcball(mb4, uv);
vec4 subball1 = calcball(mouse, uv);
float res = ball1.a + ball2.a + ball3.a + ball4.a;
res -= subball1.a;
float threshold = res >= 1.5 ? 1. : 0.;
vec3 color = (ball1.rgb + ball2.rgb + ball3.rgb + ball4.rgb - subball1.rgb) / res;
color *= threshold;
color = clamp(color, 0., 1.);
return color;
}
#define ANTIALIAS 1
void main()
{
vec2 uv = gl_FragCoord.xy / iResolution.yy;
vec3 color = doballs(uv);
#ifdef ANTIALIAS
float uvs = .75 / iResolution.y;
color *= .5;
color += doballs(vec2(uv.x + uvs, uv.y))*.125;
color += doballs(vec2(uv.x - uvs, uv.y))*.125;
color += doballs(vec2(uv.x, uv.y + uvs))*.125;
color += doballs(vec2(uv.x, uv.y - uvs))*.125;
#if ANTIALIAS == 2
color *= .5;
color += doballs(vec2(uv.x + uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y - uvs*.85))*.125;
color += doballs(vec2(uv.x + uvs*.85, uv.y - uvs*.85))*.125;
#endif
#endif
gl_FragColor = vec4(color, 1.);
}
and in Processing:
PShader shader;
PVector mouse = new PVector();
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
mouse.set(mouseX,mouseY);
shader.set("iMouse", mouse);
shader.set("iTime", millis() * 0.001);
shader(shader);
rect(0,0,width,height);
}
I'm trying to bind 2 textures for my shader, but for some reason it always seems to take the last image that I defined. Am I doing something wrong?
GLuint textures[2];
glEnable(GL_TEXTURE_2D);
glGenTextures(2, textures);
glBindTexture(GL_TEXTURE_2D, textures[0]);
glfwLoadTexture2D("C:\\front.tga", GLFW_BUILD_MIPMAPS_BIT);
glBindTexture(GL_TEXTURE_2D, textures[1]);
glfwLoadTexture2D("C:\\reflect.tga", GLFW_BUILD_MIPMAPS_BIT);
In this case I see 'reflect.tga' for both the reflection and the refraction in my shader...
const vec3 Xunitvec = vec3 (1.0, 0.0, 0.0);
const vec3 Yunitvec = vec3 (0.0, 1.0, 0.0);
uniform vec3 BaseColor;
uniform float Depth;
uniform float MixRatio;
// need to scale our framebuffer - it has a fixed width/height of 2048
uniform float FrameWidth;
uniform float FrameHeight;
uniform sampler2D EnvMap;
uniform sampler2D RefractionMap;
varying vec3 Normal;
varying vec3 EyeDir;
varying vec4 EyePos;
varying float LightIntensity;
void main (void)
{
// Compute reflection vector
vec3 reflectDir = reflect(EyeDir, Normal);
// Compute altitude and azimuth angles
vec2 index;
index.y = dot(normalize(reflectDir), Yunitvec);
reflectDir.y = 0.0;
index.x = dot(normalize(reflectDir), Xunitvec) * 0.5;
// Translate index values into proper range
if (reflectDir.z >= 0.0)
index = (index + 1.0) * 0.5;
else
{
index.t = (index.t + 1.0) * 0.5;
index.s = (-index.s) * 0.5 + 1.0;
}
// if reflectDir.z >= 0.0, s will go from 0.25 to 0.75
// if reflectDir.z < 0.0, s will go from 0.75 to 1.25, and
// that's OK, because we've set the texture to wrap.
// Do a lookup into the environment map.
vec3 envColor = vec3 (texture2D(EnvMap, index));
// calc Fresnel term. This allows a view-dependent blend of reflection/refraction
float fresnel = abs(dot(normalize(EyeDir), Normal));
fresnel *= MixRatio;
fresnel = clamp(fresnel, 0.1, 0.9);
// calc refraction
vec3 refractionDir = normalize(EyeDir) - normalize(Normal);
// Scale the refraction so the z element is equal to depth
float depthVal = Depth / -refractionDir.z;
// perform the div by w
float recipW = 1.0 / EyePos.w;
vec2 eye = EyePos.xy * vec2(recipW);
// calc the refraction lookup
index.s = (eye.x + refractionDir.x * depthVal);
index.t = (eye.y + refractionDir.y * depthVal);
// scale and shift so we're in the range 0-1
index.s = index.s / 2.0 + 0.5;
index.t = index.t / 2.0 + 0.5;
// as we're looking at the framebuffer, we want it clamping at the edge of the rendered scene, not the edge of the texture,
// so we clamp before scaling to fit
float recip1k = 1.0 / 2048.0;
index.s = clamp(index.s, 0.0, 1.0 - recip1k);
index.t = clamp(index.t, 0.0, 1.0 - recip1k);
// scale the texture so we just see the rendered framebuffer
index.s = index.s * FrameWidth * recip1k;
index.t = index.t * FrameHeight * recip1k;
vec3 RefractionColor = vec3 (texture2D(RefractionMap, index));
// Add lighting to base color and mix
vec3 base = LightIntensity * BaseColor;
envColor = mix(envColor, RefractionColor, fresnel);
envColor = mix(envColor, base, 0.2);
gl_FragColor = vec4 (envColor, 1.0);
}
A sampler uniform does not bind a texture object; it holds the index of a texture unit, and texture objects are bound to texture units. So the sequence for binding a texture to a shader is:
glActiveTexture(GL_TEXTURE0 + texture_unit1);
glBindTexture(GL_TEXTURE_..., texture_object1);
glActiveTexture(GL_TEXTURE0 + texture_unit2);
glBindTexture(GL_TEXTURE_..., texture_object2);
glUniform1i(sampler1_location, texture_unit1);
glUniform1i(sampler2_location, texture_unit2);
Texture units are in the range 0 to GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS - 1.
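Applied to the code in the question, the setup could look roughly like this (a sketch only; program is assumed to be the handle of your linked shader program, and the sampler uniforms must be set while that program is in use):
GLuint textures[2];
glGenTextures(2, textures);
glActiveTexture(GL_TEXTURE0);                  // texture unit 0
glBindTexture(GL_TEXTURE_2D, textures[0]);
glfwLoadTexture2D("C:\\front.tga", GLFW_BUILD_MIPMAPS_BIT);
glActiveTexture(GL_TEXTURE1);                  // texture unit 1
glBindTexture(GL_TEXTURE_2D, textures[1]);
glfwLoadTexture2D("C:\\reflect.tga", GLFW_BUILD_MIPMAPS_BIT);
glUseProgram(program);                         // bind the shader before setting its sampler uniforms
glUniform1i(glGetUniformLocation(program, "EnvMap"), 0);        // EnvMap samples texture unit 0
glUniform1i(glGetUniformLocation(program, "RefractionMap"), 1); // RefractionMap samples texture unit 1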