I'm trying to handle the transform of a texture in the fragment shader.
The resolution of the window is (640, 360), the rotation is 30 degrees, and the scale is vec2(0.5, 0.5).
this is what I want:
here is my fragment shader:
precision mediump float;
varying vec2 v_texCoord;
uniform sampler2D s_texture;
mat3 makeTranslation(vec2 t) {
    // Homogeneous 2D translation. GLSL matrices are column-major,
    // so the offset lives in the third column.
    return mat3(vec3(1.0, 0.0, 0.0),
                vec3(0.0, 1.0, 0.0),
                vec3(t.x, t.y, 1.0));
}
mat3 makeRotation( float angleInRadians ){
    // Homogeneous 2D rotation about the origin (column-major layout).
    float cosine = cos(angleInRadians);
    float sine = sin(angleInRadians);
    return mat3(vec3(cosine, -sine, 0.0),
                vec3(sine, cosine, 0.0),
                vec3(0.0, 0.0, 1.0));
}
mat3 makeScale(vec2 s) {
    // Homogeneous 2D non-uniform scale: factors on the diagonal.
    return mat3(vec3(s.x, 0.0, 0.0),
                vec3(0.0, s.y, 0.0),
                vec3(0.0, 0.0, 1.0));
}
void main(){
    // Transform parameters for the texture lookup.
    vec2 position = vec2(0.0,0.0);      // extra translation (none here)
    vec2 scale = vec2(0.5,0.5);
    float rotation = 30.0;              // degrees
    float r = rotation/180.0*3.14159;   // degrees -> radians
    vec2 size = vec2(640.0,480.0);      // target rectangle size (unused in this version)
    // BUG FIX: the original passed `translation`, but no variable of that
    // name exists in this shader (compile error); `position` was intended.
    mat3 mt = makeTranslation( position );
    mat3 mr = makeRotation( r );
    // Inverse scale: sampling coordinates move opposite to the image.
    mat3 ms = makeScale( 1.0/scale );
    //transform
    vec3 newCoord = vec3(v_texCoord.xy,1.0);
    newCoord = mt*newCoord;
    // Rotate/scale about the texture centre (0.5, 0.5), then shift back.
    newCoord = mr*ms*vec3(newCoord.x - 0.5, newCoord.y - 0.5,0.0) + vec3(0.5, 0.5, 0.0);
    gl_FragColor = texture2D(s_texture, vec2(newCoord.x, newCoord.y) );
}
the result is:
As you can see, the result is incorrect.
so, I apply a ratio of rectangle size to the texcoord.y:
//transform
// Letterbox correction: remap y so the rotation happens in aspect-corrected
// (square) coordinates instead of stretched texture space.
float fy = 0.5*(1.0 - size.y*1.0/size.x);
newCoord.y = (newCoord.y-0.5)*size.y/size.x+fy;
newCoord = mt*newCoord; // BUG FIX: removed stray `\n"` paste artifact from this line
newCoord = mr*ms*vec3(newCoord.x - 0.5, newCoord.y - 0.5,0.0) + vec3(0.5, 0.5, 0.0);
// NOTE(review): this un-mapping is not the exact inverse of the mapping above.
// Inverting y' = (y-0.5)*size.y/size.x + fy gives y = (y'-fy)*size.x/size.y + 0.5,
// which likely explains the off-centre result described below — TODO confirm.
newCoord.y = (newCoord.y+0.5)*size.x/size.y-fy;
what I've got:
the rectangle is correct, but the position of center point is incorrect.
So, how to get the right result?
thanks.
here is the origin texture:
Getting the right order of operations is very important.
When you receive your texture coordinates, they are in the range [0, 1]. However, you need to translate them, so that they are in [-0.5, 0.5], before you rotate them, so that you are rotating around the center of the texture. Then apply your scale, and finally your translation.
You don't include the screen and texture size proportions.
Shadertoy code: (iChannel0 is texture, or BufA then uncomment line 26)
mat3 makeTranslation(vec2 t) {
// Homogeneous 2D translation (column-major: offset in the third column).
mat3 m = mat3(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, t.x, t.y, 1.0);
return m;
}
mat3 makeRotation( float angleInRadians ){
// Homogeneous 2D rotation about the origin.
float c = cos(angleInRadians);
float s = sin(angleInRadians);
mat3 m = mat3(c, -s, 0, s, c, 0, 0, 0, 1);
return m;
}
mat3 makeScale(vec2 s) {
// Homogeneous 2D non-uniform scale: factors on the diagonal.
mat3 m = mat3( s.x, 0, 0, 0, s.y, 0, 0, 0, 1);
return m;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord ){
// Aspect-aware texture transform demo (Shadertoy entry point).
vec2 screen_res = iResolution.xy/iResolution.y; // screen aspect, height normalised to 1
vec2 uv = fragCoord/iResolution.y-0.5*screen_res; // centred, aspect-corrected uv
vec2 position = vec2(0.0,0.0);
vec2 scale = vec2(0.5,0.5);
float rotation = 180.*iMouse.x/iResolution.x; // drag the mouse horizontally to rotate
float r = rotation/180.0*3.14159; // degrees -> radians
vec2 size = vec2(640.0,480.0);
//vec2 target_res = vec2(textureSize(iChannel0,0).xy)/float(textureSize(iChannel0,0).y);
vec2 target_res = vec2(1.,.5); // target rectangle aspect (width, height)
mat3 mt = makeTranslation( vec2(0.5*target_res) );
mat3 mr = makeRotation( r );
mat3 ms = makeScale( 1.0/scale ); // inverse scale: uv moves opposite to the image
//transform
vec3 newCoord = vec3(uv.xy,1.0);
newCoord = mt*newCoord;
// Rotate/scale about the rectangle centre, then shift back.
newCoord = mr*ms*vec3(newCoord.x - 0.5*target_res.x, newCoord.y - 0.5*target_res.y,0.0) + vec3(0.5*target_res, 0.0);
newCoord.xy*= 1./target_res; // normalise back into [0,1] texture space
fragColor = texture(iChannel0, vec2(newCoord.x, newCoord.y) );
}
Related
I need to calculate projection of point on specific line segment in shader (OpenGL ES 2).
Here is how I test the algorithm:
I draw simple triangle with points A(0, 0.5), B(1, -0.5), C(-1, -0.5).
I calculate projection of every point on line segment AC.
I draw points with a projection in the middle of a line segment AC in blue. And the remaining points in green.
I expect to get a green triangle with a blue line perpendicular to the side AC. But blue line is not perpendicular to AC.
I check projection formula in code with drawing on canvas and got expected result.
What's my mistake?
Result of shader:
Vertex shader:
uniform mat4 matrix;
attribute vec4 position;
varying vec4 vPosition; // transformed position, interpolated for the fragment shader
void main()
{
// Forward the transformed position so the fragment shader can do
// per-pixel projection math in the same space as gl_Position.
vPosition = matrix * position;
gl_Position = matrix * position;
}
Fragment shader:
precision mediump float;
varying vec4 vPosition; // transformed position from the vertex shader
void main()
{
// Project P onto the (infinite) line through A and B, then colour the
// fragment blue if the projection lands near the midpoint of AB.
vec2 P = vPosition.xy;
vec2 A = vec2(0.0, 0.5);
vec2 B = vec2(-1.0, -0.5);
vec2 AP = P - A;
vec2 AB = B - A;
// Vector projection: A + (AP.AB / |AB|^2) * AB
vec2 projection = A + dot(AP, AB) / dot(AB, AB) * AB;
// Midpoint of AB is (-0.5, 0.0); compare with a small tolerance box.
// NOTE(review): these coordinates ignore the window aspect ratio, which is
// why the highlighted line is not perpendicular on a non-square viewport.
if(projection.x > -0.51 && projection.x < -0.49 && projection.y > -0.01 && projection.y < 0.01) {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
} else {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
}
You didn't consider the rectangular aspect of the of the window. When the normalized device coordinates in the range [-1, 1] are mapped to the viewport rectangle (see glViewport) then the triangle gets stretched. This causes that angles of 90 degree are not maintained.
Add a uniform variable to the fragment shader which contains the width and height of the viewport:
uniform vec2 u_resolution;
Calculate the aspect ratio:
float aspect = u_resolution.x / u_resolution.y;
Of course you can initialize the variable float aspect, by a constant value, too.
e.g. float aspect = 16.0/9.0;
Correct the coordinates of the points A, B and P according to the aspect ratio:
vec2 P = vPosition.xy;
vec2 A = vec2(0.0, 0.5);
vec2 B = vec2(-1.0, -0.5);
A.x *= aspect;
B.x *= aspect;
P.x *= aspect;
And consider the aspect ration when evaluating the result projection:
vec2 projection = A + dot(AP, AB) / dot(AB, AB) * AB;
projection.x /= aspect;
The final fragment shader may look like this:
precision mediump float;
varying vec4 vPosition;
uniform vec2 u_resolution; // viewport width and height in pixels
void main()
{
// Work in aspect-corrected space so distances and angles are true;
// the projection is mapped back before the comparison.
float aspect = u_resolution.x / u_resolution.y;
vec2 as = vec2(aspect, 1.0);
vec2 P = as * vPosition.xy;
vec2 A = as * vec2(0.0, 0.5);
vec2 B = as * vec2(-1.0, -0.5);
vec2 AP = P - A;
vec2 AB = B - A;
// Project P onto line AB, then undo the aspect correction for the test below.
vec2 projection = A + dot(AP, AB) / dot(AB, AB) * AB / as;
if(projection.x > -0.51 && projection.x < -0.49 && projection.y > -0.01 && projection.y < 0.01) {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
} else {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
}
On my scene, I rendering landscape (approximately 522000 triangles, from a heights map, all points of the heights map are used to create a triangle mesh) and shadow mapping / blur is active. I noticed a strong fall in FPS. At the moment I have one light source - a kind of "sun". Therefore, light source far_plane is extremely high - 512 (the maximum point of the landscape is 128, the minimum point is 0). I need to somehow optimize it to get better performance.
An example of a generated landscape with heights map 512x512
My first idea is to reduce the number of triangles in the grid. I think for a heights map 512*512 522 thousand triangles is too much. Also, when scaling, the landscape is extremely smooth, even without averaging the normals. This is the reason to make the terrain lower polygonal.
If I reduce the height of the vertices (scale them) and, accordingly, reduce far_plane of light source, can this give an increase in performance?
My shaders:
Vertex shader:
#version 130
in vec4 a_Position; // Per-vertex position information we will pass in.
void main() {
// Pass-through: all transforms are applied in the geometry shader.
gl_Position = a_Position;
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=3) out;
uniform mat4 u_Model, u_View, u_Projection;
uniform float greenValue = 64;
uniform float greyValue = 96;
out vec3 norm; // flat face normal in eye space (same for all three vertices)
out vec4 v_Position; // untransformed (input-space) vertex position
// NOTE(review): forwarding whole matrices per vertex is wasteful; the
// fragment shader could read u_Model/u_View/u_Projection as uniforms directly.
out mat4 model, view, projection;
out vec4 ambientColor;
out vec4 diffuseColor;
out vec4 specularColor;
void main() {
// Face normal from two eye-space edges (flat shading per triangle).
vec4 v1Eye = u_View * u_Model * gl_in[0].gl_Position;
vec4 v2Eye = u_View * u_Model * gl_in[1].gl_Position;
vec4 v3Eye = u_View * u_Model * gl_in[2].gl_Position;
vec4 v1v2 = v1Eye - v2Eye;
vec4 v2v3 = v2Eye - v3Eye;
vec3 normal = cross(vec3(v1v2), vec3(v2v3));
normal = normalize(normal);
if (normal.z < 0) normal = -normal; // flip so the normal faces the camera
mat4 MVPMatrix = u_Projection * u_View * u_Model;
for (int i = 0; i < gl_in.length(); i++) {
v_Position = gl_in[i].gl_Position;
gl_Position = MVPMatrix * gl_in[i].gl_Position;
model = u_Model;
view = u_View;
projection = u_Projection;
norm = normal;
// Height-banded material: green below greenValue, grey rock below
// greyValue, white (with specular) above.
if (v_Position.y < greenValue) {
ambientColor = vec4(0, 1, 0, 1);
diffuseColor = ambientColor;
specularColor = vec4(0, 0, 0, 1);
} else if (v_Position.y < greyValue) {
ambientColor = vec4(0.4, 0.4, 0.4, 1);
diffuseColor = ambientColor;
specularColor = vec4(0, 0, 0, 1);
} else {
ambientColor = vec4(1, 1, 1, 1);
diffuseColor = ambientColor;
specularColor = ambientColor;
}
EmitVertex();
}
EndPrimitive();
}
Fragment shader:
#version 330 core
precision mediump float; // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
#define MAX_LAMPS_COUNT 8 // Max lamps count.
uniform vec3 u_ViewPos; // Camera position
uniform int u_LampsCount; // Lamps count
uniform float brightnessThreshold = 0.3; // brightness threshold variable
uniform float far_plane; // shadow matrix far plane
in mat4 model, view, projection;
in vec4 v_Position; // Position for this fragment in world space
in vec4 ambientColor;
in vec4 diffuseColor;
in vec4 specularColor;
in vec3 norm;
struct Lamp {
float ambientStrength;
float diffuseStrength;
float specularStrength;
float kc; // constant term
float kl; // linear term
float kq; // quadratic term
int shininess;
vec3 lampPos; // in world space
vec3 lampColor;
};
uniform samplerCube shadowMaps[MAX_LAMPS_COUNT];
uniform Lamp u_Lamps[MAX_LAMPS_COUNT];
vec3 fragPos;
vec3 fragWorldPos;
vec3 lampEyePos; // Transformed lamp position into eye space
float shadow;
// for PCF
vec3 sampleOffsetDirections[20] = vec3[] (
vec3(1, 1, 1), vec3(1, -1, 1), vec3(-1, -1, 1), vec3(-1, 1, 1),
vec3(1, 1, -1), vec3(1, -1, -1), vec3(-1, -1, -1), vec3(-1, 1, -1),
vec3(1, 1, 0), vec3(1, -1, 0), vec3(-1, -1, 0), vec3(-1, 1, 0),
vec3(1, 0, 1), vec3(-1, 0, 1), vec3(1, 0, -1), vec3(-1, 0, -1),
vec3(0, 1, 1), vec3(0, -1, 1), vec3(0, -1, -1), vec3(0, 1, -1)
);
// output colors
layout(location = 0) out vec4 fragColor;
layout(location = 1) out vec4 fragBrightColor;
float calculateShadow(vec3 lightDir, int index) {
// Omnidirectional (cube-map) shadow test with 20-sample PCF.
// Returns the shadow factor in [0, 1] for lamp `index` and stores it in the
// global `shadow`. `lightDir` is only used by the commented-out bias formula.
// BUG FIX: reset the accumulator. `shadow` is a global that was never
// cleared, so its first use was undefined and each lamp's result leaked
// into the next lamp's loop iteration.
shadow = 0.0;
// get vector between fragment position and light position
vec3 fragToLight = fragWorldPos - u_Lamps[index].lampPos;
// now get current linear depth as the length between the fragment and light position
float currentDepth = length(fragToLight);
// now test for shadows
//float bias = max(0.5 * (1.0 - dot(norm, lightDir)), 0.005);
float bias = 1; // constant world-space bias (far_plane here is ~512)
// PCF
float viewDistance = length(u_ViewPos - fragWorldPos);
float diskRadius = (1.0 + (viewDistance / far_plane)) / 25.0; // widen kernel with distance
for (int i = 0; i < 20; ++i) {
float closestDepth = texture(shadowMaps[index], fragToLight + sampleOffsetDirections[i] * diskRadius).r;
closestDepth *= far_plane; // Undo mapping [0;1]
if(currentDepth - bias > closestDepth) shadow += 1.0;
}
shadow /= 20;
//fragColor = vec4(vec3(closestDepth / far_plane), 1.0); // visualizing
return shadow;
}
float calculateAttenuation(Lamp lamp) {
// Standard constant/linear/quadratic falloff, evaluated in eye space
// using the globals lampEyePos and fragPos set by main().
float distance = length(lampEyePos - fragPos);
return 1.0 / (
lamp.kc +
lamp.kl * distance +
lamp.kq * (distance * distance)
);
}
vec4 toVec4(vec3 v) {
    // Promote a vec3 to a homogeneous point (w = 1).
    return vec4(v.x, v.y, v.z, 1.0);
}
// The entry point for our fragment shader.
void main() {
// Accumulate Blinn-Phong-style lighting plus cube-map shadows per lamp.
fragWorldPos = vec3(model * v_Position);
// Transform the vertex into eye space
mat4 mvMatrix = view * model;
fragPos = vec3(mvMatrix * v_Position);
// NOTE(review): u_ViewPos is documented as "Camera position" while fragPos
// is in eye space — confirm both are in the same space, otherwise this
// direction (and the PCF viewDistance) is inconsistent.
vec3 viewDir = normalize(u_ViewPos - fragPos);
vec3 ambientResult = vec3(0, 0, 0); // result of ambient lighting for all lamps
vec3 diffuseResult = vec3(0, 0, 0); // result of diffuse lighting for all lamps
vec3 specularResult = vec3(0, 0, 0); // result of specular lighting for all lamps
for (int i = 0; i<u_LampsCount; i++) {
lampEyePos = vec3(view * toVec4(u_Lamps[i].lampPos));
// attenuation
float attenuation = calculateAttenuation(u_Lamps[i]);
// ambient
vec3 ambient = u_Lamps[i].ambientStrength * u_Lamps[i].lampColor * attenuation;
// diffuse
vec3 lightDir = normalize(lampEyePos - fragPos);
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = u_Lamps[i].diffuseStrength * diff * u_Lamps[i].lampColor * attenuation;
// specular
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), u_Lamps[i].shininess);
vec3 specular = u_Lamps[i].specularStrength * spec * u_Lamps[i].lampColor * attenuation;
// calculate shadow
shadow = calculateShadow(lightDir, i);
// result for this(i) lamp
ambientResult += ambient;
diffuseResult += diffuse * (1-shadow); // shadow attenuates diffuse and specular only
specularResult += specular * (1-shadow);
}
fragColor =
toVec4(ambientResult) * ambientColor +
toVec4(diffuseResult) * diffuseColor +
toVec4(specularResult) * specularColor;
// brightness calculation
float brightness = dot(fragColor.rgb, vec3(0.2126, 0.7152, 0.0722));
// NOTE(review): fragBrightColor is left unwritten when the fragment is not
// bright enough, so that attachment's value is undefined for most pixels.
if (brightness > brightnessThreshold) fragBrightColor = vec4(fragColor.rgb, 1.0);
}
And my shadow shaders:
Vertex shader:
#version 130
attribute vec3 a_Position;
uniform mat4 u_ModelMatrix;
void main() {
// World-space position only; the per-cube-face projection is applied
// in the shadow geometry shader.
gl_Position = u_ModelMatrix * vec4(a_Position, 1.0);
}
Geometry shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=18) out; // 6 faces x 3 vertices
uniform mat4 shadowMatrices[6]; // one view-projection per cube-map face
out vec4 fragPos; // FragPos from GS (output per emitvertex)
void main() {
// Emit the input triangle once per cube-map face; gl_Layer routes each
// copy to the matching layer of the layered depth attachment.
for(int face = 0; face < 6; face++) {
gl_Layer = face; // built-in variable that specifies to which face we render.
// for each triangle's vertices
for(int i = 0; i < 3; i++) {
fragPos = gl_in[i].gl_Position;
gl_Position = shadowMatrices[face] * fragPos;
EmitVertex();
}
EndPrimitive();
}
}
Fragment shader:
#version 330 core
in vec4 fragPos; // world space
uniform vec3 lightPos; // world space
uniform float far_plane; // shadow matrix far plane
void main()
{
// Store linear distance-to-light (instead of projective depth) so the
// lighting pass can compare plain world-space lengths.
float lightDistance = length(fragPos.xyz - lightPos);
// map to [0;1] range by dividing by far_plane
lightDistance = lightDistance / far_plane;
// write this as modified depth
gl_FragDepth = lightDistance;
}
I hope for your help in optimizing this scene.
I'm trying to replicate a web design trick known as "gooey effect" (see it live here).
It's a technique applying SVG filters on moving ellipses in order to get a blob-like motion. The process is rather simple:
apply a gaussian blur
increase the contrast of the alpha channel only
The combination of the two creates a blob effect
The last step (increasing the alpha channel contrast) is usually done through a "color matrix filter".
A color matrix is composed of 5 columns (RGBA + offset) and 4 rows.
The values in the first four columns are multiplied with the source red, green, blue, and alpha values respectively. The fifth column value is added (offset).
In CSS, increasing the alpha channel contrast is as simple as calling a SVG filter and specifying the contrast value (here 18):
<feColorMatrix in="blur" mode="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo" />
In Processing though, it seems to be a bit more complicated. I believe (I may be wrong) the only way to apply a color matrix filter is to create one in a shader. After a few tries I came up with these (very basic) vertex and fragment shaders for color rendering:
colorvert.glsl
uniform mat4 transform;
attribute vec4 position;
attribute vec4 color;
varying vec4 vertColor;
// Offset column of the SVG color matrix (added after the multiply).
uniform vec4 o=vec4(0, 0, 0, -9);
// Alpha-contrast matrix: RGB passthrough, alpha scaled by 60.
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 60.0);
void main() {
gl_Position = transform * position;
// NOTE(review): vector * matrix treats `color` as a row vector; with this
// diagonal matrix it equals matrix * vector, but the order matters for
// general color matrices.
vertColor = (color * colorMatrix) + o ;
}
colorfrag.glsl
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
varying vec4 vertColor; // color already multiplied by the color matrix per vertex
void main() {
// Pass-through: all of the color-matrix work happened in the vertex shader.
gl_FragColor = vertColor;
}
PROBLEM:
The color matrix is partially working: changing the RGB values do affect the colors but changing the alpha values (last row) don't !
When trying to combine the shader with a Gaussian filter, the drawn ellipse stays blurry even after I set the alpha channel contrast to 60 (like in the codepen example):
PShader colmat;
void setup() {
size(200, 200, P2D);
colmat = loadShader("colorfrag.glsl", "colorvert.glsl");
}
void draw() {
background(100);
shader(colmat); // apply the color-matrix shader to subsequent geometry
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
filter(BLUR,6); // blur runs afterwards, on the already-opaque framebuffer
}
The same thing happens when I implement the color matrix within #cansik 's Gaussian blur shader (from the PostFX library). I can see the colors changing but not the alpha contrast:
blurFrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
uniform vec4 o=vec4(0, 0, 0, 0);
uniform lowp mat4 colorMatrix = mat4(1, 0.0, 0.0, 0.0,
0.0, 1, 0.0, 0.0,
0.0, 0.0, 1, 0.0,
0, 0.0, 0.0, 60.0); //Alpha contrast set to 60
varying vec2 center;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
// Separable incremental Gaussian blur: one axis per pass, selected by
// the horizontalPass uniform.
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficent Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
// Normalise, then apply the alpha-contrast color matrix in the same pass.
gl_FragColor = (avgValue / coefficientSum ) * colorMatrix;
}
Setting glBlendFunc and enabling glEnable(GL_BLEND) in the main .pde file didn't fix the issue either.
sketch.pde
import ch.bildspur.postfx.builder.*;
import ch.bildspur.postfx.pass.*;
import ch.bildspur.postfx.*;
import processing.opengl.*;
import com.jogamp.opengl.*;
PostFX fx;
void setup() {
size(200, 200, P2D);
fx = new PostFX(this);
}
void draw() {
background(100);
GL gl = ((PJOGL)beginPGL()).gl.getGL();
gl.glEnable(GL.GL_BLEND);
gl.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE);
gl.glDisable(GL.GL_DEPTH_TEST);
noStroke();
fill(255, 30, 30);
ellipse(width/2, height/2, 40, 40);
fx.render().blur(80, 14).compose();
}
Questions:
Why does the alpha channel contrast not work ? How can I make it work ?
Is there something wrong with the way I implemented the color matrix ?
Do you know a better way to implement that gooey effect ?
Any help would be much appreciated !
Thank you
#noahbuddy from the Processing Forum could find a solution to the problem so I'm posting it here.
To preserve transparency, with or without shaders, use an offscreen
buffer (PGraphics). For example, saving a PNG image with transparent
background.
I removed the contrast matrix from #cansik 's blur shader and instead
put it into a separate filter.
blurfrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;
varying vec4 vertColor;
varying vec4 vertTexCoord;
uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma; // The sigma value for the gaussian function: higher value means more blur
// A good value for 9x9 is around 3 to 5
// A good value for 7x7 is around 2.5 to 4
// A good value for 5x5 is around 2 to 3.5
// ... play around with this based on what you need :)
const float pi = 3.14159265;
void main() {
// Separable incremental Gaussian blur (one axis per pass). Unlike the
// question's version, no color matrix is applied here — contrast is a
// separate filter pass.
float numBlurPixelsPerSide = float(blurSize / 2);
vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
// Incremental Gaussian Coefficent Calculation (See GPU Gems 3 pp. 877 - 889)
vec3 incrementalGaussian;
incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;
vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
float coefficientSum = 0.0;
// Take the central sample first...
avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
coefficientSum += incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
// Go through the remaining 8 vertical samples (4 on each side of the center)
for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
avgValue += texture2D(texture, vertTexCoord.st - i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
avgValue += texture2D(texture, vertTexCoord.st + i * texOffset *
blurMultiplyVec) * incrementalGaussian.x;
coefficientSum += 2.0 * incrementalGaussian.x;
incrementalGaussian.xy *= incrementalGaussian.yz;
}
gl_FragColor = avgValue / coefficientSum; // normalised blur, alpha preserved
}
colfrag.glsl
#define PROCESSING_TEXTURE_SHADER
uniform sampler2D texture;
varying vec4 vertTexCoord;
// Offset column of the SVG color matrix (alpha offset of -7).
uniform vec4 o = vec4(0, 0, 0, -7.0);
// Alpha-contrast matrix: RGB passthrough, alpha scaled by 18.
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 18.0);
void main() {
vec4 pix = texture2D(texture, vertTexCoord.st);
// Row-vector multiply, then add the offset: pix' = pix * M + o.
vec4 color = (pix * colorMatrix) + o;
gl_FragColor = color;
}
sketch.pde
PShader contrast, blurry;
PGraphics buf; // offscreen buffer so the blur operates on real alpha
void setup() {
size(200, 200, P2D);
buf = createGraphics(width, height, P2D);
contrast = loadShader("colfrag.glsl");
blurry = loadShader("blurFrag.glsl");
// Don't forget to set these
blurry.set("sigma", 4.5);
blurry.set("blurSize", 9);
}
void draw() {
background(100);
buf.beginDraw();
// Reset transparency
// Note, the color used here will affect your edges
// even with zero for alpha
buf.background(100, 0); // set to match main background
buf.noStroke();
buf.fill(255, 30, 30);
buf.ellipse(width/2, height/2, 40, 40);
buf.ellipse(mouseX, mouseY, 40, 40);
// Separable blur: horizontal pass, then vertical pass.
blurry.set("horizontalPass", 1);
buf.filter(blurry);
blurry.set("horizontalPass", 0);
buf.filter(blurry);
buf.endDraw();
// Alpha-contrast shader applied while compositing the buffer on screen.
shader(contrast);
image(buf, 0,0, width,height);
}
Personally I think the sweet spot lies somewhere:
between 8 and 11 for the alpha contrast
between -7 and -9 for the alpha offset
uniform vec4 o = vec4(0, 0, 0, -9.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
1.0, 1.0, 1.0, 11.0);
between 10 and 15 for "sigma"
between 30 and 40 for "blurSize"
blurry.set("sigma", 14.5)
blurry.set("blurSize", 35)
I've coded 2d metaballs before using signed distance functions and marching square algorithms but I find this solution to be the most efficient one. Performance wise I can display up to 4500 balls at 60 fps on a 800x600 canvas (tested on an entry-level 2012 imac desktop with Python Mode).
Unfortunately I'm not able to debug the exact issue, but I have a couple of ideas that hopefully might help you make some progress:
For a simpler/cheaper effect you can use the dilate filter
You can find other metaballs shaders on shadertoy and tweak the code a bit so you can run it in Processing
For example https://www.shadertoy.com/view/MlcGWn becomes:
// https://www.shadertoy.com/view/MlcGWn
uniform float iTime;
uniform vec2 iResolution;
vec3 Sphere(vec2 uv, vec2 position, float radius)
{
    // Metaball field contribution: (radius / distance)^2, as a grey value.
    float falloff = radius / distance(uv, position);
    return vec3(falloff * falloff);
}
void main()
{
// Centred, aspect-corrected uv (roughly [-aspect, aspect] x [-1, 1]).
vec2 uv = 2.0 * vec2(gl_FragCoord.xy - 0.5 * iResolution.xy) / iResolution.y;
vec3 pixel = vec3(0.0, 0.0, 0.0);
vec2 positions[4];
// Four balls orbiting on independent sin/cos paths.
positions[0] = vec2(sin(iTime * 1.4) * 1.3, cos(iTime * 2.3) * 0.4);
positions[1] = vec2(sin(iTime * 3.0) * 0.5, cos(iTime * 1.3) * 0.6);
positions[2] = vec2(sin(iTime * 2.1) * 0.1, cos(iTime * 1.9) * 0.8);
positions[3] = vec2(sin(iTime * 1.1) * 1.1, cos(iTime * 2.6) * 0.7);
for (int i = 0; i < 4; i++)
pixel += Sphere(uv, positions[i], 0.22);
// Threshold the summed field: below 1.0 goes black, above keeps its value.
pixel = step(1.0, pixel) * pixel;
gl_FragColor = vec4(pixel, 1.0);
}
and in Processing:
PShader shader;
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
// NOTE(review): iResolution is set to half the sketch size — confirm this
// matches what the shader's uv math expects.
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
shader.set("iTime", millis() * 0.001); // seconds since the sketch started
shader(shader);
rect(0,0,width,height); // full-screen quad so every pixel runs the shader
}
or https://www.shadertoy.com/view/ldtSRX
// https://www.shadertoy.com/view/ldtSRX
uniform vec2 iResolution;
uniform vec2 iMouse;
uniform float iTime;
struct Metaball{
vec2 pos; // centre in uv space
float r; // field strength / radius
vec3 col; // ball colour
};
vec4 calcball( Metaball ball, vec2 uv)
{
// Inverse-square field: rgb is the ball colour weighted by field strength;
// alpha carries the raw field value for thresholding and colour mixing.
float dst = ball.r / (pow(abs(uv.x - ball.pos.x), 2.) + pow(abs(uv.y - ball.pos.y), 2.));
return vec4(ball.col * dst, dst);
}
vec3 doballs( vec2 uv )
{
// Sum the field of four animated balls, subtract the mouse ball's field,
// then threshold the total to get the hard metaball silhouette.
Metaball mouse;
mouse.pos = iMouse.xy / iResolution.yy;
mouse.r = .015;
mouse.col = vec3(.5);
Metaball mb1, mb2, mb3, mb4;
mb1.pos = vec2(1.3, .55+.2*sin(iTime*.5)); mb1.r = .05; mb1.col = vec3(0., 1., 0.);
mb2.pos = vec2(.6, .45); mb2.r = .02; mb2.col = vec3(0., .5, 1.);
mb3.pos = vec2(.85, .65); mb3.r = .035; mb3.col = vec3(1., .2, 0.);
mb4.pos = vec2(1.+.5*sin(iTime), .2); mb4.r = .02; mb4.col = vec3(1., 1., 0.);
vec4 ball1 = calcball(mb1, uv);
vec4 ball2 = calcball(mb2, uv);
vec4 ball3 = calcball(mb3, uv);
vec4 ball4 = calcball(mb4, uv);
vec4 subball1 = calcball(mouse, uv); // the mouse ball carves field away
float res = ball1.a + ball2.a + ball3.a + ball4.a;
res -= subball1.a;
float threshold = res >= 1.5 ? 1. : 0.; // hard iso-surface cut-off
// Field-weighted colour mix of all contributing balls.
vec3 color = (ball1.rgb + ball2.rgb + ball3.rgb + ball4.rgb - subball1.rgb) / res;
color *= threshold;
color = clamp(color, 0., 1.);
return color;
}
#define ANTIALIAS 1
void main()
{
vec2 uv = gl_FragCoord.xy / iResolution.yy; // aspect-corrected uv (height = 1)
vec3 color = doballs(uv);
#ifdef ANTIALIAS
// Cheap supersampling: blend the centre sample with 4 (or 8 at level 2)
// nearby taps, each weighted 1/8.
float uvs = .75 / iResolution.y;
color *= .5;
color += doballs(vec2(uv.x + uvs, uv.y))*.125;
color += doballs(vec2(uv.x - uvs, uv.y))*.125;
color += doballs(vec2(uv.x, uv.y + uvs))*.125;
color += doballs(vec2(uv.x, uv.y - uvs))*.125;
#if ANTIALIAS == 2
color *= .5;
color += doballs(vec2(uv.x + uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y + uvs*.85))*.125;
color += doballs(vec2(uv.x - uvs*.85, uv.y - uvs*.85))*.125;
color += doballs(vec2(uv.x + uvs*.85, uv.y - uvs*.85))*.125;
#endif
#endif
gl_FragColor = vec4(color, 1.);
}
and in Processing:
PShader shader;
PVector mouse = new PVector(); // reused each frame to avoid allocation
void setup(){
size(900,900,P2D);
shader = loadShader("metaballs.glsl");
shader.set("iResolution",(float)width/2,(float)height/2);
}
void draw(){
mouse.set(mouseX,mouseY);
shader.set("iMouse", mouse);
shader.set("iTime", millis() * 0.001); // seconds since the sketch started
shader(shader);
rect(0,0,width,height); // full-screen quad so every pixel runs the shader
}
I'm trying to implement bump mapping on a cube using OpenGL and GLSL. However, when I rotate my cube around, only the left-facing square and right-facing square appear (that is, in the negative x and positive x direction), the other four faces of the cube (top, bottom, front, back) are black. Here is an example:
My mesh class consists of an indexList and vertexList (the Vertex class contains x, y, z, etc). I'm using this simple cube.ply model, which contains s,t texture coordinates. Borrowing from this example, I calculate the tangent vectors as follows:
void Mesh::computeTangents() {
    // Builds a per-vertex tangent from each triangle's positions and texture
    // coordinates (UV-gradient method). Vertices shared by several triangles
    // keep the tangent of the last triangle processed (no averaging).
    for (size_t i = 0; i < m_indexList.size(); i += 3) {
        Vertex v1 = m_vertexList[m_indexList[i]];
        Vertex v2 = m_vertexList[m_indexList[i+1]];
        Vertex v3 = m_vertexList[m_indexList[i+2]];
        glm::vec3 pos1 = glm::vec3(v1.getX(), v1.getY(), v1.getZ());
        glm::vec3 pos2 = glm::vec3(v2.getX(), v2.getY(), v2.getZ());
        glm::vec3 pos3 = glm::vec3(v3.getX(), v3.getY(), v3.getZ());
        glm::vec2 tex1 = glm::vec2(v1.getS(), v1.getT());
        glm::vec2 tex2 = glm::vec2(v2.getS(), v2.getT());
        glm::vec2 tex3 = glm::vec2(v3.getS(), v3.getT());
        glm::vec3 edge1 = glm::normalize(pos2 - pos1);
        glm::vec3 edge2 = glm::normalize(pos3 - pos1);
        glm::vec2 texEdge1 = glm::normalize(tex2 - tex1);
        glm::vec2 texEdge2 = glm::normalize(tex3 - tex1);
        // Determinant of the 2x2 UV edge matrix; near zero means the UVs are
        // degenerate and no stable tangent exists for this triangle.
        float det = (texEdge1.x * texEdge2.y) - (texEdge1.y * texEdge2.x);
        glm::vec3 tangent;
        if (fabsf(det) < 1e-6f) {
            // Fallback tangent for degenerate UVs.
            tangent.x = 1.0;
            tangent.y = 0.0;
            tangent.z = 0.0;
        }
        else {
            det = 1.0 / det;
            tangent.x = (texEdge2.y * edge1.x - texEdge1.y * edge2.x) * det;
            tangent.y = (texEdge2.y * edge1.y - texEdge1.y * edge2.y) * det;
            tangent.z = (texEdge2.y * edge1.z - texEdge1.y * edge2.z) * det;
            // BUG FIX: glm::normalize returns a normalized copy; the original
            // call discarded the result, leaving `tangent` un-normalized.
            tangent = glm::normalize(tangent);
        }
        // Write the same face tangent to all three vertices of the triangle.
        m_vertexList[m_indexList[i]].setTanX(tangent.x);
        m_vertexList[m_indexList[i]].setTanY(tangent.y);
        m_vertexList[m_indexList[i]].setTanZ(tangent.z);
        m_vertexList[m_indexList[i+1]].setTanX(tangent.x);
        m_vertexList[m_indexList[i+1]].setTanY(tangent.y);
        m_vertexList[m_indexList[i+1]].setTanZ(tangent.z);
        m_vertexList[m_indexList[i+2]].setTanX(tangent.x);
        m_vertexList[m_indexList[i+2]].setTanY(tangent.y);
        m_vertexList[m_indexList[i+2]].setTanZ(tangent.z);
    }
}
If I output the values of the tangent vector for each triangle, I get these values:
1, 0, 0
1, 0, 0
0, 0, -1
0, 0, -1
0, 0, 1
0, 0, 1
-1, 0, 0
-1, 0, 0,
1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
If these are correct, then the problem is likely in the shader. My shader is as follows (mostly taken from a book):
vert:
attribute vec4 vertexPosition;
attribute vec3 vertexNormal;
attribute vec2 vertexTexture;
attribute vec3 vertexTangent;
varying vec2 texCoord;
varying vec3 viewDirection; // view vector in tangent space
varying vec3 lightDirection; // light vector in tangent space
uniform vec3 diffuseColor;
uniform float shininess;
uniform vec4 lightPosition;
uniform mat4 modelViewMatrix;
uniform mat4 normalMatrix;
uniform mat4 MVP; // modelview projection
void main() {
vec4 eyePosition = modelViewMatrix * vertexPosition;
// NOTE(review): directions are usually transformed with w = 0.0, not 1.0;
// with w = 1.0 the matrix's fourth column leaks into N and T unless
// normalMatrix has a zero translation column — confirm.
vec3 N = normalize(vec3(normalMatrix * vec4(vertexNormal, 1.0)));
vec3 T = normalize(vec3(normalMatrix * vec4(vertexTangent, 1.0)));
// NOTE(review): if N and T are parallel (see the tangent list above),
// this cross product is the zero vector and cannot be normalized.
vec3 B = normalize(cross(N, T));
vec3 v;
// Project the light position onto the T/B/N axes -> tangent space.
v.x = dot(lightPosition.xyz, T);
v.y = dot(lightPosition.xyz, B);
v.z = dot(lightPosition.xyz, N);
lightDirection = normalize(v);
// Same projection for the eye-space position -> tangent-space view vector.
v.x = dot(eyePosition.xyz, T);
v.y = dot(eyePosition.xyz, B);
v.z = dot(eyePosition.xyz, N);
viewDirection = normalize(v);
texCoord = vertexTexture;
gl_Position = MVP * vertexPosition;
}
Frag:
varying vec2 texCoord;
varying vec3 viewDirection; // tangent-space view vector
varying vec3 lightDirection; // tangent-space light vector
uniform vec3 diffuseColor;
uniform float shininess;
void main() {
// Procedural bump pattern: a bumpDensity x bumpDensity grid of dimples,
// lit in tangent space with a perturbed normal.
float bumpDensity = 16.0;
float bumpSize = 0.15;
vec2 c = bumpDensity * texCoord;
vec2 p = fract(c) - vec2(0.5); // position within the current grid cell
float d, f;
d = dot(p, p); // squared distance from the cell centre
f = 1.0 / sqrt(d + 1.0);
if (d >= bumpSize) {
// Outside the bump radius: flat, unperturbed surface.
p = vec2(0.0);
f = 1.0;
}
vec3 normalDelta = vec3(p.x, p.y, 1.0) * f; // perturbed tangent-space normal
vec3 litColor = diffuseColor * max(dot(normalDelta, lightDirection), 0.0);
// NOTE(review): reflect() expects the incident vector pointing toward the
// surface; passing lightDirection unnegated may flip the specular lobe.
vec3 reflectDir = reflect(lightDirection, normalDelta);
float spec = max(dot(viewDirection, reflectDir), 0.0);
spec *= shininess;
litColor = min(litColor + spec, vec3(1.0));
gl_FragColor = vec4(litColor, 1.0);
}
Edit: Changed background to more clearly see the black faces. Also, I incorrectly noted which faces were actually appearing before.
Edit 2: I'm finding that the value of max(dot(normalDelta, lightDirection), 0.0) in the fragment shader returns 0 for those faces. However, I still don't know why.
Edit 3: Well, the problem turned out to be that I was passing the wrong index of the tangent vectors in my Vertex class. That is, I had 10 in this line instead of 9:
glVertexAttribPointer(v3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)(sizeof( float ) * 9));
Okay, so the problem is that any face with a normal on the xz-plane isn't rendering properly, i.e. the ones that are rendering properly are the ones with normals along the y-axis (top and bottom faces).
And your list of tangents:
1, 0, 0
1, 0, 0
0, 0, -1
0, 0, -1
0, 0, 1
0, 0, 1
-1, 0, 0
-1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
1, 0, 0
Are all either along the x or z axes. So what I'm guessing might be happening is that your normals and tangents are pointing in the same (or opposite) directions, in which case this line in your vertex shader:
vec3 B = normalize(cross(N, T));
is going to result in vec3(0.0, 0.0, 0.0), which can't be normalized.
My recommendation is to try manually giving the x and z faces tangents along the y-axis, see if that makes a difference. If it helps, then you know that the problem is with your computation of the tangents.
If it doesn't help, you can troubleshoot by outputting values from the fragment shader as on-screen colours. Try displaying the viewDirection, lightDirection, normalDelta, reflectDir, and anything else you can think of to see which variable is causing the blackness.
Bonus: Switch the glClear color to something other than black? So that it doesn't look like a face floating in space.
I'm working on OpenGL 2.1 and have some problems with alpha value on gl_FragColor.
Whole code:
uniform sampler2D texture_0;
uniform vec3 uColor;
// NOTE(review): with the texture2D() call below commented out, varTexCoords
// is never read. The GLSL compiler/linker then strips the unused varying and
// the matching vertex attribute, so glGetAttribLocation("attrTexCoords") in
// the C++ code returns -1 -- which is what triggers the GL_INVALID_VALUE on
// glEnableVertexAttribArray described in this question.
varying vec2 varTexCoords;
void main(void)
{
//vec4 col = texture2D(texture_0, varTexCoords);
vec4 col = vec4(0.0, 0.0, 0.0, 0.5);
gl_FragColor = col;
}
Can someone explain to me why:
Works:
vec4 col = texture2D(texture_0, varTexCoords);
//vec4 col = vec4(0.0, 0.0, 0.0, 0.5);
gl_FragColor = col;
Doesn't work:
//vec4 col = texture2D(texture_0, varTexCoords);
vec4 col = vec4(0.0, 0.0, 0.0, 0.5);
gl_FragColor = col;
Works:
vec4 col = texture2D(texture_0, varTexCoords);
col.rgb = uColor;
//col.a = 0.5;
gl_FragColor = col;
Also works:
vec4 col = texture2D(texture_0, varTexCoords);
col.rgb = uColor;
col.a *= 0.5;
gl_FragColor = col;
Doesn't work:
vec4 col = texture2D(texture_0, varTexCoords);
col.rgb = uColor;
col.a = 0.5;
gl_FragColor = col;
And this one doesn't work even though many examples seem to use it:
gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);
The error occurs at this point in the code:
glEnableVertexAttribArray(textureCoords);
CHECK_GL_ERROR("glEnableVertexAttribArrayCheck");
All code related to the shader:
// Renders `msg` as a row of textured glyph quads using the text shader.
// (x, y) are given in percent-of-screen units; each glyph advances 1.6 units
// in model space before it is drawn (so the whole string is offset by one
// advance -- preserved from the original behavior).
// NOTE(review): assumes the font map covers printable ASCII starting at
// space (32) -- confirm against the Text feature.
inline void Renderer::renderText(float x, float y, string msg) {
    mat4 proj;
    Matrix::projection2D(proj,
        (float) nScreenWidth_, (float) nScreenHeight_, 0.0f);
    mat4 res, restmp;
    mat4 pos;
    mat4 rot;
    mat4 scale;
    Vector3D r(0.0f, 0.0f, 0.0f);
    float widthMod = nScreenWidth_ / 100.0f;
    float heightMod = nScreenHeight_ / 100.0f;
    Matrix::translate(pos, x * widthMod, y * heightMod, 1.0f);
    Matrix::rotateZ(rot, r.getZ());
    Matrix::scale(scale, 16.0f, 16.0f, 1.0f);
    // res = proj * pos * rot * scale
    Matrix::multiply(proj, pos, res);
    Matrix::multiply(res, rot, restmp);
    Matrix::multiply(restmp, scale, res);
    // Select shader program to use.
    int shaderId = features_->getText()->getShaderId();
    glUseProgram(shaderId);
    CHECK_GL_ERROR("glUseProgram");
    int matrix = glGetUniformLocation(shaderId, "uWVP");
    int color = glGetUniformLocation(shaderId, "uColor");
    int texture = glGetUniformLocation(shaderId, "texture_0");
    CHECK_GL_ERROR("glGetUniformLocation");
    int textureCoords = glGetAttribLocation(shaderId, "attrTexCoords");
    int vertices = glGetAttribLocation(shaderId, "attrPos");
    CHECK_GL_ERROR("glGetAttribLocation");
    // BUGFIX: the GLSL linker strips attributes that the shader does not use,
    // in which case glGetAttribLocation returns -1. Passing -1 on to
    // glEnableVertexAttribArray/glVertexAttribPointer raises GL_INVALID_VALUE
    // every frame, so bail out instead of issuing invalid calls.
    if (vertices < 0 || textureCoords < 0) {
        return;
    }
    // Specify WVP matrix.
    glUniformMatrix4fv(matrix, 1, false, res);
    CHECK_GL_ERROR("glUniformMatrix4fv");
    // Bind the font-map texture to unit 0.
    glActiveTexture(GL_TEXTURE0);
    CHECK_GL_ERROR("glActiveTexture");
    glBindTexture(GL_TEXTURE_2D, features_->getText()->getFontMapId());
    CHECK_GL_ERROR("glBindTexture");
    glUniform1i(texture, 0);
    CHECK_GL_ERROR("glUniform1i");
    glEnableVertexAttribArray(vertices);
    CHECK_GL_ERROR("glEnableVertexAttribArray");
    // Vertex data comes from client memory, so make sure no VBO is bound.
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    CHECK_GL_ERROR("glBindBuffer");
    glEnable(GL_BLEND);
    CHECK_GL_ERROR("glEnable");
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    CHECK_GL_ERROR("glBlendFunc");
    // Hoisted out of the per-glyph loop: color, texcoord enable, and the
    // position pointer do not change between glyphs.
    float col[] = {0.0f, 1.0f, 0.0f};
    glUniform3fv(color, 1, col);
    CHECK_GL_ERROR("glUniform3fv");
    glEnableVertexAttribArray(textureCoords);
    CHECK_GL_ERROR("glEnableVertexAttribArrayCheck");
    glVertexAttribPointer(vertices, 3, GL_FLOAT, GL_FALSE, 0,
        features_->getText()->vertices_);
    CHECK_GL_ERROR("glVertexAttribPointer");
    for (unsigned int i = 0; i < msg.length(); i++) {
        // Glyph index into the font map: printable ASCII starts at space (32).
        unsigned short l = static_cast<unsigned short>(msg[i]) - 32;
        // Advance the pen along x, accumulating into res.
        mat4 delta, resmove;
        Matrix::translate(delta, 1.6f, 0.0f, 0.0f);
        Matrix::multiply(res, delta, resmove);
        Matrix::copy(resmove, res);
        glUniformMatrix4fv(matrix, 1, false, res);
        CHECK_GL_ERROR("glUniformMatrix4fv");
        glVertexAttribPointer(textureCoords, 2, GL_FLOAT, GL_FALSE, 0,
            features_->getText()->getSymbol(l));
        CHECK_GL_ERROR("glVertexAttribPointer");
        glDrawArrays(GL_TRIANGLES, 0, 18 / 3); // 6 vertices = 2 triangles
        CHECK_GL_ERROR("glDrawArrays");
    }
    glDisable(GL_BLEND);
    CHECK_GL_ERROR("glDisable");
    // Disable the arrays so this client state does not leak into later draws.
    glDisableVertexAttribArray(vertices);
    glDisableVertexAttribArray(textureCoords);
}
The error is GL_INVALID_VALUE and only occurs after executing code, not after compiling and linking shader.
This is probably what is happening :
(I say "compiler" here, but it's probably the linker that does the actual purging)
The shader compiler drops this one:
varying vec2 varTexCoords;
If the compiler determines that a variable is not used, it will be discarded.
The last example is good :
vec4 col = texture2D(texture_0, varTexCoords);
col.rgb = uColor;
col.a = 0.5;
gl_FragColor = col;
The compiler understands that the original value in col is overwritten by the uColor uniform and the 0.5 constant. The texture read is dropped, so the varying is also dropped.
Then your attrTexCoords will also most likely be dropped, so your textureCoords variable containing the attrib locations is -1.
Here on the other hand, the compiler cannot remove the texture read because col.bg will contain values from the texture.
vec4 col = texture2D(texture_0, varTexCoords);
col.r = uColor.r;
col.a = 0.5;
gl_FragColor = col;