In OpenGL 2.1, we could create a post-processing effect by rendering to an FBO and then drawing a fullscreen quad. OpenGL 3.1 removes GL_QUADS, so we have to emulate the quad using two triangles instead.
Unfortunately, I am getting a strange issue when trying to apply this technique: a diagonal line appears along the hypotenuse shared by the two triangles!
This screenshot demonstrates the issue:
I did not have a diagonal line in OpenGL 2.1 using GL_QUADS; it only appeared in OpenGL 3.x core when I switched to GL_TRIANGLES. Unfortunately, most tutorials online suggest using two triangles, and none of them mention this issue.
// Fragment shader
#version 140
precision highp float;
uniform sampler2D ColorTexture;
uniform vec2 SampleDistance;
in vec4 TextureCoordinates0;
out vec4 FragData;
#define NORM (1.0 / (1.0 + 2.0 * (0.95894917 + 0.989575414)))
//const float w0 = 0.845633832 * NORM;
//const float w1 = 0.909997233 * NORM;
const float w2 = 0.95894917 * NORM;
const float w3 = 0.989575414 * NORM;
const float w4 = 1 * NORM;
const float w5 = 0.989575414 * NORM;
const float w6 = 0.95894917 * NORM;
//const float w7 = 0.909997233 * NORM;
//const float w8 = 0.845633832 * NORM;
void main(void)
{
FragData =
texture(ColorTexture, TextureCoordinates0.st - 2.0 * SampleDistance) * w2 +
texture(ColorTexture, TextureCoordinates0.st - SampleDistance) * w3 +
texture(ColorTexture, TextureCoordinates0.st) * w4 +
texture(ColorTexture, TextureCoordinates0.st + SampleDistance) * w5 +
texture(ColorTexture, TextureCoordinates0.st + 2.0 * SampleDistance) * w6
;
}
// Vertex shader
#version 140
precision highp float;
uniform mat4 ModelviewProjection; // loaded with orthographic projection (-1,-1):(1,1)
uniform mat4 TextureMatrix; // loaded with identity matrix
in vec3 Position;
in vec2 TexCoord;
out vec4 TextureCoordinates0;
void main(void)
{
gl_Position = ModelviewProjection * vec4(Position, 1.0);
TextureCoordinates0 = (TextureMatrix * vec4(TexCoord, 0.0, 0.0));
}
What am I doing wrong? Is there a suggested way to perform a fullscreen post-processing effect in OpenGL 3.x/4.x core?
Reto Koradi is correct and I suspect that is what I meant when I wrote my original comment; I honestly do not remember this question.
You absolutely have to swap 2 of the vertex indices to draw a strip instead of a quad or you wind up with a smaller triangle cut out of part of the quad.
ASCII art showing the difference between the primitives generated from vertices in 0,1,2,3 order:

GL_QUADS     GL_TRIANGLE_STRIP     GL_TRIANGLE_FAN

  0-3              0 3                  0-3
  | |              |X|                  |\|
  1-2              1-2                  1-2

And GL_TRIANGLE_STRIP with the last two vertices swapped (order 0,1,3,2), which produces the intended quad:

  0-2
  |/|
  1-3
As for why this may produce better results than two triangles built from 6 separate vertices: floating-point variance along the shared edge may be to blame. Indexed rendering in general will usually solve this too, but a primitive type like GL_TRIANGLE_STRIP requires 2 fewer indices to draw a quad as two triangles.
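If you would rather sidestep the vertex ordering entirely, another common option in 3.x core is to generate the fullscreen geometry in the vertex shader from gl_VertexID and draw 4 vertices as GL_TRIANGLE_STRIP with no vertex buffer at all (just an empty VAO bound). A minimal sketch, with arbitrary output names, assuming the fragment shader reads a vec2 texture coordinate:

#version 140
// Vertex shader (sketch): fullscreen quad as a triangle strip built from gl_VertexID.
// Draw with glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); no vertex attributes are needed.
out vec2 TexCoord0;
void main(void)
{
    // gl_VertexID 0,1,2,3 -> (-1,-1), (1,-1), (-1,1), (1,1): a valid strip order
    vec2 pos = vec2(float(gl_VertexID % 2), float(gl_VertexID / 2)) * 2.0 - 1.0;
    TexCoord0 = pos * 0.5 + 0.5;
    gl_Position = vec4(pos, 0.0, 1.0);
}

Because both triangles of the strip reference exactly the same two vertices along the diagonal, there is no room for floating-point variance along that edge.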
I'm creating a terrain mesh, and following this SO answer I'm trying to migrate my CPU-computed normals to a shader-based version, in order to improve performance by reducing my mesh resolution and using a normal map computed in the fragment shader.
I'm using MapBox height map for the terrain data. Tiles look like this:
And elevation at each pixel is given by the following formula:
const elevation = -10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1);
My original code first creates a dense mesh (256*256 squares of 2 triangles each) and then computes triangle and vertex normals. To get a visually satisfying result I was dividing the elevation by 5000 to match the tile's width & height in my scene (in the future I'll do a proper computation to display the real elevation).
I was drawing with these simple shaders:
Vertex shader:
uniform mat4 u_Model;
uniform mat4 u_View;
uniform mat4 u_Projection;
attribute vec3 a_Position;
attribute vec3 a_Normal;
attribute vec2 a_TextureCoordinates;
varying vec3 v_Position;
varying vec3 v_Normal;
varying mediump vec2 v_TextureCoordinates;
void main() {
v_TextureCoordinates = a_TextureCoordinates;
v_Position = vec3(u_View * u_Model * vec4(a_Position, 1.0));
v_Normal = vec3(u_View * u_Model * vec4(a_Normal, 0.0));
gl_Position = u_Projection * u_View * u_Model * vec4(a_Position, 1.0);
}
Fragment shader:
precision mediump float;
varying vec3 v_Position;
varying vec3 v_Normal;
varying mediump vec2 v_TextureCoordinates;
uniform sampler2D texture;
void main() {
vec3 lightVector = normalize(-v_Position);
float diffuse = max(dot(v_Normal, lightVector), 0.1);
highp vec4 textureColor = texture2D(texture, v_TextureCoordinates);
gl_FragColor = vec4(textureColor.rgb * diffuse, textureColor.a);
}
It was slow but gave visually satisfying results:
Now, I removed all the CPU based normals computation code, and replaced my shaders by those:
Vertex shader:
#version 300 es
precision highp float;
precision highp int;
uniform mat4 u_Model;
uniform mat4 u_View;
uniform mat4 u_Projection;
in vec3 a_Position;
in vec2 a_TextureCoordinates;
out vec3 v_Position;
out vec2 v_TextureCoordinates;
out mat4 v_Model;
out mat4 v_View;
void main() {
v_TextureCoordinates = a_TextureCoordinates;
v_Model = u_Model;
v_View = u_View;
v_Position = vec3(u_View * u_Model * vec4(a_Position, 1.0));
gl_Position = u_Projection * u_View * u_Model * vec4(a_Position, 1.0);
}
Fragment shader:
#version 300 es
precision highp float;
precision highp int;
in vec3 v_Position;
in vec2 v_TextureCoordinates;
in mat4 v_Model;
in mat4 v_View;
uniform sampler2D u_dem;
uniform sampler2D u_texture;
out vec4 color;
const vec2 size = vec2(2.0,0.0);
const ivec3 offset = ivec3(-1,0,1);
float getAltitude(vec4 pixel) {
float red = pixel.x;
float green = pixel.y;
float blue = pixel.z;
return (-10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1)) * 6.0; // Why * 6 and not / 5000 ??
}
void main() {
float s01 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.xy));
float s21 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.zy));
float s10 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yx));
float s12 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yz));
vec3 va = (vec3(size.xy, s21 - s01));
vec3 vb = (vec3(size.yx, s12 - s10));
vec3 normal = normalize(cross(va, vb));
vec3 transformedNormal = normalize(vec3(v_View * v_Model * vec4(normal, 0.0)));
vec3 lightVector = normalize(-v_Position);
float diffuse = max(dot(transformedNormal, lightVector), 0.1);
highp vec4 textureColor = texture(u_texture, v_TextureCoordinates);
color = vec4(textureColor.rgb * diffuse, textureColor.a);
}
It now loads nearly instantly, but something is wrong:
in the fragment shader I had to multiply the elevation by 6 rather than divide by 5000 to get something close to my original code
the result is not as good. Especially when I tilt the scene, the shadows are very dark (the more I tilt the darker they get):
Can you spot what causes that difference?
EDIT: I created two JSFiddles:
first version with CPU computed vertices normals: http://jsfiddle.net/tautin/tmugzv6a/10
second version with GPU computed normal map: http://jsfiddle.net/tautin/8gqa53e1/42
The problem appears when you play with the tilt slider.
There were three problems I could find.
The first one you saw and fixed by trial and error: the scale of your height calculation was wrong. On the CPU your color channels vary from 0 to 255, but in GLSL texture values are normalized to the range 0 to 1, so the correct height calculation is:
return (-10000.0 + ((red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1 * 256.0)) / Z_SCALE;
But for this shader's purpose the -10000.0 offset doesn't matter, so you can simply do:
return (red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1 * 256.0 / Z_SCALE;
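Putting the first fix together, a corrected getAltitude could look like this (just a sketch; Z_SCALE is a placeholder for whatever vertical scale you use, e.g. the 5000 divisor from the CPU code):

const float Z_SCALE = 5000.0; // placeholder: the same vertical scale the CPU code divides by

float getAltitude(vec4 pixel) {
    float red = pixel.r;
    float green = pixel.g;
    float blue = pixel.b;
    // texture() returns channels normalized to 0..1, hence the extra * 256.0
    // compared to the CPU formula; the -10000.0 offset is dropped because it
    // does not affect the normal computation
    return (red * 256.0 * 256.0 + green * 256.0 + blue) * 0.1 * 256.0 / Z_SCALE;
}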
The second problem is that the scale of your x and y coordinates was also wrong. In the CPU code the distance between two neighboring points is (SIZE * 2.0 / (RESOLUTION + 1)), but on the GPU you had set it to 1. The correct way to define your size variable is:
const float SIZE = 2.0;
const float RESOLUTION = 255.0;
const vec2 size = vec2(2.0 * SIZE / (RESOLUTION + 1.0), 0.0);
Notice that I increased the resolution to 255 because I assume this is what you want (the texture resolution minus one). Also, this is needed to match the value of offset, which you defined as:
const ivec3 offset = ivec3(-1,0,1);
To use a different RESOLUTION value, you will have to adjust offset accordingly, e.g. for RESOLUTION == 127, offset = ivec3(-2,0,2). In other words, the offset must be <real texture resolution>/(RESOLUTION + 1), which limits the possible values of RESOLUTION, since offset must be an integer.
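For example, sampling the same 256x256 tile at a lower resolution could look like this (sketch):

// Sketch: RESOLUTION == 127, i.e. sampling every other texel of the 256x256 DEM
const float SIZE = 2.0;
const float RESOLUTION = 127.0;
const vec2 size = vec2(2.0 * SIZE / (RESOLUTION + 1.0), 0.0);
const ivec3 offset = ivec3(-2, 0, 2); // 256 / (RESOLUTION + 1) = 2 texels per step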
The third problem is that you used a different normal calculation algorithm on the GPU, which strikes me as having lower resolution than the one used on the CPU, because you use the four outer pixels of a cross but ignore the central one. That does not seem to be the full story, though, and I can't explain why they are so different. I tried to implement the exact CPU algorithm as I thought it should be, but it yielded different results. Instead, I had to use the following algorithm, which is similar but not exactly the same, to get an almost identical result (if you increase the CPU resolution to 255):
float s11 = getAltitude(texture(u_dem, v_TextureCoordinates));
float s21 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.zy));
float s10 = getAltitude(textureOffset(u_dem, v_TextureCoordinates, offset.yx));
vec3 va = (vec3(size.xy, s21 - s11));
vec3 vb = (vec3(size.yx, s10 - s11));
vec3 normal = normalize(cross(va, vb));
This is the original CPU solution, but with RESOLUTION=255: http://jsfiddle.net/k0fpxjd8/
This is the final GPU solution: http://jsfiddle.net/7vhpuqd8/
Initial situation
I want to visualize simulation data in OpenGL.
My data consists of particle positions (x, y, z) where each particle has some properties (like density, temperature, ...) which will be used for coloring. Those (SPH) particles (100k to several millions), grouped together, actually represent planets, in case you wonder. I want to render those particles as small 3D spheres and add ambient, diffuse and specular lighting.
Status quo and questions
In MY case: in which coordinate frame do I do the lighting calculations? And which way is the "best" to pass the various components through the pipeline?
I saw that it is common to do it in view space, which is also very intuitive. However: the normals at the different fragment positions are calculated in the fragment shader in clip-space coordinates (see the appended fragment shader). Can I actually convert them "back" into view space to do the lighting calculations in view space for all the fragments? Would there be any advantage compared to doing it in clip space?
It would be easier to get the normals in view space if I used meshes for each sphere, but I think with several million particles this would decrease performance drastically, so it seems better to do it with sphere intersection. Would you agree?
PS: I don't need a model matrix since all the particles are already in place.
//VERTEX SHADER
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 2) in float density;
uniform float radius;
uniform vec3 lightPos;
uniform vec3 viewPos;
out vec4 lightDir;
out vec4 viewDir;
out vec4 viewPosition;
out vec4 posClip;
out float vertexColor;
// transformation matrices
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
lightDir = projection * view * vec4(lightPos - position, 1.0f);
viewDir = projection * view * vec4(viewPos - position, 1.0f);
viewPosition = projection * view * vec4(lightPos, 1.0f);
posClip = projection * view * vec4(position, 1.0f);
gl_Position = posClip;
gl_PointSize = radius;
vertexColor = density;
}
I know that perspective division happens for the gl_Position variable, but does that actually happen to ALL vec4s which are passed from the vertex to the fragment shader? If not, maybe the calculations in the fragment shader are wrong?
And here is the fragment shader, where the normals and the diffuse/specular lighting calculations are done in clip space:
//FRAGMENT SHADER
#version 330 core
in float vertexColor;
in vec4 lightDir;
in vec4 viewDir;
in vec4 posClip;
in vec4 viewPosition;
uniform vec3 lightColor;
vec4 colormap(float x); // returns vec4(r, g, b, a)
out vec4 vFragColor;
void main(void)
{
// AMBIENT LIGHT
float ambientStrength = 0.0;
vec3 ambient = ambientStrength * lightColor;
// Normal calculation done in clip space (first from texture (gl_PointCoord 0 to 1) coord to NDC( -1 to 1))
vec3 normal;
normal.xy = gl_PointCoord * 2.0 - vec2(1.0); // transform from 0->1 point primitive coords to NDC -1->1
float mag = dot(normal.xy, normal.xy); // squared length; no sqrt needed, we only compare against 1.0 and sqrt(1.0) == 1.0
if (mag > 1.0) // discard fragments outside sphere
discard;
normal.z = sqrt(1.0 - mag); // because x^2 + y^2 + z^2 = 1
// DIFFUSE LIGHT
float diff = max(0.0, dot(vec3(lightDir), normal));
vec3 diffuse = diff * lightColor;
// SPECULAR LIGHT
float specularStrength = 0.1;
vec3 viewDir = normalize(vec3(viewPosition) - vec3(posClip));
vec3 reflectDir = reflect(-vec3(lightDir), normal);
float shininess = 64;
float spec = pow(max(dot(vec3(viewDir), vec3(reflectDir)), 0.0), shininess);
vec3 specular = specularStrength * spec * lightColor;
vFragColor = colormap(vertexColor / 8) * vec4(ambient + diffuse + specular, 1);
}
Now this actually "kind of" works, but I have the feeling that the sides of the sphere which do NOT face the light source are also being illuminated, which shouldn't happen. How can I fix this?
Some weird effect: at this moment the light source is actually BEHIND the left planet (it just peeks out a little bit at the top left), but still there are diffuse and specular effects going on. This side should actually be pretty dark! =(
Also, at the moment I get a glError 1282 in the fragment shader and I don't know where it comes from, since the shader program actually compiles and runs. Any suggestions? :)
The things that you are drawing aren't actually spheres. They just look like them from afar. This is absolutely ok if you are fine with that. If you need geometrically correct spheres (with correct sizes and with a correct projection), you need to do proper raycasting. This seems to be a comprehensive guide on this topic.
1. What coordinate system?
In the end, it is up to you. The coordinate system just needs to fulfill some requirements. It must be angle-preserving (because lighting is all about angles). And if you need distance-based attenuation, it should also be distance-preserving. The world and the view coordinate systems usually fulfill these requirements. Clip space is not suited for lighting calculations as neither angles nor distances are preserved. Furthermore, gl_PointCoord is in none of the usual coordinate systems. It is its own coordinate system and you should only use it together with other coordinate systems if you know their relation.
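As a rough sketch of what that means for this particle setup (everything except the uniforms from the original shader is a placeholder name, and this is only one possible arrangement): pass the quantities the fragment shader needs in view space and keep clip space for gl_Position only. The normal derived from gl_PointCoord on a screen-aligned point sprite can then be treated as a view-space normal.

// Vertex shader (sketch): forward view-space quantities, project only gl_Position
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 2) in float density;
uniform mat4 view;
uniform mat4 projection;
uniform vec3 lightPos;    // world-space light position
uniform float radius;
out vec3 viewSpacePos;    // particle centre in view space
out vec3 viewSpaceLight;  // light position in view space
out float vertexColor;
void main()
{
    viewSpacePos = vec3(view * vec4(position, 1.0));
    viewSpaceLight = vec3(view * vec4(lightPos, 1.0));
    vertexColor = density;
    gl_Position = projection * vec4(viewSpacePos, 1.0);
    gl_PointSize = radius;
}

In the fragment shader, the light and view directions are then simply normalize(viewSpaceLight - viewSpacePos) and normalize(-viewSpacePos), dotted against the gl_PointCoord-derived normal, with no clip-space quantities involved.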
2. Meshes or what?
Meshes are absolutely not suited to render spheres. As mentioned above, raycasting or some screen-space approximation are better choices. Here is an example shader that I used in my projects:
#version 330
out vec4 result;
in fData
{
vec4 toPixel; //fragment coordinate in particle coordinates
vec4 cam; //camera position in particle coordinates
vec4 color; //sphere color
float radius; //sphere radius
} frag;
uniform mat4 p; //projection matrix
void main(void)
{
vec3 v = frag.toPixel.xyz - frag.cam.xyz;
vec3 e = frag.cam.xyz;
float ev = dot(e, v);
float vv = dot(v, v);
float ee = dot(e, e);
float rr = frag.radius * frag.radius;
float radicand = ev * ev - vv * (ee - rr);
if(radicand < 0)
discard;
float rt = sqrt(radicand);
float lambda = max(0, (-ev - rt) / vv); //first intersection on the ray
float lambda2 = (-ev + rt) / vv; //second intersection on the ray
if(lambda2 < lambda) //if the first intersection is behind the camera
discard;
vec3 hit = lambda * v; //intersection point
vec3 normal = (frag.cam.xyz + hit) / frag.radius;
vec4 proj = p * vec4(hit, 1); //intersection point in clip space
gl_FragDepth = ((gl_DepthRange.diff * proj.z / proj.w) + gl_DepthRange.near + gl_DepthRange.far) / 2.0;
vec3 vNormalized = -normalize(v);
float nDotL = dot(vNormalized, normal);
vec3 c = frag.color.rgb * nDotL + vec3(0.5, 0.5, 0.5) * pow(nDotL, 120);
result = vec4(c, frag.color.a);
}
3. Perspective division
Perspective division is not applied to your own vertex shader outputs. The GPU does perspective division on the data that you pass via gl_Position on the way to transforming it to window space. But you will never actually see this perspective-divided position unless you do the division yourself.
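A small sketch to make that concrete (placeholder names): if you want the NDC position of a fragment, forward the clip-space position and do the division yourself.

// Fragment shader (sketch): user-defined inputs are never perspective-divided for you
#version 330 core
in vec4 posClip;    // clip-space position forwarded from the vertex shader
out vec4 fragColor;
void main(void)
{
    vec3 ndc = posClip.xyz / posClip.w;      // manual perspective division, each component in -1..1
    fragColor = vec4(ndc * 0.5 + 0.5, 1.0);  // just visualizes the NDC position as a color
}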
4. Light in the dark
This might be the result of you mixing different coordinate systems or doing lighting calculations in clip space. Btw, the specular part is usually not multiplied by the material color. This is light that gets reflected directly at the surface. It does not penetrate the surface (which would absorb some colors depending on the material). That's why those highlights are usually white (or whatever light color you have), even on black objects.
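Applied to the posted fragment shader, the last line could become something along these lines (a sketch; it keeps the alpha at 1.0 for simplicity):

// Sketch: tint ambient and diffuse by the material color, add specular on top untinted
vec3 materialColor = colormap(vertexColor / 8.0).rgb;
vFragColor = vec4(materialColor * (ambient + diffuse) + specular, 1.0);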
After changing my current deferred renderer to use a logarithmic depth buffer, I cannot work out, for the life of me, how to reconstruct world-space depth from the depth buffer values.
When I had the default OpenGL z/w depth written, I could easily calculate this value by transforming from window space to NDC space and then performing the inverse perspective transformation.
I did this all in the second pass fragment shader:
uniform sampler2D depth_tex;
uniform mat4 inv_view_proj_mat;
in vec2 uv_f;
vec3 reconstruct_pos(){
float z = texture(depth_tex, uv_f).r;
vec4 pos = vec4(uv_f, z, 1.0) * 2.0 - 1.0;
pos = inv_view_proj_mat * pos;
return pos.xyz / pos.w;
}
and got a result that looked pretty correct:
But now the road to a simple z value is not so easy (doesn't seem like it should be so hard either).
My vertex shader for my first pass with log depth:
#version 330 core
#extension GL_ARB_shading_language_420pack : require
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 uv;
uniform mat4 mvp_mat;
uniform float FC;
out vec2 uv_f;
out float logz_f;
out float FC_2_f;
void main(){
uv_f = uv;
gl_Position = mvp_mat * vec4(pos, 1.0);
logz_f = 1.0 + gl_Position.w;
gl_Position.z = (log2(max(1e-6, logz_f)) * FC - 1.0) * gl_Position.w;
FC_2_f = FC * 0.5;
}
And my fragment shader:
#version 330 core
#extension GL_ARB_shading_language_420pack : require
// other uniforms and output variables
in vec2 uv_f;
in float logz_f;
in float FC_2_f;
void main(){
gl_FragDepth = log2(logz_f) * FC_2_f;
}
I have tried a few different approaches to get back the z-position correctly, all failing.
If I redefine my reconstruct_pos in the second pass to be:
vec3 reconstruct_pos(){
vec4 pos = vec4(uv_f, get_depth(), 1.0) * 2.0 - 1.0;
pos = inv_view_proj_mat * pos;
return pos.xyz / pos.w;
}
This is my current attempt at reconstructing Z:
uniform float FC;
float get_depth(){
float log2logz_FC_2 = texture(depth_tex, uv_f).r;
float logz = pow(2, log2logz_FC_2 / (FC * 0.5));
float pos_z = log2(max(1e-6, logz)) * FC - 1.0; // pos.z
return pos_z;
}
Explained:
log2logz_FC_2: the value written to depth buffer, so log2(1.0 + gl_Position.w) * (FC / 2)
logz: simply 1.0 + gl_Position.w
pos_z: the value of gl_Position.z before the perspective divide
return value: gl_Position.z
Of course, that's just my working. I'm not sure what these values actually hold in the end, because I think I've screwed up some of the math or not correctly understood the transformations going on.
What is the correct way to get my world-space Z position from this logarithmic depth buffer?
In the end I was going about this all wrong. The way to get the world-space position back using a log buffer is:
Retrieve depth from texture
Reconstruct gl_Position.w
Linearize the reconstructed depth
Transform to world space
Here's my implementation in glsl:
in vec2 uv_f;
uniform float nearz;
uniform float farz;
uniform mat4 inv_view_proj_mat;
float linearize_depth(in float depth){
float a = farz / (farz - nearz);
float b = farz * nearz / (nearz - farz);
return a + b / depth;
}
float reconstruct_depth(){
float depth = texture(depth_tex, uv_f).r;
return pow(2.0, depth * log2(farz + 1.0)) - 1.0;
}
vec3 reconstruct_world_pos(){
vec4 wpos =
inv_view_proj_mat *
(vec4(uv_f, linearize_depth(reconstruct_depth()), 1.0) * 2.0 - 1.0);
return wpos.xyz / wpos.w;
}
Which gives me the same result (but with better precision) as when I was using the default OpenGL depth buffer.
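Note that reconstruct_depth() only inverts the write exactly if FC was defined the usual way for this logarithmic depth formulation; that is an assumption here, since the value of FC is not shown above:

// Assumed on the write side: FC = 2.0 / log2(farz + 1.0), so that
// gl_FragDepth = log2(1.0 + w) * (FC * 0.5) = log2(1.0 + w) / log2(farz + 1.0).
// reconstruct_depth() then recovers w (i.e. gl_Position.w of the original vertex):
float reconstruct_w(float depth, float farz) {
    return pow(2.0, depth * log2(farz + 1.0)) - 1.0;
}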
I am having trouble getting integer operations to work in OpenGL ES 2.0 shaders.
GL_SHADING_LANGUAGE_VERSION: OpenGL ES GLSL ES 1.00
One of the example lines where I'm having issues is: color.r = floor(f / 65536);
I get this error:
Error linking program: ERROR:SEMANTIC-4 (vertex shader, line 13) Operator not supported for operand types
To give more context to what's happening: within the library that I am working with, the only way to pass the color is as a single float into which 3 integers have been bit-shifted. The flow is: 3 (8-bit) ints -> float passed to the shader | float -> r g b (using integer manipulation). This all works fine on normal desktop OpenGL, but I'm having trouble making it work on the Raspberry Pi.
Full vertex shader code here:
attribute vec4 position;
varying vec3 Texcoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 proj;
vec3 unpackColor(float f)
{
vec3 color;
f -= 0x1000000;
color.r = floor(f / 65536);
color.g = floor((f - color.r * 65536) / 256.0);
color.b = floor(f - color.r * 65536 - color.g * 256.0);
return color / 256.0;
}
void main()
{
Texcoord = unpackColor(position.w);
gl_Position = proj * view * model * vec4(position.xyz, 1.0);
}
Any ideas how to get this working?
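One likely culprit: GLSL ES 1.00 has no implicit int-to-float conversions, so expressions like f / 65536 and f -= 0x1000000 mix a float with int literals, which matches the "Operator not supported for operand types" error. A sketch of the same unpack written with float literals only (untested on the Pi):

// Sketch: unpackColor using float literals only, which GLSL ES 1.00 accepts
vec3 unpackColor(float f)
{
    vec3 color;
    f -= 16777216.0;                 // 0x1000000 written as a float literal
    color.r = floor(f / 65536.0);
    color.g = floor((f - color.r * 65536.0) / 256.0);
    color.b = floor(f - color.r * 65536.0 - color.g * 256.0);
    return color / 256.0;
}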
Is it possible to somehow modify this fragment shader so that it doesn't use the OES_texture_float extension? I ask because I get an error on the machine which is supposed to run a WebGL animation.
I set up my scene using the three.js WebGLRenderer and a cube with a ShaderMaterial applied to it. On my MacBook Pro everything works fine, but on some Windows machines I get the error "float textures not supported" (I've searched and found that this probably has to do with the OES_texture_float extension).
So I'm guessing I need to change my fragment shader? Or am I missing the point completely?
<script type="x-shader/x-vertex" id="vertexshader">
// switch on high precision floats
#ifdef GL_ES
precision highp float;
#endif
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type="x-shader/x-fragment" id="fragmentshader">
#ifdef GL_ES
precision mediump float;
#endif
#define PI 3.14159265
uniform float time;
uniform vec2 resolution;
float f(float x) {
return (sin(x * 1.50 * PI ) + 19.0);
}
float q(vec2 p) {
float s = (f(p.x + 0.85)) / 2.0;
float c = smoothstep(0.9, 1.20, 1.0 - abs(p.y - s));
return c;
}
vec3 aurora(vec2 p, float time) {
vec3 c1 = q( vec2(p.x, p.y / 0.051) + vec2(time / 3.0, -0.3)) * vec3(2.90, 0.50, 0.10);
vec3 c2 = q( vec2(p.x, p.y / 0.051) + vec2(time, -0.2)) * vec3(1.3, .6, 0.3);
vec3 c3 = q( vec2(p.x, p.y / 0.051) + vec2(time / 5.0, -0.5)) * vec3(1.7, 0.4, 0.20);
return c1+c2+c3;
}
void main( void ) {
vec2 p = ( gl_FragCoord.xy / resolution.xy );
vec3 c = aurora(p, time);
gl_FragColor = vec4(1.0-c, c);
}
</script>
EDIT: this has nothing to do with the floating point texture, but rather with something in my fragment shader. Three.js gives me the error: "Can't initialise shader, VALIDATE_STATUS"
"Or am I missing the point completely?" - Indeed you are. The shaders don't care about the underlying texture format (you don't even use any textures in those shaders you posted!), so they don't have anything to do with your problem.
It's the application code that uses a float texture somewhere and needs to be changed accordingly. But given that your shaders don't use any textures at all (and I guess you haven't explicitly created a float texture elsewhere), it's probably three.js' internals that need a float texture somewhere, maybe as a render target. So you need to search for ways to disable this requirement, if possible.
Unless it's a three.js-ism, you haven't defined projectionMatrix, modelViewMatrix, or position in your vertex shader.
Try adding
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
attribute vec4 position;
to the top of the first shader?