Optimize normal map rotation of a vector - GLSL

I use a quaternion to rotate the normal vector of a mesh into the direction of the normal map vector. I create the quaternion from the angle between the mesh normal and (0,0,1). Based on a shortened version of the methods in libgdx, creation of the quaternion looks like this:
vec4 createQuaternion(vec3 normal) {
    float l_ang = acos(clamp(normal.z, -CONST_ONE, CONST_ONE)) / CONST_TWO;
    float l_sin = sin(l_ang);
    return normalize(vec4(-normal.y * l_sin, normal.x * l_sin, CONST_ZERO, cos(l_ang)));
}
With this quaternion I can rotate the normal of the mesh in the direction of the normal map. To do that I use a method I've seen in the libgdx Quaternion class, which computes v' = m * v * m⁻¹ (where v is the normal from the normal map and m is the quaternion):
vec3 rotateNormal(vec3 v, vec4 m) {
    vec4 tmp1 = vec4(v, CONST_ZERO);
    vec4 tmp2 = vec4(m);
    // conjugate
    tmp2.x = -tmp2.x;
    tmp2.y = -tmp2.y;
    tmp2.z = -tmp2.z;
    tmp2 = mulLeft(tmp1, tmp2);
    tmp1 = mulLeft(m, tmp2);
    v.x = tmp1.x;
    v.y = tmp1.y;
    v.z = tmp1.z;
    return v;
}
The method mulLeft looks like this:
vec4 mulLeft(vec4 q, vec4 a) {
    float newX = q.w * a.x + q.x * a.w + q.y * a.z - q.z * a.y;
    float newY = q.w * a.y + q.y * a.w + q.z * a.x - q.x * a.z;
    float newZ = q.w * a.z + q.z * a.w + q.x * a.y - q.y * a.x;
    float newW = q.w * a.w - q.x * a.x - q.y * a.y - q.z * a.z;
    a.x = newX;
    a.y = newY;
    a.z = newZ;
    a.w = newW;
    return a;
}
To use it in the shader I just call:
normal = rotateNormal(normalMap, createQuaternion(normalMesh));
and it works as expected.
The only thing I am concerned about is that I can imagine there is a shorter way to write this, especially the mulLeft method. Is there?
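As an aside on shortening mulLeft: the same Hamilton product can be written with GLSL's built-in cross and dot. A minimal sketch, assuming the same (x, y, z, w) component layout as above; verify it against mulLeft before relying on it:

// Sketch: mulLeft(q, a) rewritten with built-in cross/dot.
vec4 mulLeftCompact(vec4 q, vec4 a) {
    return vec4(q.w * a.xyz + a.w * q.xyz + cross(q.xyz, a.xyz),
                q.w * a.w - dot(q.xyz, a.xyz));
}

Expanding the cross product term by term reproduces exactly the newX/newY/newZ/newW expressions above.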
What do you think about the whole approach, rotating the vector by a quaternion instead of using an NTB (normal/tangent/binormal) matrix?
NTB looks like a bit too much calculation while animating the mesh.
Edit:
This is a test with my source code.
This is a test with Tenfour's suggestion.
Edit2:
I shortened the whole thing to:
vec3 rotateNormal(vec3 normalMap, vec3 normal) {
    // create quaternion from cross(normal, vec3(0,0,1))
    float l_ang = acos(clamp(normal.z, -CONST_ONE, CONST_ONE)) / CONST_TWO;
    float l_sin = sin(l_ang);
    vec4 quat = normalize(vec4(-normal.y * l_sin, normal.x * l_sin, CONST_ZERO, cos(l_ang)));
    // shortened function to double mulQuat the normalMap on the quaternion
    return vec3(
        quat.x*quat.x*normalMap.x - quat.y*quat.y*normalMap.x - quat.z*quat.z*normalMap.x + quat.w*quat.w*normalMap.x
            - CONST_TWO*quat.z*quat.w*normalMap.y + CONST_TWO*quat.y*quat.w*normalMap.z
            + CONST_TWO*quat.x*(quat.y*normalMap.y + quat.z*normalMap.z),
        -(quat.x*quat.x*normalMap.y) - quat.z*quat.z*normalMap.y + (quat.y*quat.y + quat.w*quat.w)*normalMap.y
            + CONST_TWO*quat.z*(quat.w*normalMap.x + quat.y*normalMap.z)
            + CONST_TWO*quat.x*(quat.y*normalMap.x - quat.w*normalMap.z),
        quat.y*(-CONST_TWO*quat.w*normalMap.x + CONST_TWO*quat.z*normalMap.y)
            + CONST_TWO*quat.x*(quat.z*normalMap.x + quat.w*normalMap.y)
            - quat.x*quat.x*normalMap.z - quat.y*quat.y*normalMap.z + (quat.z*quat.z + quat.w*quat.w)*normalMap.z
    );
}
Input is the normal from the normal map and the normal of the mesh.
I am sure there's something better than that. What could that be?

I found this
vec3 rotate_vector(vec4 quat, vec3 vec)
{
    return vec + 2.0 * cross(cross(vec, quat.xyz) + quat.w * vec, quat.xyz);
}
here. Does that work? I'm not sure how much less math there is underneath, but built-in GLSL functions can often take advantage of GPU optimizations.
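Combining that formula with the quaternion construction from the question would give a much shorter rotateNormal. A hedged sketch, untested; note that depending on the multiplication convention this may rotate in the opposite direction to the mulLeft-based version, in which case q.xyz would need negating:

// Sketch: createQuaternion plus the cross-product rotation in one function.
vec3 rotateNormalCompact(vec3 normalMap, vec3 normal) {
    // quaternion rotating (0,0,1) onto the mesh normal, as in createQuaternion
    float l_ang = acos(clamp(normal.z, -1.0, 1.0)) / 2.0;
    float l_sin = sin(l_ang);
    vec4 q = normalize(vec4(-normal.y * l_sin, normal.x * l_sin, 0.0, cos(l_ang)));
    // rotate with two cross products instead of two quaternion multiplications
    return normalMap + 2.0 * cross(cross(normalMap, q.xyz) + q.w * normalMap, q.xyz);
}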

Related

OpenGL Triangle pipe around line segment

I would like to ask how I can render a triangle pipe around a line segment in a geometry shader.
I first compute a vector "perp" perpendicular to the line vector "axis".
Then I rotate the "perp" vector a few times with the "rotate" function.
Since the mesh is composed of 8 vertices, I am trying to use "triangle_strip".
My current code:
#version 330 core
layout(lines) in;
layout(triangle_strip, max_vertices = 8) out; // triangle_strip

uniform float u_thickness;
uniform vec2 u_viewportSize;
uniform bool u_scale_width_by_zoom;

in gl_PerVertex
{
    vec4 gl_Position;
    //float gl_PointSize;
    //float gl_ClipDistance[];
} gl_in[];

vec4 rotate(vec4 p, float x, float y, float z, float angle)
{
    vec3 q;
    q[0] = p[0] * (x*x * (1.0 - cos(angle)) + cos(angle))
         + p[1] * (x*y * (1.0 - cos(angle)) + z * sin(angle))
         + p[2] * (x*z * (1.0 - cos(angle)) - y * sin(angle));
    q[1] = p[0] * (y*x * (1.0 - cos(angle)) - z * sin(angle))
         + p[1] * (y*y * (1.0 - cos(angle)) + cos(angle))
         + p[2] * (y*z * (1.0 - cos(angle)) + x * sin(angle));
    q[2] = p[0] * (z*x * (1.0 - cos(angle)) + y * sin(angle))
         + p[1] * (z*y * (1.0 - cos(angle)) - x * sin(angle))
         + p[2] * (z*z * (1.0 - cos(angle)) + cos(angle));
    return vec4(q, 0.0);
}

void main() {
    //https://stackoverflow.com/questions/54686818/glsl-geometry-shader-to-replace-gllinewidth
    vec4 p1 = gl_in[0].gl_Position;
    vec4 p2 = gl_in[1].gl_Position;
    //tube
    // Specify the axis to rotate about:
    vec4 axis = p2 - p1;
    float x = axis[0];
    float y = axis[1];
    float z = axis[2];
    axis = normalize(axis);
    //float length = hypotf(axis[0], hypotf(axis[1], axis[2]));
    float length = sqrt((axis[0]*axis[0]) + sqrt(axis[1]*axis[1] + axis[2]*axis[2]));
    float dir_scalar = (axis[0] > 0.0) ? length : -length;
    float xt = axis[0] + dir_scalar;
    float dot = -axis[1] / (dir_scalar * xt);
    vec3 perp_0 = vec3(dot * xt, 1.0f + dot * axis.y, dot * axis.z);
    perp_0 = normalize(perp_0);
    vec4 perp = vec4(perp_0, 0) * u_thickness * 0.001;
    //side0
    vec4 p1_1 = p1 + perp;
    vec4 p1_2 = p2 + perp;
    vec4 perp_rot_2 = rotate(perp, x, y, z, 60.0 * 3.14 / 180.0);
    vec4 p2_1 = p1 + perp_rot_2;
    vec4 p2_2 = p2 + perp_rot_2;
    vec4 perp_rot_3 = rotate(perp, x, y, z, 120.0 * 3.14 / 180.0);
    vec4 p3_1 = p1 + perp_rot_3;
    vec4 p3_2 = p2 + perp_rot_3;
    gl_Position = p1_1;
    EmitVertex();
    gl_Position = p1_2;
    EmitVertex();
    gl_Position = p2_1;
    EmitVertex();
    gl_Position = p2_2;
    EmitVertex();
    gl_Position = p3_1;
    EmitVertex();
    gl_Position = p3_2;
    EmitVertex();
    gl_Position = p1_1;
    EmitVertex();
    gl_Position = p1_2;
    EmitVertex();
    EndPrimitive();
}
It produces wrong results:

Calculate rotation matrix to transform one vector to another?

There are several questions about this on Stack Overflow, but most use Quaternions and I am looking for something that does not. And this question is all over the web, but it is surprisingly hard to find a straightforward example in code. I am looking for a C/C++, or GLSL/Metal solution for the rotation matrix to transform one vector to another. I found this which seems to answer the question: http://www.j3d.org/matrix_faq/matrfaq_latest.html#Q38
However, it is not working for me in practice. With this I am getting the correct transformations at the ninety-degree angles (i.e. transforming (0,0,1) to (0,1,0)), but in between it twists in the wrong direction. Here is the code I am using -- does anyone see what is wrong with it?
In this case I am always transforming from a fixed start normal of (0,0,1).
// matrix to rotate from (0,0,1) to specified direction
float4x4 directionMatrix(float3 direction) {
    float3 normal = float3(0.0, 0.0, 1.0);
    float3 V = normalize(cross(normal, direction));
    float phi = acos(dot(normal, direction));
    float rcos = cos(phi);
    float rsin = sin(phi);
    float4x4 M;
    M[0].x = rcos + V.x * V.x * (1.0 - rcos);
    M[1].x = V.z * rsin + V.y * V.x * (1.0 - rcos);
    M[2].x = -V.y * rsin + V.z * V.x * (1.0 - rcos);
    M[3].x = 0.0;
    M[0].y = -V.z * rsin + V.x * V.y * (1.0 - rcos);
    M[1].y = rcos + V.y * V.y * (1.0 - rcos);
    M[2].y = -V.x * rsin + V.z * V.y * (1.0 - rcos);
    M[3].y = 0.0;
    M[0].z = V.y * rsin + V.x * V.z * (1.0 - rcos);
    M[1].z = -V.x * rsin + V.y * V.z * (1.0 - rcos);
    M[2].z = rcos + V.z * V.z * (1.0 - rcos);
    M[3].z = 0.0;
    M[0].w = 0.0;
    M[1].w = 0.0;
    M[2].w = 0.0;
    M[3].w = 1.0;
    return M;
}
I tried transposing this, in case the original article used the opposite row/column-major convention from mine, but it did not help.
EDIT: The problem here turned out to be that this code works for a left-handed coordinate system, whereas mine is right-handed. So to get this to work you just have to multiply phi by -1. I also edited the code to add normalization of the cross vector, as suggested in the comments.
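In GLSL terms (the original uses Metal's float3/float4x4), the fix described in the edit might look like this; a sketch of only the changed lines, not the full corrected function:

// Sketch of the fix from the edit: normalize the rotation axis and
// negate the angle to account for a right-handed coordinate system.
vec3 V    = normalize(cross(normal, direction));
float phi = -acos(dot(normal, direction)); // phi * -1 for right-handed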

Point light Dual-paraboloid VSM in deferred rendering

I've been following this tutorial to implement variance shadow mapping for point lights in deferred rendering.
I'm using GLSL 3.3, left-handed coordinate system. Here is what I've been doing:
I render the scene to dual-paraboloid maps, storing depth and depth * depth.
Result:
The above image contains the front and back maps. The point light is at the center of the scene; you can see where it glows yellow the most.
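For reference, the depth pass that produces those maps essentially writes the first two moments of depth. A minimal sketch of such a fragment shader, where the normalized light-space distance vDepth is my assumed input, not a name from the tutorial:

// Sketch: storing depth and depth * depth for VSM.
in float vDepth;  // assumed: distance to the light, normalized to [0, 1]
out vec2 moments;

void main() {
    moments = vec2(vDepth, vDepth * vDepth);
}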
Then I set up a full-screen shader pass.
I do this by transforming the tutorial code from FX to GLSL.
Author's .fx code:
float4 TexturePS(float3 normalW : TEXCOORD0, float2 tex0 : TEXCOORD1, float3 pos : TEXCOORD2) : COLOR
{
    float4 texColor = tex2D(TexS, tex0 * TexScale);
    pos = mul(float4(pos, 1.0f), LightView);
    float L = length(pos);
    float3 P0 = pos / L;
    float alpha = .5f + pos.z / LightAttenuation;
    P0.z = P0.z + 1;
    P0.x = P0.x / P0.z;
    P0.y = P0.y / P0.z;
    P0.z = L / LightAttenuation;
    P0.x = .5f * P0.x + .5f;
    P0.y = -.5f * P0.y + .5f;
    float3 P1 = pos / L;
    P1.z = 1 - P1.z;
    P1.x = P1.x / P1.z;
    P1.y = P1.y / P1.z;
    P1.z = L / LightAttenuation;
    P1.x = .5f * P1.x + .5f;
    P1.y = -.5f * P1.y + .5f;
    float depth;
    float mydepth;
    float2 moments;
    if (alpha >= 0.5f)
    {
        moments = tex2D(ShadowFrontS, P0.xy).xy;
        depth = moments.x;
        mydepth = P0.z;
    }
    else
    {
        moments = tex2D(ShadowBackS, P1.xy).xy;
        depth = moments.x;
        mydepth = P1.z;
    }
    float lit_factor = (mydepth <= moments[0]);
    float E_x2 = moments.y;
    float Ex_2 = moments.x * moments.x;
    float variance = min(max(E_x2 - Ex_2, 0.0) + SHADOW_EPSILON, 1.0);
    float m_d = (moments.x - mydepth);
    float p = variance / (variance + m_d * m_d); // Chebychev's inequality
    texColor.xyz *= max(lit_factor, p + .2f);
    return texColor;
}
My translated GLSL code:
void main() {
    vec3 in_vertex = texture(scenePosTexture, texCoord).xyz; // get 3D vertex from 2D screen coordinate
    vec4 vert = lightViewMat * vec4(in_vertex, 1); // project vertex to point light space (view from light position, look target is -Z)
    float L = length(vert.xyz);
    float distance = length(lightPos - in_vertex);
    float denom = distance / lightRad + 1;
    float attenuation = 1.0 / (denom * denom);
    // to determine which paraboloid map to use
    float alpha = vert.z / attenuation + 0.5f;
    vec3 P0 = vert.xyz / L;
    P0.z = P0.z + 1;
    P0.x = P0.x / P0.z;
    P0.y = P0.y / P0.z;
    P0.z = L / attenuation;
    P0.x = .5f * P0.x + .5f;
    P0.y = -.5f * P0.y + .5f;
    vec3 P1 = vert.xyz / L;
    P1.z = 1 - P1.z;
    P1.x = P1.x / P1.z;
    P1.y = P1.y / P1.z;
    P1.z = L / attenuation;
    P1.x = .5f * P1.x + .5f;
    P1.y = -.5f * P1.y + .5f;
    // Variance shadow mapping
    float depth;
    float mydepth;
    vec2 moments;
    if (alpha >= 0.5f)
    {
        moments = texture(shadowMapFrontTexture, P0.xy).xy;
        depth = moments.x;
        mydepth = P0.z;
    }
    else
    {
        moments = texture(shadowMapBackTexture, P1.xy).xy;
        depth = moments.x;
        mydepth = P1.z;
    }
    // Original .fx code is: float lit_factor = (mydepth <= moments[0]);
    // I'm not sure my translated code below is correct
    float lit_factor = 0;
    if (mydepth <= moments.x)
        lit_factor = mydepth;
    else
        lit_factor = moments.x;
    float E_x2 = moments.y;
    float Ex_2 = moments.x * moments.x;
    float variance = min(max(E_x2 - Ex_2, 0.0) + SHADOW_EPSILON, 1.0);
    float m_d = (moments.x - mydepth);
    float p = variance / (variance + m_d * m_d); // Chebychev's inequality
    fragColor = texture(sceneTexture, texCoord).rgb; // sample original color
    fragColor.rgb *= max(lit_factor, p + .2f);
}
Render result
Right now I'm clueless about what I need to change to render the shadow correctly. Could someone point it out for me?
A friend of mine pointed out that the Y component is flipped, which is why the shadow looked upside down. After negating the Y of P0 and P1, it starts to show a quite reasonable shadow:
But another problem is that the location of the shadow is wrong.
Why do you duplicate the paraboloid projection computation?
You compute it on 'vert', then on 'P0' and 'P1'; shouldn't you do it only on 'P0' and 'P1'? (The original code doesn't do this on 'pos'.)
EDIT:
Your lit_factor is wrong; it should be either 0.0 or 1.0.
You could use the step() GLSL intrinsic, like this:
float lit_factor = step(mydepth, moments[0]);

Rotate quad made in geometry shader

I'm drawing a quad using a geometry shader, but I can't figure out how to rotate it by an angle.
void main(void)
{
    float scaleX = 2.0f / u_resolution.x;
    float scaleY = 2.0f / u_resolution.y;
    float nx = (u_position.x * scaleX) - 1.0f;
    float ny = -(u_position.y * scaleY) + 1.0f;
    float nw = u_size.x * scaleX;
    float nh = u_size.y * scaleY;
    gl_Position = vec4(nx + nw, ny, 0.0, 1.0);
    texcoord = vec2(1.0, 0.0);
    EmitVertex();
    gl_Position = vec4(nx, ny, 0.0, 1.0);
    texcoord = vec2(0.0, 0.0);
    EmitVertex();
    gl_Position = vec4(nx + nw, ny - nh, 0.0, 1.0);
    texcoord = vec2(1.0, 1.0);
    EmitVertex();
    gl_Position = vec4(nx, ny - nh, 0.0, 1.0);
    texcoord = vec2(0.0, 1.0);
    EmitVertex();
    EndPrimitive();
}
Should I use a rotation matrix or sin and cos? I'm not too great at math.
You don't have to use matrices, but you need to use those sine functions. Here is a way to rotate a 3D position about some arbitrary axis by some angle specified in degrees:
// This is the 3D position that we want to rotate:
vec3 p = position.xyz;
// Specify the axis to rotate about:
float x = 0.0;
float y = 0.0;
float z = 1.0;
// Specify the angle in radians:
float angle = 90.0 * 3.14 / 180.0; // 90 degrees, CCW
vec3 q;
q.x = p.x * (x*x * (1.0 - cos(angle)) + cos(angle))
    + p.y * (x*y * (1.0 - cos(angle)) + z * sin(angle))
    + p.z * (x*z * (1.0 - cos(angle)) - y * sin(angle));
q.y = p.x * (y*x * (1.0 - cos(angle)) - z * sin(angle))
    + p.y * (y*y * (1.0 - cos(angle)) + cos(angle))
    + p.z * (y*z * (1.0 - cos(angle)) + x * sin(angle));
q.z = p.x * (z*x * (1.0 - cos(angle)) + y * sin(angle))
    + p.y * (z*y * (1.0 - cos(angle)) - x * sin(angle))
    + p.z * (z*z * (1.0 - cos(angle)) + cos(angle));
gl_Position = vec4(q, 1.0);
If you know that you are rotating about some standard x-, y-, or z-axis, you can simplify the "algorithm" a lot by defining it explicitly for that standard axis.
Notice how we rotate about the z-axis in the above code. For example, rotation about the x-axis would be (x,y,z) = (1,0,0). You could set the variables to anything, but the values should result in the axis being a unit vector (if that even matters.)
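For instance, plugging (x, y, z) = (0, 0, 1) into the code above collapses it to the familiar 2D rotation; a sketch derived directly from the general formula, not separate code from the answer:

// z-axis special case: only x and y mix, z passes through unchanged.
vec3 q;
q.x =  p.x * cos(angle) + p.y * sin(angle);
q.y = -p.x * sin(angle) + p.y * cos(angle);
q.z =  p.z;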
Then again, you might as well use matrices:
vec3 n = vec3(0.0, 0.0, 1.0); // the axis to rotate about
// Specify the rotation transformation matrix:
mat3 m = mat3(
    n.x*n.x * (1.0f - cos(angle)) + cos(angle),       // column 1 of row 1
    n.x*n.y * (1.0f - cos(angle)) + n.z * sin(angle), // column 2 of row 1
    n.x*n.z * (1.0f - cos(angle)) - n.y * sin(angle), // column 3 of row 1
    n.y*n.x * (1.0f - cos(angle)) - n.z * sin(angle), // column 1 of row 2
    n.y*n.y * (1.0f - cos(angle)) + cos(angle),       // ...
    n.y*n.z * (1.0f - cos(angle)) + n.x * sin(angle), // ...
    n.z*n.x * (1.0f - cos(angle)) + n.y * sin(angle), // column 1 of row 3
    n.z*n.y * (1.0f - cos(angle)) - n.x * sin(angle), // ...
    n.z*n.z * (1.0f - cos(angle)) + cos(angle)        // ...
);
// Apply the rotation to our 3D position:
vec3 q = m * p;
gl_Position = vec4(q, 1.0);
Notice how the elements of the matrix are laid out such that we first complete the first column, then the second, and so on; the matrix is in column-major order. This matters when you transfer a matrix written in mathematical notation into a data type in your code: you basically need to transpose it (flip the elements across the diagonal) in order to use it. Also, we are essentially multiplying a matrix on the left with a column vector on the right.
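To illustrate the column-major layout with a tiny example of my own (not from the original answer): GLSL matrix constructors consume columns, and indexing yields columns.

// Sketch: mat3 built from three column vectors.
vec3 c0 = vec3(1.0, 0.0, 0.0);
vec3 c1 = vec3(0.0, 1.0, 0.0);
vec3 c2 = vec3(0.0, 0.0, 1.0);
mat3 m = mat3(c0, c1, c2); // m[0] == c0: the first column, not the first row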
If you need a 4-by-4 homogeneous matrix, then you would simply add an extra column and a row into the above matrix, such that both the new rightmost column and bottommost row would consist of [0 0 0 1]:
vec4 p = position.xyzw; // new dimension
vec3 n = ...; // same
mat4 m = mat4( // new dimension
    ...
    0.0,
    ...
    0.0,
    ...
    0.0,
    0.0,
    0.0,
    0.0,
    1.0
);
vec4 q = m * p;
gl_Position = q;
Again, notice the order of the multiplication when applying the rotation; it is important because it affects the end result. What happens in the multiplication is basically that a new vector is formed by calculating the dot-product of the position vector and each column in the matrix; each coordinate in the resulting vector is the dot-product of the original vector and a single column in the matrix (see the first code example).
The
q.x = p.x * (x*x * (1.0 - cos(angle)) + cos(angle))
    + p.y * (x*y * (1.0 - cos(angle)) + z * sin(angle))
    + p.z * (x*z * (1.0 - cos(angle)) - y * sin(angle));
is the same as:
q.x = dot(p, m[0]);
One could even compose the matrix with itself: m = m*m;. Composing doubles the angle of rotation, so with the 90-degree example above this would result in a 180-degree counterclockwise rotation matrix.

Physically based shader not producing desired results

Over the past ~2-3 weeks, I've been learning about Physically Based Shading and I just cannot wrap my head around some of the problems I'm having.
Fragment Shader
#version 430
#define PI 3.14159265358979323846

// Inputs
in vec3 inputNormal;
vec3 fNormal;

// Material
float reflectance = 1.0; // 0 to 1
float roughness = 0.5;
vec3 specularColor = vec3(1.0, 1.0, 1.0); // f0

// Values
vec3 lightVector = vec3(1, 1, 1); // Light (l)
vec3 eyeVector = vec3(2.75, 1.25, 1.25); // Camera (v)
vec3 halfVector = normalize(lightVector + eyeVector); // L + V / |L + V|

out vec4 fColor; // Output Color

// Specular Functions
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
    float alpha = roughness * roughness;
    float alpha2 = alpha * alpha;
    float NoH = dot(fNormal, h);
    float finalTerm = ((NoH * NoH) * (alpha2 - 1.0) + 1.0);
    return vec3(alpha2 / (PI * (finalTerm * finalTerm)));
}

vec3 Gsub(vec3 v) // Sub Function of G
{
    float k = ((roughness + 1.0) * (roughness + 1.0)) / 8;
    return vec3(dot(fNormal, v) / ((dot(fNormal, v)) * (1.0 - k) + k));
}

vec3 G(vec3 l, vec3 v, vec3 h) // Geometric Attenuation Term - Schlick Modified (k = a/2)
{
    return Gsub(l) * Gsub(v);
}

vec3 F(vec3 v, vec3 h) // Fresnel - Schlick Modified (Spherical Gaussian Approximation)
{
    vec3 f0 = specularColor; // right?
    return f0 + (1.0 - f0) * pow(2, (-5.55473 * (dot(v, h)) - 6.98316) * (dot(v, h)));
}

vec3 specular()
{
    return (D(halfVector) * F(eyeVector, halfVector) * G(lightVector, eyeVector, halfVector)) / 4 * ((dot(fNormal, lightVector)) * (dot(fNormal, eyeVector)));
}

vec3 diffuse()
{
    float NoL = dot(fNormal, lightVector);
    vec3 result = vec3(reflectance / PI);
    return result * NoL;
}

void main()
{
    fNormal = normalize(inputNormal);
    fColor = vec4(diffuse() + specular(), 1.0);
    //fColor = vec4(D(halfVector), 1.0);
}
So far I have been able to fix up some things and now I get a better result.
However, it now seems clear that the highlight is way too big; this originates from the normal distribution function (Specular D).
Your coding of GGX/Trowbridge-Reitz is wrong:
vec3 NxH = fNormal * h;
The star * denotes a term-by-term (component-wise) product where you want a dot product.
Also
float alphaTerm = (alpha * alpha - 1.0) + 1.0;
is not correct, since the formula multiplies n.m by (alpha * alpha - 1.0) before adding 1.0. Your formula is equal to alpha*alpha!
Try:
// Specular
vec3 D(vec3 h) // Normal Distribution Function - GGX/Trowbridge-Reitz
{
    float alpha = roughness * roughness;
    float NxH = dot(fNormal, h);
    float alpha2 = alpha * alpha;
    float t = ((NxH * NxH) * (alpha2 - 1.0) + 1.0);
    return vec3(alpha2 / (PI * t * t));
}
In many other places you use * where you need dot. You need to correct all of these. Also, double-check your formulas; many seem incorrect.
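For instance, the geometry and Fresnel terms rewritten with explicit dot products might look like the sketch below; the max() clamps and the scalar return type of the G sub-term are my additions, not part of the original answer:

// Sketch: Schlick-GGX visibility sub-term with a proper dot product.
float GsubFixed(vec3 v)
{
    float k   = ((roughness + 1.0) * (roughness + 1.0)) / 8.0;
    float NoV = max(dot(fNormal, v), 0.0); // dot product, not component-wise *
    return NoV / (NoV * (1.0 - k) + k);
}

// Sketch: Schlick Fresnel with the Spherical Gaussian approximation.
vec3 FFixed(vec3 v, vec3 h)
{
    float VoH = max(dot(v, h), 0.0);
    return specularColor + (1.0 - specularColor) * pow(2.0, (-5.55473 * VoH - 6.98316) * VoH);
}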