Rotate a direction vector by another direction - glsl

I have a direction vector (A) pointing upward (0, 1, 0), and I want to rotate it toward another direction in a shader, but only around one axis, in this case the Z axis. The target can be given either as a vector (B) or as a scalar angle.
As an example, without rotation the vector should point to the top (0, 1, 0), but with a 90° clockwise rotation the vector should point to (1, 0, 0).

If the rotation axis is always the z-axis (0, 0, 1), and the projection of the vector A onto the xy-plane should point along the projection of the vector B, then the solution is:
vec3 A, B;
vec3 AB = vec3(length(A.xy) * normalize(B.xy), A.z);
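A quick check against the example above: with A = (0, 1, 0) and B = (1, 0, 0), length(A.xy) = 1 and normalize(B.xy) = (1, 0), so AB = vec3(1, 0, 0), the expected 90° result.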
A more general solution, with an arbitrary, normalized rotation axis R would be:
vec3 A, B;
vec3 R; // rotation axis (normalized)
vec3 A_r = R * dot(A, R); // component of A, in the direction of R
vec3 A_prj = A - A_r; // component of A, in the rotation plane
vec3 B_r = R * dot(B, R); // component of B, in the direction of R
vec3 B_prj = B - B_r; // component of B, in the rotation plane
vec3 AB = length(A_prj) * normalize(B_prj) + A_r;
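The question also allows the rotation to be given as a scalar instead of a vector B. A minimal sketch for that case (my addition; it assumes the angle is in radians and that "clockwise" matches the question's example, i.e. 90° takes (0, 1, 0) to (1, 0, 0)):
vec3 A;
float angle; // rotation around the z-axis, in radians
float s = sin(angle);
float c = cos(angle);
vec3 AB = vec3(c * A.x + s * A.y, -s * A.x + c * A.y, A.z);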

Related

I need help converting this 2D sky shader to 3D

I found this shader function on GitHub and managed to get it working in GameMaker Studio 2, my current programming suite of choice. However, this is a 2D effect that doesn't take the camera up vector or FOV into account. Is there any way that can be added? I'm only at an intermediate skill level when it comes to shaders, so I'm not sure exactly what route to take, whether it would even be considered worth it at this point, or if I should start from a different example.
uniform vec3 u_sunPosition;
varying vec2 v_vTexcoord;
varying vec4 v_vColour;
varying vec3 v_vPosition;
#define PI 3.141592
#define iSteps 16
#define jSteps 8
vec2 rsi(vec3 r0, vec3 rd, float sr) {
    // ray-sphere intersection that assumes
    // the sphere is centered at the origin.
    // No intersection when result.x > result.y
    float a = dot(rd, rd);
    float b = 2.0 * dot(rd, r0);
    float c = dot(r0, r0) - (sr * sr);
    float d = (b*b) - 4.0*a*c;
    if (d < 0.0) return vec2(1e5,-1e5);
    return vec2(
        (-b - sqrt(d))/(2.0*a),
        (-b + sqrt(d))/(2.0*a)
    );
}
vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g) {
    // Normalize the sun and view directions.
    pSun = normalize(pSun);
    r = normalize(r);
    // Calculate the step size of the primary ray.
    vec2 p = rsi(r0, r, rAtmos);
    if (p.x > p.y) return vec3(0,0,0);
    p.y = min(p.y, rsi(r0, r, rPlanet).x);
    float iStepSize = (p.y - p.x) / float(iSteps);
    // Initialize the primary ray time.
    float iTime = 0.0;
    // Initialize accumulators for Rayleigh and Mie scattering.
    vec3 totalRlh = vec3(0,0,0);
    vec3 totalMie = vec3(0,0,0);
    // Initialize optical depth accumulators for the primary ray.
    float iOdRlh = 0.0;
    float iOdMie = 0.0;
    // Calculate the Rayleigh and Mie phases.
    float mu = dot(r, pSun);
    float mumu = mu * mu;
    float gg = g * g;
    float pRlh = 3.0 / (16.0 * PI) * (1.0 + mumu);
    float pp = 1.0 + gg - 2.0 * mu * g;
    float pMie = 3.0 / (8.0 * PI) * ((1.0 - gg) * (mumu + 1.0)) / (sign(pp)*pow(abs(pp), 1.5) * (2.0 + gg));
    // Sample the primary ray.
    for (int i = 0; i < iSteps; i++) {
        // Calculate the primary ray sample position.
        vec3 iPos = r0 + r * (iTime + iStepSize * 0.5);
        // Calculate the height of the sample.
        float iHeight = length(iPos) - rPlanet;
        // Calculate the optical depth of the Rayleigh and Mie scattering for this step.
        float odStepRlh = exp(-iHeight / shRlh) * iStepSize;
        float odStepMie = exp(-iHeight / shMie) * iStepSize;
        // Accumulate optical depth.
        iOdRlh += odStepRlh;
        iOdMie += odStepMie;
        // Calculate the step size of the secondary ray.
        float jStepSize = rsi(iPos, pSun, rAtmos).y / float(jSteps);
        // Initialize the secondary ray time.
        float jTime = 0.0;
        // Initialize optical depth accumulators for the secondary ray.
        float jOdRlh = 0.0;
        float jOdMie = 0.0;
        // Sample the secondary ray.
        for (int j = 0; j < jSteps; j++) {
            // Calculate the secondary ray sample position.
            vec3 jPos = iPos + pSun * (jTime + jStepSize * 0.5);
            // Calculate the height of the sample.
            float jHeight = length(jPos) - rPlanet;
            // Accumulate the optical depth.
            jOdRlh += exp(-jHeight / shRlh) * jStepSize;
            jOdMie += exp(-jHeight / shMie) * jStepSize;
            // Increment the secondary ray time.
            jTime += jStepSize;
        }
        // Calculate attenuation.
        vec3 attn = exp(-(kMie * (iOdMie + jOdMie) + kRlh * (iOdRlh + jOdRlh)));
        // Accumulate scattering.
        totalRlh += odStepRlh * attn;
        totalMie += odStepMie * attn;
        // Increment the primary ray time.
        iTime += iStepSize;
    }
    // Calculate and return the final color.
    return iSun * (pRlh * kRlh * totalRlh + pMie * kMie * totalMie);
}
vec3 ACESFilm( vec3 x )
{
    float tA = 2.51;
    float tB = 0.03;
    float tC = 2.43;
    float tD = 0.59;
    float tE = 0.14;
    return clamp((x*(tA*x+tB))/(x*(tC*x+tD)+tE),0.0,1.0);
}
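(As far as I know, these constants are Krzysztof Narkowicz's widely used filmic fit of the ACES curve, used here as a cheap tonemapping step.)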
void main() {
    vec3 color = atmosphere(
        normalize( v_vPosition ),       // normalized ray direction
        vec3(0,6372e3,0),               // ray origin
        u_sunPosition,                  // position of the sun
        22.0,                           // intensity of the sun
        6371e3,                         // radius of the planet in meters
        6471e3,                         // radius of the atmosphere in meters
        vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient
        21e-6,                          // Mie scattering coefficient
        8e3,                            // Rayleigh scale height
        1.2e3,                          // Mie scale height
        0.758                           // Mie preferred scattering direction
    );
    // Apply exposure.
    color = ACESFilm( color );
    gl_FragColor = vec4(color, 1.0);
}
However, this is a 2D effect that doesn't take the camera up vector or FOV into account.
If you want to draw a sky in 3D, then you have to draw it on the back plane of the normalized device space. The normalized device space is a cube whose left, bottom, near corner is at (-1, -1, -1) and whose right, top, far corner is at (1, 1, 1).
The back plane is the quad with:
bottom left: -1, -1, 1
bottom right: 1, -1, 1
top right: 1, 1, 1
top left: -1, 1, 1
Render this quad. Note that the vertex coordinates do not have to be transformed by any matrix, because they already are normalized device space coordinates. But you have to transform the ray which is used for the sky (the direction which is passed to atmosphere).
The ray has to be a direction in world space, from the camera position towards the sky. From the vertex coordinate of the quad you get a ray in normalized device space; you have to transform this ray to world space. The inverse projection matrix (MATRIX_PROJECTION) transforms from normalized device space to view space, and the inverse view matrix (MATRIX_VIEW) transforms from view space to world space. Use these matrices in the vertex shader:
attribute vec3 in_Position;
varying vec3 v_world_ray;
void main()
{
    gl_Position = vec4(in_Position, 1.0);
    vec3 proj_ray = vec3(inverse(gm_Matrices[MATRIX_PROJECTION]) * vec4(in_Position.xyz, 1.0));
    v_world_ray = vec3(inverse(gm_Matrices[MATRIX_VIEW]) * vec4(proj_ray.xyz, 0.0));
}
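Note: inverse() requires GLSL 1.40 / GLSL ES 3.00 or later. If the target is GameMaker's default GLSL ES 1.00, a common workaround (a sketch; the uniform names u_inv_projection and u_inv_view are hypothetical) is to compute the inverse matrices on the CPU and pass them in as uniforms:
attribute vec3 in_Position;
varying vec3 v_world_ray;
uniform mat4 u_inv_projection; // inverse projection matrix, computed on the CPU
uniform mat4 u_inv_view;       // inverse view matrix, computed on the CPU
void main()
{
    gl_Position = vec4(in_Position, 1.0);
    vec3 proj_ray = vec3(u_inv_projection * vec4(in_Position.xyz, 1.0));
    v_world_ray = vec3(u_inv_view * vec4(proj_ray.xyz, 0.0));
}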
In the fragment shader you have to rotate the ray by 90° around the x axis, but that is just caused by the way the atmosphere function interprets the ray:
varying vec3 v_world_ray;
// [...]
void main() {
    vec3 world_ray = vec3(v_world_ray.x, v_world_ray.z, -v_world_ray.y);
    vec3 color = atmosphere(
        normalize( world_ray.xyz ),     // normalized ray direction
        vec3(0,6372e3,0),               // ray origin
        u_sunPosition,                  // position of the sun
        22.0,                           // intensity of the sun
        6371e3,                         // radius of the planet in meters
        6471e3,                         // radius of the atmosphere in meters
        vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient
        21e-6,                          // Mie scattering coefficient
        8e3,                            // Rayleigh scale height
        1.2e3,                          // Mie scale height
        0.758                           // Mie preferred scattering direction
    );
    // Apply exposure.
    color = ACESFilm( color );
    gl_FragColor = vec4(color.rgb, 1.0);
}

What is wrong with my code for calculating whether the point "point" is inside the triangle?

Can anybody see what is wrong with my code?
float intersect(Ray ray, Triangle triangle) {
    float scalar = (0 - dot(triangle.normal, ray.origin)) / dot(triangle.normal, ray.dir);
    vec3 point = ray.origin + scalar * ray.dir;
    // Barycentric coordinates
    vec3 A = vec3(1, 0, 0);
    vec3 B = vec3(0, 1, 0);
    vec3 C = vec3(0, 0, 1);
    // Using the barycentric coordinates to calculate tby:
    // matrix * tby = [A - point]  ==>  tby = inverse(matrix) * [A - point]
    mat3 matrix = mat3(point, A - B, A - C);
    vec3 tby = transpose(matrix) * vec3(A - point);
    float t = tby.x;
    float beta = tby.y;
    float gamma = tby.z;
    if (beta + gamma < 1 && beta > 0 && gamma > 0 && t > 0)
        return scalar;
    return -1.0;
}
What I created so far
The Triangle struct:
struct Triangle
{
    vec3 p1;
    vec3 p2;
    vec3 p3;
    vec3 normal;
    Material material;
};
The points:
vec3 p1 = vec3(-0.3, 0.2, 0.5);
vec3 p2 = vec3( 0.3, 0.2, 0.5);
vec3 p3 = vec3( 0.15, 0.0, 0.0);
vec3 p4 = vec3( 0.15, 0.2, 0.5);
The triangles:
{
    // Triangle 1
    scene.triangles[0].p1 = p1;
    scene.triangles[0].p2 = p4;
    scene.triangles[0].p3 = p3;
    scene.triangles[0].normal = normalize(cross((p4-p1), (p3-p1)));
    // Triangle 2
    scene.triangles[1].p1 = p3;
    scene.triangles[1].p2 = p2;
    scene.triangles[1].p3 = p1;
    scene.triangles[1].normal = normalize(cross((p2-p3), (p1-p3)));
    // Triangle 3
    scene.triangles[2].p1 = p3;
    scene.triangles[2].p2 = p4;
    scene.triangles[2].p3 = p2;
    scene.triangles[2].normal = normalize(cross((p4-p3), (p2-p3)));
    ...
}
From what I understand, you are trying to check whether the ray goes through the triangle.
The very first error that I see is that the point:
vec3 point = ray.origin + scalar * ray.dir;
has a meaningless definition. You calculate the intersection of the ray with a plane that is parallel to the triangle but goes through the origin. Unless this plane for some miraculous reason coincides with the triangle's plane, all forthcoming calculations with this point are devoid of any meaning.
To fix this you need to define scalar like this:
float scalar = dot(triangle.normal, triangle.p1) - dot(triangle.normal, ray.origin);
scalar /= dot(triangle.normal, ray.dir);
where triangle.p1 could just as well be any other point of the triangle.
After you fix this issue it will be possible to discuss whether the other parts of your code make sense.
Also, please provide more information about the details of the code, i.e., the relevant pieces of the implementation.
Now, about how to check whether the ray intersects the triangle.
The method of intersecting the ray with the triangle's plane and then checking whether the point is inside the triangle in a 2-dimensional setting is not very good in terms of stability, so avoid it.
A simpler and more direct method is to compute the difference vectors:
vec3 dvec1 = triangle.p1 - ray.origin;
vec3 dvec2 = triangle.p2 - ray.origin;
vec3 dvec3 = triangle.p3 - ray.origin;
and then check whether ray.dir can be expressed as a linear sum of dvec1, dvec2, and dvec3 with positive coefficients. This can be achieved by computing the inverse of the matrix mat3(dvec1, dvec2, dvec3) and multiplying it by ray.dir (this way you obtain the coefficients that are needed to express ray.dir as a linear sum of dvec1, dvec2, and dvec3), as in the sketch below.
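A minimal sketch of that matrix test (my illustration, not the answer's original code; it assumes GLSL 1.40+ for inverse() and that the three vectors are not coplanar, i.e. the matrix is invertible):
bool rayHitsTriangle(Ray ray, Triangle triangle)
{
    vec3 dvec1 = triangle.p1 - ray.origin;
    vec3 dvec2 = triangle.p2 - ray.origin;
    vec3 dvec3 = triangle.p3 - ray.origin;
    // coefficients c such that mat3(dvec1, dvec2, dvec3) * c == ray.dir
    vec3 c = inverse(mat3(dvec1, dvec2, dvec3)) * ray.dir;
    // the ray passes through the triangle exactly when all coefficients are positive
    return all(greaterThan(c, vec3(0.0)));
}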
However, the matrix method isn't perfectly stable due to the divisions. This can be improved further by implementing logically equivalent code without divisions:
vec3 dvec12 = cross(dvec1, dvec2);
if (dot(dvec12, dvec3) * dot(dvec12, ray.dir) < 0.0) return false;
vec3 dvec23 = cross(dvec2, dvec3);
if (dot(dvec23, dvec1) * dot(dvec23, ray.dir) < 0.0) return false;
vec3 dvec31 = cross(dvec3, dvec1);
if (dot(dvec31, dvec2) * dot(dvec31, ray.dir) < 0.0) return false;
return true;
In the first if we check whether ray.dir is in the same half-space as dvec3 with respect to the plane spanned by {dvec1, dvec2}. If it isn't, the ray won't intersect the triangle.
Then we repeat this check for the other pairs of vectors.
I recommend rewriting your code like this:
float PointInOrOn( vec3 P1, vec3 P2, vec3 A, vec3 B )
{
    vec3 CP1 = cross( B - A, P1 - A );
    vec3 CP2 = cross( B - A, P2 - A );
    return step(0.0, dot( CP1, CP2 ));
}

float intersect(Ray ray, Triangle triangle)
{
    vec3 D = normalize(ray.dir);          // skip normalize, if ray.dir is normalized
    vec3 N = normalize(triangle.normal);  // skip normalize, if triangle.normal is normalized
    float d = dot(triangle.p1 - ray.origin, N) / dot(D, N);
    vec3 X = ray.origin + D * d;
    float isIn = PointInOrOn( X, triangle.p1, triangle.p2, triangle.p3 ) *
                 PointInOrOn( X, triangle.p2, triangle.p3, triangle.p1 ) *
                 PointInOrOn( X, triangle.p3, triangle.p1, triangle.p2 );
    if ( isIn > 0.01 )
        return d;
    return -1.0;
}
See the following explanation.
Intersection of a ray and a triangle primitive
The ray is defined by a point R0 and a normalized direction D.
The plane is defined by a triangle with the three points PA, PB, and PC.
The normal vector of the plane can be calculated by the cross product of 2 legs of the triangle:
N = normalize( cross(PC-PA, PB-PA) )
The normal distance n of the point R0 to the plane is:
n = | R0 - PA | * cos(alpha) = dot(PA - R0, N)
It follows that the distance d of the intersection point X to the origin of the ray R0 is:
d = n / cos(beta) = n / dot(D, N)
The intersection point X is:
X = R0 + D * d = R0 + D * dot(PA - R0, N) / dot(D, N)
To find out if a point is inside a triangle, it has to be tested whether the line from a corner point to the intersection point is between the two legs which are connected to that corner point. The triangle is defined by the points A, B, C, and the point to be tested is P:
bool PointInOrOn( P1, P2, A, B )
{
CP1 = cross( B - A, P1 - A )
CP2 = cross( B - A, P2 - A )
return dot( CP1, CP2 ) >= 0
}
bool PointInOrOnTriangle( P, A, B, C )
{
return PointInOrOn( P, A, B, C ) &&
PointInOrOn( P, B, C, A ) &&
PointInOrOn( P, C, A, B )
}

Adaptive Depth Bias for Shadow Maps Ray Casting

I have found this paper dealing with how to compute the perfect bias when dealing with shadow maps.
The idea is to:
get the texel used when sampling the shadowMap
project the texel location back to eyeSpace (ray tracing)
get the difference between your fragment.z and the intersection with the fragment's face
This way you have calculated the error which serves as the appropriate bias against z-fighting.
Now I am trying to implement it, but I am running into some trouble:
I am using an OrthoProjectionMatrix, so I think I don't need to divide by w back and forth.
I am good until I am computing the ray intersection with the face.
I have a lot of faces failing the test, and my bias is way too large.
This is my fragment shader code:
float getBias(float depthFromTexture)
{
    vec3 n = lightFragNormal.xyz;
    // no need to divide by w, we got an ortho projection
    // we are in NDC [-1,1], we go to [0,1]
    //vec4 smTexCoord = 0.5 * shadowCoord + vec4(0.5, 0.5, 0.5, 0.0);
    vec4 smTexCoord = shadowCoord;
    // we are in [0,1], we go to texture space [0,1] -> [0,shadowMap.dimension]: [0,1024]
    // get the nearest index in the shadow map, the texel corresponding to our fragment;
    // we use floor: (125.6, 237.9) -> (125, 237)
    vec2 delta = vec2(xPixelOffset, yPixelOffset);
    vec2 textureDim = vec2(1.0 / xPixelOffset, 1.0 / yPixelOffset);
    vec2 index = floor(smTexCoord.xy * textureDim);
    // we get the center of the current texel; we add 0.5 to put us in the middle: (125, 237) -> (125.5, 237.5)
    // then we go back from [0,1024] to [0,1]: (125.5, 237.5) -> (0.12, 0.23)
    vec2 nlsGridCenter = delta * (index + vec2(0.5, 0.5));
    // go back to NDC: [0,1] -> [-1,1]
    vec2 lsGridCenter = 2.0 * nlsGridCenter - vec2(1.0);
    // compute the light-space grid direction, multiply by the inverse projection matrix
    vec4 lsGridCenter4 = inverse(lightProjectionMatrix) * vec4(lsGridCenter, -frustrumNear, 0);
    vec3 lsGridLineDir = vec3(normalize(lsGridCenter4));
    /** Plane ray intersection **/
    // Locate the potential occluder for the shading fragment:
    // compute the distance t we need to continue in the gridDir direction, the point is "t" far away
    float ls_t_hit = dot(n, lightFragmentCoord.xyz) / dot(n, lsGridLineDir);
    if (ls_t_hit <= 0.0) {
        return 0.0; // I get a lot of negative values, it shouldn't be the case
    }
    // compute the intersection point p with the face
    vec3 ls_hit_p = ls_t_hit * lsGridLineDir;
    float intersectionDepth = (lightProjectionMatrix * vec4(ls_hit_p, 1.0)).z / 2.0 + 0.5;
    float fragmentDepth = (lightProjectionMatrix * lightFragmentCoord).z / 2.0 + 0.5;
    float result = abs(intersectionDepth - fragmentDepth);
    return result;
}
I am struggling with this line:
vec4 lsGridCenter4 = inverse(lightProjectionMatrix) * vec4(lsGridCenter, -frustrumNear, 0);
I don't know if I am correct; maybe it should be:
vec4(lsGridCenter, -frustrumNear, 1);
and of course the plane intersection.
From Wikipedia, the line-plane intersection distance is:
d = dot(p0 - l0, n) / dot(l, n)
where:
l = my normalized direction vector
p0 = a point belonging to the plane
l0 = the origin of the line; I think it's the origin, so in eye space it should be (0, 0, 0), but I might be wrong here
n = the normal of the plane, the normal of my fragment in eye space
in my code:
float ls_t_hit = dot(n, lightFragmentCoord.xyz) / dot(n, lsGridLineDir);
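A general note, independent of the paper (a sketch, not a verified fix): in homogeneous coordinates w = 1 transforms a position and w = 0 transforms a direction, and in OpenGL-style NDC the near plane sits at z = -1, not at z = -frustrumNear. Reconstructing the texel center as a position would therefore look like:
// hypothetical: reconstruct the texel center as a position on the near plane
vec4 lsGridCenter4 = inverse(lightProjectionMatrix) * vec4(lsGridCenter, -1.0, 1.0);
vec3 lsGridPos = lsGridCenter4.xyz; // the inverse of an orthographic matrix keeps w == 1, no divide needed
// with an orthographic light all texel rays are parallel, so the ray direction
// would be the light's forward axis rather than normalize() of a reconstructed position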

About glm quaternion rotation

I want to perform a rotation with a quaternion.
The glm library does this very well.
The following is my code:
vec3 v(0.0f, 0.0f, 1.0f);
float deg = 45.0f * 0.5f;
quat q(glm::cos(glm::radians(deg)), 0, glm::sin(glm::radians(deg)), 0);
vec3 newv = q*v;
printf("v %f %f %f \n", newv[0], newv[1], newv[2]);
My question is: in many articles the formula for rotation by a quaternion is
rotated_v = q*v*q_conj
It's weird that in glm just multiplying the vector "v" by the quaternion "q" does the rotation.
It confused me.
After doing some research, I found the definition of the operator "*" in the glm quaternion code and what is going on in there.
The implementation is based on these sites:
Quaternion vector rotation optimisation,
A faster quaternion-vector multiplication
Here are two versions of the rotation by quaternion.
// rotate vector
vec3 qrot(vec4 q, vec3 v)
{
    return v + 2.0*cross(q.xyz, cross(q.xyz, v) + q.w*v);
}
// rotate vector (alternative)
vec3 qrot_2(vec4 q, vec3 v)
{
    return v*(q.w*q.w - dot(q.xyz, q.xyz)) + 2.0*q.xyz*dot(q.xyz, v) +
           2.0*q.w*cross(q.xyz, v);
}
If someone could prove this, I would really appreciate it.
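For reference, a sketch of the proof (standard quaternion algebra; write the unit quaternion as q = (w, r) with scalar part w and vector part r, and treat v as a pure quaternion). Expanding q*v*q_conj with the quaternion multiplication rule gives
q*v*q_conj = v*(w*w - dot(r, r)) + 2.0*r*dot(r, v) + 2.0*w*cross(r, v)
which is exactly qrot_2. Applying the identity cross(r, cross(r, v)) = r*dot(r, v) - v*dot(r, r) and the unit-length condition w*w + dot(r, r) = 1.0 turns this into
q*v*q_conj = v + 2.0*cross(r, cross(r, v) + w*v)
which is exactly qrot.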
It works when the imaginary part of your quaternion is perpendicular to your vector.
That's your case: vec3(0, sin(angle), 0) is perpendicular to vec3(0, 0, 1).
You will see that you need to multiply by the conjugate when that's not the case.
With q a quaternion and v a vector:
when you do q * v you will normally obtain a 4D vector, another quaternion.
We just don't care about the first component and assume it's 0, i.e. a pure quaternion. When you do q * v * q' you are sure to obtain a pure quaternion, which translates to a good 3D vector.
You can test with a non-perpendicular vector/quaternion and you will see that the rotation is not right.
https://www.3dgep.com/understanding-quaternions/

GPU Pro 5 Area Lights

I'm trying to implement the area lights described in GPU Pro 5 in GLSL, but I'm having some trouble with the projections.
Here is the shader code I'm currently using for diffuse lighting:
vec3 linePlaneIntersection
(
    vec3 linePoint, vec3 lineNormal,
    vec3 planeCenter, vec3 planeNormal
)
{
    float t = (dot(planeNormal, planeCenter - linePoint) / dot(planeNormal, lineNormal));
    return linePoint + lineNormal * t;
}

vec3 projectOnPlane(vec3 point, vec3 planeCenter, vec3 planeNormal)
{
    float distance = dot(planeNormal, point - planeCenter);
    return point - distance * planeNormal;
}
vec3 diffuse_color(vec3 p, vec3 surfaceDiffuseColor, vec3 n)
{
    // for point p with normal n
    vec3 directionToLight = normalize(light.position.xyz - p);
    vec3 planeNormal = directionToLight;
    planeNormal = light.orientation.xyz;
    // intersect a ray from p in direction nII with the light plane,
    // creating point pI
    vec3 nII = n;
    if (dot(n, light.orientation.xyz) > 0.0)
    {
        // light plane points away from n, skew in direction of light plane
        nII = -light.orientation.xyz;
    }
    vec3 pI = linePlaneIntersection(p, nII, light.position.xyz, planeNormal);
    // project point p on the light plane, creating point pII
    vec3 pII = projectOnPlane(p, light.position.xyz, planeNormal);
    // create halfway vector h between ppI and ppII
    vec3 ppI = pI - p;
    vec3 ppII = pII - p;
    vec3 h = normalize(ppI + ppII);
    // intersect ray from p in direction h with the light plane,
    // creating point pd
    vec3 pd = linePlaneIntersection(p, h, light.position.xyz, planeNormal);
    // treat vector ppd as light vector for diffuse equation
    vec3 ppd = normalize(pd - p);
    // distance from point p to point pI on dArea
    float r = distance(p, pI);
    // angle between light vector ppd and surface normal n
    float cosP = clamp(dot(ppd, n), 0.0, 1.0);
    // angle between surface normal and light plane orientation normal
    float cosO = clamp(dot(n, -light.orientation.xyz), 0.0, 1.0);
    float dArea = light.dAreaRadiance.x;
    float radiance = light.dAreaRadiance.y;
    return radiance * surfaceDiffuseColor * cosP * cosO * dArea / (r * r);
}
The light has the position {0, 100, 0} and the orientation {0, -1, 0}.
If I use the light orientation as the plane normal for the projections, the light always comes directly from the top, even when I change the position on the x axis.
When I use the direction to the light position as the plane normal, it seems to work, but I'm pretty sure it is still not correct.